AudioTrack Source Code Walkthrough (1)
Introduction
On Android, AudioTrack is the API for playing PCM audio. This article walks through how an AudioTrack is created.
Code Walkthrough
The first step in using AudioTrack is to create one, so let's start with the AudioTrack constructor:
public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
int mode, int sessionId)
throws IllegalArgumentException {
this(attributes, format, bufferSizeInBytes, mode, sessionId, false /*offload*/,
ENCAPSULATION_MODE_NONE, null /* tunerConfiguration */);
}
Here, AudioAttributes specifies the audio's usage, content type, flags, and so on. The usage can be one of the following:
public final static int[] SDK_USAGES = {
USAGE_UNKNOWN,
USAGE_MEDIA,
USAGE_VOICE_COMMUNICATION,
USAGE_VOICE_COMMUNICATION_SIGNALLING,
USAGE_ALARM,
USAGE_NOTIFICATION,
USAGE_NOTIFICATION_RINGTONE,
USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
USAGE_NOTIFICATION_COMMUNICATION_INSTANT,
USAGE_NOTIFICATION_COMMUNICATION_DELAYED,
USAGE_NOTIFICATION_EVENT,
USAGE_ASSISTANCE_ACCESSIBILITY,
USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
USAGE_ASSISTANCE_SONIFICATION,
USAGE_GAME,
USAGE_ASSISTANT,
};
The content type can be one of the following:
public Builder setContentType(@AttributeContentType int contentType) {
switch (contentType) {
case CONTENT_TYPE_UNKNOWN:
case CONTENT_TYPE_MOVIE:
case CONTENT_TYPE_MUSIC:
case CONTENT_TYPE_SONIFICATION:
case CONTENT_TYPE_SPEECH:
mContentType = contentType;
break;
default:
throw new IllegalArgumentException("Invalid content type " + contentType);
}
return this;
}
The flags are as follows:
FLAG_ALL = FLAG_AUDIBILITY_ENFORCED | FLAG_SECURE | FLAG_SCO
| FLAG_BEACON | FLAG_HW_AV_SYNC | FLAG_HW_HOTWORD | FLAG_BYPASS_INTERRUPTION_POLICY
| FLAG_BYPASS_MUTE | FLAG_LOW_LATENCY | FLAG_DEEP_BUFFER | FLAG_NO_MEDIA_PROJECTION
| FLAG_NO_SYSTEM_CAPTURE | FLAG_CAPTURE_PRIVATE;
The flags influence which playback thread AudioFlinger selects. AudioFormat carries the audio parameter configuration, such as sample rate, channel layout, and sample precision. Next comes the buffer size, which can be obtained via getMinBufferSize. The mode parameter specifies whether the data is provided in one shot or incrementally: for a short clip you can use MODE_STATIC, while for network audio or low-latency scenarios MODE_STREAM is the right choice. The last parameter is sessionId, which associates audio effects with this player.
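Putting these parameters together, here is a minimal sketch of creating a streaming AudioTrack from application code (the sample rate, format, and usage values below are illustrative choices, not requirements):
int sampleRate = 44100;
AudioFormat format = new AudioFormat.Builder()
        .setSampleRate(sampleRate)
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
        .build();
AudioAttributes attributes = new AudioAttributes.Builder()
        .setUsage(AudioAttributes.USAGE_MEDIA)
        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
        .build();
int bufferSize = AudioTrack.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
AudioTrack track = new AudioTrack(attributes, format, bufferSize,
        AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
track.play();
// feed PCM with track.write(...), then track.stop() and track.release()
Before going deeper into the constructor, let's first look at getMinBufferSize: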
static public int getMinBufferSize(int sampleRateInHz, int channelConfig, int audioFormat) {
int channelCount = 0;
switch(channelConfig) { // convert the channel config into a channel count
case AudioFormat.CHANNEL_OUT_MONO:
case AudioFormat.CHANNEL_CONFIGURATION_MONO:
channelCount = 1;
break;
case AudioFormat.CHANNEL_OUT_STEREO:
case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
channelCount = 2;
break;
default:
if (!isMultichannelConfigSupported(channelConfig, audioFormat)) {
loge("getMinBufferSize(): Invalid channel configuration.");
return ERROR_BAD_VALUE;
} else {
channelCount = AudioFormat.channelCountFromOutChannelMask(channelConfig);
}
}
// format check: besides PCM, encodings such as MP3 and OPUS are also allowed
if (!AudioFormat.isPublicEncoding(audioFormat)) {
loge("getMinBufferSize(): Invalid audio format.");
return ERROR_BAD_VALUE;
}
// sample-rate validity check; the actual query drops into native code via JNI
// sample rate, note these values are subject to change
// Note: AudioFormat.SAMPLE_RATE_UNSPECIFIED is not allowed
if ( (sampleRateInHz < AudioFormat.SAMPLE_RATE_HZ_MIN) ||
(sampleRateInHz > AudioFormat.SAMPLE_RATE_HZ_MAX) ) {
loge("getMinBufferSize(): " + sampleRateInHz + " Hz is not a supported sample rate.");
return ERROR_BAD_VALUE;
}
int size = native_get_min_buff_size(sampleRateInHz, channelCount, audioFormat);
if (size <= 0) {
loge("getMinBufferSize(): error querying hardware");
return ERROR;
} else {
return size;
}
}
Now let's look at the implementation of native_get_min_buff_size:
static jint android_media_AudioTrack_get_min_buff_size(JNIEnv *env, jobject thiz,
jint sampleRateInHertz, jint channelCount, jint audioFormat) {
size_t frameCount;
const status_t status = AudioTrack::getMinFrameCount(&frameCount, AUDIO_STREAM_DEFAULT,
sampleRateInHertz); // query the minimum frame count
if (status != NO_ERROR) {
ALOGE("AudioTrack::getMinFrameCount() for sample rate %d failed with status %d",
sampleRateInHertz, status);
return -1;
}
const audio_format_t format = audioFormatToNative(audioFormat);
if (audio_has_proportional_frames(format)) {
const size_t bytesPerSample = audio_bytes_per_sample(format);
return frameCount * channelCount * bytesPerSample;
} else {
return frameCount;
}
}
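For proportional (PCM) formats the returned size is frameCount * channelCount * bytesPerSample; for non-PCM encodings the frame count is returned directly. A worked example with assumed numbers:
// Assume the native layer reports a minimum of 1920 frames for a 48 kHz
// output (these numbers are hypothetical and device-dependent).
int frameCount = 1920;
int channelCount = 2;                 // stereo
int bytesPerSample = 2;               // ENCODING_PCM_16BIT
int minBufferSize = frameCount * channelCount * bytesPerSample; // 7680 bytes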
The call sequence of getMinFrameCount is shown below:
[sequence diagram of the getMinFrameCount call chain; image not reproduced here]
As you can see, this call path touches essentially all of the major native audio components. As for the policy Engine, handset vendors can implement their own policy, and the Android tree also ships a default Engine. Now on to the actual construction of the AudioTrack:
private AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
int mode, int sessionId, boolean offload, int encapsulationMode,
@Nullable TunerConfiguration tunerConfiguration)
throws IllegalArgumentException {
super(attributes, AudioPlaybackConfiguration.PLAYER_TYPE_JAM_AUDIOTRACK);
// mState already == STATE_UNINITIALIZED
mConfiguredAudioAttributes = attributes; // object copy not needed, immutable.
// Check if we should enable deep buffer mode
if (shouldEnablePowerSaving(mAttributes, format, bufferSizeInBytes, mode)) {
mAttributes = new AudioAttributes.Builder(mAttributes)
.replaceFlags((mAttributes.getAllFlags()
| AudioAttributes.FLAG_DEEP_BUFFER)
& ~AudioAttributes.FLAG_LOW_LATENCY)
.build();
}
int rate = format.getSampleRate();
if (rate == AudioFormat.SAMPLE_RATE_UNSPECIFIED) {
rate = 0;
}
int channelIndexMask = 0;
if ((format.getPropertySetMask()
& AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_INDEX_MASK) != 0) {
channelIndexMask = format.getChannelIndexMask();
}
int channelMask = 0;
if ((format.getPropertySetMask()
& AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_CHANNEL_MASK) != 0) {
channelMask = format.getChannelMask();
} else if (channelIndexMask == 0) { // if no masks at all, use stereo
channelMask = AudioFormat.CHANNEL_OUT_FRONT_LEFT
| AudioFormat.CHANNEL_OUT_FRONT_RIGHT;
}
int encoding = AudioFormat.ENCODING_DEFAULT;
if ((format.getPropertySetMask() & AudioFormat.AUDIO_FORMAT_HAS_PROPERTY_ENCODING) != 0) {
encoding = format.getEncoding();
}
audioParamCheck(rate, channelMask, channelIndexMask, encoding, mode);
mOffloaded = offload;
mStreamType = AudioSystem.STREAM_DEFAULT;
audioBuffSizeCheck(bufferSizeInBytes);
mInitializationLooper = looper;
if (sessionId < 0) {
throw new IllegalArgumentException("Invalid audio session ID: "+sessionId);
}
int[] sampleRate = new int[] {mSampleRate};
int[] session = new int[1];
session[0] = sessionId;
// native initialization
int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
sampleRate, mChannelMask, mChannelIndexMask, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session, 0 /*nativeTrackInJavaObj*/,
offload, encapsulationMode, tunerConfiguration,
getCurrentOpPackageName());
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing AudioTrack.");
return; // with mState == STATE_UNINITIALIZED
}
mSampleRate = sampleRate[0];
mSessionId = session[0];
// TODO: consider caching encapsulationMode and tunerConfiguration in the Java object.
if ((mAttributes.getFlags() & AudioAttributes.FLAG_HW_AV_SYNC) != 0) {
int frameSizeInBytes;
if (AudioFormat.isEncodingLinearFrames(mAudioFormat)) {
frameSizeInBytes = mChannelCount * AudioFormat.getBytesPerSample(mAudioFormat);
} else {
frameSizeInBytes = 1;
}
mOffset = ((int) Math.ceil(HEADER_V2_SIZE_BYTES / frameSizeInBytes)) * frameSizeInBytes;
}
if (mDataLoadMode == MODE_STATIC) {
mState = STATE_NO_STATIC_DATA;
} else {
mState = STATE_INITIALIZED;
}
baseRegisterPlayer();
}
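One detail worth noting is the FLAG_HW_AV_SYNC branch: it rounds the A/V sync header size up to a whole number of audio frames so that the PCM payload following the header stays frame-aligned. A worked example with assumed numbers:
// Hypothetical values: a 16-byte header and 4-byte frames (16-bit stereo).
double headerSizeBytes = 16.0;
int frameSizeInBytes = 4;
int offset = ((int) Math.ceil(headerSizeBytes / frameSizeInBytes)) * frameSizeInBytes;
// offset == 16: the payload starts exactly on a frame boundary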
This constructor does the following:
- Parameter checks
- Call native_setup to create the native-side AudioTrack
- Update the state
- Call baseRegisterPlayer to register the player with AudioService

Next is native_setup. The code is fairly long, so only the key parts are excerpted:
static jint android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jobject jaa, jintArray jSampleRate,
jint channelPositionMask, jint channelIndexMask,
jint audioFormat, jint buffSizeInBytes, jint memoryMode,
jintArray jSession, jlong nativeAudioTrack,
jboolean offload, jint encapsulationMode,
jobject tunerConfiguration, jstring opPackageName) {
// If the native AudioTrack does not exist yet, we need to create it first
if (nativeAudioTrack == 0) {
// create the native AudioTrack object
ScopedUtfChars opPackageNameStr(env, opPackageName);
lpTrack = new AudioTrack(opPackageNameStr.c_str());
// set up the callback structure used to pass data back
// initialize the callback information:
// this data will be passed with every AudioTrack callback
lpJniStorage = new AudioTrackJniStorage();
lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioTrack object can be garbage collected.
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
lpJniStorage->mCallbackData.isOffload = offload;
lpJniStorage->mCallbackData.busy = false;
// Handle STATIC and STREAM modes separately; note how differently the audio data memory is managed in each.
// initialize the native AudioTrack object
status_t status = NO_ERROR;
switch (memoryMode) {
case MODE_STREAM:
status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
// in paa (last argument)
sampleRateInHertz,
format, // word length, PCM
nativeChannelMask, offload ? 0 : frameCount,
offload ? AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD
: AUDIO_OUTPUT_FLAG_NONE,
audioCallback,
&(lpJniStorage->mCallbackData), // callback, callback data (user)
0, // notificationFrames == 0 since not using EVENT_MORE_DATA
// to feed the AudioTrack
0, // shared mem
true, // thread can call Java
sessionId, // audio session ID
offload ? AudioTrack::TRANSFER_SYNC_NOTIF_CALLBACK
: AudioTrack::TRANSFER_SYNC,
(offload || encapsulationMode) ? &offloadInfo : NULL, -1,
-1, // default uid, pid values
paa.get());
break;
case MODE_STATIC:
// AudioTrack is using shared memory
if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
goto native_init_failure;
}
status = lpTrack->set(AUDIO_STREAM_DEFAULT, // stream type, but more info conveyed
// in paa (last argument)
sampleRateInHertz,
format, // word length, PCM
nativeChannelMask, frameCount, AUDIO_OUTPUT_FLAG_NONE,
audioCallback,
&(lpJniStorage->mCallbackData), // callback, callback data (user)
0, // notificationFrames == 0 since not using EVENT_MORE_DATA
// to feed the AudioTrack
lpJniStorage->mMemBase, // shared mem
true, // thread can call Java
sessionId, // audio session ID
AudioTrack::TRANSFER_SHARED,
NULL, // default offloadInfo
-1, -1, // default uid, pid values
paa.get());
break;
default:
ALOGE("Unknown mode %d", memoryMode);
goto native_init_failure;
}
// If a native AudioTrack already exists, all that is needed is to associate it with the Java AudioTrack
} else { // end if (nativeAudioTrack == 0)
lpTrack = (AudioTrack*)nativeAudioTrack;
lpJniStorage = new AudioTrackJniStorage();
lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioTrack object can be garbage collected.
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
lpJniStorage->mCallbackData.busy = false;
}
lpJniStorage->mAudioTrackCallback =
new JNIAudioTrackCallback(env, thiz, lpJniStorage->mCallbackData.audioTrack_ref,
javaAudioTrackFields.postNativeEventInJava);
lpTrack->setAudioTrackCallback(lpJniStorage->mAudioTrackCallback);
nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
if (nSession == NULL) {
ALOGE("Error creating AudioTrack: Error retrieving session id pointer");
goto native_init_failure;
}
// read the audio session ID back from AudioTrack in case we create a new session
nSession[0] = lpTrack->getSessionId();
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
const jint elements[1] = { (jint) lpTrack->getSampleRate() };
env->SetIntArrayRegion(jSampleRate, 0, 1, elements);
{ // scope for the lock
Mutex::Autolock l(sLock);
sAudioTrackCallBackCookies.add(&lpJniStorage->mCallbackData);
}
// save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
// of the Java object (in mNativeTrackInJavaObj)
setAudioTrack(env, thiz, lpTrack);
// save the JNI resources so we can free them later
//ALOGV("storing lpJniStorage: %x\n", (long)lpJniStorage);
env->SetLongField(thiz, javaAudioTrackFields.jniData, (jlong)lpJniStorage);
// since we had audio attributes, the stream type was derived from them during the
// creation of the native AudioTrack: push the same value to the Java object
env->SetIntField(thiz, javaAudioTrackFields.fieldStreamType, (jint) lpTrack->streamType());
return (jint) AUDIO_JAVA_SUCCESS;
}
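From the application's point of view, the two memory modes imply different write patterns: in MODE_STATIC the whole clip is written once into the shared memory allocated by allocSharedMem above, while in MODE_STREAM the app keeps feeding chunks. A minimal sketch, reusing the attributes, format, track, and bufferSize names from the earlier example (loadShortClip and fillNextChunk are hypothetical helpers):
// MODE_STATIC: hand over the whole clip once, then play it.
byte[] clip = loadShortClip();
AudioTrack staticTrack = new AudioTrack(attributes, format, clip.length,
        AudioTrack.MODE_STATIC, AudioManager.AUDIO_SESSION_ID_GENERATE);
staticTrack.write(clip, 0, clip.length);
staticTrack.play();

// MODE_STREAM: start playback, then keep writing PCM in chunks.
track.play();
byte[] chunk = new byte[bufferSize];
while (fillNextChunk(chunk)) {
    track.write(chunk, 0, chunk.length); // blocks until buffer space frees up
}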
Next, let's continue with the native AudioTrack::set:
status_t AudioTrack::set(
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
audio_output_flags_t flags,
callback_t cbf,
void* user,
int32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
bool doNotReconnect,
float maxRequiredSpeed,
audio_port_handle_t selectedDeviceId)
{
// handle default values first: default streamType
if (streamType == AUDIO_STREAM_DEFAULT) {
streamType = AUDIO_STREAM_MUSIC;
}
if (pAttributes == NULL) {
if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("%s(): Invalid stream type %d", __func__, streamType);
status = BAD_VALUE;
goto exit;
}
mStreamType = streamType;
} else {
// stream type shouldn't be looked at, this track has audio attributes
memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
ALOGV("%s(): Building AudioTrack with attributes:"
" usage=%d content=%d flags=0x%x tags=[%s]",
__func__,
mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
mStreamType = AUDIO_STREAM_DEFAULT;
audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
}
// these below should probably come from the audioFlinger too...
if (format == AUDIO_FORMAT_DEFAULT) {
format = AUDIO_FORMAT_PCM_16_BIT;
} else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
}
// validate parameters
// format check
if (!audio_is_valid_format(format)) {
ALOGE("%s(): Invalid format %#x", __func__, format);
status = BAD_VALUE;
goto exit;
}
mFormat = format;
// channel mask check
if (!audio_is_output_channel(channelMask)) {
ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
status = BAD_VALUE;
goto exit;
}
mChannelMask = channelMask;
channelCount = audio_channel_count_from_out_mask(channelMask);
mChannelCount = channelCount;
// force direct flag if format is not linear PCM
// or offload was requested
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
|| !audio_is_linear_pcm(format)) {
ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
? "%s(): Offload request, forcing to Direct Output"
: "%s(): Not linear PCM, forcing to Direct Output",
__func__);
flags = (audio_output_flags_t)
// FIXME why can't we allow direct AND fast?
((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
}
// force direct flag if HW A/V sync requested
if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (audio_has_proportional_frames(format)) {
mFrameSize = channelCount * audio_bytes_per_sample(format);
} else {
mFrameSize = sizeof(uint8_t);
}
} else {
ALOG_ASSERT(audio_has_proportional_frames(format));
mFrameSize = channelCount * audio_bytes_per_sample(format);
}
// createTrack will return an error if PCM format is not supported by server,
// so no need to check for specific PCM formats here
// sampling rate must be specified for direct outputs
if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
status = BAD_VALUE;
goto exit;
}
mSampleRate = sampleRate;
mOriginalSampleRate = sampleRate;
mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
// 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
// Make copy of input parameter offloadInfo so that in the future:
// (a) createTrack_l doesn't need it as an input parameter
// (b) we can support re-creation of offloaded tracks
if (offloadInfo != NULL) {
mOffloadInfoCopy = *offloadInfo;
mOffloadInfo = &mOffloadInfoCopy;
} else {
mOffloadInfo = NULL;
memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
}
mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
mSendLevel = 0.0f;
// mFrameCount is initialized in createTrack_l
mReqFrameCount = frameCount;
if (notificationFrames >= 0) {
mNotificationFramesReq = notificationFrames;
mNotificationsPerBufferReq = 0;
} else {
if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
__func__, notificationFrames);
status = BAD_VALUE;
goto exit;
}
if (frameCount > 0) {
ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
__func__, notificationFrames, frameCount);
status = BAD_VALUE;
goto exit;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
const uint32_t maxNotificationsPerBuffer = 8;
mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
"%s(): notificationFrames=%d clamped to the range -%u to -%u",
__func__,
notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
}
mNotificationFramesAct = 0;
callingPid = IPCThreadState::self()->getCallingPid();
myPid = getpid();
if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
mClientUid = IPCThreadState::self()->getCallingUid();
} else {
mClientUid = uid;
}
if (pid == -1 || (callingPid != myPid)) {
mClientPid = callingPid;
} else {
mClientPid = pid;
}
mAuxEffectId = 0;
mOrigFlags = mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
mAudioTrackThread = new AudioTrackThread(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
// thread begins in paused state, and will not reference us until start()
}
// create the IAudioTrack
AutoMutex lock(mLock);
status = createTrack_l();
// some remaining parameter bookkeeping follows; omitted here
}
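Note how notificationFrames is interpreted in set(): a non-negative value requests a notification period in frames, while a negative value means notifications per buffer (clamped to the range 1 to 8) and is only permitted for fast tracks. At the SDK level, this notification machinery surfaces through the playback-position update listener; a small sketch:
track.setPositionNotificationPeriod(1024); // callback every 1024 frames played
track.setPlaybackPositionUpdateListener(new AudioTrack.OnPlaybackPositionUpdateListener() {
    @Override
    public void onPeriodicNotification(AudioTrack t) {
        // a convenient place to write the next chunk of PCM
    }
    @Override
    public void onMarkerReached(AudioTrack t) { }
});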
At this point it helps to step back and organize our thinking: the AudioTrack we create must ultimately be associated with some structure inside AudioFlinger, because AudioFlinger is what processes all of the system's audio. With that in mind, let's continue into createTrack_l():
status_t AudioTrack::createTrack_l()
{
status_t status;
bool callbackAdded = false;
// here AudioFlinger enters the picture
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
// next, pack up the parameters for the flinger
// mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
// After fast request is denied, we will request again if IAudioTrack is re-created.
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
// either of these use cases:
// use case 1: shared buffer
bool sharedBuffer = mSharedBuffer != 0;
bool transferAllowed =
// use case 2: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
// use case 3: obtain/release mode
(mTransfer == TRANSFER_OBTAIN) ||
// use case 4: synchronous write
((mTransfer == TRANSFER_SYNC || mTransfer == TRANSFER_SYNC_NOTIF_CALLBACK)
&& mThreadCanCallJava);
bool fastAllowed = sharedBuffer || transferAllowed;
if (!fastAllowed) {
ALOGW("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by client,"
" not shared buffer and transfer = %s",
__func__, mPortId,
convertTransferToText(mTransfer));
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
IAudioFlinger::CreateTrackInput input;
if (mStreamType != AUDIO_STREAM_DEFAULT) {
input.attr = AudioSystem::streamTypeToAttributes(mStreamType);
} else {
input.attr = mAttributes;
}
input.config = AUDIO_CONFIG_INITIALIZER;
input.config.sample_rate = mSampleRate;
input.config.channel_mask = mChannelMask;
input.config.format = mFormat;
input.config.offload_info = mOffloadInfoCopy;
input.clientInfo.clientUid = mClientUid;
input.clientInfo.clientPid = mClientPid;
input.clientInfo.clientTid = -1;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
// It is currently meaningless to request SCHED_FIFO for a Java thread. Even if the
// application-level code follows all non-blocking design rules, the language runtime
// doesn't also follow those rules, so the thread will not benefit overall.
if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
input.clientInfo.clientTid = mAudioTrackThread->getTid();
}
}
input.sharedBuffer = mSharedBuffer;
input.notificationsPerBuffer = mNotificationsPerBufferReq;
input.speed = 1.0;
if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
(mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
input.speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
}
input.flags = mFlags;
input.frameCount = mReqFrameCount;
input.notificationFrameCount = mNotificationFramesReq;
input.selectedDeviceId = mSelectedDeviceId;
input.sessionId = mSessionId;
input.audioTrackCallback = mAudioTrackCallback;
input.opPackageName = mOpPackageName;
IAudioFlinger::CreateTrackOutput output;
// call into AudioFlinger
sp<IAudioTrack> track = audioFlinger->createTrack(input,
output,
&status);
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
__func__, mPortId, status, output.outputId);
if (status == NO_ERROR) {
status = NO_INIT;
}
goto exit;
}
ALOG_ASSERT(track != 0);
// re-derive the parameters from what the flinger returned
mFrameCount = output.frameCount;
mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
mRoutedDeviceId = output.selectedDeviceId;
mSessionId = output.sessionId;
mSampleRate = output.sampleRate;
if (mOriginalSampleRate == 0) {
mOriginalSampleRate = mSampleRate;
}
mAfFrameCount = output.afFrameCount;
mAfSampleRate = output.afSampleRate;
mAfLatency = output.afLatencyMs;
mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
// FIXME compare to AudioRecord
// shared memory: this is how the app and AudioFlinger exchange audio data
sp<IMemory> iMem = track->getCblk();
if (iMem == 0) {
ALOGE("%s(%d): Could not get control block", __func__, mPortId);
status = NO_INIT;
goto exit;
}
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
void *iMemPointer = iMem->unsecurePointer();
mAudioTrack = track;
mCblkMemory = iMem;
IPCThreadState::self()->flushCommands();
audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
mCblk = cblk;
mAwaitBoost = false;
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
ALOGI("%s(%d): AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
__func__, mPortId, mReqFrameCount, mFrameCount);
if (!mThreadCanCallJava) {
mAwaitBoost = true;
}
} else {
ALOGD("%s(%d): AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu",
__func__, mPortId, mReqFrameCount, mFrameCount);
}
}
mFlags = output.flags;
//mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
if (mDeviceCallback != 0) {
if (mOutput != AUDIO_IO_HANDLE_NONE) {
AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
}
AudioSystem::addAudioDeviceCallback(this, output.outputId, output.portId);
callbackAdded = true;
}
mPortId = output.portId;
// We retain a copy of the I/O handle, but don't own the reference
mOutput = output.outputId;
mRefreshRemaining = true;
// Starting address of buffers in shared memory. If there is a shared buffer, buffers
// is the value of pointer() for the shared buffer, otherwise buffers points
// immediately after the control block. This address is for the mapping within client
// address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
void* buffers;
if (mSharedBuffer == 0) {
buffers = cblk + 1; // STREAM mode: buffers sit immediately after the control block
} else {
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
buffers = mSharedBuffer->unsecurePointer(); // STATIC mode: use the app-provided shared buffer
if (buffers == NULL) {
ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
status = NO_INIT;
goto exit;
}
}
mAudioTrack->attachAuxEffect(mAuxEffectId);
// If IAudioTrack is re-created, don't let the requested frameCount
// decrease. This can confuse clients that cache frameCount().
if (mFrameCount > mReqFrameCount) {
mReqFrameCount = mFrameCount;
}
// reset server position to 0 as we have new cblk.
mServer = 0;
// update proxy
if (mSharedBuffer == 0) { // STREAM case
mStaticProxy.clear();
mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
} else { // STATIC case
mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
mProxy = mStaticProxy;
}
mProxy->setVolumeLR(gain_minifloat_pack(
gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
mProxy->setSendLevel(mSendLevel);
const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
mProxy->setSampleRate(effectiveSampleRate);
AudioPlaybackRate playbackRateTemp = mPlaybackRate;
playbackRateTemp.mSpeed = effectiveSpeed;
playbackRateTemp.mPitch = effectivePitch;
mProxy->setPlaybackRate(playbackRateTemp);
mProxy->setMinimum(mNotificationFramesAct);
if (mDualMonoMode != AUDIO_DUAL_MONO_MODE_OFF) {
setDualMonoMode_l(mDualMonoMode);
}
if (mAudioDescriptionMixLeveldB != -std::numeric_limits<float>::infinity()) {
setAudioDescriptionMixLevel_l(mAudioDescriptionMixLeveldB);
}
mDeathNotifier = new DeathNotifier(this);
IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
// This is the first log sent from the AudioTrack client.
// The creation of the audio track by AudioFlinger (in the code above)
// is the first log of the AudioTrack and must be present before
// any AudioTrack client logs will be accepted.
mMetricsId = std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(mPortId);
return status;
}
At this point, two things have been done:
- Call audioFlinger->createTrack to create the track inside AudioFlinger
- Use the shared memory returned by the track to hook up the audio data buffer, via AudioTrackClientProxy/StaticAudioTrackClientProxy (see the sketch below)
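Conceptually, the control block (audio_track_cblk_t) plus the buffer behind it form a single-producer, single-consumer ring buffer shared between the app process and AudioFlinger, and the proxy objects are the two sides' accessors. A heavily simplified Java sketch of the idea (the real control block uses atomic counters in shared memory plus futexes for blocking, none of which is shown here):
final class RingBuffer {
    private final byte[] buffer;
    private long writePos; // advanced by the client (app) side proxy
    private long readPos;  // advanced by the server (AudioFlinger) side proxy

    RingBuffer(int size) { buffer = new byte[size]; }

    int availableToWrite() { return buffer.length - (int) (writePos - readPos); }

    void write(byte[] src, int len) { // role of AudioTrackClientProxy
        for (int i = 0; i < len; i++) {
            buffer[(int) (writePos++ % buffer.length)] = src[i];
        }
    }

    void read(byte[] dst, int len) { // role of AudioTrackServerProxy
        for (int i = 0; i < len; i++) {
            dst[i] = buffer[(int) (readPos++ % buffer.length)];
        }
    }
}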
Moving on to AudioFlinger::createTrack. Before reading it, we can guess what this method must accomplish:
- Create a corresponding Track structure that pairs with the app-side track; it very likely supports binder, so that operations such as pause can be invoked;
- Associate that structure with a thread that performs the audio output;
- Create a region of shared memory for transferring audio data.
With these guesses in mind, let's read createTrack:
sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
CreateTrackOutput& output,
status_t *status)
{
// allocate a session id
audio_session_t sessionId = input.sessionId;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
sessionId = (audio_session_t) newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
} else if (audio_unique_id_get_use(sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
lStatus = BAD_VALUE;
goto Exit;
}
output.sessionId = sessionId;
output.outputId = AUDIO_IO_HANDLE_NONE;
output.selectedDeviceId = input.selectedDeviceId;
lStatus = AudioSystem::getOutputForAttr(&localAttr, &output.outputId, sessionId, &streamType,
clientPid, clientUid, &input.config, input.flags,
&output.selectedDeviceId, &portId, &secondaryOutputs);
if (lStatus != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
ALOGE("createTrack() getOutputForAttr() return error %d or invalid output handle", lStatus);
goto Exit;
}
{ // look up the playback thread: guess 2 confirmed
Mutex::Autolock _l(mLock);
PlaybackThread *thread = checkPlaybackThread_l(output.outputId);
if (thread == NULL) {
ALOGE("no playback thread found for output handle %d", output.outputId);
lStatus = BAD_VALUE;
goto Exit;
}
client = registerPid(clientPid); // create the shared memory: guess 3 confirmed
// look for an effect thread
PlaybackThread *effectThread = NULL;
// check if an effect chain with the same session ID is present on another
// output thread and move it here.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
sp<PlaybackThread> t = mPlaybackThreads.valueAt(i);
if (mPlaybackThreads.keyAt(i) != output.outputId) {
uint32_t sessions = t->hasAudioSession(sessionId);
if (sessions & ThreadBase::EFFECT_SESSION) {
effectThread = t.get();
break;
}
}
}
ALOGV("createTrack() sessionId: %d", sessionId);
output.sampleRate = input.config.sample_rate;
output.frameCount = input.frameCount;
output.notificationFrameCount = input.notificationFrameCount;
output.flags = input.flags;
// create the track: guess 1 confirmed
track = thread->createTrack_l(client, streamType, localAttr, &output.sampleRate,
input.config.format, input.config.channel_mask,
&output.frameCount, &output.notificationFrameCount,
input.notificationsPerBuffer, input.speed,
input.sharedBuffer, sessionId, &output.flags,
callingPid, input.clientInfo.clientTid, clientUid,
&lStatus, portId, input.audioTrackCallback,
input.opPackageName);
LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (track == 0));
// we don't abort yet if lStatus != NO_ERROR; there is still work to be done regardless
output.afFrameCount = thread->frameCount();
output.afSampleRate = thread->sampleRate();
output.afLatencyMs = thread->latency();
output.portId = portId;
if (lStatus == NO_ERROR) {
// Connect secondary outputs. Failure on a secondary output must not imped the primary
// Any secondary output setup failure will lead to a desync between the AP and AF until
// the track is destroyed.
TeePatches teePatches;
for (audio_io_handle_t secondaryOutput : secondaryOutputs) { // secondary (candidate) outputs
PlaybackThread *secondaryThread = checkPlaybackThread_l(secondaryOutput);
if (secondaryThread == NULL) {
ALOGE("no playback thread found for secondary output %d", output.outputId);
continue;
}
size_t sourceFrameCount = thread->frameCount() * output.sampleRate
/ thread->sampleRate();
size_t sinkFrameCount = secondaryThread->frameCount() * output.sampleRate
/ secondaryThread->sampleRate();
// If the secondary output has just been opened, the first secondaryThread write
// will not block as it will fill the empty startup buffer of the HAL,
// so a second sink buffer needs to be ready for the immediate next blocking write.
// Additionally, have a margin of one main thread buffer as the scheduling jitter
// can reorder the writes (eg if thread A&B have the same write intervale,
// the scheduler could schedule AB...BA)
size_t frameCountToBeReady = 2 * sinkFrameCount + sourceFrameCount;
// Total secondary output buffer must be at least as the read frames plus
// the margin of a few buffers on both sides in case the
// threads scheduling has some jitter.
// That value should not impact latency as the secondary track is started before
// its buffer is full, see frameCountToBeReady.
size_t frameCount = frameCountToBeReady + 2 * (sourceFrameCount + sinkFrameCount);
// The frameCount should also not be smaller than the secondary thread min frame
// count
size_t minFrameCount = AudioSystem::calculateMinFrameCount(
[&] { Mutex::Autolock _l(secondaryThread->mLock);
return secondaryThread->latency_l(); }(),
secondaryThread->mNormalFrameCount,
secondaryThread->mSampleRate,
output.sampleRate,
input.speed);
frameCount = std::max(frameCount, minFrameCount);
using namespace std::chrono_literals;
auto inChannelMask = audio_channel_mask_out_to_in(input.config.channel_mask);
sp patchRecord = new RecordThread::PatchRecord(nullptr /* thread */,
output.sampleRate,
inChannelMask,
input.config.format,
frameCount,
NULL /* buffer */,
(size_t)0 /* bufferSize */,
AUDIO_INPUT_FLAG_DIRECT,
0ns /* timeout */);
status_t status = patchRecord->initCheck();
if (status != NO_ERROR) {
ALOGE("Secondary output patchRecord init failed: %d", status);
continue;
}
// TODO: We could check compatibility of the secondaryThread with the PatchTrack
// for fast usage: thread has fast mixer, sample rate matches, etc.;
// for now, we exclude fast tracks by removing the Fast flag.
const audio_output_flags_t outputFlags =
(audio_output_flags_t)(output.flags & ~AUDIO_OUTPUT_FLAG_FAST);
sp patchTrack = new PlaybackThread::PatchTrack(secondaryThread,
streamType,
output.sampleRate,
input.config.channel_mask,
input.config.format,
frameCount,
patchRecord->buffer(),
patchRecord->bufferSize(),
outputFlags,
0ns /* timeout */,
frameCountToBeReady);
status = patchTrack->initCheck();
if (status != NO_ERROR) {
ALOGE("Secondary output patchTrack init failed: %d", status);
continue;
}
teePatches.push_back({patchRecord, patchTrack});
secondaryThread->addPatchTrack(patchTrack);
// In case the downstream patchTrack on the secondaryThread temporarily outlives
// our created track, ensure the corresponding patchRecord is still alive.
patchTrack->setPeerProxy(patchRecord, true /* holdReference */);
patchRecord->setPeerProxy(patchTrack, false /* holdReference */);
}
track->setTeePatches(std::move(teePatches));
}
// move effect chain to this output thread if an effect on same session was waiting
// for a track to be created
if (lStatus == NO_ERROR && effectThread != NULL) {
// no risk of deadlock because AudioFlinger::mLock is held
Mutex::Autolock _dl(thread->mLock);
Mutex::Autolock _sl(effectThread->mLock);
if (moveEffectChain_l(sessionId, effectThread, thread) == NO_ERROR) {
effectThreadId = thread->id();
effectIds = thread->getEffectIds_l(sessionId);
}
}
// Look for sync events awaiting for a session to be used.
for (size_t i = 0; i < mPendingSyncEvents.size(); i++) {
if (mPendingSyncEvents[i]->triggerSession() == sessionId) {
if (thread->isValidSyncEvent(mPendingSyncEvents[i])) {
if (lStatus == NO_ERROR) {
(void) track->setSyncEvent(mPendingSyncEvents[i]);
} else {
mPendingSyncEvents[i]->cancel();
}
mPendingSyncEvents.removeAt(i);
i--;
}
}
}
if ((output.flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) == AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
setAudioHwSyncForSession_l(thread, sessionId);
}
if (lStatus != NO_ERROR) {
// remove local strong reference to Client before deleting the Track so that the
// Client destructor is called by the TrackBase destructor with mClientLock held
// Don't hold mClientLock when releasing the reference on the track as the
// destructor will acquire it.
{
Mutex::Autolock _cl(mClientLock);
client.clear();
}
track.clear();
goto Exit;
}
// effectThreadId is not NONE if an effect chain corresponding to the track session
// was found on another thread and must be moved on this thread
if (effectThreadId != AUDIO_IO_HANDLE_NONE) {
AudioSystem::moveEffectsToIo(effectIds, effectThreadId);
}
// return handle to client
trackHandle = new TrackHandle(track); // guess 1 confirmed: wrap the track in a binder object
Exit:
if (lStatus != NO_ERROR && output.outputId != AUDIO_IO_HANDLE_NONE) {
AudioSystem::releaseOutput(portId);
}
*status = lStatus;
return trackHandle;
}
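As an aside, the session-based effect-chain handling above is exactly what the SDK audio effect APIs rely on: an effect created with the track's session ID ends up in the chain that createTrack attaches (or moves) to the playback thread. A minimal app-side sketch:
// Attach an equalizer to the session our track plays on; AudioFlinger will
// route the track's audio through the matching effect chain.
Equalizer eq = new Equalizer(0 /* priority */, track.getAudioSessionId());
eq.setEnabled(true);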
To recap, the createTrack logic above mainly accomplishes the following:
- Find the corresponding playback thread
- Create the track on that playback thread
- Wrap the track in a binder object
- Create the shared memory
As for the first point, finding the thread for a given output, the implementation is:
// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
{
return mPlaybackThreads.valueFor(output).get();
}
The output mentioned here is not the same thing as an actual audio output device; it maps to a thread type. The current thread types are:
MIXER, // Thread class is MixerThread
DIRECT, // Thread class is DirectOutputThread
DUPLICATING, // Thread class is DuplicatingThread
RECORD, // Thread class is RecordThread
OFFLOAD, // Thread class is OffloadThread
MMAP_PLAYBACK, // Thread class for MMAP playback stream
MMAP_CAPTURE, // Thread class for MMAP capture stream
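Which thread type a track lands on is driven by its output flags: for instance, FLAG_LOW_LATENCY steers toward the fast mixer path and FLAG_DEEP_BUFFER toward a deep-buffer mixer thread, as mentioned earlier. From the SDK, an app can express this preference through the Builder's performance mode; a small sketch reusing names from the earlier example:
// Request the low-latency path; the framework maps this onto the
// corresponding native output flags when choosing the playback thread.
AudioTrack lowLatencyTrack = new AudioTrack.Builder()
        .setAudioAttributes(attributes)
        .setAudioFormat(format)
        .setBufferSizeInBytes(bufferSize)
        .setTransferMode(AudioTrack.MODE_STREAM)
        .setPerformanceMode(AudioTrack.PERFORMANCE_MODE_LOW_LATENCY)
        .build();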
When a given mode is supported, the corresponding threads are created ahead of time, so we can assume a matching thread will always be found. Next, the shared-memory creation in registerPid(clientPid):
sp<AudioFlinger::Client> AudioFlinger::registerPid(pid_t pid)
{
Mutex::Autolock _cl(mClientLock);
// If pid is already in the mClients wp<> map, then use that entry
// (for which promote() is always != 0), otherwise create a new entry and Client.
sp<Client> client = mClients.valueFor(pid).promote();
if (client == 0) {
client = new Client(this, pid);
mClients.add(pid, client);
}
return client;
}

AudioFlinger::Client::Client(const sp<AudioFlinger>& audioFlinger, pid_t pid)
: RefBase(),
mAudioFlinger(audioFlinger),
mPid(pid)
{
mMemoryDealer = new MemoryDealer(
audioFlinger->getClientSharedHeapSize(),
(std::string("AudioFlinger::Client(") + std::to_string(pid) + ")").c_str());
}

size_t AudioFlinger::getClientSharedHeapSize() const
{
size_t heapSizeInBytes = property_get_int32("ro.af.client_heap_size_kbyte", 0) * 1024;
if (heapSizeInBytes != 0) { // read-only property overrides all.
return heapSizeInBytes;
}
return mClientSharedHeapSize;
}
As you can see, the anonymous shared memory is managed per pid, and the shared-heap size for each process is configurable via a system property.
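The promote-or-create pattern in registerPid is a common way to cache per-process objects without keeping them alive forever: the map stores weak references, and promote() succeeds only while someone else still holds a strong reference. A rough Java analogue (illustrative only; the class and method names are hypothetical):
import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.Map;

final class ClientRegistry {
    static final class Client {
        final int pid;
        Client(int pid) { this.pid = pid; }
    }

    private final Map<Integer, WeakReference<Client>> clients = new HashMap<>();

    synchronized Client registerPid(int pid) {
        WeakReference<Client> ref = clients.get(pid);
        Client client = (ref != null) ? ref.get() : null; // "promote" the weak ref
        if (client == null) {
            client = new Client(pid);                     // create on first use
            clients.put(pid, new WeakReference<>(client));
        }
        return client;
    }
}
With the shared memory created, it next has to be tied to a Track. Here is the Track creation; note that the client parameter carries the shared memory: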
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
const audio_attributes_t& attr,
uint32_t *pSampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
size_t *pNotificationFrameCount,
uint32_t notificationsPerBuffer,
float speed,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
audio_output_flags_t *flags,
pid_t creatorPid,
pid_t tid,
uid_t uid,
status_t *status,
audio_port_handle_t portId,
const sp<media::IAudioTrackCallback>& callback,
const std::string& opPackageName)
{
track = new Track(this, client, streamType, attr, sampleRate, format,
channelMask, frameCount,
nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
sessionId, creatorPid, uid, *flags, TrackBase::TYPE_DEFAULT, portId,
SIZE_MAX /*frameCountToBeReady*/, opPackageName);
lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
if (lStatus != NO_ERROR) {
ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
// track must be cleared from the caller as the caller has the AF lock
goto Exit;
}
mTracks.add(track);
{
Mutex::Autolock _atCbL(mAudioTrackCbLock);
if (callback.get() != nullptr) {
mAudioTrackCallbacks.emplace(track, callback);
}
}
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
Next, the Track constructor:
// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
PlaybackThread *thread,
const sp<Client>& client,
audio_stream_type_t streamType,
const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
size_t bufferSize,
const sp<IMemory>& sharedBuffer,
audio_session_t sessionId,
pid_t creatorPid,
uid_t uid,
audio_output_flags_t flags,
track_type type,
audio_port_handle_t portId,
size_t frameCountToBeReady,
const std::string opPackageName)
: TrackBase(thread, client, attr, sampleRate, format, channelMask, frameCount,
// TODO: Using unsecurePointer() has some associated security pitfalls
// (see declaration for details).
// Either document why it is safe in this case or address the
// issue (e.g. by copying).
(sharedBuffer != 0) ? sharedBuffer->unsecurePointer() : buffer,
(sharedBuffer != 0) ? sharedBuffer->size() : bufferSize,
sessionId, creatorPid, uid, true /*isOut*/,
(type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
type,
portId,
std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK) + std::to_string(portId)),
mFillingUpStatus(FS_INVALID),
// mRetryCount initialized later when needed
mSharedBuffer(sharedBuffer),
mStreamType(streamType),
mMainBuffer(thread->sinkBuffer()),
mAuxBuffer(NULL),
mAuxEffectId(0), mHasVolumeController(false),
mFrameMap(16 /* sink-frame-to-track-frame map memory */),
mVolumeHandler(new media::VolumeHandler(sampleRate)),
mOpPlayAudioMonitor(OpPlayAudioMonitor::createIfNeeded(
uid, attr, id(), streamType, opPackageName)),
// mSinkTimestamp
mFastIndex(-1),
mCachedVolume(1.0),
/* The track might not play immediately after being active, similarly as if its volume was 0.
* When the track starts playing, its volume will be computed. */
mFinalVolume(0.f),
mResumeToStopping(false),
mFlushHwPending(false),
mFlags(flags)
{
if (sharedBuffer == 0) {
// STREAM mode: hand the shared-memory address to the server-side proxy
mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, !isExternalTrack(), sampleRate);
} else {
// STATIC mode: hand the shared-memory address to the server-side proxy
mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
mFrameSize, sampleRate);
}
mServerProxy = mAudioTrackServerProxy; // from here on, the shared memory is reachable through mServerProxy
mServerProxy->setStartThresholdInFrames(frameCountToBeReady); // update the Cblk value
if (channelMask & AUDIO_CHANNEL_HAPTIC_ALL) {
mAudioVibrationController = new AudioVibrationController(this);
mExternalVibration = new os::ExternalVibration(
mUid, opPackageName, mAttr, mAudioVibrationController);
}
}
Now the TrackBase implementation:
// TrackBase constructor must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase::TrackBase::TrackBase(
ThreadBase *thread,
const sp<Client>& client,
const audio_attributes_t& attr,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
size_t bufferSize,
audio_session_t sessionId,
pid_t creatorPid,
uid_t clientUid,
bool isOut,
alloc_type alloc,
track_type type,
audio_port_handle_t portId,
std::string metricsId)
: RefBase(),
mThread(thread),
mClient(client),
mCblk(NULL),
// mBuffer, mBufferSize
mState(IDLE),
mAttr(attr),
mSampleRate(sampleRate),
mFormat(format),
mChannelMask(channelMask),
mChannelCount(isOut ?
audio_channel_count_from_out_mask(channelMask) :
audio_channel_count_from_in_mask(channelMask)),
mFrameSize(audio_has_proportional_frames(format) ?
mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
mFrameCount(frameCount),
mSessionId(sessionId),
mIsOut(isOut),
mId(android_atomic_inc(&nextTrackId)),
mTerminated(false),
mType(type),
mThreadIoHandle(thread ? thread->id() : AUDIO_IO_HANDLE_NONE),
mPortId(portId),
mIsInvalid(false),
mTrackMetrics(std::move(metricsId), isOut),
mCreatorPid(creatorPid)
{
if (client != 0) {
mCblkMemory = client->heap()->allocate(size); // allocate from the client's shared heap
if (mCblkMemory == 0 ||
(mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->unsecurePointer())) == NULL) { // mCblk records the mapped address
ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
client->heap()->dump("AudioTrack");
mCblkMemory.clear();
return;
}
} else {
mCblk = (audio_track_cblk_t *) malloc(size);
if (mCblk == NULL) {
ALOGE("%s(%d): not enough memory for AudioTrack size=%zu", __func__, mId, size);
return;
}
}
// construct the shared structure in-place.
if (mCblk != NULL) {
new(mCblk) audio_track_cblk_t();
switch (alloc) {
case ALLOC_READONLY: {
const sp<MemoryDealer> roHeap(thread->readOnlyHeap());
if (roHeap == 0 ||
(mBufferMemory = roHeap->allocate(bufferSize)) == 0 ||
(mBuffer = mBufferMemory->unsecurePointer()) == NULL) {
ALOGE("%s(%d): not enough memory for read-only buffer size=%zu",
__func__, mId, bufferSize);
if (roHeap != 0) {
roHeap->dump("buffer");
mCblkMemory.clear();
mBufferMemory.clear();
return;
memset(mBuffer, 0, bufferSize);
} break;
case ALLOC_PIPE:
mBufferMemory = thread->pipeMemory();
// mBuffer is the virtual address as seen from current process (mediaserver),
// and should normally be coming from mBufferMemory->unsecurePointer().
// However in this case the TrackBase does not reference the buffer directly.
// It should references the buffer via the pipe.
// Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL.
mBuffer = NULL;
bufferSize = 0;
break;
case ALLOC_CBLK:
// clear all buffers
if (buffer == NULL) {
mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
memset(mBuffer, 0, bufferSize);
} else {
mBuffer = buffer;
#if 0
mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic
#endif
}
break;
case ALLOC_LOCAL:
mBuffer = calloc(1, bufferSize);
break;
case ALLOC_NONE:
mBuffer = buffer;
break;
default:
LOG_ALWAYS_FATAL("%s(%d): invalid allocation type: %d", __func__, mId, (int)alloc);
}
mBufferSize = bufferSize;
#ifdef TEE_SINK
mTee.set(sampleRate, mChannelCount, format, NBAIO_Tee::TEE_FLAG_TRACK);
#endif
}
}
Finally, let's look at how the Track gains binder support. This relies on TrackHandle: TrackHandle is a binder object that simply wraps the track:
AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
: BnAudioTrack(),
mTrack(track)
{
}

AudioFlinger::TrackHandle::~TrackHandle() {
// just stop the track on deletion, associated resources
// will be freed from the main thread once all pending buffers have
// been played. Unless it's not in the active track list, in which
// case we free everything now...
mTrack->destroy();
}

sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
return mTrack->getCblk();
}

status_t AudioFlinger::TrackHandle::start() {
return mTrack->start();
}

void AudioFlinger::TrackHandle::stop() {
mTrack->stop();
}

void AudioFlinger::TrackHandle::flush() {