These notes cover debugging microphone (mic) problems with QQ for TV on Android. A few facts to keep in mind up front:
1. The Android system's default sampling rate is 8000 Hz.
2. The QQ application requests a sampling rate of 44100 Hz or 16000 Hz.
3. Before actually starting a voice chat, QQ for TV probes for the best sampling rate, which means it first runs a test that opens and then closes the capture device. Note: some drivers misbehave when opened and closed in quick succession. In that case, consider keeping a timestamp in the HAL: when the interval between the HAL close call and the next open call is within 1 s, skip the actual close, record that state, and then skip the actual open the next time the open function is called.
4. Resampling is done by the AudioFlinger service; the driver and the HAL module do not need to be involved.

I. The flow
E/AudioRecord( 2019): Could not get audio input for record source 1
E/AudioRecord-JNI( 2019): Error creating AudioRecord instance: initialization check failed.
E/AudioRecord-Java( 2019): [ android.media.AudioRecord ] Error code -20 when initializing native AudioRecord object.
Errors like the above mean that the sampling rate, sample format, or channel configuration requested by the application is not listed in the configuration file for the current mic.
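On platforms of this generation that configuration file is audio_policy.conf on the device. The fragment below only illustrates the format (the rates, masks, and devices are placeholder values, not taken from any particular board); the point is that every rate, channel mask, and format the application may request has to appear in the inputs section:

audio_hw_modules {
  primary {
    ...
    inputs {
      primary {
        sampling_rates 8000|11025|16000|22050|44100|48000
        channel_masks AUDIO_CHANNEL_IN_MONO|AUDIO_CHANNEL_IN_STEREO
        formats AUDIO_FORMAT_PCM_16_BIT
        devices AUDIO_DEVICE_IN_BUILTIN_MIC
      }
    }
  }
}

If 16000 or 44100 is missing from sampling_rates, or the mono channel mask is missing, the profile lookup traced below finds no match and the application gets the initialization error shown above.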
frameworks/base/media/java/android/media/AudioRecord.java
public AudioRecord(int audioSource, int sampleRateInHz, int channelConfig,
        int audioFormat, int bufferSizeInBytes) throws IllegalArgumentException {
    ...
    int initResult = native_setup(new WeakReference<AudioRecord>(this),
            mRecordSource, mSampleRate, mChannels, mAudioFormat,
            mNativeBufferSizeInBytes, session);
    if (initResult != SUCCESS) {
        loge("Error code " + initResult + " when initializing native AudioRecord object.");
        return; // with mState == STATE_UNINITIALIZED
    }
    ...
}
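Before following the call further down, it is worth seeing how this looks from the application side. The snippet below is my own illustrative sketch (not QQ's code): it constructs an AudioRecord at each candidate rate and checks getState(), which is essentially the kind of pre-chat probing described in point 3 above. MediaRecorder.AudioSource.MIC is the "record source 1" that appears in the log.

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public final class MicProbe {
    // Returns the first sampling rate the current input path accepts, or -1 if none work.
    public static int findWorkingSampleRate() {
        int[] candidates = {44100, 16000, 8000};
        for (int rate : candidates) {
            int minBuf = AudioRecord.getMinBufferSize(rate,
                    AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
            if (minBuf <= 0) continue; // this rate/format combination is rejected outright
            AudioRecord rec = new AudioRecord(MediaRecorder.AudioSource.MIC, rate,
                    AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);
            boolean ok = (rec.getState() == AudioRecord.STATE_INITIALIZED);
            rec.release(); // each probe is an open followed by an immediate close
            if (ok) {
                return rate;
            }
        }
        return -1;
    }
}

Each probe opens the input and releases it right away, which is exactly the rapid open/close pattern some drivers dislike, hence the HAL timestamp workaround suggested in point 3. (The RECORD_AUDIO permission is assumed to be granted.)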
frameworks/base/core/jni/android_media_AudioRecord.cpp
{"native_setup", "(Ljava/lang/Object;IIIII[I)I",(void *)android_media_AudioRecord_setup},
static int
android_media_AudioRecord_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jint source, jint sampleRateInHertz, jint channels,
        jint audioFormat, jint buffSizeInBytes, jintArray jSession)
{
    ...
    sp<AudioRecord> lpRecorder = new AudioRecord();
    lpRecorder->set((audio_source_t) source,
        sampleRateInHertz,
        format,             // word length, PCM
        channels,
        frameCount,
        recorderCallback,   // callback_t
        lpCallbackData,     // void* user
        0,                  // notificationFrames
        true,               // threadCanCallJava
        sessionId);
    if (lpRecorder->initCheck() != NO_ERROR) {
        ALOGE("Error creating AudioRecord instance: initialization check failed.");
        goto native_init_failure;
    }
    ...
}
frameworks/av/media/libmedia/AudioRecord.cpp
status_t AudioRecord::set(audio_source_t inputSource, uint32_t sampleRate,
        audio_format_t format, audio_channel_mask_t channelMask, int frameCount,
        callback_t cbf, void* user, int notificationFrames,
        bool threadCanCallJava, int sessionId)
{
    ...
    audio_io_handle_t input = AudioSystem::getInput(inputSource, sampleRate, format,
                                                    channelMask, mSessionId);
    if (input == 0) {
        ALOGE("Could not get audio input for record source %d", inputSource);
        return BAD_VALUE;
    }
    ...
}
audio_io_handle_t AudioSystem::getInput(audio_source_t inputSource, uint32_t samplingRate,
        audio_format_t format, audio_channel_mask_t channelMask, int sessionId)
{
    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
    if (aps == 0) return 0;
    return aps->getInput(inputSource, samplingRate, format, channelMask, sessionId);
}
From here the call crosses into AudioPolicyService through Binder inter-process communication.
frameworks/av/services/audioflinger/AudioPolicyService.cpp
audio_io_handle_t AudioPolicyService::getInput(audio_source_t inputSource, uint32_t samplingRate,
        audio_format_t format, audio_channel_mask_t channelMask, int audioSession)
{
    ...
    audio_io_handle_t input = mpAudioPolicy->get_input(mpAudioPolicy, inputSource, samplingRate,
            format, channelMask, (audio_in_acoustics_t) 0);
    // mpAudioPolicy was loaded from the audio policy HAL module when the service started:
    //   rc = hw_get_module(AUDIO_POLICY_HARDWARE_MODULE_ID, &module);
    //   rc = audio_policy_dev_open(module, &mpAudioPolicyDev);
    ...
}
hardware/libhardware_legacy/audio/audio_policy_hal.cpp
static audio_io_handle_t ap_get_input(struct audio_policy *pol, audio_source_t inputSource,
        uint32_t sampling_rate, audio_format_t format, audio_channel_mask_t channelMask,
        audio_in_acoustics_t acoustics)
{
    struct legacy_audio_policy *lap = to_lap(pol);
    return lap->apm->getInput((int) inputSource, sampling_rate, (int) format, channelMask,
                              (AudioSystem::audio_in_acoustics) acoustics);
}
hardware/libhardware_legacy/audio/AudioPolicyManagerBase.cpp
audio_io_handle_t AudioPolicyManagerBase::getInput(int inputSource, uint32_t samplingRate,
        uint32_t format, uint32_t channelMask, AudioSystem::audio_in_acoustics acoustics)
{
    // Pick the input device for the current mic
    audio_devices_t device = getDeviceForInputSource(inputSource);
    // Look up the mic profile recorded in audio_policy.conf to see whether the requested
    // sampling rate, sample format, and channel mask are supported
    IOProfile *profile = getInputProfile(device, samplingRate, format, channelMask);
    if (profile == NULL) {
        ALOGW("getInput() could not find profile for device %04x, samplingRate %d, format %d, "
              "channelMask %04x", device, samplingRate, format, channelMask);
        return 0;
    }
    ...
    input = mpClientInterface->openInput(profile->mModule->mHandle,
                                         &inputDesc->mDevice,
                                         &inputDesc->mSamplingRate,
                                         &inputDesc->mFormat,
                                         &inputDesc->mChannelMask);
    ...
}
frameworks/av/services/audioflinger/AudioPolicyService.cpp
open_input_on_module : aps_open_input_on_module,
static audio_io_handle_t aps_open_input_on_module(void *service, audio_module_handle_t module,
        audio_devices_t *pDevices, uint32_t *pSamplingRate,
        audio_format_t *pFormat, audio_channel_mask_t *pChannelMask)
{
    sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
    if (af == 0) {
        ALOGW("%s: could not get AudioFlinger", __func__);
        return 0;
    }
    return af->openInput(module, pDevices, pSamplingRate, pFormat, pChannelMask);
}
frameworks/av/services/audioflinger/AudioFlinger.cpp
audio_io_handle_t AudioFlinger::openInput(audio_module_handle_t module,
        audio_devices_t *pDevices, uint32_t *pSamplingRate,
        audio_format_t *pFormat, audio_channel_mask_t *pChannelMask)
{
    ...
    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
    ...
    // If the first attempt fails, AudioFlinger retries once with the sampling rate and
    // channel mask that the HAL proposed back in config
    status = inHwHal->open_input_stream(inHwHal, id, *pDevices, &config, &inStream);
    ...
    if (status == NO_ERROR && inStream != NULL) {
        ...
        thread = new RecordThread(this, input, reqSamplingRate, reqChannels, id, device);
        ...
    }
    ...
}
AudioFlinger::RecordThread::RecordThread(const sp<AudioFlinger>& audioFlinger,
        AudioStreamIn *input, uint32_t sampleRate, audio_channel_mask_t channelMask,
        audio_io_handle_t id, audio_devices_t device) :
    ThreadBase(audioFlinger, id, AUDIO_DEVICE_NONE, device, RECORD),
    mInput(input), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpInBuffer(NULL),
    // mRsmpInIndex and mInputBytes set by readInputParameters()
    mReqChannelCount(popcount(channelMask)),
    mReqSampleRate(sampleRate)
    // mBytesRead is only meaningful while active, and so is cleared in start()
    // (but might be better to also clear here for dump?)
{
    readInputParameters();
}
void AudioFlinger::RecordThread::readInputParameters()
{
    ...
    mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
    ...
    if (mSampleRate != mReqSampleRate && mChannelCount <= FCC_2 && mReqChannelCount <= FCC_2) {
        // Resampling is set up here: the HAL rate differs from the requested rate,
        // so a resampler converting to mReqSampleRate is created
        mResampler = AudioResampler::create(16, channelCount, mReqSampleRate);
        mResampler->setSampleRate(mSampleRate);
        ...
    }
}
bool AudioFlinger::RecordThread::threadLoop()
{
    ...
    if (CC_LIKELY(mActiveTrack->getNextBuffer(&buffer) == NO_ERROR)) {
        // Normal path: the data read from the HAL is handed to the track's buffer
        ...
    } else {
        ...
        nsecs_t now = systemTime();
        if ((now - lastWarning) > kWarningThrottleNs) {
            ALOGW("RecordThread: buffer overflow");
            lastWarning = now;
        }
        ...
    }
    ...
}
II. How the application fetches the buffer data
W/AudioFlinger( 918): RecordThread: buffer overflow
W/AudioRecord( 2238): obtainBuffer timed out (is the CPU pegged?) user=0002f260, server=0002f260
When these warnings appear, think about buffer overflow; for the second message in particular, suspect that the audio data coming up from the lower layers is broken or not arriving at all.
frameworks/base/media/java/android/media/AudioRecord.java
public int read(byte[] audioData, int offsetInBytes, int sizeInBytes) {
    if (mState != STATE_INITIALIZED) {
        return ERROR_INVALID_OPERATION;
    }
    if ((audioData == null) || (offsetInBytes < 0) || (sizeInBytes < 0)
            || (offsetInBytes + sizeInBytes > audioData.length)) {
        return ERROR_BAD_VALUE;
    }
    return native_read_in_byte_array(audioData, offsetInBytes, sizeInBytes);
}
frameworks/base/core/jni/android_media_AudioRecord.cpp
static jint android_media_AudioRecord_readInByteArray(JNIEnv *env, jobject thiz,
        jbyteArray javaAudioData, jint offsetInBytes, jint sizeInBytes)
{
    sp<AudioRecord> lpRecorder = getAudioRecord(env, thiz);
    ...
    ssize_t readSize = lpRecorder->read(recordBuff + offsetInBytes,
            sizeInBytes > (jint)recorderBuffSize ? (jint)recorderBuffSize : sizeInBytes);
    ...
}
frameworks/av/media/libmedia/AudioRecord.cpp
ssize_t AudioRecord::read(void* buffer, size_t userSize)
{
    ...
    status_t err = obtainBuffer(&audioBuffer, ((2 * MAX_RUN_TIMEOUT_MS) / WAIT_PERIOD_MS));
    ...
}
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    ...
    cblk->waitTimeMs += waitTimeMs;
    if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
        ALOGW("obtainBuffer timed out (is the CPU pegged?) "
              "user=%08x, server=%08x", cblk->user, cblk->server);
        ...
    }
    ...
    // cblk->buffer(u) is the shared-memory buffer that RecordThread fills via
    // mActiveTrack->getNextBuffer(&buffer) with the data it read from the HAL
    audioBuffer->raw = (int8_t*)cblk->buffer(u);
    ...
}
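Putting the application side together: whatever is producing the "buffer overflow" warning, the client must keep calling read() promptly so the shared buffer gets drained. The sketch below is my own illustration (not QQ's code) of a minimal capture loop on a dedicated thread; the class name and the rate are placeholders (use one the probe above accepted), and the RECORD_AUDIO permission is assumed to be granted.

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public final class CaptureLoop implements Runnable {
    private static final int RATE = 16000;          // placeholder; pick a rate the probe accepted
    private volatile boolean running = true;

    @Override
    public void run() {
        int minBuf = AudioRecord.getMinBufferSize(RATE,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioRecord rec = new AudioRecord(MediaRecorder.AudioSource.MIC, RATE,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf * 4);
        byte[] buf = new byte[minBuf];
        rec.startRecording();
        while (running) {
            int n = rec.read(buf, 0, buf.length);   // blocks in obtainBuffer() shown above
            if (n > 0) {
                // Hand the PCM off quickly (queue, encoder, network); doing long work here
                // starves RecordThread and produces the "buffer overflow" warning.
            } else if (n < 0) {
                break;                              // ERROR / ERROR_BAD_VALUE / ERROR_INVALID_OPERATION
            }
        }
        rec.stop();
        rec.release();
    }

    public void stopCapture() { running = false; }
}

If the loop reads promptly and the "obtainBuffer timed out" warning still shows up, the data is not reaching the shared buffer at all, so turn the attention back to the HAL and the driver.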