上一节《7-21 StartRecording处理逻辑》
本节源码分析录制麦克风流程:最终是调用 StartRecording 开始录制,该函数里面创建并启动了一个采集线程,线程函数是 AudioDeviceWindowsCore::DoCaptureThreadPollDMO。注意:这里默认打开了 AEC 回声消除功能,因此采集走 DMO 轮询路径。
WebRtcVoiceMediaChannel::SetSend
// Enables/disables sending for this voice media channel: forwards |send| to
// every send stream and records the state in |send_|.
void WebRtcVoiceMediaChannel::SetSend(bool send) {
*** // (code elided in this excerpt)
// Change the settings on each send channel.
for (auto& kv : send_streams_) {
kv.second->SetSend(send);
}
send_ = send;
}
SetSend
// Per-stream send toggle: stores the new state and re-evaluates whether the
// underlying AudioSendStream should be started or stopped.
void SetSend(bool send) {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
send_ = send; // Must be set before UpdateSendState() reads it.
UpdateSendState();
}
UpdateSendState
// Starts the stream only when all three conditions hold: sending is enabled,
// an audio source is attached, and the (single) RTP encoding is active.
// Otherwise the stream is stopped.
void UpdateSendState() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
RTC_DCHECK(stream_);
RTC_DCHECK_EQ(1UL, rtp_parameters_.encodings.size());
if (send_ && source_ != nullptr && rtp_parameters_.encodings[0].active) {
stream_->Start();
} else { // !send_ || source_ == nullptr || !encodings[0].active
stream_->Stop();
}
}
AudioSendStream::Start
// Starts sending on this stream: optionally registers it with the pacer and
// bitrate allocator, starts the encoder channel, then notifies AudioState
// (which will initialize/start microphone recording).
void AudioSendStream::Start() {
RTC_DCHECK_RUN_ON(&worker_thread_checker_);
if (sending_) {
return; // Already sending; nothing to do.
}
// Audio takes part in bitrate allocation only when min/max bitrates are
// configured, DSCP is off, and either feedback-less allocation is allowed
// or a transport sequence-number extension is negotiated.
if (!config_.has_dscp && config_.min_bitrate_bps != -1 &&
config_.max_bitrate_bps != -1 &&
(allocate_audio_without_feedback_ || TransportSeqNumId(config_) != 0)) {
rtp_transport_->AccountForAudioPacketsInPacedSender(true);
if (send_side_bwe_with_overhead_)
rtp_transport_->IncludeOverheadInPacedSender();
rtp_rtcp_module_->SetAsPartOfAllocation(true);
ConfigureBitrateObserver();
} else {
rtp_rtcp_module_->SetAsPartOfAllocation(false);
}
channel_send_->StartSend();
sending_ = true;
// Register as a sending stream; AudioState will init/start recording.
audio_state()->AddSendingStream(this, encoder_sample_rate_hz_,
encoder_num_channels_);
}
AudioState::AddSendingStream
void (webrtc::AudioSendStream* stream,
int sample_rate_hz,
size_t num_channels) {
RTC_DCHECK(thread_checker_.IsCurrent());
auto& properties = sending_streams_[stream];
properties.sample_rate_hz = sample_rate_hz;
properties.num_channels = num_channels;
UpdateAudioTransportWithSendingStreams();
// Make sure recording is initialized; start recording if enabled.
auto* adm = config_.audio_device_module.get();
if (!adm->Recording()) {
if (adm->InitRecording() == 0) {
if (recording_enabled_) {
adm->StartRecording();
}
} else {
RTC_DLOG_F(LS_ERROR) << "Failed to initialize recording.";
}
}
}
AudioDeviceModuleImpl::StartRecording
// Public ADM entry point for starting audio capture. Returns 0 on success
// (including when already recording), otherwise the platform device's error.
int32_t AudioDeviceModuleImpl::StartRecording() {
RTC_LOG(INFO) << __FUNCTION__;
CHECKinitialized_();
if (Recording()) {
return 0; // Already recording.
}
// Prepare the shared audio buffer first, then start the platform device.
audio_device_buffer_.StartRecording();
int32_t result = audio_device_->StartRecording();
RTC_LOG(INFO) << "output: " << result;
RTC_HISTOGRAM_BOOLEAN("WebRTC.Audio.StartRecordingSuccess",
static_cast<int>(result == 0));
return result;
}
AudioDeviceWindowsCore::StartRecording
// Creates and starts the Windows capture thread. With the built-in AEC the
// thread entry is WSAPICaptureThreadPollDMO (DMO polling); otherwise it is
// WSAPICaptureThread (plain WASAPI capture). Returns 0 on success, -1 on
// failure (not initialized, AEC without playout, thread creation or startup
// failure).
int32_t AudioDeviceWindowsCore::StartRecording() {
if (!_recIsInitialized) {
return -1;
}
if (_hRecThread != NULL) {
return 0; // Capture thread already exists.
}
if (_recording) {
return 0;
}
{
MutexLock lockScoped(&mutex_);
// Create thread which will drive the capturing
LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
if (_builtInAecEnabled) { // With AEC enabled, capture is redirected through the DMO.
// Redirect to the DMO polling method.
lpStartAddress = WSAPICaptureThreadPollDMO;
if (!_playing) {
// The DMO won't provide us captured output data unless we
// give it render data to process.
RTC_LOG(LS_ERROR)
<< "Playout must be started before recording when using"
" the built-in AEC";
return -1;
}
}
// When _builtInAecEnabled is set, the routine actually executed is
// WSAPICaptureThreadPollDMO.
assert(_hRecThread == NULL);
_hRecThread = CreateThread(NULL, 0, lpStartAddress, this, 0, NULL);
if (_hRecThread == NULL) {
RTC_LOG(LS_ERROR) << "failed to create the recording thread";
return -1;
}
// Set thread priority to highest possible
SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);
} // critScoped
// Block here until the capture thread signals that capturing has started
// (1 second timeout).
DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
if (ret != WAIT_OBJECT_0) {
RTC_LOG(LS_VERBOSE) << "capturing did not start up properly";
return -1;
}
RTC_LOG(LS_VERBOSE) << "capture audio stream has now started...";
_recording = true;
return 0;
}
WSAPICaptureThreadPollDMO
// Static thread trampoline passed to CreateThread: |context| is the
// AudioDeviceWindowsCore instance; dispatch to its member thread loop.
// NOTE: the cast's template argument was lost in the original excerpt
// ("return reinterpret_cast"); restored here.
DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context) {
  return reinterpret_cast<AudioDeviceWindowsCore*>(context)
      ->DoCaptureThreadPollDMO();
}
采集音频线程
AudioDeviceWindowsCore::DoCaptureThreadPollDMO
// Capture thread body used when the built-in AEC is enabled: initializes COM
// (MTA) and thread priority, signals startup, then polls the DMO every 5 ms
// for AEC-processed 10 ms frames and delivers them to _ptrAudioBuffer until
// the shutdown event fires. Returns the last HRESULT (0 on clean shutdown).
DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO() {
assert(_mediaBuffer != NULL);
bool keepRecording = true;
// Initialize COM as MTA in this thread.
ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
if (!comInit.Succeeded()) {
RTC_LOG(LS_ERROR) << "failed to initialize COM in polling DMO thread";
return 1;
}
HRESULT hr = InitCaptureThreadPriority();
if (FAILED(hr)) {
return hr;
}
// Set event which will ensure that the calling thread modifies the
// recording state to true.
SetEvent(_hCaptureStartedEvent);
// >> ---------------------------- THREAD LOOP ----------------------------
while (keepRecording) {
// Poll the DMO every 5 ms.
// (The same interval used in the Wave implementation.)
DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
switch (waitResult) {
case WAIT_OBJECT_0: // _hShutdownCaptureEvent
keepRecording = false;
break;
case WAIT_TIMEOUT: // timeout notification
break;
default: // unexpected error
RTC_LOG(LS_WARNING) << "Unknown wait termination on capture side";
hr = -1; // To signal an error callback.
keepRecording = false;
break;
}
// Core capture logic: drain every 10 ms frame the DMO currently holds.
while (keepRecording) {
MutexLock lockScoped(&mutex_);
DWORD dwStatus = 0;
{
DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
dmoBuffer.pBuffer = _mediaBuffer; // _mediaBuffer implements IMediaBuffer.
dmoBuffer.pBuffer->AddRef();
// Poll the DMO for AEC processed capture data. The DMO will
// copy available data to |dmoBuffer|, and should only return
// 10 ms frames. The value of |dwStatus| should be ignored.
hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
SAFE_RELEASE(dmoBuffer.pBuffer);
dwStatus = dmoBuffer.dwStatus;
}
if (FAILED(hr)) {
_TraceCOMError(hr);
keepRecording = false;
assert(false);
break;
}
ULONG bytesProduced = 0;
BYTE* data;
// Get a pointer to the data buffer. This should be valid until
// the next call to ProcessOutput.
// Fetch the captured bytes and their length; e.g. bytesProduced == 320
// observed here: 16 kHz output rate, 2 bytes per sample, 10 ms per fetch.
hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
if (FAILED(hr)) {
_TraceCOMError(hr);
keepRecording = false;
assert(false);
break;
}
if (bytesProduced > 0) {
// Observed in the debugger: 160 samples == 320 bytes / 2-byte frames.
const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
// TODO(andrew): verify that this is always satisfied. It might
// be that ProcessOutput will try to return more than 10 ms if
// we fail to call it frequently enough.
assert(kSamplesProduced == static_cast<int>(_recBlockSize));
assert(sizeof(BYTE) == sizeof(int8_t));
_ptrAudioBuffer->SetRecordedBuffer(reinterpret_cast<int8_t*>(data),
kSamplesProduced);
_ptrAudioBuffer->SetVQEData(0, 0);
_UnLock(); // Release lock while making the callback.
// Deliver the captured data to the upper layer (AudioDeviceBuffer).
_ptrAudioBuffer->DeliverRecordedData();
_Lock();
}
// Reset length to indicate buffer availability.
hr = _mediaBuffer->SetLength(0);
if (FAILED(hr)) {
_TraceCOMError(hr);
keepRecording = false;
assert(false);
break;
}
if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE)) {
// The DMO cannot currently produce more data. This is the
// normal case; otherwise it means the DMO had more than 10 ms
// of data available and ProcessOutput should be called again.
break;
}
}
}
// ---------------------------- THREAD LOOP ---------------------------- <<
RevertCaptureThreadPriority();
if (FAILED(hr)) {
RTC_LOG(LS_ERROR)
<< "Recording error: capturing thread has ended prematurely";
} else {
RTC_LOG(LS_VERBOSE) << "Capturing thread is now terminated properly";
}
return hr;
}
将数据输送到上层逻辑去处理
_ptrAudioBuffer->DeliverRecordedData();
AudioDeviceBuffer::DeliverRecordedData
-> 内部调用:
audio_transport_cb_->RecordedDataIsAvailable(
rec_buffer_.data(), frames, bytes_per_frame, rec_channels_,
rec_sample_rate_, total_delay_ms, 0, 0, typing_status_,
new_mic_level_dummy);
H:\webrtc-20210315\webrtc-20210315\webrtc\webrtc-checkout\src\modules\audio_device\audio_device_data_observer.cc
// AudioTransport methods overrides.
// AudioTransport methods overrides.
// Forwards captured PCM first to the registered data observer (a tap on the
// locally captured audio) and then to the real audio transport (e.g. the
// voice engine). Returns the transport's result, or 0 if none is registered.
int32_t RecordedDataIsAvailable(const void* audioSamples,
const size_t nSamples,
const size_t nBytesPerSample,
const size_t nChannels,
const uint32_t samples_per_sec,
const uint32_t total_delay_ms,
const int32_t clockDrift,
const uint32_t currentMicLevel,
const bool keyPressed,
uint32_t& newMicLevel) override {
int32_t res = 0;
// Capture PCM data of locally captured audio.
// std::unique_ptr<AudioDeviceDataObserver> observer_;
if (observer_) {
observer_->OnCaptureData(audioSamples, nSamples, nBytesPerSample,
nChannels, samples_per_sec);
}
// Send to the actual audio transport.
if (audio_transport_) {
res = audio_transport_->RecordedDataIsAvailable(
audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec,
total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel);
}
return res;
}
1 外面应用层调用
observer的值来自
(以下片段在网页转载时丢失了模板尖括号与部分标识符,按 audio_device_data_observer.cc 上游源码补全,待核对:)
ADMWrapper(rtc::scoped_refptr<AudioDeviceModule> impl,
           AudioDeviceDataObserver* legacy_observer,
           std::unique_ptr<AudioDeviceDataObserver> observer)
    : impl_(impl),
      legacy_observer_(legacy_observer),
      observer_(std::move(observer)) { ... }
->
rtc::scoped_refptr<AudioDeviceModule> CreateAudioDeviceWithDataObserver(
    AudioDeviceModule::AudioLayer audio_layer,
    TaskQueueFactory* task_queue_factory,
    AudioDeviceDataObserver* legacy_observer) {
  rtc::scoped_refptr<AudioDeviceModule> audio_device(
      new rtc::RefCountedObject<ADMWrapper>(audio_layer, task_queue_factory,
                                            legacy_observer, nullptr));
  ...
}
2 拷贝到每个发送流进行传输
if (audio_transport_) {
res = audio_transport_->RecordedDataIsAvailable(
audioSamples, nSamples, nBytesPerSample, nChannels, samples_per_sec,
total_delay_ms, clockDrift, currentMicLevel, keyPressed, newMicLevel);
}
audio_transport_ 的值为 RegisterAudioCallback 的传参值。
// Override AudioDeviceModule’s RegisterAudioCallback method to remember the
// actual audio transport (e.g.: voice engine).
int32t RegisterAudioCallback(AudioTransport* audio_callback) override {
// Remember the audio callback to forward PCM data
audio_transport = audio_callback;
return 0;
}