前言

![image](image.png)

源码分析

Conductor::AddTracks

  1. void Conductor::AddTracks() {
  2. if (!peer_connection_->GetSenders().empty()) {
  3. return; // Already added tracks.
  4. }
  5. // 创建音频轨,并添加到peerconnection
  6. rtc::scoped_refptr<webrtc::AudioTrackInterface> audio_track(
  7. peer_connection_factory_->CreateAudioTrack(
  8. kAudioLabel, peer_connection_factory_->CreateAudioSource(
  9. cricket::AudioOptions())));
  10. auto result_or_error = peer_connection_->AddTrack(audio_track, {kStreamId});
  11. if (!result_or_error.ok()) {
  12. RTC_LOG(LS_ERROR) << "Failed to add audio track to PeerConnection: "
  13. << result_or_error.error().message();
  14. }
  15. // 创建视频设备
  16. rtc::scoped_refptr<CapturerTrackSource> video_device =
  17. CapturerTrackSource::Create();
  18. if (video_device) {
  19. // 创建视频轨
  20. rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track_(
  21. peer_connection_factory_->CreateVideoTrack(kVideoLabel, video_device));
  22. // 显示本地视频
  23. main_wnd_->StartLocalRenderer(video_track_);
  24. // 添加视频轨
  25. result_or_error = peer_connection_->AddTrack(video_track_, {kStreamId});
  26. if (!result_or_error.ok()) {
  27. RTC_LOG(LS_ERROR) << "Failed to add video track to PeerConnection: "
  28. << result_or_error.error().message();
  29. }
  30. } else {
  31. RTC_LOG(LS_ERROR) << "OpenVideoCaptureDevice failed";
  32. }
  33. // 切换到显示视频渲染界面
  34. main_wnd_->SwitchToStreamingUI();
  35. }

—》 rtc::scoped_refptr<CapturerTrackSource> video_device =
        CapturerTrackSource::Create();

CapturerTrackSource::Create

  // VideoTrackSource backed by a webrtc::test::VcmCapturer. The factory below
  // opens the first enumerated camera that accepts a 640x480@30fps
  // configuration. (Listing is truncated by the article at the "*********"
  // marker; the private section is not shown here.)
  1. class CapturerTrackSource : public webrtc::VideoTrackSource {
  2. public:
  // Tries each enumerated capture device in order; returns nullptr when
  // device enumeration fails or no device can be initialized.
  3. static rtc::scoped_refptr<CapturerTrackSource> Create() {
  4. const size_t kWidth = 640;
  5. const size_t kHeight = 480;
  6. const size_t kFps = 30;
  7. std::unique_ptr<webrtc::test::VcmCapturer> capturer;
  8. std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
  9. webrtc::VideoCaptureFactory::CreateDeviceInfo());
  10. if (!info) {
  // Device enumeration is unavailable on this platform/build.
  11. return nullptr;
  12. }
  13. int num_devices = info->NumberOfDevices();
  14. for (int i = 0; i < num_devices; ++i) {
  15. capturer = absl::WrapUnique(
  16. webrtc::test::VcmCapturer::Create(kWidth, kHeight, kFps, i));
  17. if (capturer) {
  // First device that initializes wins; ownership of the capturer moves
  // into the new ref-counted track source.
  18. return new rtc::RefCountedObject<CapturerTrackSource>(
  19. std::move(capturer));
  20. }
  21. }
  22. return nullptr;
  23. }
  24. *********

—》 capturer = absl::WrapUnique(
webrtc::test::VcmCapturer::Create(kWidth, kHeight, kFps, i));

  1. VcmCapturer* VcmCapturer::Create(size_t width,
  2. size_t height,
  3. size_t target_fps,
  4. size_t capture_device_index) {
  5. std::unique_ptr<VcmCapturer> vcm_capturer(new VcmCapturer());
  6. if (!vcm_capturer->Init(width, height, target_fps, capture_device_index)) {
  7. RTC_LOG(LS_WARNING) << "Failed to create VcmCapturer(w = " << width
  8. << ", h = " << height << ", fps = " << target_fps
  9. << ")";
  10. return nullptr;
  11. }
  12. return vcm_capturer.release();
  13. }

—》vcm_capturer->Init(width, height, target_fps, capture_device_index)

  // Opens capture device `capture_device_index` and starts capturing at the
  // requested width/height/fps. Returns true when capture has started;
  // returns false (after cleanup where applicable) on any failure.
  1. bool VcmCapturer::Init(size_t width,
  2. size_t height,
  3. size_t target_fps,
  4. size_t capture_device_index) {
  5. std::unique_ptr<VideoCaptureModule::DeviceInfo> device_info(
  6. VideoCaptureFactory::CreateDeviceInfo());
  // NOTE(review): device_info is not null-checked here, unlike in
  // CapturerTrackSource::Create — presumably callers have already verified
  // enumeration works; confirm for any new call sites.
  7. // Look up the display name and unique id of the selected device.
  8. char device_name[256];
  9. char unique_name[256];
  10. if (device_info->GetDeviceName(static_cast<uint32_t>(capture_device_index),
  11. device_name, sizeof(device_name), unique_name,
  12. sizeof(unique_name)) != 0) {
  13. Destroy();
  14. return false;
  15. }
  16. // Create the VideoCaptureModule instance from the device's unique name.
  17. vcm_ = webrtc::VideoCaptureFactory::Create(unique_name);
  18. if (!vcm_) {
  19. return false;
  20. }
  21. // On success, register this object as the capture-data callback; it is invoked for every captured frame.
  22. vcm_->RegisterCaptureDataCallback(this);
  23. // Query the device's first reported capability as a starting point.
  24. device_info->GetCapability(vcm_->CurrentDeviceName(), 0, capability_);
  25. // Override the capability with the caller-requested parameters.
  26. capability_.width = static_cast<int32_t>(width);
  27. capability_.height = static_cast<int32_t>(height);
  28. capability_.maxFPS = static_cast<int32_t>(target_fps);
  29. capability_.videoType = VideoType::kI420;
  30. // Ask the capture module to start capturing with the configured capability.
  31. if (vcm_->StartCapture(capability_) != 0) {
  32. Destroy();
  33. return false;
  34. }
  35. RTC_CHECK(vcm_->CaptureStarted());
  36. return true;
  37. }

获取设备名称
image.png
获取设备能力
image.png
此时,设备已经开始工作了。
此时 rtc::scoped_refptr<CapturerTrackSource> video_device = CapturerTrackSource::Create(); 就完成了。

接着是创建视频轨
rtc::scoped_refptr<webrtc::VideoTrackInterface> video_track_(
    peer_connection_factory_->CreateVideoTrack(kVideoLabel, video_device));

PeerConnectionFactory::CreateVideoTrack

  1. rtc::scoped_refptr<VideoTrackInterface> PeerConnectionFactory::CreateVideoTrack(
  2. const std::string& id,
  3. VideoTrackSourceInterface* source) {
  4. RTC_DCHECK(signaling_thread()->IsCurrent());
  5. rtc::scoped_refptr<VideoTrackInterface> track(
  6. VideoTrack::Create(id, source, worker_thread()));
  7. return VideoTrackProxy::Create(signaling_thread(), worker_thread(), track);
  8. }

—》 rtc::scoped_refptr<VideoTrackInterface> track(VideoTrack::Create(id, source, worker_thread()));

  // Factory: allocates a ref-counted VideoTrack; the returned scoped_refptr
  // takes the initial reference.
  1. rtc::scoped_refptr<VideoTrack> VideoTrack::Create(
  2. const std::string& id,
  3. VideoTrackSourceInterface* source,
  4. rtc::Thread* worker_thread) {
  5. rtc::RefCountedObject<VideoTrack>* track =
  6. new rtc::RefCountedObject<VideoTrack>(id, source, worker_thread);
  7. return track;
  8. }
  // Constructor: stores the source and worker thread, then registers the
  // track as an observer of the source so it is notified of state changes.
  9. VideoTrack::VideoTrack(const std::string& label,
  10. VideoTrackSourceInterface* video_source,
  11. rtc::Thread* worker_thread)
  12. : MediaStreamTrack<VideoTrackInterface>(label),
  13. worker_thread_(worker_thread),
  14. video_source_(video_source),
  15. content_hint_(ContentHint::kNone) {
  16. video_source_->RegisterObserver(this);
  17. }
  // Member (quoted from the class body): ref-counted handle that keeps the
  // underlying track source alive for the lifetime of the track.
  18. rtc::scoped_refptr<VideoTrackSourceInterface> video_source_;

接下来是设置本地渲染

MainWnd::StartLocalRenderer

  // Creates the local-preview renderer for `local_video`. The 1x1 size is an
  // initial placeholder; the bitmap header is reconfigured per frame.
  1. void MainWnd::StartLocalRenderer(webrtc::VideoTrackInterface* local_video) {
  2. local_renderer_.reset(new VideoRenderer(handle(), 1, 1, local_video));
  3. }
  // Renderer: prepares a 32-bit RGB top-down DIB header and subscribes
  // itself as a sink of the track so it receives decoded frames.
  4. MainWnd::VideoRenderer::VideoRenderer(
  5. HWND wnd,
  6. int width,
  7. int height,
  8. webrtc::VideoTrackInterface* track_to_render)
  9. : wnd_(wnd), rendered_track_(track_to_render) {
  10. ::InitializeCriticalSection(&buffer_lock_);
  11. ZeroMemory(&bmi_, sizeof(bmi_));
  12. bmi_.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
  13. bmi_.bmiHeader.biPlanes = 1;
  14. bmi_.bmiHeader.biBitCount = 32;
  15. bmi_.bmiHeader.biCompression = BI_RGB;
  16. bmi_.bmiHeader.biWidth = width;
  // Negative height marks the DIB as top-down (first row at the top).
  17. bmi_.bmiHeader.biHeight = -height;
  18. bmi_.bmiHeader.biSizeImage =
  19. width * height * (bmi_.bmiHeader.biBitCount >> 3);
  20. // Register this object as the consumer of the video data. To receive
  21. // frames, a class implements VideoSinkInterface (its OnFrame method) and
  22. // registers itself with the source via AddOrUpdateSink; the source then
  // pushes every frame to the sink through OnFrame.
  23. rendered_track_->AddOrUpdateSink(this, rtc::VideoSinkWants());
  24. }

—》 rendered_track_->AddOrUpdateSink(this, rtc::VideoSinkWants());

  1. // AddOrUpdateSink and RemoveSink should be called on the worker
  2. // thread.
  3. void VideoTrack::AddOrUpdateSink(rtc::VideoSinkInterface<VideoFrame>* sink,
  4. const rtc::VideoSinkWants& wants) {
  5. RTC_DCHECK(worker_thread_->IsCurrent());
  6. VideoSourceBase::AddOrUpdateSink(sink, wants);
  7. rtc::VideoSinkWants modified_wants = wants;
  8. modified_wants.black_frames = !enabled();
  9. video_source_->AddOrUpdateSink(sink, modified_wants);
  10. }

—》 video_source_->AddOrUpdateSink(sink, modified_wants);

  // Thin forwarder: VideoTrackSource delegates sink registration to the
  // underlying source returned by source(). Worker thread only.
  1. void VideoTrackSource::AddOrUpdateSink(
  2. rtc::VideoSinkInterface<VideoFrame>* sink,
  3. const rtc::VideoSinkWants& wants) {
  4. RTC_DCHECK(worker_thread_checker_.IsCurrent());
  5. source()->AddOrUpdateSink(sink, wants);
  6. }

—》 source()->AddOrUpdateSink(sink, wants);

  // Registers `sink` with the broadcaster (which fans captured frames out to
  // all registered sinks) and then recomputes the video adapter settings
  // from the updated set of sink wants.
  1. void TestVideoCapturer::AddOrUpdateSink(
  2. rtc::VideoSinkInterface<VideoFrame>* sink,
  3. const rtc::VideoSinkWants& wants) {
  4. broadcaster_.AddOrUpdateSink(sink, wants);
  5. UpdateVideoAdapter();
  6. }

-》 broadcaster_.AddOrUpdateSink(sink, wants);

  // Thread-safe add/update of a sink; frames delivered to the broadcaster
  // are subsequently forwarded to every registered sink.
  1. void VideoBroadcaster::AddOrUpdateSink(
  2. VideoSinkInterface<webrtc::VideoFrame>* sink,
  3. const VideoSinkWants& wants) {
  4. RTC_DCHECK(sink != nullptr);
  5. webrtc::MutexLock lock(&sinks_and_wants_lock_);
  6. if (!FindSinkPair(sink)) {
  7. // |Sink| is a new sink, which didn't receive previous frame.
  8. previous_frame_sent_to_all_sinks_ = false;
  9. }
  10. VideoSourceBase::AddOrUpdateSink(sink, wants);
  // Recompute the aggregate wants across all sinks after the change.
  11. UpdateWants();
  12. }

—》 VideoSourceBase::AddOrUpdateSink(sink, wants);

  1. void VideoSourceBase::AddOrUpdateSink(
  2. VideoSinkInterface<webrtc::VideoFrame>* sink,
  3. const VideoSinkWants& wants) {
  4. RTC_DCHECK(sink != nullptr);
  5. SinkPair* sink_pair = FindSinkPair(sink);
  6. if (!sink_pair) {
  7. sinks_.push_back(SinkPair(sink, wants));
  8. } else {
  9. sink_pair->wants = wants;
  10. }
  11. }
  12. std::vector<SinkPair> sinks_;
  13. 这里是将传入的sink添加到容器中,当有视频数据时,会分发到容器中的每个sink中去

image.png