AddTransceiver


SessionDescription

In SimulcastDescription:
send_layers_: how many simulcast streams are sent
receive_layers_: how many simulcast streams are received
If simulcast is enabled on a stream, there are three streams; their descriptions are stored in the rids_ member of StreamParams, where each entry is a RidDescription (see the sketch below).
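
A minimal sketch, assuming WebRTC's native C++ API, of how an application requests these three encodings in the first place; the rids "h"/"m"/"l", the scale factors, and the helper name are illustrative, not taken from the text above. Each rid set here is what ends up as a RidDescription in StreamParams::rids_ after negotiation.

#include "api/peer_connection_interface.h"
#include "rtc_base/checks.h"

// Request three simulcast encodings on one video track.
void AddSimulcastVideoTransceiver(
    webrtc::PeerConnectionInterface* pc,
    rtc::scoped_refptr<webrtc::VideoTrackInterface> track) {
  webrtc::RtpTransceiverInit init;
  init.direction = webrtc::RtpTransceiverDirection::kSendOnly;

  // One RtpEncodingParameters per simulcast layer.
  webrtc::RtpEncodingParameters high, mid, low;
  high.rid = "h";                      // Full resolution.
  mid.rid = "m";
  mid.scale_resolution_down_by = 2.0;  // Half resolution.
  low.rid = "l";
  low.scale_resolution_down_by = 4.0;  // Quarter resolution.
  init.send_encodings = {high, mid, low};

  auto result = pc->AddTransceiver(track, init);
  RTC_CHECK(result.ok()) << result.error().message();
}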

The Encode function in H264

H264EncoderImpl::Encode
encoders_ has length 1 by default; with simulcast enabled it is 3.

OnEncodedImage() passes the encoded data up to the application layer (a sketch of a receiving callback follows the listing).

int32_t H264EncoderImpl::Encode(
    const VideoFrame& input_frame,
    const std::vector<VideoFrameType>* frame_types) {
  if (encoders_.empty()) {
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (!encoded_image_callback_) {
    RTC_LOG(LS_WARNING)
        << "InitEncode() has been called, but a callback function "
           "has not been set with RegisterEncodeCompleteCallback()";
    ReportError();
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  rtc::scoped_refptr<const I420BufferInterface> frame_buffer =
      input_frame.video_frame_buffer()->ToI420();

  bool send_key_frame = false;
  for (size_t i = 0; i < configurations_.size(); ++i) {
    if (configurations_[i].key_frame_request && configurations_[i].sending) {
      send_key_frame = true;
      break;
    }
  }

  if (!send_key_frame && frame_types) {
    for (size_t i = 0; i < configurations_.size(); ++i) {
      const size_t simulcast_idx =
          static_cast<size_t>(configurations_[i].simulcast_idx);
      if (configurations_[i].sending && simulcast_idx < frame_types->size() &&
          (*frame_types)[simulcast_idx] == VideoFrameType::kVideoFrameKey) {
        send_key_frame = true;
        break;
      }
    }
  }

  RTC_DCHECK_EQ(configurations_[0].width, frame_buffer->width());
  RTC_DCHECK_EQ(configurations_[0].height, frame_buffer->height());

  // Encode image for each layer.
  for (size_t i = 0; i < encoders_.size(); ++i) {
    // EncodeFrame input.
    pictures_[i] = {0};
    pictures_[i].iPicWidth = configurations_[i].width;
    pictures_[i].iPicHeight = configurations_[i].height;
    pictures_[i].iColorFormat = EVideoFormatType::videoFormatI420;
    pictures_[i].uiTimeStamp = input_frame.ntp_time_ms();
    // Downscale images on second and ongoing layers.
    if (i == 0) {
      pictures_[i].iStride[0] = frame_buffer->StrideY();
      pictures_[i].iStride[1] = frame_buffer->StrideU();
      pictures_[i].iStride[2] = frame_buffer->StrideV();
      pictures_[i].pData[0] = const_cast<uint8_t*>(frame_buffer->DataY());
      pictures_[i].pData[1] = const_cast<uint8_t*>(frame_buffer->DataU());
      pictures_[i].pData[2] = const_cast<uint8_t*>(frame_buffer->DataV());
    } else {
      pictures_[i].iStride[0] = downscaled_buffers_[i - 1]->StrideY();
      pictures_[i].iStride[1] = downscaled_buffers_[i - 1]->StrideU();
      pictures_[i].iStride[2] = downscaled_buffers_[i - 1]->StrideV();
      pictures_[i].pData[0] =
          const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataY());
      pictures_[i].pData[1] =
          const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataU());
      pictures_[i].pData[2] =
          const_cast<uint8_t*>(downscaled_buffers_[i - 1]->DataV());
      // Scale the image down a number of times by downsampling factor.
      libyuv::I420Scale(pictures_[i - 1].pData[0], pictures_[i - 1].iStride[0],
                        pictures_[i - 1].pData[1], pictures_[i - 1].iStride[1],
                        pictures_[i - 1].pData[2], pictures_[i - 1].iStride[2],
                        configurations_[i - 1].width,
                        configurations_[i - 1].height, pictures_[i].pData[0],
                        pictures_[i].iStride[0], pictures_[i].pData[1],
                        pictures_[i].iStride[1], pictures_[i].pData[2],
                        pictures_[i].iStride[2], configurations_[i].width,
                        configurations_[i].height, libyuv::kFilterBilinear);
    }

    if (!configurations_[i].sending) {
      continue;
    }
    if (frame_types != nullptr) {
      // Skip frame?
      if ((*frame_types)[i] == VideoFrameType::kEmptyFrame) {
        continue;
      }
    }

    if (send_key_frame) {
      // API doc says ForceIntraFrame(false) does nothing, but calling this
      // function forces a key frame regardless of the |bIDR| argument's value.
      // (If every frame is a key frame we get lag/delays.)
      encoders_[i]->ForceIntraFrame(true);
      configurations_[i].key_frame_request = false;
    }

    // EncodeFrame output.
    SFrameBSInfo info;
    memset(&info, 0, sizeof(SFrameBSInfo));

    // Encode!
    int enc_ret = encoders_[i]->EncodeFrame(&pictures_[i], &info);
    if (enc_ret != 0) {
      RTC_LOG(LS_ERROR)
          << "OpenH264 frame encoding failed, EncodeFrame returned " << enc_ret
          << ".";
      ReportError();
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    encoded_images_[i]._encodedWidth = configurations_[i].width;
    encoded_images_[i]._encodedHeight = configurations_[i].height;
    encoded_images_[i].SetTimestamp(input_frame.timestamp());
    encoded_images_[i]._frameType = ConvertToVideoFrameType(info.eFrameType);
    encoded_images_[i].SetSpatialIndex(configurations_[i].simulcast_idx);

    // Split encoded image up into fragments. This also updates
    // |encoded_image_|.
    RtpFragmentize(&encoded_images_[i], &info);

    // Encoder can skip frames to save bandwidth in which case
    // |encoded_images_[i]._length| == 0.
    if (encoded_images_[i].size() > 0) {
      // Parse QP.
      h264_bitstream_parser_.ParseBitstream(encoded_images_[i]);
      encoded_images_[i].qp_ =
          h264_bitstream_parser_.GetLastSliceQp().value_or(-1);

      // Deliver encoded image.
      CodecSpecificInfo codec_specific;
      codec_specific.codecType = kVideoCodecH264;
      codec_specific.codecSpecific.H264.packetization_mode =
          packetization_mode_;
      codec_specific.codecSpecific.H264.temporal_idx = kNoTemporalIdx;
      codec_specific.codecSpecific.H264.idr_frame =
          info.eFrameType == videoFrameTypeIDR;
      codec_specific.codecSpecific.H264.base_layer_sync = false;
      if (configurations_[i].num_temporal_layers > 1) {
        const uint8_t tid = info.sLayerInfo[0].uiTemporalId;
        codec_specific.codecSpecific.H264.temporal_idx = tid;
        codec_specific.codecSpecific.H264.base_layer_sync =
            tid > 0 && tid < tl0sync_limit_[i];
        if (codec_specific.codecSpecific.H264.base_layer_sync) {
          tl0sync_limit_[i] = tid;
        }
        if (tid == 0) {
          tl0sync_limit_[i] = configurations_[i].num_temporal_layers;
        }
      }
      encoded_image_callback_->OnEncodedImage(encoded_images_[i],
                                              &codec_specific);
    }
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
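
The OnEncodedImage() call at the end of the listing is where each encoded layer leaves the encoder. A minimal sketch, assuming the two-argument EncodedImageCallback interface used above, of an application-side sink; the class name is illustrative. It would be registered through RegisterEncodeCompleteCallback(), as the warning at the top of Encode() mentions.

#include "api/video_codecs/video_encoder.h"

// Receives each encoded simulcast layer from H264EncoderImpl::Encode().
class EncodedFrameSink : public webrtc::EncodedImageCallback {
 public:
  Result OnEncodedImage(
      const webrtc::EncodedImage& encoded_image,
      const webrtc::CodecSpecificInfo* codec_specific_info) override {
    // SpatialIndex() carries the simulcast_idx the encoder set above, so
    // the packetizer can route each layer onto its own RTP stream.
    const int simulcast_idx = encoded_image.SpatialIndex().value_or(0);
    (void)simulcast_idx;  // Hand the frame off to the RTP layer here.
    return Result(Result::OK);
  }
};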

The vpx_codec_encode function in VP8

H:\webrtc-20210315\webrtc-20210315\webrtc\webrtc-checkout\src\third_party\libvpx\source\libvpx\vpx\src\vpx_encoder.c

vpx_codec_err_t vpx_codec_encode(vpx_codec_ctx_t *ctx, const vpx_image_t *img,
                                 vpx_codec_pts_t pts, unsigned long duration,
                                 vpx_enc_frame_flags_t flags,
                                 unsigned long deadline) {
  vpx_codec_err_t res = VPX_CODEC_OK;

  if (!ctx || (img && !duration))
    res = VPX_CODEC_INVALID_PARAM;
  else if (!ctx->iface || !ctx->priv)
    res = VPX_CODEC_ERROR;
  else if (!(ctx->iface->caps & VPX_CODEC_CAP_ENCODER))
    res = VPX_CODEC_INCAPABLE;
  else {
    unsigned int num_enc = ctx->priv->enc.total_encoders;

    /* Execute in a normalized floating point environment, if the platform
     * requires it.
     */
    FLOATING_POINT_INIT();

    if (num_enc == 1)
      res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration, flags,
                                   deadline);
    else {
      /* Multi-resolution encoding:
       * Encode multi-levels in reverse order. For example,
       * if mr_total_resolutions = 3, first encode level 2,
       * then encode level 1, and finally encode level 0.
       */
      int i;

      ctx += num_enc - 1;
      if (img) img += num_enc - 1;

      for (i = num_enc - 1; i >= 0; i--) {
        if ((res = ctx->iface->enc.encode(get_alg_priv(ctx), img, pts, duration,
                                          flags, deadline)))
          break;

        ctx--;
        if (img) img--;
      }
      ctx++;
    }

    FLOATING_POINT_RESTORE();
  }

  return SAVE_STATUS(ctx, res);
}
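
For the multi-encoder branch above to run, the `num_enc` contexts must sit in one contiguous array, which (when libvpx is built with multi-resolution encoding support) vpx_codec_enc_init_multi() sets up. A minimal sketch assuming three resolutions; the helper name, layer count, and downsampling factors are illustrative, and cfg[0..2] are presumed already filled in (e.g. via vpx_codec_enc_config_default() plus per-level width/height):

#include "vpx/vp8cx.h"
#include "vpx/vpx_encoder.h"

// Chain three encoder contexts so a single vpx_codec_encode() call on
// ctx[0] encodes all three resolutions (levels 2, 1, 0 in reverse order,
// as in the listing above).
bool InitMultiResolutionEncoders(vpx_codec_ctx_t ctx[3],
                                 vpx_codec_enc_cfg_t cfg[3]) {
  // Downsampling factor from the previous level: full, 1/2, 1/4 resolution.
  vpx_rational_t dsf[3] = {{1, 1}, {2, 1}, {2, 1}};
  return vpx_codec_enc_init_multi(&ctx[0], vpx_codec_vp8_cx(), &cfg[0],
                                  /*num_enc=*/3, /*flags=*/0, dsf) ==
         VPX_CODEC_OK;
}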

Simulcast also relies on an SFU server, which forwards the appropriate stream to each receiving PC according to its network conditions; a hypothetical sketch of that selection follows.
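
What that forwarding decision can look like, as a hypothetical sketch (none of these names come from WebRTC or any particular SFU): forward the highest simulcast layer whose bitrate fits the subscriber's estimated downlink.

#include <cstdint>
#include <vector>

// One entry per simulcast layer the sender publishes.
struct LayerInfo {
  int spatial_idx;       // 0 = lowest resolution.
  uint32_t bitrate_bps;  // Measured or signaled bitrate of the layer.
};

// Returns the spatial index of the highest layer that fits the subscriber's
// estimated downlink bandwidth; falls back to the lowest layer otherwise.
int PickLayerToForward(const std::vector<LayerInfo>& layers,
                       uint32_t estimated_downlink_bps) {
  int best = 0;
  for (const LayerInfo& layer : layers) {
    if (layer.bitrate_bps <= estimated_downlink_bps &&
        layer.spatial_idx > best) {
      best = layer.spatial_idx;
    }
  }
  return best;
}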