Nack调用栈

image.png
数据送解码器的图
9-7 Channel-Stream与编解码器(重要)
10-18 视频解码渲染(重点)
RtpDemuxer::OnRtpPacket 收到的如果是Rtx包,则会调用RtxReceiveStream::OnRtpPacket,否则会直接调用 RtpVideoStreamReceiver::OnRtpPacket.
RtxReceiveStream::OnRtpPacket处理完后,也会调用RtpVideoStreamReceiver::OnRtpPacket。

OnReceivedPacket

说明

image.png
正常收到rtp时会同时使用fec和nack,这里为了避免fec干扰对nack流程的分析,将关闭fec(red)功能。
当前分析的是视频的rtp包。

源码

  // RtpVideoStreamReceiver::ReceivePacket: entry point for one received video
  // RTP packet. Empty payloads are reported as padding/keep-alive, RED
  // packets are unwrapped separately, and everything else is depacketized per
  // payload type and forwarded to OnReceivedPayloadData.
  1. void RtpVideoStreamReceiver::ReceivePacket(const RtpPacketReceived& packet) {
  2. if (packet.payload_size() == 0) {
  3. // Padding or keep-alive packet.
  4. // TODO(nisse): Could drop empty packets earlier, but need to figure out how
  5. // they should be counted in stats.
  6. NotifyReceiverOfEmptyPacket(packet.SequenceNumber());
  7. return;
  8. }
  // RED-encapsulated packets (FEC redundancy) carry an extra header; they are
  // parsed on a separate path instead of being treated as raw video payload.
  9. if (packet.PayloadType() == config_.rtp.red_payload_type) {
  10. ParseAndHandleEncapsulatingHeader(packet);
  11. return;
  12. }
  // Look up the depacketizer registered for this payload type; packets with
  // an unknown payload type are dropped silently.
  13. const auto type_it = payload_type_map_.find(packet.PayloadType());
  14. if (type_it == payload_type_map_.end()) {
  15. return;
  16. }
  // Depacketize: strip the codec-specific payload header and produce the
  // video payload plus a parsed RTPVideoHeader.
  17. absl::optional<VideoRtpDepacketizer::ParsedRtpPayload> parsed_payload =
  18. type_it->second->Parse(packet.PayloadBuffer());
  19. if (parsed_payload == absl::nullopt) {
  20. RTC_LOG(LS_WARNING) << "Failed parsing payload.";
  21. return;
  22. }
  23. OnReceivedPayloadData(std::move(parsed_payload->video_payload), packet,
  24. parsed_payload->video_header);
  25. }

OnReceivedPayloadData

说明

RtpVideoStreamReceiver::OnReceivedPayloadData
image.png
这里其实主要是三个if的分支,都是比较复杂和重要的。当前说的是第三个分支,检测丢包,对应的成员变量是 `std::unique_ptr<NackModule> nack_module_;`。
image.png

源码

  // RtpVideoStreamReceiver::OnReceivedPayloadData: takes one depacketized
  // video payload plus its RTP packet and video header, fills in header
  // fields from RTP extensions, feeds loss-notification and NACK tracking,
  // fixes up H.264 bitstreams (SPS/PPS), and finally inserts the packet into
  // the packet buffer for frame assembly.
  1. void RtpVideoStreamReceiver::OnReceivedPayloadData(
  2. rtc::CopyOnWriteBuffer codec_payload,
  3. const RtpPacketReceived& rtp_packet,
  4. const RTPVideoHeader& video) {
  5. RTC_DCHECK_RUN_ON(&worker_task_checker_);
  6. auto packet = std::make_unique<video_coding::PacketBuffer::Packet>(
  7. rtp_packet, video, clock_->TimeInMilliseconds());
  8. // Try to extrapolate absolute capture time if it is missing.
  9. packet->packet_info.set_absolute_capture_time(
  10. absolute_capture_time_receiver_.OnReceivePacket(
  11. AbsoluteCaptureTimeReceiver::GetSource(packet->packet_info.ssrc(),
  12. packet->packet_info.csrcs()),
  13. packet->packet_info.rtp_timestamp(),
  14. // Assume frequency is the same one for all video frames.
  15. kVideoPayloadTypeFrequency,
  16. packet->packet_info.absolute_capture_time()));
  // Populate the video header: defaults first, then overwrite from the
  // packet's RTP header extensions where present.
  17. RTPVideoHeader& video_header = packet->video_header;
  18. video_header.rotation = kVideoRotation_0;
  19. video_header.content_type = VideoContentType::UNSPECIFIED;
  20. video_header.video_timing.flags = VideoSendTiming::kInvalid;
  // The RTP marker bit signals the last packet of a frame; VP9 additionally
  // carries frame-boundary flags in its payload descriptor.
  21. video_header.is_last_packet_in_frame |= rtp_packet.Marker();
  22. if (const auto* vp9_header =
  23. absl::get_if<RTPVideoHeaderVP9>(&video_header.video_type_header)) {
  24. video_header.is_last_packet_in_frame |= vp9_header->end_of_frame;
  25. video_header.is_first_packet_in_frame |= vp9_header->beginning_of_frame;
  26. }
  27. rtp_packet.GetExtension<VideoOrientation>(&video_header.rotation);
  28. rtp_packet.GetExtension<VideoContentTypeExtension>(
  29. &video_header.content_type);
  30. rtp_packet.GetExtension<VideoTimingExtension>(&video_header.video_timing);
  // Forced playout delay (if configured) overrides the value signaled in the
  // PlayoutDelayLimits extension.
  31. if (forced_playout_delay_max_ms_ && forced_playout_delay_min_ms_) {
  32. video_header.playout_delay.max_ms = *forced_playout_delay_max_ms_;
  33. video_header.playout_delay.min_ms = *forced_playout_delay_min_ms_;
  34. } else {
  35. rtp_packet.GetExtension<PlayoutDelayLimits>(&video_header.playout_delay);
  36. }
  // Parse the generic frame-dependency descriptor extension; it may decide
  // the packet should be dropped entirely.
  37. ParseGenericDependenciesResult generic_descriptor_state =
  38. ParseGenericDependenciesExtension(rtp_packet, &video_header);
  39. if (generic_descriptor_state == kDropPacket)
  40. return;
  41. // Color space should only be transmitted in the last packet of a frame,
  42. // therefore, neglect it otherwise so that last_color_space_ is not reset by
  43. // mistake.
  44. if (video_header.is_last_packet_in_frame) {
  45. video_header.color_space = rtp_packet.GetExtension<ColorSpaceExtension>();
  46. if (video_header.color_space ||
  47. video_header.frame_type == VideoFrameType::kVideoFrameKey) {
  48. // Store color space since it's only transmitted when changed or for key
  49. // frames. Color space will be cleared if a key frame is transmitted
  50. // without color space information.
  51. last_color_space_ = video_header.color_space;
  52. } else if (last_color_space_) {
  53. video_header.color_space = last_color_space_;
  54. }
  55. }
  // Branch 1: feed the loss-notification controller (LNTF feedback). It
  // requires the generic frame descriptor and does not handle packets
  // recovered by FEC/RTX reordering.
  56. if (loss_notification_controller_) {
  57. if (rtp_packet.recovered()) {
  58. // TODO(bugs.webrtc.org/10336): Implement support for reordering.
  59. RTC_LOG(LS_INFO)
  60. << "LossNotificationController does not support reordering.";
  61. } else if (generic_descriptor_state == kNoGenericDescriptor) {
  62. RTC_LOG(LS_WARNING) << "LossNotificationController requires generic "
  63. "frame descriptor, but it is missing.";
  64. } else {
  65. if (video_header.is_first_packet_in_frame) {
  66. RTC_DCHECK(video_header.generic);
  67. LossNotificationController::FrameDetails frame;
  68. frame.is_keyframe =
  69. video_header.frame_type == VideoFrameType::kVideoFrameKey;
  70. frame.frame_id = video_header.generic->frame_id;
  71. frame.frame_dependencies = video_header.generic->dependencies;
  72. loss_notification_controller_->OnReceivedPacket(
  73. rtp_packet.SequenceNumber(), &frame);
  74. } else {
  75. loss_notification_controller_->OnReceivedPacket(
  76. rtp_packet.SequenceNumber(), nullptr);
  77. }
  78. }
  79. }
  // Branch 2: NACK loss detection. The module tracks sequence numbers and
  // returns how many times this packet was NACKed; -1 marks NACK disabled.
  80. if (nack_module_) {
  81. const bool is_keyframe =
  82. video_header.is_first_packet_in_frame &&
  83. video_header.frame_type == VideoFrameType::kVideoFrameKey;
  84. packet->times_nacked = nack_module_->OnReceivedPacket(
  85. rtp_packet.SequenceNumber(), is_keyframe, rtp_packet.recovered());
  86. } else {
  87. packet->times_nacked = -1;
  88. }
  // A payload emptied by depacketization is treated like padding: notify,
  // flush any buffered RTCP feedback (e.g. NACKs), and stop here.
  89. if (codec_payload.size() == 0) {
  90. NotifyReceiverOfEmptyPacket(packet->seq_num);
  91. rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  92. return;
  93. }
  // Branch 3: H.264 needs SPS/PPS bookkeeping; the tracker may request a
  // keyframe, drop the packet, or emit a fixed-up bitstream.
  94. if (packet->codec() == kVideoCodecH264) {
  95. // Only when we start to receive packets will we know what payload type
  96. // that will be used. When we know the payload type insert the correct
  97. // sps/pps into the tracker.
  98. if (packet->payload_type != last_payload_type_) {
  99. last_payload_type_ = packet->payload_type;
  100. InsertSpsPpsIntoTracker(packet->payload_type);
  101. }
  102. video_coding::H264SpsPpsTracker::FixedBitstream fixed =
  103. tracker_.CopyAndFixBitstream(
  104. rtc::MakeArrayView(codec_payload.cdata(), codec_payload.size()),
  105. &packet->video_header);
  106. switch (fixed.action) {
  107. case video_coding::H264SpsPpsTracker::kRequestKeyframe:
  108. rtcp_feedback_buffer_.RequestKeyFrame();
  109. rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  110. ABSL_FALLTHROUGH_INTENDED;
  111. case video_coding::H264SpsPpsTracker::kDrop:
  112. return;
  113. case video_coding::H264SpsPpsTracker::kInsert:
  114. packet->video_payload = std::move(fixed.bitstream);
  115. break;
  116. }
  117. } else {
  118. packet->video_payload = std::move(codec_payload);
  119. }
  // Flush buffered RTCP feedback, count the frame, and hand the packet to
  // the packet buffer; OnInsertedPacket processes any completed frames.
  120. rtcp_feedback_buffer_.SendBufferedRtcpFeedback();
  121. frame_counter_.Add(packet->timestamp);
  122. OnInsertedPacket(packet_buffer_.InsertPacket(std::move(packet)));
  123. }