Preface
I am an Android developer who plans to focus on audio/video development for the long term. Starting with this series of articles, I am recording my journey of learning audio/video development.
ijkplayer article series:
Understanding ijkplayer (1): Getting started
Understanding ijkplayer (2): Project structure analysis
Understanding ijkplayer (3): Initialization from the Java layer
Understanding ijkplayer (4): Pulling the stream
Understanding ijkplayer (5): Decoding and playback
Understanding ijkplayer (6): Reading the FFmpeg source from ijkplayer
Understanding ijkplayer (7): Dynamically switching resolution


1 Decode threads

The decode threads are started in stream_component_open(). A simplified version first:

  1. static int stream_component_open(FFPlayer *ffp, int stream_index)
  2. {
  3. AVCodecContext *avctx;//解码器上下文
  4. AVCodec *codec = NULL;//解码器
  5. //找到解码器
  6. codec = avcodec_find_decoder(avctx->codec_id);
  7. switch (avctx->codec_type) {
  8. case AVMEDIA_TYPE_AUDIO:
  9. ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt);
  10. //decoder初始化
  11. decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
  12. //decoder启动,启动audio_thread线程
  13. if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
  14. goto out;
  15. break;
  16. case AVMEDIA_TYPE_VIDEO:
  17. //decoder初始化
  18. decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
  19. ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
  20. if (!ffp->node_vdec)
  21. goto fail;
  22. //解码器开始
  23. if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
  24. goto out;
  25. break;
  26. case AVMEDIA_TYPE_SUBTITLE:
  27. //decoder初始化
  28. decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
  29. //解码器开始
  30. if ((ret = decoder_start(&is->subdec, subtitle_thread, ffp, "ff_subtitle_dec")) < 0)
  31. goto out;
  32. break;
  33. }

The full version:

  1. /* open a given stream. Return 0 if OK */
  2. static int stream_component_open(FFPlayer *ffp, int stream_index)
  3. {
  4. VideoState *is = ffp->is;
  5. AVFormatContext *ic = is->ic;
  6. AVCodecContext *avctx;//解码器上下文
  7. AVCodec *codec = NULL;//解码器
  8. const char *forced_codec_name = NULL;
  9. AVDictionary *opts = NULL;
  10. AVDictionaryEntry *t = NULL;
  11. int sample_rate, nb_channels;
  12. int64_t channel_layout;
  13. int ret = 0;
  14. int stream_lowres = ffp->lowres;
  15. if (stream_index < 0 || stream_index >= ic->nb_streams)
  16. return -1;
  17. avctx = avcodec_alloc_context3(NULL);
  18. if (!avctx)
  19. return AVERROR(ENOMEM);
  20. //将AVCodecParameters中的变量赋值给AVCodecContext
  21. ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
  22. if (ret < 0)
  23. goto fail;
  24. av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
  25. //找到解码器
  26. codec = avcodec_find_decoder(avctx->codec_id);
  27. switch (avctx->codec_type) {
  28. case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = ffp->audio_codec_name; break;
  29. case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = ffp->subtitle_codec_name; break;
  30. case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = ffp->video_codec_name; break;
  31. default: break;
  32. }
  33. if (forced_codec_name)
  34. codec = avcodec_find_decoder_by_name(forced_codec_name);
  35. if (!codec) {
  36. if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
  37. "No codec could be found with name '%s'\n", forced_codec_name);
  38. else av_log(NULL, AV_LOG_WARNING,
  39. "No codec could be found with id %d\n", avctx->codec_id);
  40. ret = AVERROR(EINVAL);
  41. goto fail;
  42. }
  43. avctx->codec_id = codec->id;
  44. if(stream_lowres > av_codec_get_max_lowres(codec)){
  45. av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
  46. av_codec_get_max_lowres(codec));
  47. stream_lowres = av_codec_get_max_lowres(codec);
  48. }
  49. av_codec_set_lowres(avctx, stream_lowres);
  50. #if FF_API_EMU_EDGE
  51. if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
  52. #endif
  53. if (ffp->fast)
  54. avctx->flags2 |= AV_CODEC_FLAG2_FAST;
  55. #if FF_API_EMU_EDGE
  56. if(codec->capabilities & AV_CODEC_CAP_DR1)
  57. avctx->flags |= CODEC_FLAG_EMU_EDGE;
  58. #endif
  59. opts = filter_codec_opts(ffp->codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
  60. if (!av_dict_get(opts, "threads", NULL, 0))
  61. av_dict_set(&opts, "threads", "auto", 0);
  62. if (stream_lowres)
  63. av_dict_set_int(&opts, "lowres", stream_lowres, 0);
  64. if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
  65. av_dict_set(&opts, "refcounted_frames", "1", 0);
  66. if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
  67. goto fail;
  68. }
  69. if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  70. av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
  71. #ifdef FFP_MERGE
  72. ret = AVERROR_OPTION_NOT_FOUND;
  73. goto fail;
  74. #endif
  75. }
  76. is->eof = 0;
  77. ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
  78. switch (avctx->codec_type) {
  79. case AVMEDIA_TYPE_AUDIO:
  80. #if CONFIG_AVFILTER
  81. {
  82. AVFilterContext *sink;
  83. is->audio_filter_src.freq = avctx->sample_rate;
  84. is->audio_filter_src.channels = avctx->channels;
  85. is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
  86. is->audio_filter_src.fmt = avctx->sample_fmt;
  87. SDL_LockMutex(ffp->af_mutex);
  88. if ((ret = configure_audio_filters(ffp, ffp->afilters, 0)) < 0) {
  89. SDL_UnlockMutex(ffp->af_mutex);
  90. goto fail;
  91. }
  92. ffp->af_changed = 0;
  93. SDL_UnlockMutex(ffp->af_mutex);
  94. sink = is->out_audio_filter;
  95. sample_rate = av_buffersink_get_sample_rate(sink);
  96. nb_channels = av_buffersink_get_channels(sink);
  97. channel_layout = av_buffersink_get_channel_layout(sink);
  98. }
  99. #else
  100. sample_rate = avctx->sample_rate;
  101. nb_channels = avctx->channels;
  102. channel_layout = avctx->channel_layout;
  103. #endif
  104. /* prepare audio output */
  105. //audio_open方法是在做什么?
  106. if ((ret = audio_open(ffp, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
  107. goto fail;
  108. ffp_set_audio_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(avctx->codec_id));
  109. is->audio_hw_buf_size = ret;
  110. is->audio_src = is->audio_tgt;
  111. is->audio_buf_size = 0;
  112. is->audio_buf_index = 0;
  113. /* init averaging filter */
  114. is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
  115. is->audio_diff_avg_count = 0;
  116. /* since we do not have a precise anough audio FIFO fullness,
  117. we correct audio sync only if larger than this threshold */
  118. is->audio_diff_threshold = 2.0 * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec;
  119. is->audio_stream = stream_index;
  120. is->audio_st = ic->streams[stream_index];
  121. //decoder初始化
  122. decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
  123. if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
  124. is->auddec.start_pts = is->audio_st->start_time;
  125. is->auddec.start_pts_tb = is->audio_st->time_base;
  126. }
  127. //decoder启动,启动audio_thread线程
  128. if ((ret = decoder_start(&is->auddec, audio_thread, ffp, "ff_audio_dec")) < 0)
  129. goto out;
  130. SDL_AoutPauseAudio(ffp->aout, 0);
  131. break;
  132. case AVMEDIA_TYPE_VIDEO:
  133. is->video_stream = stream_index;
  134. is->video_st = ic->streams[stream_index];
  135. //async_init_decoder是一个option,默认是0
  136. if (ffp->async_init_decoder) {
  137. while (!is->initialized_decoder) {
  138. SDL_Delay(5);
  139. }
  140. if (ffp->node_vdec) {
  141. is->viddec.avctx = avctx;
  142. ret = ffpipeline_config_video_decoder(ffp->pipeline, ffp);
  143. }
  144. if (ret || !ffp->node_vdec) {
  145. decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
  146. ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
  147. if (!ffp->node_vdec)
  148. goto fail;
  149. }
  150. } else {
  151. //decoder初始化
  152. decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
  153. ffp->node_vdec = ffpipeline_open_video_decoder(ffp->pipeline, ffp);
  154. if (!ffp->node_vdec)
  155. goto fail;
  156. }
  157. //解码器开始
  158. if ((ret = decoder_start(&is->viddec, video_thread, ffp, "ff_video_dec")) < 0)
  159. goto out;
  160. is->queue_attachments_req = 1;
  161. if (ffp->max_fps >= 0) {
  162. if(is->video_st->avg_frame_rate.den && is->video_st->avg_frame_rate.num) {
  163. double fps = av_q2d(is->video_st->avg_frame_rate);
  164. SDL_ProfilerReset(&is->viddec.decode_profiler, fps + 0.5);
  165. if (fps > ffp->max_fps && fps < 130.0) {
  166. is->is_video_high_fps = 1;
  167. av_log(ffp, AV_LOG_WARNING, "fps: %lf (too high)\n", fps);
  168. } else {
  169. av_log(ffp, AV_LOG_WARNING, "fps: %lf (normal)\n", fps);
  170. }
  171. }
  172. if(is->video_st->r_frame_rate.den && is->video_st->r_frame_rate.num) {
  173. double tbr = av_q2d(is->video_st->r_frame_rate);
  174. if (tbr > ffp->max_fps && tbr < 130.0) {
  175. is->is_video_high_fps = 1;
  176. av_log(ffp, AV_LOG_WARNING, "fps: %lf (too high)\n", tbr);
  177. } else {
  178. av_log(ffp, AV_LOG_WARNING, "fps: %lf (normal)\n", tbr);
  179. }
  180. }
  181. }
  182. if (is->is_video_high_fps) {
  183. avctx->skip_frame = FFMAX(avctx->skip_frame, AVDISCARD_NONREF);
  184. avctx->skip_loop_filter = FFMAX(avctx->skip_loop_filter, AVDISCARD_NONREF);
  185. avctx->skip_idct = FFMAX(avctx->skip_loop_filter, AVDISCARD_NONREF);
  186. }
  187. break;
  188. case AVMEDIA_TYPE_SUBTITLE:
  189. if (!ffp->subtitle) break;
  190. is->subtitle_stream = stream_index;
  191. is->subtitle_st = ic->streams[stream_index];
  192. ffp_set_subtitle_codec_info(ffp, AVCODEC_MODULE_NAME, avcodec_get_name(avctx->codec_id));
  193. decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
  194. if ((ret = decoder_start(&is->subdec, subtitle_thread, ffp, "ff_subtitle_dec")) < 0)
  195. goto out;
  196. break;
  197. default:
  198. break;
  199. }
  200. goto out;
  201. fail:
  202. avcodec_free_context(&avctx);
  203. out:
  204. av_dict_free(&opts);
  205. return ret;
  206. }

Summary:

  1. Find the decoder.
  2. Initialize the decoder.
  3. Start the three decode threads audio_thread, video_thread and subtitle_thread; each keeps decoding in a loop internally (a sketch of decoder_init()/decoder_start() follows this list).
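For reference, decoder_init() and decoder_start() themselves are short. A sketch based on the ffplay-derived code in ff_ffplay.c (details may differ slightly between ijkplayer versions):

  static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
  {
      memset(d, 0, sizeof(Decoder));
      d->avctx = avctx;                        // codec context this decoder drives
      d->queue = queue;                        // PacketQueue the decode thread reads from
      d->empty_queue_cond = empty_queue_cond;  // signaled to wake the read thread when the queue runs dry
      d->start_pts = AV_NOPTS_VALUE;
  }

  static int decoder_start(Decoder *d, int (*fn)(void *), void *arg, const char *name)
  {
      packet_queue_start(d->queue);            // mark the packet queue as active
      d->decoder_tid = SDL_CreateThreadEx(&d->_decoder_tid, fn, arg, name);  // spawn ff_audio_dec / ff_video_dec / ff_subtitle_dec
      if (!d->decoder_tid) {
          av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
          return AVERROR(ENOMEM);
      }
      return 0;
  }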

The next three sections analyze these three decode threads one by one.

2 The subtitle decode thread: subtitle_thread

The subtitle decode thread is the simplest, so let's see how it works first; that will make the remaining two decode threads easier to understand.

  1. static int subtitle_thread(void *arg)
  2. {
  3. FFPlayer *ffp = arg;
  4. VideoState *is = ffp->is;
  5. Frame *sp;
  6. int got_subtitle;
  7. double pts;
  8. for (;;) {
  9. //阻塞方法,阻塞,直到能取出windex(写下标)下标下的Frame
  10. if (!(sp = frame_queue_peek_writable(&is->subpq)))
  11. return 0;
  12. //解码,填充Frame中的字幕数据
  13. if ((got_subtitle = decoder_decode_frame(ffp, &is->subdec, NULL, &sp->sub)) < 0)
  14. break;
  15. pts = 0;
  16. #ifdef FFP_MERGE
  17. if (got_subtitle && sp->sub.format == 0) {
  18. #else
  19. if (got_subtitle) {
  20. #endif
  21. if (sp->sub.pts != AV_NOPTS_VALUE)
  22. pts = sp->sub.pts / (double)AV_TIME_BASE;
  23. sp->pts = pts;
  24. sp->serial = is->subdec.pkt_serial;
  25. sp->width = is->subdec.avctx->width;
  26. sp->height = is->subdec.avctx->height;
  27. sp->uploaded = 0;
  28. /* now we can update the picture count */
  29. //后移字幕FrameQueue的windex
  30. frame_queue_push(&is->subpq);
  31. #ifdef FFP_MERGE
  32. } else if (got_subtitle) {
  33. avsubtitle_free(&sp->sub);
  34. #endif
  35. }
  36. }
  37. return 0;
  38. }

The decoded data sp must be kept for later rendering, which means it has to be enqueued. Because FrameQueue is backed by an array, the enqueue operation does not take a newly created Frame as a parameter: it is enough to fill in the slot at the write index correctly and then advance the write index by one position; that alone counts as a successful enqueue:

  1. static void frame_queue_push(FrameQueue *f)
  2. {
  3. //当使用数组作为队列的时候,只需要移动数组中的下标到有效下标,就表示入队了,并不需要外部再传一个参数进来。
  4. //如果到了尾下标,则windex回到起点。这是用数组作为循环队列的必要操作。
  5. if (++f->windex == f->max_size)
  6. f->windex = 0;
  7. SDL_LockMutex(f->mutex);
  8. f->size++;
  9. SDL_CondSignal(f->cond);
  10. SDL_UnlockMutex(f->mutex);
  11. }
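Its blocking counterpart frame_queue_peek_writable(), used at the top of subtitle_thread, waits until the slot at windex is free. Roughly (a sketch based on the ffplay-derived code; the fields match the FrameQueue struct shown in section 5):

  static Frame *frame_queue_peek_writable(FrameQueue *f)
  {
      /* block until there is room for one more frame */
      SDL_LockMutex(f->mutex);
      while (f->size >= f->max_size &&
             !f->pktq->abort_request) {
          SDL_CondWait(f->cond, f->mutex);   // woken when the reader consumes a frame
      }
      SDL_UnlockMutex(f->mutex);

      if (f->pktq->abort_request)            // the player is shutting down
          return NULL;

      return &f->queue[f->windex];           // slot the caller fills, committed by frame_queue_push()
  }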

Next, let's look at decoder_decode_frame().

Note: this article is based on ijkplayer 0.8.0. This function's decode logic, and the FFmpeg functions it calls, differ somewhat from earlier versions of ijkplayer; from what I can see, the decoder_decode_frame() logic in this form was modified and shipped in version 0.8.7.

2.1 decoder_decode_frame(), since version 0.8.7

First, the function in the ijkplayer 0.8.0 code that this article is based on:

  1. static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
  2. int ret = AVERROR(EAGAIN);
  3. for (;;) {
  4. AVPacket pkt;
  5. if (d->queue->serial == d->pkt_serial) {
  6. do {
  7. if (d->queue->abort_request)
  8. return -1;
  9. switch (d->avctx->codec_type) {
  10. case AVMEDIA_TYPE_VIDEO:
  11. //从解码器中接收frame数据。当返回0表示成功
  12. ret = avcodec_receive_frame(d->avctx, frame);
  13. if (ret >= 0) {
  14. ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
  15. if (ffp->decoder_reorder_pts == -1) {
  16. frame->pts = frame->best_effort_timestamp;
  17. } else if (!ffp->decoder_reorder_pts) {
  18. frame->pts = frame->pkt_dts;
  19. }
  20. }
  21. break;
  22. case AVMEDIA_TYPE_AUDIO:
  23. //从解码器中接收frame数据。当返回0表示成功
  24. ret = avcodec_receive_frame(d->avctx, frame);
  25. if (ret >= 0) {
  26. AVRational tb = (AVRational){1, frame->sample_rate};
  27. if (frame->pts != AV_NOPTS_VALUE)
  28. frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
  29. else if (d->next_pts != AV_NOPTS_VALUE)
  30. frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
  31. if (frame->pts != AV_NOPTS_VALUE) {
  32. d->next_pts = frame->pts + frame->nb_samples;
  33. d->next_pts_tb = tb;
  34. }
  35. }
  36. break;
  37. default:
  38. break;
  39. }
  40. if (ret == AVERROR_EOF) {
  41. d->finished = d->pkt_serial;
  42. avcodec_flush_buffers(d->avctx);
  43. return 0;
  44. }
  45. //如果返回值>=0,表示avcodec_receive_frame函数解码成功,那么从外部函数decoder_decode_frame返回1。
  46. //视频,音频,字幕的解码都从这里返回,只要解码成功,都去读取ret然后返回给外面处理。
  47. if (ret >= 0)
  48. return 1;
  49. } while (ret != AVERROR(EAGAIN));
  50. }
  51. do {
  52. if (d->queue->nb_packets == 0)
  53. SDL_CondSignal(d->empty_queue_cond);
  54. if (d->packet_pending) {
  55. av_packet_move_ref(&pkt, &d->pkt);
  56. d->packet_pending = 0;
  57. } else {
  58. //从packet_queue中取出pkt,当packet_queue由于网络差等原因,没有足够的包可以取出时,则阻塞,直到有包能取出。
  59. if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
  60. return -1;
  61. }
  62. } while (d->queue->serial != d->pkt_serial);
  63. if (pkt.data == flush_pkt.data) {
  64. avcodec_flush_buffers(d->avctx);
  65. d->finished = 0;
  66. d->next_pts = d->start_pts;
  67. d->next_pts_tb = d->start_pts_tb;
  68. } else {
  69. if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
  70. int got_frame = 0;
  71. //解码字幕
  72. ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
  73. if (ret < 0) {
  74. ret = AVERROR(EAGAIN);
  75. } else {
  76. if (got_frame && !pkt.data) {
  77. d->packet_pending = 1;
  78. av_packet_move_ref(&d->pkt, &pkt);
  79. }
  80. ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
  81. }
  82. } else {
  83. //往解码器里面发送包数据pkt
  84. if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
  85. av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
  86. d->packet_pending = 1;
  87. av_packet_move_ref(&d->pkt, &pkt);
  88. }
  89. }
  90. av_packet_unref(&pkt);
  91. }
  92. }
  93. }

2.2 decoder_decode_frame(), before version 0.8.7

  1. static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
  2. int got_frame = 0;
  3. do {
  4. int ret = -1;
  5. if (d->queue->abort_request)
  6. return -1;
  7. if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
  8. AVPacket pkt;
  9. do {
  10. if (d->queue->nb_packets == 0)
  11. SDL_CondSignal(d->empty_queue_cond);
  12. //从packet_queue中获取pkt
  13. if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
  14. return -1;
  15. if (pkt.data == flush_pkt.data) {
  16. avcodec_flush_buffers(d->avctx);
  17. d->finished = 0;
  18. d->next_pts = d->start_pts;
  19. d->next_pts_tb = d->start_pts_tb;
  20. }
  21. } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
  22. av_packet_unref(&d->pkt);
  23. //将包pkt传递给解码器d
  24. d->pkt_temp = d->pkt = pkt;
  25. d->packet_pending = 1;
  26. }
  27. switch (d->avctx->codec_type) {
  28. case AVMEDIA_TYPE_VIDEO: {
  29. //调用ffmpeg方法:avcodec_decode_video2()来解码。
  30. ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
  31. if (got_frame) {
  32. ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
  33. if (ffp->decoder_reorder_pts == -1) {
  34. frame->pts = av_frame_get_best_effort_timestamp(frame);
  35. } else if (!ffp->decoder_reorder_pts) {
  36. frame->pts = frame->pkt_dts;
  37. }
  38. }
  39. }
  40. break;
  41. case AVMEDIA_TYPE_AUDIO:
  42. //调用ffmpeg方法:avcodec_decode_audio4()来解码
  43. ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
  44. if (got_frame) {
  45. AVRational tb = (AVRational){1, frame->sample_rate};
  46. if (frame->pts != AV_NOPTS_VALUE)
  47. frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
  48. else if (d->next_pts != AV_NOPTS_VALUE)
  49. frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
  50. if (frame->pts != AV_NOPTS_VALUE) {
  51. d->next_pts = frame->pts + frame->nb_samples;
  52. d->next_pts_tb = tb;
  53. }
  54. }
  55. break;
  56. case AVMEDIA_TYPE_SUBTITLE:
  57. ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
  58. break;
  59. default:
  60. break;
  61. }
  62. if (ret < 0) {
  63. d->packet_pending = 0;
  64. } else {
  65. d->pkt_temp.dts =
  66. d->pkt_temp.pts = AV_NOPTS_VALUE;
  67. if (d->pkt_temp.data) {
  68. if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
  69. ret = d->pkt_temp.size;
  70. d->pkt_temp.data += ret;
  71. d->pkt_temp.size -= ret;
  72. if (d->pkt_temp.size <= 0)
  73. d->packet_pending = 0;
  74. } else {
  75. if (!got_frame) {
  76. d->packet_pending = 0;
  77. d->finished = d->pkt_serial;
  78. }
  79. }
  80. }
  81. } while (!got_frame && !d->finished);
  82. return got_frame;
  83. }

Now, back in the newer decoder_decode_frame(): the decoder first has to read packet data from the PacketQueue and then feed it to the FFmpeg decoder. The function that reads the already-buffered packets from the packet queue is:

  1. static int packet_queue_get_or_buffering(FFPlayer *ffp, PacketQueue *q, AVPacket *pkt, int *serial, int *finished)
  2. {
  3. assert(finished);
  4. if (!ffp->packet_buffering)
  5. return packet_queue_get(q, pkt, 1, serial);
  6. while (1) {
  7. int new_packet = packet_queue_get(q, pkt, 0, serial);
  8. if (new_packet < 0)
  9. return -1;
  10. else if (new_packet == 0) {
  11. //=0表示no packet,因此要再取
  12. if (q->is_buffer_indicator && !*finished)
  13. ffp_toggle_buffering(ffp, 1);
  14. //阻塞,直到从包队列中取出队列头的包,并填充到pkt
  15. new_packet = packet_queue_get(q, pkt, 1, serial);
  16. if (new_packet < 0)
  17. return -1;
  18. }
  19. if (*finished == *serial) {
  20. av_packet_unref(pkt);
  21. continue;
  22. }
  23. else
  24. break;
  25. }
  26. return 1;
  27. }

In other words, reading a packet pkt can block: only after the read thread described in section 3.6.4 has read and demuxed a packet and put it into the PacketQueue does this call return, and only then can the packet be fed to the decoder.
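packet_queue_get() itself is an ordinary mutex/condition-variable consumer. A simplified sketch of it (following the ffplay layout; ijkplayer additionally recycles the list nodes and tracks buffering statistics, which is omitted here):

  static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
  {
      MyAVPacketList *pkt1;
      int ret;

      SDL_LockMutex(q->mutex);
      for (;;) {
          if (q->abort_request) {          // the player is being torn down
              ret = -1;
              break;
          }

          pkt1 = q->first_pkt;
          if (pkt1) {                      // a packet is available: unlink and return it
              q->first_pkt = pkt1->next;
              if (!q->first_pkt)
                  q->last_pkt = NULL;
              q->nb_packets--;
              q->size -= pkt1->pkt.size + sizeof(*pkt1);
              q->duration -= pkt1->pkt.duration;
              *pkt = pkt1->pkt;
              if (serial)
                  *serial = pkt1->serial;
              av_free(pkt1);
              ret = 1;
              break;
          } else if (!block) {             // non-blocking call: report "no packet yet"
              ret = 0;
              break;
          } else {                         // blocking call: sleep until the read thread puts a packet
              SDL_CondWait(q->cond, q->mutex);
          }
      }
      SDL_UnlockMutex(q->mutex);
      return ret;
  }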

Summary

  1. Since version 0.8.7, decoder_decode_frame() relies on two FFmpeg functions to do the decoding (see the sketch after this list):
    1. int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); sends packet data into the decoder.
    2. int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); reads decoded frame data out of the decoder.
  2. Before 0.8.7, audio and video each used a separate decode function:

    1. Video:

      1. //deprecated
      2. int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
      3. int *got_picture_ptr,
      4. const AVPacket *avpkt);
    2. Audio:

      1. //deprecated
      2. int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
      3. int *got_frame_ptr, const AVPacket *avpkt)
  3. The subtitle decode function:

    1. int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
    2. int *got_sub_ptr,
    3. AVPacket *avpkt);
  4. Looking at the subtitle decode flow, the general decoding logic is:

    1. decoder_decode_frame() is called in a loop; inside it, a switch statement handles the video, audio and subtitle streams separately. The audio decode thread audio_thread and the video decode thread video_thread call this same function.
    2. Before decoding, a packet is read from the PacketQueue. Where does that data come from? From av_read_frame(ic, pkt), the FFmpeg call made in read_thread().
    3. To decode, the packet pkt is first sent to the decoder, and the decoded frame is then read back out.
    4. The frame is then pushed into the FrameQueue, where the renderer will later read it.
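Stripped of ijkplayer's queues and serial handling, the core of the new decode path is FFmpeg's send/receive API. A minimal generic sketch of that pattern (not ijkplayer code):

  /* Minimal send/receive decode loop (generic FFmpeg usage, error handling trimmed) */
  static int decode_one_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
  {
      int ret = avcodec_send_packet(avctx, pkt);        // feed one compressed packet (NULL flushes the decoder)
      if (ret < 0)
          return ret;

      for (;;) {
          ret = avcodec_receive_frame(avctx, frame);    // pull decoded frames out
          if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
              return 0;                                 // needs more input, or fully drained
          if (ret < 0)
              return ret;                               // a real decode error

          /* ... use frame here, e.g. push it into a FrameQueue ... */
          av_frame_unref(frame);
      }
  }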

3 The audio decode thread: audio_thread

  1. static int audio_thread(void *arg)
  2. {
  3. FFPlayer *ffp = arg;
  4. VideoState *is = ffp->is;
  5. AVFrame *frame = av_frame_alloc();//分配一个AVFrame
  6. Frame *af;//从FrameQueue sampq中取出来的,要写入数据的Frame
  7. #if CONFIG_AVFILTER
  8. int last_serial = -1;
  9. int64_t dec_channel_layout;
  10. int reconfigure;
  11. #endif
  12. int got_frame = 0;
  13. AVRational tb;//分子分母对(ffmpeg为了准确性和避免转换,定义了一个分子分母对来取代float)
  14. int ret = 0;
  15. int audio_accurate_seek_fail = 0;
  16. int64_t audio_seek_pos = 0;
  17. double frame_pts = 0;
  18. double audio_clock = 0;
  19. int64_t now = 0;
  20. double samples_duration = 0;
  21. int64_t deviation = 0;
  22. int64_t deviation2 = 0;
  23. int64_t deviation3 = 0;
  24. if (!frame)
  25. return AVERROR(ENOMEM);
  26. do {
  27. ffp_audio_statistic_l(ffp);
  28. //音频解码
  29. if ((got_frame = decoder_decode_frame(ffp, &is->auddec, frame, NULL)) < 0)
  30. goto the_end;
  31. //当解码成功
  32. if (got_frame) {
  33. tb = (AVRational){1, frame->sample_rate};
  34. //处理accurate_seek
  35. if (ffp->enable_accurate_seek && is->audio_accurate_seek_req && !is->seek_req) {
  36. frame_pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
  37. now = av_gettime_relative() / 1000;
  38. if (!isnan(frame_pts)) {
  39. samples_duration = (double) frame->nb_samples / frame->sample_rate;
  40. audio_clock = frame_pts + samples_duration;
  41. is->accurate_seek_aframe_pts = audio_clock * 1000 * 1000;
  42. audio_seek_pos = is->seek_pos;
  43. deviation = llabs((int64_t)(audio_clock * 1000 * 1000) - is->seek_pos);
  44. if ((audio_clock * 1000 * 1000 < is->seek_pos ) || deviation > MAX_DEVIATION) {
  45. if (is->drop_aframe_count == 0) {
  46. SDL_LockMutex(is->accurate_seek_mutex);
  47. if (is->accurate_seek_start_time <= 0 && (is->video_stream < 0 || is->video_accurate_seek_req)) {
  48. is->accurate_seek_start_time = now;
  49. }
  50. SDL_UnlockMutex(is->accurate_seek_mutex);
  51. av_log(NULL, AV_LOG_INFO, "audio accurate_seek start, is->seek_pos=%lld, audio_clock=%lf, is->accurate_seek_start_time = %lld\n", is->seek_pos, audio_clock, is->accurate_seek_start_time);
  52. }
  53. is->drop_aframe_count++;
  54. while (is->video_accurate_seek_req && !is->abort_request) {
  55. int64_t vpts = is->accurate_seek_vframe_pts;
  56. deviation2 = vpts - audio_clock * 1000 * 1000;
  57. deviation3 = vpts - is->seek_pos;
  58. if (deviation2 > -100 * 1000 && deviation3 < 0) {
  59. break;
  60. } else {
  61. av_usleep(20 * 1000);
  62. }
  63. now = av_gettime_relative() / 1000;
  64. if ((now - is->accurate_seek_start_time) > ffp->accurate_seek_timeout) {
  65. break;
  66. }
  67. }
  68. if(!is->video_accurate_seek_req && is->video_stream >= 0 && audio_clock * 1000 * 1000 > is->accurate_seek_vframe_pts) {
  69. audio_accurate_seek_fail = 1;
  70. } else {
  71. now = av_gettime_relative() / 1000;
  72. if ((now - is->accurate_seek_start_time) <= ffp->accurate_seek_timeout) {
  73. av_frame_unref(frame);
  74. continue; // drop some old frame when do accurate seek
  75. } else {
  76. audio_accurate_seek_fail = 1;
  77. }
  78. }
  79. } else {
  80. if (audio_seek_pos == is->seek_pos) {
  81. av_log(NULL, AV_LOG_INFO, "audio accurate_seek is ok, is->drop_aframe_count=%d, audio_clock = %lf\n", is->drop_aframe_count, audio_clock);
  82. is->drop_aframe_count = 0;
  83. SDL_LockMutex(is->accurate_seek_mutex);
  84. is->audio_accurate_seek_req = 0;
  85. SDL_CondSignal(is->video_accurate_seek_cond);
  86. if (audio_seek_pos == is->seek_pos && is->video_accurate_seek_req && !is->abort_request) {
  87. SDL_CondWaitTimeout(is->audio_accurate_seek_cond, is->accurate_seek_mutex, ffp->accurate_seek_timeout);
  88. } else {
  89. ffp_notify_msg2(ffp, FFP_MSG_ACCURATE_SEEK_COMPLETE, (int)(audio_clock * 1000));
  90. }
  91. if (audio_seek_pos != is->seek_pos && !is->abort_request) {
  92. is->audio_accurate_seek_req = 1;
  93. SDL_UnlockMutex(is->accurate_seek_mutex);
  94. av_frame_unref(frame);
  95. continue;
  96. }
  97. SDL_UnlockMutex(is->accurate_seek_mutex);
  98. }
  99. }
  100. } else {
  101. audio_accurate_seek_fail = 1;
  102. }
  103. if (audio_accurate_seek_fail) {
  104. av_log(NULL, AV_LOG_INFO, "audio accurate_seek is error, is->drop_aframe_count=%d, now = %lld, audio_clock = %lf\n", is->drop_aframe_count, now, audio_clock);
  105. is->drop_aframe_count = 0;
  106. SDL_LockMutex(is->accurate_seek_mutex);
  107. is->audio_accurate_seek_req = 0;
  108. SDL_CondSignal(is->video_accurate_seek_cond);
  109. if (is->video_accurate_seek_req && !is->abort_request) {
  110. SDL_CondWaitTimeout(is->audio_accurate_seek_cond, is->accurate_seek_mutex, ffp->accurate_seek_timeout);
  111. } else {
  112. ffp_notify_msg2(ffp, FFP_MSG_ACCURATE_SEEK_COMPLETE, (int)(audio_clock * 1000));
  113. }
  114. SDL_UnlockMutex(is->accurate_seek_mutex);
  115. }
  116. is->accurate_seek_start_time = 0;
  117. audio_accurate_seek_fail = 0;
  118. }
  119. #if CONFIG_AVFILTER
  120. dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
  121. reconfigure =
  122. cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
  123. frame->format, frame->channels) ||
  124. is->audio_filter_src.channel_layout != dec_channel_layout ||
  125. is->audio_filter_src.freq != frame->sample_rate ||
  126. is->auddec.pkt_serial != last_serial ||
  127. ffp->af_changed;
  128. if (reconfigure) {
  129. SDL_LockMutex(ffp->af_mutex);
  130. ffp->af_changed = 0;
  131. char buf1[1024], buf2[1024];
  132. av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
  133. av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
  134. av_log(NULL, AV_LOG_DEBUG,
  135. "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
  136. is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
  137. frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
  138. is->audio_filter_src.fmt = frame->format;
  139. is->audio_filter_src.channels = frame->channels;
  140. is->audio_filter_src.channel_layout = dec_channel_layout;
  141. is->audio_filter_src.freq = frame->sample_rate;
  142. last_serial = is->auddec.pkt_serial;
  143. if ((ret = configure_audio_filters(ffp, ffp->afilters, 1)) < 0) {
  144. SDL_UnlockMutex(ffp->af_mutex);
  145. goto the_end;
  146. }
  147. SDL_UnlockMutex(ffp->af_mutex);
  148. }
  149. if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
  150. goto the_end;
  151. while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
  152. tb = av_buffersink_get_time_base(is->out_audio_filter);
  153. #endif
  154. if (!(af = frame_queue_peek_writable(&is->sampq)))//如果sampq无法写入,则失败
  155. goto the_end;
  156. af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
  157. af->pos = frame->pkt_pos;
  158. af->serial = is->auddec.pkt_serial;
  159. af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
  160. //Move everything contained in src to dst and reset src.将解码出来的AVFrame传给af->frame
  161. av_frame_move_ref(af->frame, frame);
  162. //将af->frame入队
  163. frame_queue_push(&is->sampq);
  164. #if CONFIG_AVFILTER
  165. if (is->audioq.serial != is->auddec.pkt_serial)
  166. break;
  167. }
  168. if (ret == AVERROR_EOF)
  169. is->auddec.finished = is->auddec.pkt_serial;
  170. #endif
  171. }
  172. } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
  173. the_end:
  174. #if CONFIG_AVFILTER
  175. avfilter_graph_free(&is->agraph);
  176. #endif
  177. av_frame_free(&frame);
  178. return ret;
  179. }

For audio decoding I won't analyze the accurate-seek handling that follows decoding for now; apart from that it is essentially the same as subtitle decoding, so there is not much more to say here.

4 The video decode thread: video_thread

Finally, we get to video decoding…

  1. static int video_thread(void *arg)
  2. {
  3. FFPlayer *ffp = (FFPlayer *)arg;
  4. int ret = 0;
  5. //如果node_vdec不为null。
  6. if (ffp->node_vdec) {
  7. //调用解码器的解码方法,进入循环
  8. ret = ffpipenode_run_sync(ffp->node_vdec);
  9. }
  10. return ret;
  11. }

This ultimately ends up in the IJKFF_Pipenode's func_run_sync() callback.
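ffpipenode_run_sync() is only a thin dispatcher; a sketch of it (the exact file layout may differ between versions):

  int ffpipenode_run_sync(IJKFF_Pipenode *node)
  {
      return node->func_run_sync(node);   // dispatch to the concrete pipeline node's implementation
  }

For the ffplay software-decode node, func_run_sync() ends up in ffplay_video_thread():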

  1. static int ffplay_video_thread(void *arg)
  2. {
  3. FFPlayer *ffp = arg;
  4. VideoState *is = ffp->is;
  5. AVFrame *frame = av_frame_alloc();//创建一个新的AVFrame
  6. double pts;
  7. double duration;
  8. int ret;
  9. AVRational tb = is->video_st->time_base;
  10. AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
  11. int64_t dst_pts = -1;
  12. int64_t last_dst_pts = -1;
  13. int retry_convert_image = 0;
  14. int convert_frame_count = 0;
  15. #if CONFIG_AVFILTER
  16. AVFilterGraph *graph = avfilter_graph_alloc();
  17. AVFilterContext *filt_out = NULL, *filt_in = NULL;
  18. int last_w = 0;
  19. int last_h = 0;
  20. enum AVPixelFormat last_format = -2;
  21. int last_serial = -1;
  22. int last_vfilter_idx = 0;
  23. if (!graph) {
  24. av_frame_free(&frame);
  25. return AVERROR(ENOMEM);
  26. }
  27. #else
  28. ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, ffp_get_video_rotate_degrees(ffp));
  29. #endif
  30. if (!frame) {
  31. #if CONFIG_AVFILTER
  32. avfilter_graph_free(&graph);
  33. #endif
  34. return AVERROR(ENOMEM);
  35. }
  36. //开启无限循环,无限地去从packet_queue中拿取pkt来解码。
  37. for (;;) {
  38. ret = get_video_frame(ffp, frame);//解码,并将解码后的帧数据存放在frame中
  39. if (ret < 0)
  40. goto the_end;
  41. if (!ret)
  42. continue;
  43. if (ffp->get_frame_mode) {
  44. if (!ffp->get_img_info || ffp->get_img_info->count <= 0) {
  45. av_frame_unref(frame);
  46. continue;
  47. }
  48. last_dst_pts = dst_pts;
  49. if (dst_pts < 0) {
  50. dst_pts = ffp->get_img_info->start_time;
  51. } else {
  52. dst_pts += (ffp->get_img_info->end_time - ffp->get_img_info->start_time) / (ffp->get_img_info->num - 1);
  53. }
  54. pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
  55. pts = pts * 1000;
  56. if (pts >= dst_pts) {
  57. while (retry_convert_image <= MAX_RETRY_CONVERT_IMAGE) {
  58. ret = convert_image(ffp, frame, (int64_t)pts, frame->width, frame->height);
  59. if (!ret) {
  60. convert_frame_count++;
  61. break;
  62. }
  63. retry_convert_image++;
  64. av_log(NULL, AV_LOG_ERROR, "convert image error retry_convert_image = %d\n", retry_convert_image);
  65. }
  66. retry_convert_image = 0;
  67. if (ret || ffp->get_img_info->count <= 0) {
  68. if (ret) {
  69. av_log(NULL, AV_LOG_ERROR, "convert image abort ret = %d\n", ret);
  70. ffp_notify_msg3(ffp, FFP_MSG_GET_IMG_STATE, 0, ret);
  71. } else {
  72. av_log(NULL, AV_LOG_INFO, "convert image complete convert_frame_count = %d\n", convert_frame_count);
  73. }
  74. goto the_end;
  75. }
  76. } else {
  77. dst_pts = last_dst_pts;
  78. }
  79. av_frame_unref(frame);
  80. continue;
  81. }
  82. //省略了AV_FILTER部分的代码
  83. duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
  84. pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
  85. //将frame入队到pictq中,来让渲染线程读取。
  86. ret = queue_picture(ffp, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
  87. av_frame_unref(frame);
  88. if (ret < 0)
  89. goto the_end;
  90. }
  91. the_end:
  92. #if CONFIG_AVFILTER
  93. avfilter_graph_free(&graph);
  94. #endif
  95. av_log(NULL, AV_LOG_INFO, "convert image convert_frame_count = %d\n", convert_frame_count);
  96. av_frame_free(&frame);
  97. return 0;
  98. }

Simplified:

  1. static int ffplay_video_thread(void *arg){
  2. for(;;){
  3. ret = get_video_frame(ffp, frame);//解码,并将解码后的帧数据存放在frame中
  4. //将frame入队到pictq中,来让渲染线程读取。
  5. ret = queue_picture(ffp, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
  6. }
  7. }

Now look at the decode function:

  1. static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
  2. {
  3. VideoState *is = ffp->is;
  4. int got_picture;
  5. ffp_video_statistic_l(ffp);
  6. //解码,并将视频帧数据填充到frame中,可能阻塞
  7. if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
  8. return -1;
  9. if (got_picture) {
  10. double dpts = NAN;
  11. if (frame->pts != AV_NOPTS_VALUE)
  12. dpts = av_q2d(is->video_st->time_base) * frame->pts;
  13. frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
  14. if (ffp->framedrop>0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
  15. ffp->stat.decode_frame_count++;
  16. if (frame->pts != AV_NOPTS_VALUE) {
  17. double diff = dpts - get_master_clock(is);
  18. if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
  19. diff - is->frame_last_filter_delay < 0 &&
  20. is->viddec.pkt_serial == is->vidclk.serial &&
  21. is->videoq.nb_packets) {
  22. is->frame_drops_early++;
  23. is->continuous_frame_drops_early++;
  24. if (is->continuous_frame_drops_early > ffp->framedrop) {
  25. is->continuous_frame_drops_early = 0;
  26. } else {
  27. ffp->stat.drop_frame_count++;
  28. ffp->stat.drop_frame_rate = (float)(ffp->stat.drop_frame_count) / (float)(ffp->stat.decode_frame_count);
  29. av_frame_unref(frame);
  30. got_picture = 0;
  31. }
  32. }
  33. }
  34. }
  35. }
  36. return got_picture;
  37. }

This again uses the same decode function as subtitle and audio decoding, decoder_decode_frame(), so I won't go over it again.

  1. //将src_frame入队到pictq中,让渲染线程渲染。
  2. static int queue_picture(FFPlayer *ffp, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
  3. {
  4. VideoState *is = ffp->is;
  5. Frame *vp;
  6. int video_accurate_seek_fail = 0;
  7. int64_t video_seek_pos = 0;
  8. int64_t now = 0;
  9. int64_t deviation = 0;
  10. int64_t deviation2 = 0;
  11. int64_t deviation3 = 0;
  12. //处理精确seek
  13. if (ffp->enable_accurate_seek && is->video_accurate_seek_req && !is->seek_req) {
  14. if (!isnan(pts)) {
  15. video_seek_pos = is->seek_pos;
  16. is->accurate_seek_vframe_pts = pts * 1000 * 1000;
  17. deviation = llabs((int64_t)(pts * 1000 * 1000) - is->seek_pos);
  18. if ((pts * 1000 * 1000 < is->seek_pos) || deviation > MAX_DEVIATION) {
  19. now = av_gettime_relative() / 1000;
  20. if (is->drop_vframe_count == 0) {
  21. SDL_LockMutex(is->accurate_seek_mutex);
  22. if (is->accurate_seek_start_time <= 0 && (is->audio_stream < 0 || is->audio_accurate_seek_req)) {
  23. is->accurate_seek_start_time = now;
  24. }
  25. SDL_UnlockMutex(is->accurate_seek_mutex);
  26. av_log(NULL, AV_LOG_INFO, "video accurate_seek start, is->seek_pos=%lld, pts=%lf, is->accurate_seek_time = %lld\n", is->seek_pos, pts, is->accurate_seek_start_time);
  27. }
  28. is->drop_vframe_count++;
  29. while (is->audio_accurate_seek_req && !is->abort_request) {
  30. int64_t apts = is->accurate_seek_aframe_pts ;
  31. deviation2 = apts - pts * 1000 * 1000;
  32. deviation3 = apts - is->seek_pos;
  33. if (deviation2 > -100 * 1000 && deviation3 < 0) {
  34. break;
  35. } else {
  36. av_usleep(20 * 1000);
  37. }
  38. now = av_gettime_relative() / 1000;
  39. if ((now - is->accurate_seek_start_time) > ffp->accurate_seek_timeout) {
  40. break;
  41. }
  42. }
  43. if ((now - is->accurate_seek_start_time) <= ffp->accurate_seek_timeout) {
  44. return 1; // drop some old frame when do accurate seek
  45. } else {
  46. av_log(NULL, AV_LOG_WARNING, "video accurate_seek is error, is->drop_vframe_count=%d, now = %lld, pts = %lf\n", is->drop_vframe_count, now, pts);
  47. video_accurate_seek_fail = 1; // if KEY_FRAME interval too big, disable accurate seek
  48. }
  49. } else {
  50. av_log(NULL, AV_LOG_INFO, "video accurate_seek is ok, is->drop_vframe_count =%d, is->seek_pos=%lld, pts=%lf\n", is->drop_vframe_count, is->seek_pos, pts);
  51. if (video_seek_pos == is->seek_pos) {
  52. is->drop_vframe_count = 0;
  53. SDL_LockMutex(is->accurate_seek_mutex);
  54. is->video_accurate_seek_req = 0;
  55. SDL_CondSignal(is->audio_accurate_seek_cond);
  56. if (video_seek_pos == is->seek_pos && is->audio_accurate_seek_req && !is->abort_request) {
  57. SDL_CondWaitTimeout(is->video_accurate_seek_cond, is->accurate_seek_mutex, ffp->accurate_seek_timeout);
  58. } else {
  59. ffp_notify_msg2(ffp, FFP_MSG_ACCURATE_SEEK_COMPLETE, (int)(pts * 1000));
  60. }
  61. if (video_seek_pos != is->seek_pos && !is->abort_request) {
  62. is->video_accurate_seek_req = 1;
  63. SDL_UnlockMutex(is->accurate_seek_mutex);
  64. return 1;
  65. }
  66. SDL_UnlockMutex(is->accurate_seek_mutex);
  67. }
  68. }
  69. } else {
  70. video_accurate_seek_fail = 1;
  71. }
  72. if (video_accurate_seek_fail) {
  73. is->drop_vframe_count = 0;
  74. SDL_LockMutex(is->accurate_seek_mutex);
  75. is->video_accurate_seek_req = 0;
  76. SDL_CondSignal(is->audio_accurate_seek_cond);
  77. if (is->audio_accurate_seek_req && !is->abort_request) {
  78. SDL_CondWaitTimeout(is->video_accurate_seek_cond, is->accurate_seek_mutex, ffp->accurate_seek_timeout);
  79. } else {
  80. if (!isnan(pts)) {
  81. ffp_notify_msg2(ffp, FFP_MSG_ACCURATE_SEEK_COMPLETE, (int)(pts * 1000));
  82. } else {
  83. ffp_notify_msg2(ffp, FFP_MSG_ACCURATE_SEEK_COMPLETE, 0);
  84. }
  85. }
  86. SDL_UnlockMutex(is->accurate_seek_mutex);
  87. }
  88. is->accurate_seek_start_time = 0;
  89. video_accurate_seek_fail = 0;
  90. is->accurate_seek_vframe_pts = 0;
  91. }
  92. #if defined(DEBUG_SYNC)
  93. printf("frame_type=%c pts=%0.3f\n",
  94. av_get_picture_type_char(src_frame->pict_type), pts);
  95. #endif
  96. if (!(vp = frame_queue_peek_writable(&is->pictq)))
  97. return -1;
  98. vp->sar = src_frame->sample_aspect_ratio;
  99. #ifdef FFP_MERGE
  100. vp->uploaded = 0;
  101. #endif
  102. /* alloc or resize hardware picture buffer */
  103. if (!vp->bmp || !vp->allocated ||
  104. vp->width != src_frame->width ||
  105. vp->height != src_frame->height ||
  106. vp->format != src_frame->format) {
  107. if (vp->width != src_frame->width || vp->height != src_frame->height)
  108. ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, src_frame->width, src_frame->height);
  109. vp->allocated = 0;
  110. vp->width = src_frame->width;
  111. vp->height = src_frame->height;
  112. vp->format = src_frame->format;
  113. /* the allocation must be done in the main thread to avoid
  114. locking problems. */
  115. alloc_picture(ffp, src_frame->format);
  116. if (is->videoq.abort_request)
  117. return -1;
  118. }
  119. /* if the frame is not skipped, then display it */
  120. if (vp->bmp) {
  121. /* get a pointer on the bitmap */
  122. SDL_VoutLockYUVOverlay(vp->bmp);//加锁
  123. #ifdef FFP_MERGE
  124. #if CONFIG_AVFILTER
  125. // FIXME use direct rendering
  126. av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
  127. src_frame->format, vp->width, vp->height);
  128. #else
  129. // sws_getCachedContext(...);
  130. #endif
  131. #endif
  132. // FIXME: set swscale options
  133. //将src_frame中的帧数据填充到vp->bmp中,这个vp->bmp其实指的是bitmap?
  134. if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
  135. av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
  136. exit(1);
  137. }
  138. /* update the bitmap content */
  139. SDL_VoutUnlockYUVOverlay(vp->bmp);//解锁
  140. vp->pts = pts;
  141. vp->duration = duration;
  142. vp->pos = pos;
  143. vp->serial = serial;
  144. vp->sar = src_frame->sample_aspect_ratio;
  145. vp->bmp->sar_num = vp->sar.num;
  146. vp->bmp->sar_den = vp->sar.den;
  147. #ifdef FFP_MERGE
  148. av_frame_move_ref(vp->frame, src_frame);
  149. #endif
  150. frame_queue_push(&is->pictq);
  151. if (!is->viddec.first_frame_decoded) {
  152. ALOGD("Video: first frame decoded\n");
  153. ffp_notify_msg1(ffp, FFP_MSG_VIDEO_DECODED_START);
  154. is->viddec.first_frame_decoded_time = SDL_GetTickHR();
  155. is->viddec.first_frame_decoded = 1;
  156. }
  157. }
  158. return 0;
  159. }

The key thing to look at here is the operation that fills the frame data into vp->bmp.

bmp looks a lot like bitmap, so this appears to mean filling the decoded frame data into an image buffer.

  1. int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
  2. {
  3. if (!overlay || !overlay->func_fill_frame)
  4. return -1;
  5. return overlay->func_fill_frame(overlay, frame);
  6. }
  1. static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame){
  2. //...
  3. overlay_fill(overlay, opaque->linked_frame, opaque->planes);
  4. //...
  5. }
  6. static void overlay_fill(SDL_VoutOverlay *overlay, AVFrame *frame, int planes)
  7. {
  8. overlay->planes = planes;
  9. for (int i = 0; i < AV_NUM_DATA_POINTERS; ++i) {
  10. //数组的复制
  11. overlay->pixels[i] = frame->data[i];
  12. overlay->pitches[i] = frame->linesize[i];
  13. }
  14. }

So by this point, the AVFrame's data has been copied into this vp->bmp, whose type is SDL_VoutOverlay.

5 The video rendering thread

  1. //创建视频刷新线程
  2. is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");

A dedicated thread is created just for rendering video. Before reading the code, here is what video rendering has to do:

  1. Take each decoded raw image frame out of the FrameQueue.
  2. Send the frame data to the display device, which draws the image.
  3. This runs in a loop: the decode thread keeps producing image frames, and the rendering thread keeps reading them and feeding them to the display device.
  1. // ijkmedia/ijkplayer/ff_ffplay.c
  2. static int video_refresh_thread(void *arg)
  3. {
  4. FFPlayer *ffp = arg;
  5. VideoState *is = ffp->is;
  6. double remaining_time = 0.0;
  7. //循环,如果没有中断请求,那么就一直尝试去渲染。
  8. while (!is->abort_request) {
  9. if (remaining_time > 0.0)
  10. av_usleep((int)(int64_t)(remaining_time * 1000000.0));
  11. remaining_time = REFRESH_RATE;
  12. if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
  13. //刷新视频
  14. video_refresh(ffp, &remaining_time);
  15. }
  16. return 0;
  17. }
  1. // ijkmedia/ijkplayer/ff_ffplay.c
  2. /* called to display each frame */
  3. static void video_refresh(FFPlayer *opaque, double *remaining_time)
  4. {
  5. FFPlayer *ffp = opaque;
  6. VideoState *is = ffp->is;
  7. double time;
  8. Frame *sp, *sp2;
  9. //处理时钟。
  10. if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
  11. check_external_clock_speed(is);
  12. if (!ffp->display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
  13. time = av_gettime_relative() / 1000000.0;
  14. if (is->force_refresh || is->last_vis_time + ffp->rdftspeed < time) {
  15. //①
  16. video_display2(ffp);
  17. is->last_vis_time = time;
  18. }
  19. *remaining_time = FFMIN(*remaining_time, is->last_vis_time + ffp->rdftspeed - time);
  20. }
  21. if (is->video_st) {
  22. retry:
  23. if (frame_queue_nb_remaining(&is->pictq) == 0) {
  24. // nothing to do, no picture to display in the queue
  25. } else {
  26. double last_duration, duration, delay;
  27. Frame *vp, *lastvp;
  28. /* dequeue the picture */
  29. lastvp = frame_queue_peek_last(&is->pictq);
  30. vp = frame_queue_peek(&is->pictq);
  31. if (vp->serial != is->videoq.serial) {
  32. frame_queue_next(&is->pictq);
  33. goto retry;
  34. }
  35. if (lastvp->serial != vp->serial)
  36. is->frame_timer = av_gettime_relative() / 1000000.0;
  37. if (is->paused)
  38. goto display;
  39. /* compute nominal last_duration */
  40. last_duration = vp_duration(is, lastvp, vp);
  41. delay = compute_target_delay(ffp, last_duration, is);
  42. time= av_gettime_relative()/1000000.0;
  43. if (isnan(is->frame_timer) || time < is->frame_timer)
  44. is->frame_timer = time;
  45. if (time < is->frame_timer + delay) {
  46. *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
  47. goto display;
  48. }
  49. is->frame_timer += delay;
  50. if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
  51. is->frame_timer = time;
  52. SDL_LockMutex(is->pictq.mutex);
  53. if (!isnan(vp->pts))
  54. update_video_pts(is, vp->pts, vp->pos, vp->serial);
  55. SDL_UnlockMutex(is->pictq.mutex);
  56. if (frame_queue_nb_remaining(&is->pictq) > 1) {
  57. Frame *nextvp = frame_queue_peek_next(&is->pictq);
  58. duration = vp_duration(is, vp, nextvp);
  59. if(!is->step && (ffp->framedrop > 0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration) {
  60. frame_queue_next(&is->pictq);
  61. goto retry;
  62. }
  63. }
  64. if (is->subtitle_st) {
  65. while (frame_queue_nb_remaining(&is->subpq) > 0) {
  66. sp = frame_queue_peek(&is->subpq);
  67. if (frame_queue_nb_remaining(&is->subpq) > 1)
  68. sp2 = frame_queue_peek_next(&is->subpq);
  69. else
  70. sp2 = NULL;
  71. if (sp->serial != is->subtitleq.serial
  72. || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
  73. || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
  74. {
  75. if (sp->uploaded) {
  76. ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, "", 1);
  77. }
  78. frame_queue_next(&is->subpq);
  79. } else {
  80. break;
  81. }
  82. }
  83. }
  84. frame_queue_next(&is->pictq);
  85. is->force_refresh = 1;
  86. SDL_LockMutex(ffp->is->play_mutex);
  87. if (is->step) {
  88. is->step = 0;
  89. if (!is->paused)
  90. stream_update_pause_l(ffp);
  91. }
  92. SDL_UnlockMutex(ffp->is->play_mutex);
  93. }
  94. display:
  95. /* display picture */
  96. if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
  97. //①
  98. video_display2(ffp);
  99. }
  100. is->force_refresh = 0;
  101. if (ffp->show_status) {
  102. static int64_t last_time;
  103. int64_t cur_time;
  104. int aqsize, vqsize, sqsize __unused;
  105. double av_diff;
  106. cur_time = av_gettime_relative();
  107. if (!last_time || (cur_time - last_time) >= 30000) {
  108. aqsize = 0;
  109. vqsize = 0;
  110. sqsize = 0;
  111. if (is->audio_st)
  112. aqsize = is->audioq.size;
  113. if (is->video_st)
  114. vqsize = is->videoq.size;
  115. #ifdef FFP_MERGE
  116. if (is->subtitle_st)
  117. sqsize = is->subtitleq.size;
  118. #else
  119. sqsize = 0;
  120. #endif
  121. av_diff = 0;
  122. if (is->audio_st && is->video_st)
  123. av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
  124. else if (is->video_st)
  125. av_diff = get_master_clock(is) - get_clock(&is->vidclk);
  126. else if (is->audio_st)
  127. av_diff = get_master_clock(is) - get_clock(&is->audclk);
  128. av_log(NULL, AV_LOG_INFO,
  129. "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
  130. get_master_clock(is),
  131. (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
  132. av_diff,
  133. is->frame_drops_early + is->frame_drops_late,
  134. aqsize / 1024,
  135. vqsize / 1024,
  136. sqsize,
  137. is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
  138. is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
  139. fflush(stdout);
  140. last_time = cur_time;
  141. }
  142. }
  143. }

That is a long stretch of code, and it appears to contain some clock-based audio/video synchronization logic; I won't analyze that for now. The method we need to jump into is the one marked with ① (it appears in two places):

  1. //①
  2. video_display2(ffp);
  1. /* display the current picture, if any */
  2. static void video_display2(FFPlayer *ffp)
  3. {
  4. VideoState *is = ffp->is;
  5. if (is->video_st)
  6. video_image_display2(ffp);
  7. }
  1. static void video_image_display2(FFPlayer *ffp)
  2. {
  3. VideoState *is = ffp->is;
  4. Frame *vp;
  5. Frame *sp = NULL;
  6. //is->pictq就是picture queue的意思。读取队列中最后一帧。
  7. vp = frame_queue_peek_last(&is->pictq);
  8. //如果帧中的SDL_VoutOverlay数据不为null,那么就开始渲染
  9. if (vp->bmp) {
  10. //如果字幕流不为空,去渲染字幕
  11. if (is->subtitle_st) {
  12. if (frame_queue_nb_remaining(&is->subpq) > 0) {
  13. sp = frame_queue_peek(&is->subpq);
  14. if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
  15. if (!sp->uploaded) {
  16. if (sp->sub.num_rects > 0) {
  17. char buffered_text[4096];
  18. if (sp->sub.rects[0]->text) {
  19. strncpy(buffered_text, sp->sub.rects[0]->text, 4096);
  20. }
  21. else if (sp->sub.rects[0]->ass) {
  22. parse_ass_subtitle(sp->sub.rects[0]->ass, buffered_text);
  23. }
  24. ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, buffered_text, sizeof(buffered_text));
  25. }
  26. sp->uploaded = 1;
  27. }
  28. }
  29. }
  30. }
  31. if (ffp->render_wait_start && !ffp->start_on_prepared && is->pause_req) {
  32. if (!ffp->first_video_frame_rendered) {
  33. ffp->first_video_frame_rendered = 1;
  34. ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
  35. }
  36. while (is->pause_req && !is->abort_request) {
  37. SDL_Delay(20);
  38. }
  39. }
  40. //显示YUV数据。
  41. SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
  42. ffp->stat.vfps = SDL_SpeedSamplerAdd(&ffp->vfps_sampler, FFP_SHOW_VFPS_FFPLAY, "vfps[ffplay]");
  43. if (!ffp->first_video_frame_rendered) {
  44. ffp->first_video_frame_rendered = 1;
  45. ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
  46. }
  47. if (is->latest_video_seek_load_serial == vp->serial) {
  48. int latest_video_seek_load_serial = __atomic_exchange_n(&(is->latest_video_seek_load_serial), -1, memory_order_seq_cst);
  49. if (latest_video_seek_load_serial == vp->serial) {
  50. ffp->stat.latest_seek_load_duration = (av_gettime() - is->latest_seek_load_start_at) / 1000;
  51. if (ffp->av_sync_type == AV_SYNC_VIDEO_MASTER) {
  52. ffp_notify_msg2(ffp, FFP_MSG_VIDEO_SEEK_RENDERING_START, 1);
  53. } else {
  54. ffp_notify_msg2(ffp, FFP_MSG_VIDEO_SEEK_RENDERING_START, 0);
  55. }
  56. }
  57. }
  58. }
  59. }

First, look at the code that fetches the video frame:

  1. Frame *vp;
  2. Frame *sp = NULL;
  3. //is->pictq就是picture queue的意思。读取队列中最后一帧。
  4. vp = frame_queue_peek_last(&is->pictq);

Here, a Frame is the decoded image data of one frame, ready to be displayed directly, and is->pictq is the FrameQueue of decoded pictures inside VideoState.

Let's look at Frame and FrameQueue:

  1. typedef struct Frame {
  2. AVFrame *frame;//ffmpeg定义的数据结构,里面存着buffer,存着真实的yuv图像数据
  3. AVSubtitle sub;//字幕数据
  4. int serial;
  5. double pts; /* presentation timestamp for the frame */
  6. double duration; /* estimated duration of the frame */
  7. int64_t pos; /* byte position of the frame in the input file */
  8. #ifdef FFP_MERGE
  9. SDL_Texture *bmp;
  10. #else
  11. SDL_VoutOverlay *bmp;//vout设备
  12. #endif
  13. int allocated;
  14. int width;
  15. int height;
  16. int format;
  17. AVRational sar;
  18. int uploaded;
  19. } Frame;
  20. typedef struct FrameQueue {
  21. Frame queue[FRAME_QUEUE_SIZE];//数组
  22. int rindex;//read index。下一个读取的下标
  23. int windex;//write index。下一个写入的下标
  24. int size;
  25. int max_size;
  26. int keep_last;
  27. int rindex_shown;
  28. SDL_mutex *mutex;
  29. SDL_cond *cond;
  30. PacketQueue *pktq;//引用的未解码的包队列
  31. } FrameQueue;
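Given rindex and rindex_shown, the read-side helpers used by the renderer are tiny. Roughly (a sketch based on the ffplay-derived code):

  /* the frame that was shown last (still kept in the queue when keep_last is enabled) */
  static Frame *frame_queue_peek_last(FrameQueue *f)
  {
      return &f->queue[f->rindex];
  }

  /* the next frame to display */
  static Frame *frame_queue_peek(FrameQueue *f)
  {
      return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
  }

  /* number of undisplayed frames remaining in the queue */
  static int frame_queue_nb_remaining(FrameQueue *f)
  {
      return f->size - f->rindex_shown;
  }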

Then the line that actually renders:

  1. //显示YUV数据。这个vp是Frame,而这个bmp是bitmap的意思
  2. SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);

This means the data in vp->bmp is handed to ffp->vout to be displayed.
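SDL_VoutDisplayYUVOverlay() follows the same pattern as SDL_VoutFillFrameYUVOverlay() above: the SDL_Vout object carries a display callback and the call simply forwards to it. A sketch, assuming the ijksdl vout interface (on Android the concrete callback hands the overlay to the native window or OpenGL ES renderer):

  int SDL_VoutDisplayYUVOverlay(SDL_Vout *vout, SDL_VoutOverlay *overlay)
  {
      if (!vout || !vout->display_overlay)
          return -1;

      return vout->display_overlay(vout, overlay);   // e.g. the Android native-window / GLES vout implementation
  }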