前言

我是一名打算走音视频路线的android开发者。以此系列文章开始,记录我的音视频开发学习之路

ijkplayer系列文章目录:
理解ijkplayer(一):开始

理解ijkplayer(二)项目结构分析

理解ijkplayer(三)从Java层开始初始化

理解ijkplayer(四)拉流

理解ijkplayer(五)解码、播放


由于篇幅的原因,这一篇文章是接着上一篇继续写的。

上一篇文章分析完了:

  1. JNI_Onload()
  2. native_init()
  3. native_setup()
  4. _setDataSource()
  5. _setVideoSurface

1 _prepareAsync()

播放器的异步准备。这是初始化阶段中最复杂,最重要的函数。

  1. // ijkmedia/ijkplayer/android/ijkplayer_jni.c
  2. static void
  3. IjkMediaPlayer_prepareAsync(JNIEnv *env, jobject thiz)
  4. {
  5. MPTRACE("%s\n", __func__);
  6. int retval = 0;
  7. IjkMediaPlayer *mp = jni_get_media_player(env, thiz);
  8. JNI_CHECK_GOTO(mp, env, "java/lang/IllegalStateException", "mpjni: prepareAsync: null mp", LABEL_RETURN);
  9. retval = ijkmp_prepare_async(mp);
  10. IJK_CHECK_MPRET_GOTO(retval, env, LABEL_RETURN);
  11. LABEL_RETURN:
  12. ijkmp_dec_ref_p(&mp);
  13. }
  1. // ijkmedia/ijkplayer/ijkplayer.c
  2. int ijkmp_prepare_async(IjkMediaPlayer *mp)
  3. {
  4. assert(mp);
  5. MPTRACE("ijkmp_prepare_async()\n");
  6. pthread_mutex_lock(&mp->mutex);
  7. int retval = ijkmp_prepare_async_l(mp);
  8. pthread_mutex_unlock(&mp->mutex);
  9. MPTRACE("ijkmp_prepare_async()=%d\n", retval);
  10. return retval;
  11. }
  1. static int ijkmp_prepare_async_l(IjkMediaPlayer *mp)
  2. {
  3. assert(mp);
  4. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_IDLE);
  5. // MPST_RET_IF_EQ(mp->mp_state, MP_STATE_INITIALIZED);
  6. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_ASYNC_PREPARING);
  7. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_PREPARED);
  8. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_STARTED);
  9. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_PAUSED);
  10. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_COMPLETED);
  11. // MPST_RET_IF_EQ(mp->mp_state, MP_STATE_STOPPED);
  12. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_ERROR);
  13. MPST_RET_IF_EQ(mp->mp_state, MP_STATE_END);
  14. //声明url不为空
  15. assert(mp->data_source);
  16. //改变播放器状态到MP_STATE_ASYNC_PREPARING
  17. ijkmp_change_state_l(mp, MP_STATE_ASYNC_PREPARING);
  18. //消息队列开始
  19. msg_queue_start(&mp->ffplayer->msg_queue);
  20. // released in msg_loop
  21. ijkmp_inc_ref(mp);
  22. //创建并启动消息线程,开启循环来读取消息队列的消息。
  23. mp->msg_thread = SDL_CreateThreadEx(&mp->_msg_thread, ijkmp_msg_loop, mp, "ff_msg_loop");
  24. // msg_thread is detached inside msg_loop
  25. // TODO: 9 release weak_thiz if pthread_create() failed;
  26. //逻辑跳转到ff_ffplay.c
  27. int retval = ffp_prepare_async_l(mp->ffplayer, mp->data_source);
  28. if (retval < 0) {
  29. //出错,则抛出MP_STATE_ERROR
  30. ijkmp_change_state_l(mp, MP_STATE_ERROR);
  31. return retval;
  32. }
  33. return 0;
  34. }

看到创建消息线程的那一句的ijkmp_msg_loop函数

  1. //这句函数会在线程被启动的时候调用,类似于Thread中的Runnable
  2. static int ijkmp_msg_loop(void *arg)
  3. {
  4. IjkMediaPlayer *mp = arg;
  5. //调用mp的msg_loop函数。
  6. int ret = mp->msg_loop(arg);
  7. return ret;
  8. }

mp->msg_loop这个函数,在前面播放器被创建的时候被赋值,在3.3.2中。

那么这时开启了消息循环线程,并且prepare的逻辑跳转到了ff_ffplay.c

  1. // ijkmedia/ijkplayer/ff_ffplay.c
  2. int ffp_prepare_async_l(FFPlayer *ffp, const char *file_name)
  3. {
  4. assert(ffp);
  5. assert(!ffp->is);
  6. assert(file_name);
  7. //针对rtmp和rtsp协议,移除选项"timeout"
  8. if (av_stristart(file_name, "rtmp", NULL) ||
  9. av_stristart(file_name, "rtsp", NULL)) {
  10. // There is total different meaning for 'timeout' option in rtmp
  11. av_log(ffp, AV_LOG_WARNING, "remove 'timeout' option for rtmp.\n");
  12. av_dict_set(&ffp->format_opts, "timeout", NULL, 0);
  13. }
  14. /* there is a length limit in avformat */
  15. if (strlen(file_name) + 1 > 1024) {
  16. av_log(ffp, AV_LOG_ERROR, "%s too long url\n", __func__);
  17. if (avio_find_protocol_name("ijklongurl:")) {
  18. av_dict_set(&ffp->format_opts, "ijklongurl-url", file_name, 0);
  19. file_name = "ijklongurl:";
  20. }
  21. }
  22. //打印版本信息
  23. av_log(NULL, AV_LOG_INFO, "===== versions =====\n");
  24. ffp_show_version_str(ffp, "ijkplayer", ijk_version_info());
  25. ffp_show_version_str(ffp, "FFmpeg", av_version_info());
  26. ffp_show_version_int(ffp, "libavutil", avutil_version());
  27. ffp_show_version_int(ffp, "libavcodec", avcodec_version());
  28. ffp_show_version_int(ffp, "libavformat", avformat_version());
  29. ffp_show_version_int(ffp, "libswscale", swscale_version());
  30. ffp_show_version_int(ffp, "libswresample", swresample_version());
  31. av_log(NULL, AV_LOG_INFO, "===== options =====\n");
  32. ffp_show_dict(ffp, "player-opts", ffp->player_opts);
  33. ffp_show_dict(ffp, "format-opts", ffp->format_opts);
  34. ffp_show_dict(ffp, "codec-opts ", ffp->codec_opts);
  35. ffp_show_dict(ffp, "sws-opts ", ffp->sws_dict);
  36. ffp_show_dict(ffp, "swr-opts ", ffp->swr_opts);
  37. av_log(NULL, AV_LOG_INFO, "===================\n");
  38. //设置播放器选项
  39. av_opt_set_dict(ffp, &ffp->player_opts);
  40. //如果ffplayer->aout==null,那么就打开音频输出设备。前面的初始化代码是没有为这个赋值过的,所以第一次调用时该条件肯定成立。
  41. if (!ffp->aout) {
  42. ffp->aout = ffpipeline_open_audio_output(ffp->pipeline, ffp);
  43. if (!ffp->aout)
  44. return -1;
  45. }
  46. #if CONFIG_AVFILTER
  47. if (ffp->vfilter0) {
  48. GROW_ARRAY(ffp->vfilters_list, ffp->nb_vfilters);
  49. ffp->vfilters_list[ffp->nb_vfilters - 1] = ffp->vfilter0;
  50. }
  51. #endif
  52. //打开流,并返回一个VideoState的结构体
  53. VideoState *is = stream_open(ffp, file_name, NULL);
  54. if (!is) {
  55. av_log(NULL, AV_LOG_WARNING, "ffp_prepare_async_l: stream_open failed OOM");
  56. return EIJK_OUT_OF_MEMORY;
  57. }
  58. ffp->is = is;
  59. ffp->input_filename = av_strdup(file_name);
  60. return 0;
  61. }

2 打开音频输出设备

  1. //如果ffplayer->aout==null,那么就打开音频输出设备。前面的初始化代码是没有为这个赋值过的,所以第一次调用时该条件肯定成立。
  2. if (!ffp->aout) {
  3. ffp->aout = ffpipeline_open_audio_output(ffp->pipeline, ffp);
  4. if (!ffp->aout)
  5. return -1;
  6. }
  1. SDL_Aout *ffpipeline_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
  2. {
  3. //借助pipeline的方法
  4. return pipeline->func_open_audio_output(pipeline, ffp);
  5. }

ffp->pipeline是在创建播放器IjkMediaPlayer的时候,在创建完ffplayer后,和ffplayer->vout一起创建的,在3.3.2有如下的代码:

  1. // ijkmedia/ijkplayer/android/ijkplayer_android.c
  2. IjkMediaPlayer *ijkmp_android_create(int(*msg_loop)(void*))
  3. {
  4. //创建IjkMediaPlayer
  5. IjkMediaPlayer *mp = ijkmp_create(msg_loop);
  6. if (!mp)
  7. goto fail;
  8. //创建视频输出设备,会根据硬解还是软解来创建:硬解用MediaCodec创建,软解用FFmpeg创建
  9. mp->ffplayer->vout = SDL_VoutAndroid_CreateForAndroidSurface();
  10. if (!mp->ffplayer->vout)
  11. goto fail;
  12. //暂时不太理解这个叫做”管道“的东西是什么
  13. mp->ffplayer->pipeline = ffpipeline_create_from_android(mp->ffplayer);
  14. if (!mp->ffplayer->pipeline)
  15. goto fail;
  16. //将创建的视频输出设备vout,赋值到ffplayer->pipeline中
  17. ffpipeline_set_vout(mp->ffplayer->pipeline, mp->ffplayer->vout);
  18. return mp;
  19. fail:
  20. ijkmp_dec_ref_p(&mp);
  21. return NULL;
  22. }

那么我们看到pipeline->func_open_audio_output(pipeline, ffp);的这个方法:我一点进去直接跳转到IJKFF_Pipeline的结构体的定义来了。

  1. struct IJKFF_Pipeline {
  2. SDL_Class *opaque_class;
  3. IJKFF_Pipeline_Opaque *opaque;
  4. void (*func_destroy) (IJKFF_Pipeline *pipeline);
  5. IJKFF_Pipenode *(*func_open_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  6. //我们要看的是这个方法,名叫:打开音频输出设备。
  7. SDL_Aout *(*func_open_audio_output) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  8. IJKFF_Pipenode *(*func_init_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  9. int (*func_config_video_decoder) (IJKFF_Pipeline *pipeline, FFPlayer *ffp);
  10. };

这个函数应该是在某个地方被赋值了,我们得找一下,全局搜索关键字:func_open_audio_output

理解ijkplayer(四)拉流 - 图1

全局搜索中出现了3个对func_open_audio_output赋值的语句,分别出现在

  1. ffpipeline_android.c
  2. ffpipeline_ffplay.c
  3. ffpipeline_ios.c

其中Android和iOS平台有各自的赋值规则,另外还有一个平台无关的中立的赋值的地方,我们先看这个平台无关的赋值的地方:

  1. static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
  2. {
  3. //返回NULL
  4. return NULL;
  5. }
  6. IJKFF_Pipeline *ffpipeline_create_from_ffplay(FFPlayer *ffp)
  7. {
  8. IJKFF_Pipeline *pipeline = ffpipeline_alloc(&g_pipeline_class, sizeof(IJKFF_Pipeline_Opaque));
  9. if (!pipeline)
  10. return pipeline;
  11. IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
  12. opaque->ffp = ffp;
  13. pipeline->func_destroy = func_destroy;
  14. pipeline->func_open_video_decoder = func_open_video_decoder;
  15. //在这里
  16. pipeline->func_open_audio_output = func_open_audio_output;
  17. return pipeline;
  18. }

这个平台无关的函数赋值语句赋值的函数返回了NULL。而我再全局搜索一下这个ffpipeline_create_from_ffplay,发现没有调用这个的地方。那么这个函数应该只是一个示范函数,让Android和ios平台去各自实现。

那么看下android这边的:

  1. IJKFF_Pipeline *ffpipeline_create_from_android(FFPlayer *ffp)
  2. {
  3. ALOGD("ffpipeline_create_from_android()\n");
  4. IJKFF_Pipeline *pipeline = ffpipeline_alloc(&g_pipeline_class, sizeof(IJKFF_Pipeline_Opaque));
  5. if (!pipeline)
  6. return pipeline;
  7. IJKFF_Pipeline_Opaque *opaque = pipeline->opaque;
  8. opaque->ffp = ffp;
  9. opaque->surface_mutex = SDL_CreateMutex();
  10. opaque->left_volume = 1.0f;
  11. opaque->right_volume = 1.0f;
  12. if (!opaque->surface_mutex) {
  13. ALOGE("ffpipeline-android:create SDL_CreateMutex failed\n");
  14. goto fail;
  15. }
  16. pipeline->func_destroy = func_destroy;
  17. pipeline->func_open_video_decoder = func_open_video_decoder;
  18. //打开音频输出设备
  19. pipeline->func_open_audio_output = func_open_audio_output;
  20. pipeline->func_init_video_decoder = func_init_video_decoder;
  21. pipeline->func_config_video_decoder = func_config_video_decoder;
  22. return pipeline;
  23. fail:
  24. ffpipeline_free_p(&pipeline);
  25. return NULL;
  26. }

注意,这个ffpipeline_create_from_android方法被调用的地方,是在创建完ffplayer播放器后,和ffplayer->vout一起创建的,在3.3.2中有示例代码。

继续看func_open_audio_output函数:

  1. static SDL_Aout *func_open_audio_output(IJKFF_Pipeline *pipeline, FFPlayer *ffp)
  2. {
  3. SDL_Aout *aout = NULL;
  4. if (ffp->opensles) {
  5. //如果打开了opensles,则用OpenSLES来创建音频输出设备
  6. aout = SDL_AoutAndroid_CreateForOpenSLES();
  7. } else {
  8. //否则,使用Android平台的AudioTrack来创建音频输出设备
  9. aout = SDL_AoutAndroid_CreateForAudioTrack();
  10. }
  11. if (aout)
  12. SDL_AoutSetStereoVolume(aout, pipeline->opaque->left_volume, pipeline->opaque->right_volume);
  13. return aout;
  14. }

那么这个ffp->opensles的返回值就很关键了,通过全局搜索,我看到在

inline static void ffp_reset_internal(FFPlayer *ffp)函数中有:

  1. ffp->opensles = 0; // option

即opensles是默认关闭的,除非用了option去打开它。

而option的定义位于:ijkmedia/ijkplayer/ff_ffplay_options.h。option是如何发挥作用的,后面再分析。

对于ijkplayer是如何利用AudioTrack来播放解码后的pcm音频数据的,这里也暂不分析。

3 打开流

  1. VideoState *is = stream_open(ffp, file_name, NULL);

单看这一句,感觉是:根据file_name(url)打开对应的视频流,并返回一个VideoState(视频状态)

而这个VideoState是保存在FFPlayer里面的

  1. typedef struct FFPlayer {
  2. const AVClass *av_class;
  3. /* ffplay context */
  4. VideoState *is;
  5. //...
  6. }

而这个FFPlayer则是IjkMediaPlayer中真正的播放器对象。

即一个播放器对应一个VideoState对象。

那么先看一下VideoState的结构体:

  1. typedef struct VideoState {
  2. SDL_Thread *read_tid;//读线程
  3. SDL_Thread _read_tid;
  4. AVInputFormat *iformat;//输入格式
  5. int abort_request;//停止请求
  6. int force_refresh;//强制刷新
  7. int paused;//暂停
  8. int last_paused;
  9. int queue_attachments_req;
  10. int seek_req;
  11. int seek_flags;
  12. int64_t seek_pos;
  13. int64_t seek_rel;
  14. #ifdef FFP_MERGE
  15. int read_pause_return;
  16. #endif
  17. AVFormatContext *ic;
  18. int realtime;
  19. Clock audclk;//音频时钟
  20. Clock vidclk;//视频时钟
  21. Clock extclk;//外部时钟
  22. FrameQueue pictq;//图片帧队列:解码后的视频数据
  23. FrameQueue subpq;//字幕帧队列:解码后的字幕数据
  24. FrameQueue sampq;//音频帧队列:解码后的音频数据
  25. Decoder auddec;//音频解码器
  26. Decoder viddec;//视频解码器
  27. Decoder subdec;//字幕解码器
  28. int audio_stream;//音频流
  29. int av_sync_type;
  30. void *handle;
  31. double audio_clock;
  32. int audio_clock_serial;
  33. double audio_diff_cum; /* used for AV difference average computation */
  34. double audio_diff_avg_coef;
  35. double audio_diff_threshold;
  36. int audio_diff_avg_count;
  37. AVStream *audio_st;
  38. PacketQueue audioq;//音频包数据:未解码的音频数据,从demuxers输出
  39. int audio_hw_buf_size;
  40. uint8_t *audio_buf;
  41. uint8_t *audio_buf1;
  42. short *audio_new_buf; /* for soundtouch buf */
  43. unsigned int audio_buf_size; /* in bytes */
  44. unsigned int audio_buf1_size;
  45. unsigned int audio_new_buf_size;
  46. int audio_buf_index; /* in bytes */
  47. int audio_write_buf_size;
  48. int audio_volume;
  49. int muted;
  50. struct AudioParams audio_src;
  51. #if CONFIG_AVFILTER
  52. struct AudioParams audio_filter_src;
  53. #endif
  54. struct AudioParams audio_tgt;
  55. struct SwrContext *swr_ctx;
  56. int frame_drops_early;
  57. int frame_drops_late;
  58. int continuous_frame_drops_early;
  59. enum ShowMode {
  60. SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
  61. } show_mode;
  62. int16_t sample_array[SAMPLE_ARRAY_SIZE];
  63. int sample_array_index;
  64. int last_i_start;
  65. #ifdef FFP_MERGE
  66. RDFTContext *rdft;
  67. int rdft_bits;
  68. FFTSample *rdft_data;
  69. int xpos;
  70. #endif
  71. double last_vis_time;
  72. #ifdef FFP_MERGE
  73. SDL_Texture *vis_texture;
  74. SDL_Texture *sub_texture;
  75. #endif
  76. int subtitle_stream;
  77. AVStream *subtitle_st;
  78. PacketQueue subtitleq;//未解码的字幕数据:从demuxer输出
  79. double frame_timer;
  80. double frame_last_returned_time;
  81. double frame_last_filter_delay;
  82. int video_stream;
  83. AVStream *video_st;
  84. PacketQueue videoq;//未解码的视频数据:从demuxer输出
  85. double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
  86. struct SwsContext *img_convert_ctx;
  87. #ifdef FFP_SUB
  88. struct SwsContext *sub_convert_ctx;
  89. #endif
  90. int eof;
  91. char *filename;
  92. int width, height, xleft, ytop;//视频的:宽、高、左上角x坐标,左上角y坐标。和ffmpeg里面的是对应的。
  93. int step;
  94. #if CONFIG_AVFILTER
  95. int vfilter_idx;
  96. AVFilterContext *in_video_filter; // the first filter in the video chain
  97. AVFilterContext *out_video_filter; // the last filter in the video chain
  98. AVFilterContext *in_audio_filter; // the first filter in the audio chain
  99. AVFilterContext *out_audio_filter; // the last filter in the audio chain
  100. AVFilterGraph *agraph; // audio filter graph
  101. #endif
  102. int last_video_stream, last_audio_stream, last_subtitle_stream;
  103. SDL_cond *continue_read_thread;
  104. /* extra fields */
  105. SDL_mutex *play_mutex; // only guard state, do not block any long operation
  106. SDL_Thread *video_refresh_tid;
  107. SDL_Thread _video_refresh_tid;
  108. int buffering_on;
  109. int pause_req;
  110. int dropping_frame;
  111. int is_video_high_fps; // above 30fps
  112. int is_video_high_res; // above 1080p
  113. PacketQueue *buffer_indicator_queue;
  114. volatile int latest_video_seek_load_serial;
  115. volatile int latest_audio_seek_load_serial;
  116. volatile int64_t latest_seek_load_start_at;
  117. int drop_aframe_count;
  118. int drop_vframe_count;
  119. int64_t accurate_seek_start_time;
  120. volatile int64_t accurate_seek_vframe_pts;
  121. volatile int64_t accurate_seek_aframe_pts;
  122. int audio_accurate_seek_req;
  123. int video_accurate_seek_req;
  124. SDL_mutex *accurate_seek_mutex;
  125. SDL_cond *video_accurate_seek_cond;
  126. SDL_cond *audio_accurate_seek_cond;
  127. volatile int initialized_decoder;
  128. int seek_buffering;
  129. } VideoState;

我针对我理解了的字段做了一些注释。

那么现在看到返回VideoState结构体的方法stream_open

  1. static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat)
  2. {
  3. assert(!ffp->is);
  4. VideoState *is;
  5. //创建VideoState结构体
  6. is = av_mallocz(sizeof(VideoState));
  7. if (!is)
  8. return NULL;
  9. //给VideoState结构体中的属性赋值
  10. is->filename = av_strdup(filename);
  11. if (!is->filename)
  12. goto fail;
  13. is->iformat = iformat;
  14. is->ytop = 0;
  15. is->xleft = 0;
  16. #if defined(__ANDROID__)
  17. //android平台下的soundtouch,不太清楚是做什么的
  18. if (ffp->soundtouch_enable) {
  19. is->handle = ijk_soundtouch_create();
  20. }
  21. #endif
  22. /* start video display */
  23. //初始化3个帧队列(解码后帧的队列)
  24. if (frame_queue_init(&is->pictq, &is->videoq, ffp->pictq_size, 1) < 0)
  25. goto fail;
  26. if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
  27. goto fail;
  28. if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
  29. goto fail;
  30. //初始化3个包队列(解码前的帧队列,不过是demuxer输出的数据了)
  31. if (packet_queue_init(&is->videoq) < 0 ||
  32. packet_queue_init(&is->audioq) < 0 ||
  33. packet_queue_init(&is->subtitleq) < 0)
  34. goto fail;
  35. //以下3个创建SDL_cond的函数,不太清楚他们的作用是什么,暂不分析
  36. if (!(is->continue_read_thread = SDL_CreateCond())) {
  37. av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
  38. goto fail;
  39. }
  40. if (!(is->video_accurate_seek_cond = SDL_CreateCond())) {
  41. av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
  42. ffp->enable_accurate_seek = 0;
  43. }
  44. if (!(is->audio_accurate_seek_cond = SDL_CreateCond())) {
  45. av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
  46. ffp->enable_accurate_seek = 0;
  47. }
  48. //初始化音频时钟,视频时钟,外部时钟
  49. init_clock(&is->vidclk, &is->videoq.serial);
  50. init_clock(&is->audclk, &is->audioq.serial);
  51. init_clock(&is->extclk, &is->extclk.serial);
  52. is->audio_clock_serial = -1;
  53. //初始化播放器的初始音量
  54. if (ffp->startup_volume < 0)
  55. av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", ffp->startup_volume);
  56. if (ffp->startup_volume > 100)
  57. av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", ffp->startup_volume);
  58. ffp->startup_volume = av_clip(ffp->startup_volume, 0, 100);
  59. ffp->startup_volume = av_clip(SDL_MIX_MAXVOLUME * ffp->startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
  60. is->audio_volume = ffp->startup_volume;
  61. is->muted = 0;
  62. is->av_sync_type = ffp->av_sync_type;
  63. //初始化播放器互斥锁
  64. is->play_mutex = SDL_CreateMutex();
  65. is->accurate_seek_mutex = SDL_CreateMutex();
  66. ffp->is = is;
  67. //如果start_on_prepared=false,那么当prepare完之后要暂停,不能直接播放。
  68. is->pause_req = !ffp->start_on_prepared;
  69. //创建视频渲染线程
  70. is->video_refresh_tid = SDL_CreateThreadEx(&is->_video_refresh_tid, video_refresh_thread, ffp, "ff_vout");
  71. if (!is->video_refresh_tid) {
  72. av_freep(&ffp->is);
  73. return NULL;
  74. }
  75. //********开始初始化解码器
  76. is->initialized_decoder = 0;
  77. //创建读取线程
  78. is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
  79. if (!is->read_tid) {
  80. av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
  81. goto fail;
  82. }
  83. if (ffp->async_init_decoder && !ffp->video_disable && ffp->video_mime_type && strlen(ffp->video_mime_type) > 0
  84. && ffp->mediacodec_default_name && strlen(ffp->mediacodec_default_name) > 0) {
  85. if (ffp->mediacodec_all_videos || ffp->mediacodec_avc || ffp->mediacodec_hevc || ffp->mediacodec_mpeg2) {
  86. decoder_init(&is->viddec, NULL, &is->videoq, is->continue_read_thread);
  87. ffp->node_vdec = ffpipeline_init_video_decoder(ffp->pipeline, ffp);
  88. }
  89. }
  90. //********初始化解码器完成
  91. is->initialized_decoder = 1;
  92. return is;
  93. fail:
  94. is->initialized_decoder = 1;
  95. is->abort_request = true;
  96. if (is->video_refresh_tid)
  97. SDL_WaitThread(is->video_refresh_tid, NULL);
  98. stream_close(ffp);
  99. return NULL;
  100. }

他的逻辑大致为:

  1. 创建VideoState对象,并初始化他的一些默认属性。
  2. 初始化视频、音频、字幕的解码后的帧队列。
  3. 初始化视频、音频、字幕的解码前的包队列。
  4. 初始化播放器音量。
  5. 创建视频渲染线程。
  6. 创建视频数据读取线程(从网络读取或者从文件读取,io操作)。
  7. 初始化解码器。(ffmpeg应该会在内部创建解码线程)。

因此,在stream_open()方法中完成了最主要的3个线程的创建。

4 视频读取线程

stream_open这个打开流的函数中,在开启了视频渲染线程后,接着就开启了视频读取线程。

  1. static VideoState *stream_open(FFPlayer *ffp, const char *filename, AVInputFormat *iformat){
  2. //...
  3. is->read_tid = SDL_CreateThreadEx(&is->_read_tid, read_thread, ffp, "ff_read");
  4. //...
  5. }

通过创建单独的线程,专门用于读取Packet,在read_thread()函数中。而这个函数非常长,做了很多事情,先放出一个浓缩版的:

简略版代码:

  1. static int read_thread(void *arg)
  2. {
  3. //Open an input stream and read the header. The codecs are not opened.
  4. //The stream must be closed with avformat_close_input().
  5. //打开输入流,并读取文件头部,解码器还未打开。主要作用是探测流的协议,如http还是rtmp等。
  6. err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
  7. // Read packets of a media file to get stream information. This
  8. // is useful for file formats with no headers such as MPEG. This
  9. // function also computes the real framerate in case of MPEG-2 repeat
  10. // frame mode.
  11. // The logical file position is not changed by this function;
  12. // examined packets may buffered for later processing.
  13. //探测文件封装格式,音视频编码参数等信息。
  14. err = avformat_find_stream_info(ic, opts);
  15. // Find the "best" stream in the file.
  16. // The best stream is determined according to various heuristics as the most
  17. // likely to be what the user expects.
  18. // If the decoder parameter is non-NULL, av_find_best_stream will find the
  19. // default decoder for the stream's codec; streams for which no decoder can
  20. // be found are ignored.
  21. //根据 AVFormatContext,找到最佳的流。
  22. av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
  23. st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
  24. //内部分别开启audio,video,subtitle的解码器的线程,开始各自的解码的工作。在稍后的3.6.5解码线程中分析这里的内容
  25. stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
  26. stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
  27. stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
  28. //开始无限循环,调用ffmpeg的av_read_frame()读取AVPacket,并入队。
  29. for (;;) {
  30. //AVPacket pkt;
  31. ret = av_read_frame(ic, pkt);
  32. //把网络读取到并解封装到的pkt包入队列。(稍后在解码线程会拿到这些pkt包去解码。)
  33. //如果是音频流的包
  34. packet_queue_put(&is->audioq, pkt);
  35. //如果是视频流的包
  36. packet_queue_put(&is->videoq, pkt);
  37. //如果是字幕流的包
  38. packet_queue_put(&is->subtitleq, pkt);
  39. }
  40. }

完整版代码:

那么详细的全部的源码(600行)如下,做了部分注释。

  1. /* this thread gets the stream from the disk or the network */
  2. static int read_thread(void *arg)
  3. {
  4. FFPlayer *ffp = arg;
  5. VideoState *is = ffp->is;
  6. AVFormatContext *ic = NULL;
  7. int err, i, ret __unused;
  8. int st_index[AVMEDIA_TYPE_NB];
  9. AVPacket pkt1, *pkt = &pkt1;
  10. int64_t stream_start_time;
  11. int completed = 0;
  12. int pkt_in_play_range = 0;
  13. AVDictionaryEntry *t;
  14. SDL_mutex *wait_mutex = SDL_CreateMutex();
  15. int scan_all_pmts_set = 0;
  16. int64_t pkt_ts;
  17. int last_error = 0;
  18. int64_t prev_io_tick_counter = 0;
  19. int64_t io_tick_counter = 0;
  20. int init_ijkmeta = 0;
  21. if (!wait_mutex) {
  22. av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
  23. ret = AVERROR(ENOMEM);
  24. goto fail;
  25. }
  26. memset(st_index, -1, sizeof(st_index));
  27. is->last_video_stream = is->video_stream = -1;
  28. is->last_audio_stream = is->audio_stream = -1;
  29. is->last_subtitle_stream = is->subtitle_stream = -1;
  30. is->eof = 0;
  31. //初始化AVFormatContext
  32. ic = avformat_alloc_context();
  33. if (!ic) {
  34. av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
  35. ret = AVERROR(ENOMEM);
  36. goto fail;
  37. }
  38. //为AVFormatContext设置中断回调
  39. ic->interrupt_callback.callback = decode_interrupt_cb;
  40. ic->interrupt_callback.opaque = is;
  41. if (!av_dict_get(ffp->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
  42. av_dict_set(&ffp->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
  43. scan_all_pmts_set = 1;
  44. }
  45. if (av_stristart(is->filename, "rtmp", NULL) ||
  46. av_stristart(is->filename, "rtsp", NULL)) {
  47. // There is total different meaning for 'timeout' option in rtmp
  48. av_log(ffp, AV_LOG_WARNING, "remove 'timeout' option for rtmp.\n");
  49. av_dict_set(&ffp->format_opts, "timeout", NULL, 0);
  50. }
  51. if (ffp->skip_calc_frame_rate) {
  52. av_dict_set_int(&ic->metadata, "skip-calc-frame-rate", ffp->skip_calc_frame_rate, 0);
  53. av_dict_set_int(&ffp->format_opts, "skip-calc-frame-rate", ffp->skip_calc_frame_rate, 0);
  54. }
  55. if (ffp->iformat_name)
  56. //找到视频格式:AVInputFormat
  57. is->iformat = av_find_input_format(ffp->iformat_name);
  58. //Open an input stream and read the header. The codecs are not opened.
  59. //The stream must be closed with avformat_close_input().
  60. //打开输入流,并读取文件头部,解码器还未打开。主要作用是探测流的协议,如http还是rtmp等。
  61. err = avformat_open_input(&ic, is->filename, is->iformat, &ffp->format_opts);
  62. if (err < 0) {
  63. print_error(is->filename, err);
  64. ret = -1;
  65. goto fail;
  66. }
  67. ffp_notify_msg1(ffp, FFP_MSG_OPEN_INPUT);
  68. if (scan_all_pmts_set)
  69. av_dict_set(&ffp->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
  70. if ((t = av_dict_get(ffp->format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  71. av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
  72. #ifdef FFP_MERGE
  73. ret = AVERROR_OPTION_NOT_FOUND;
  74. goto fail;
  75. #endif
  76. }
  77. is->ic = ic;
  78. if (ffp->genpts)
  79. ic->flags |= AVFMT_FLAG_GENPTS;
  80. av_format_inject_global_side_data(ic);
  81. //
  82. //AVDictionary **opts;
  83. //int orig_nb_streams;
  84. //opts = setup_find_stream_info_opts(ic, ffp->codec_opts);
  85. //orig_nb_streams = ic->nb_streams;
  86. if (ffp->find_stream_info) {
  87. AVDictionary **opts = setup_find_stream_info_opts(ic, ffp->codec_opts);
  88. int orig_nb_streams = ic->nb_streams;
  89. do {
  90. if (av_stristart(is->filename, "data:", NULL) && orig_nb_streams > 0) {
  91. for (i = 0; i < orig_nb_streams; i++) {
  92. if (!ic->streams[i] || !ic->streams[i]->codecpar || ic->streams[i]->codecpar->profile == FF_PROFILE_UNKNOWN) {
  93. break;
  94. }
  95. }
  96. if (i == orig_nb_streams) {
  97. break;
  98. }
  99. }
  100. // Read packets of a media file to get stream information. This
  101. // is useful for file formats with no headers such as MPEG. This
  102. // function also computes the real framerate in case of MPEG-2 repeat
  103. // frame mode.
  104. // The logical file position is not changed by this function;
  105. // examined packets may buffered for later processing.
  106. //探测文件封装格式,音视频编码参数等信息。
  107. err = avformat_find_stream_info(ic, opts);
  108. } while(0);
  109. ffp_notify_msg1(ffp, FFP_MSG_FIND_STREAM_INFO);
  110. for (i = 0; i < orig_nb_streams; i++)
  111. av_dict_free(&opts[i]);
  112. av_freep(&opts);
  113. if (err < 0) {
  114. av_log(NULL, AV_LOG_WARNING,
  115. "%s: could not find codec parameters\n", is->filename);
  116. ret = -1;
  117. goto fail;
  118. }
  119. }
  120. if (ic->pb)
  121. ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
  122. if (ffp->seek_by_bytes < 0)
  123. ffp->seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
  124. is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
  125. is->max_frame_duration = 10.0;
  126. av_log(ffp, AV_LOG_INFO, "max_frame_duration: %.3f\n", is->max_frame_duration);
  127. #ifdef FFP_MERGE
  128. if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
  129. window_title = av_asprintf("%s - %s", t->value, input_filename);
  130. #endif
  131. //处理seek
  132. /* if seeking requested, we execute it */
  133. if (ffp->start_time != AV_NOPTS_VALUE) {
  134. int64_t timestamp;
  135. timestamp = ffp->start_time;
  136. /* add the stream start time */
  137. if (ic->start_time != AV_NOPTS_VALUE)
  138. timestamp += ic->start_time;
  139. ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
  140. if (ret < 0) {
  141. av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
  142. is->filename, (double)timestamp / AV_TIME_BASE);
  143. }
  144. }
  145. is->realtime = is_realtime(ic);
  146. //打印详细的格式信息
  147. av_dump_format(ic, 0, is->filename, 0);
  148. int video_stream_count = 0;
  149. int h264_stream_count = 0;
  150. int first_h264_stream = -1;
  151. for (i = 0; i < ic->nb_streams; i++) {
  152. AVStream *st = ic->streams[i];
  153. enum AVMediaType type = st->codecpar->codec_type;
  154. st->discard = AVDISCARD_ALL;
  155. if (type >= 0 && ffp->wanted_stream_spec[type] && st_index[type] == -1)
  156. if (avformat_match_stream_specifier(ic, st, ffp->wanted_stream_spec[type]) > 0)
  157. st_index[type] = i;
  158. // choose first h264
  159. if (type == AVMEDIA_TYPE_VIDEO) {
  160. enum AVCodecID codec_id = st->codecpar->codec_id;
  161. video_stream_count++;
  162. if (codec_id == AV_CODEC_ID_H264) {
  163. h264_stream_count++;
  164. if (first_h264_stream < 0)
  165. first_h264_stream = i;
  166. }
  167. }
  168. }
  169. if (video_stream_count > 1 && st_index[AVMEDIA_TYPE_VIDEO] < 0) {
  170. st_index[AVMEDIA_TYPE_VIDEO] = first_h264_stream;
  171. av_log(NULL, AV_LOG_WARNING, "multiple video stream found, prefer first h264 stream: %d\n", first_h264_stream);
  172. }
  173. //*****对视频,音频,和字幕,调用av_find_best_stream,找到对应的stream的下标,并填充在st_index数组中
  174. if (!ffp->video_disable)
  175. st_index[AVMEDIA_TYPE_VIDEO] =
  176. // Find the "best" stream in the file.
  177. // The best stream is determined according to various heuristics as the most
  178. // likely to be what the user expects.
  179. // If the decoder parameter is non-NULL, av_find_best_stream will find the
  180. // default decoder for the stream's codec; streams for which no decoder can
  181. // be found are ignored.
  182. //根据AVFormatContext,找到最佳的流。
  183. av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
  184. st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
  185. if (!ffp->audio_disable)
  186. st_index[AVMEDIA_TYPE_AUDIO] =
  187. av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
  188. st_index[AVMEDIA_TYPE_AUDIO],
  189. st_index[AVMEDIA_TYPE_VIDEO],
  190. NULL, 0);
  191. if (!ffp->video_disable && !ffp->subtitle_disable)
  192. st_index[AVMEDIA_TYPE_SUBTITLE] =
  193. av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
  194. st_index[AVMEDIA_TYPE_SUBTITLE],
  195. (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
  196. st_index[AVMEDIA_TYPE_AUDIO] :
  197. st_index[AVMEDIA_TYPE_VIDEO]),
  198. NULL, 0);
  199. is->show_mode = ffp->show_mode;
  200. #ifdef FFP_MERGE // bbc: dunno if we need this
  201. if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
  202. AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
  203. AVCodecParameters *codecpar = st->codecpar;
  204. AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
  205. if (codecpar->width)
  206. set_default_window_size(codecpar->width, codecpar->height, sar);
  207. }
  208. #endif
  209. //******打开3个流 start
  210. /* open the streams */
  211. if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
  212. stream_component_open(ffp, st_index[AVMEDIA_TYPE_AUDIO]);
  213. } else {
  214. ffp->av_sync_type = AV_SYNC_VIDEO_MASTER;
  215. is->av_sync_type = ffp->av_sync_type;
  216. }
  217. ret = -1;
  218. if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
  219. ret = stream_component_open(ffp, st_index[AVMEDIA_TYPE_VIDEO]);
  220. }
  221. if (is->show_mode == SHOW_MODE_NONE)
  222. is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
  223. if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
  224. stream_component_open(ffp, st_index[AVMEDIA_TYPE_SUBTITLE]);
  225. }
  226. //******打开3个流 end
  227. ffp_notify_msg1(ffp, FFP_MSG_COMPONENT_OPEN);
  228. if (!ffp->ijkmeta_delay_init) {
  229. ijkmeta_set_avformat_context_l(ffp->meta, ic);
  230. }
  231. ffp->stat.bit_rate = ic->bit_rate;
  232. if (st_index[AVMEDIA_TYPE_VIDEO] >= 0)
  233. ijkmeta_set_int64_l(ffp->meta, IJKM_KEY_VIDEO_STREAM, st_index[AVMEDIA_TYPE_VIDEO]);
  234. if (st_index[AVMEDIA_TYPE_AUDIO] >= 0)
  235. ijkmeta_set_int64_l(ffp->meta, IJKM_KEY_AUDIO_STREAM, st_index[AVMEDIA_TYPE_AUDIO]);
  236. if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0)
  237. ijkmeta_set_int64_l(ffp->meta, IJKM_KEY_TIMEDTEXT_STREAM, st_index[AVMEDIA_TYPE_SUBTITLE]);
  238. if (is->video_stream < 0 && is->audio_stream < 0) {
  239. av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
  240. is->filename);
  241. ret = -1;
  242. goto fail;
  243. }
  244. //初始化缓冲指示器队列,优先使用音频packetQueue,找不到就使用视频packetQueue。
  245. if (is->audio_stream >= 0) {
  246. is->audioq.is_buffer_indicator = 1;
  247. is->buffer_indicator_queue = &is->audioq;
  248. } else if (is->video_stream >= 0) {
  249. is->videoq.is_buffer_indicator = 1;
  250. is->buffer_indicator_queue = &is->videoq;
  251. } else {
  252. assert("invalid streams");
  253. }
  254. //如果是直播流,则播放器使用无限缓存
  255. if (ffp->infinite_buffer < 0 && is->realtime)
  256. ffp->infinite_buffer = 1;
  257. if (!ffp->render_wait_start && !ffp->start_on_prepared)
  258. toggle_pause(ffp, 1);
  259. if (is->video_st && is->video_st->codecpar) {
  260. AVCodecParameters *codecpar = is->video_st->codecpar;
  261. //发送VIDEO_SIZE_CHANGED回调和SAR_CHANGED回调
  262. ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, codecpar->width, codecpar->height);
  263. ffp_notify_msg3(ffp, FFP_MSG_SAR_CHANGED, codecpar->sample_aspect_ratio.num, codecpar->sample_aspect_ratio.den);
  264. }
  265. ffp->prepared = true;
  266. //发送PREPARED回调
  267. ffp_notify_msg1(ffp, FFP_MSG_PREPARED);
  268. if (!ffp->render_wait_start && !ffp->start_on_prepared) {
  269. while (is->pause_req && !is->abort_request) {
  270. SDL_Delay(20);
  271. }
  272. }
  273. if (ffp->auto_resume) {
  274. ffp_notify_msg1(ffp, FFP_REQ_START);
  275. ffp->auto_resume = 0;
  276. }
  277. /* offset should be seeked*/
  278. if (ffp->seek_at_start > 0) {
  279. ffp_seek_to_l(ffp, (long)(ffp->seek_at_start));
  280. }
  281. //开始无限循环,调用ffmpeg的av_read_frame()读取AVPacket,并入队。
  282. for (;;) {
  283. //如果中断请求,则跳出循环
  284. if (is->abort_request)
  285. break;
  286. #ifdef FFP_MERGE
  287. if (is->paused != is->last_paused) {
  288. is->last_paused = is->paused;
  289. if (is->paused)
  290. is->read_pause_return = av_read_pause(ic);
  291. else
  292. av_read_play(ic);
  293. }
  294. #endif
  295. #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
  296. if (is->paused &&
  297. (!strcmp(ic->iformat->name, "rtsp") ||
  298. (ic->pb && !strncmp(ffp->input_filename, "mmsh:", 5)))) {
  299. /* wait 10 ms to avoid trying to get another packet */
  300. /* XXX: horrible */
  301. SDL_Delay(10);
  302. continue;
  303. }
  304. #endif
  305. //如果是seek 请求
  306. if (is->seek_req) {
  307. int64_t seek_target = is->seek_pos;
  308. int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
  309. int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
  310. // FIXME the +-2 is due to rounding being not done in the correct direction in generation
  311. // of the seek_pos/seek_rel variables
  312. ffp_toggle_buffering(ffp, 1);
  313. ffp_notify_msg3(ffp, FFP_MSG_BUFFERING_UPDATE, 0, 0);
  314. //ffmpeg 中处理seek
  315. ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
  316. if (ret < 0) {
  317. av_log(NULL, AV_LOG_ERROR,
  318. "%s: error while seeking\n", is->ic->filename);
  319. } else {
  320. if (is->audio_stream >= 0) {
  321. packet_queue_flush(&is->audioq);
  322. packet_queue_put(&is->audioq, &flush_pkt);
  323. // TODO: clear invaild audio data
  324. // SDL_AoutFlushAudio(ffp->aout);
  325. }
  326. if (is->subtitle_stream >= 0) {
  327. packet_queue_flush(&is->subtitleq);
  328. packet_queue_put(&is->subtitleq, &flush_pkt);
  329. }
  330. if (is->video_stream >= 0) {
  331. if (ffp->node_vdec) {
  332. ffpipenode_flush(ffp->node_vdec);
  333. }
  334. packet_queue_flush(&is->videoq);
  335. packet_queue_put(&is->videoq, &flush_pkt);
  336. }
  337. if (is->seek_flags & AVSEEK_FLAG_BYTE) {
  338. set_clock(&is->extclk, NAN, 0);
  339. } else {
  340. set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
  341. }
  342. is->latest_video_seek_load_serial = is->videoq.serial;
  343. is->latest_audio_seek_load_serial = is->audioq.serial;
  344. is->latest_seek_load_start_at = av_gettime();
  345. }
  346. ffp->dcc.current_high_water_mark_in_ms = ffp->dcc.first_high_water_mark_in_ms;
  347. is->seek_req = 0;
  348. is->queue_attachments_req = 1;
  349. is->eof = 0;
  350. #ifdef FFP_MERGE
  351. if (is->paused)
  352. step_to_next_frame(is);
  353. #endif
  354. completed = 0;
  355. SDL_LockMutex(ffp->is->play_mutex);
  356. if (ffp->auto_resume) {
  357. is->pause_req = 0;
  358. if (ffp->packet_buffering)
  359. is->buffering_on = 1;
  360. ffp->auto_resume = 0;
  361. stream_update_pause_l(ffp);
  362. }
  363. if (is->pause_req)
  364. step_to_next_frame_l(ffp);
  365. SDL_UnlockMutex(ffp->is->play_mutex);
  366. if (ffp->enable_accurate_seek) {
  367. is->drop_aframe_count = 0;
  368. is->drop_vframe_count = 0;
  369. SDL_LockMutex(is->accurate_seek_mutex);
  370. if (is->video_stream >= 0) {
  371. is->video_accurate_seek_req = 1;
  372. }
  373. if (is->audio_stream >= 0) {
  374. is->audio_accurate_seek_req = 1;
  375. }
  376. SDL_CondSignal(is->audio_accurate_seek_cond);
  377. SDL_CondSignal(is->video_accurate_seek_cond);
  378. SDL_UnlockMutex(is->accurate_seek_mutex);
  379. }
  380. ffp_notify_msg3(ffp, FFP_MSG_SEEK_COMPLETE, (int)fftime_to_milliseconds(seek_target), ret);
  381. ffp_toggle_buffering(ffp, 1);
  382. }
  383. if (is->queue_attachments_req) {
  384. if (is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
  385. AVPacket copy = { 0 };
  386. if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
  387. goto fail;
  388. packet_queue_put(&is->videoq, &copy);
  389. packet_queue_put_nullpacket(&is->videoq, is->video_stream);
  390. }
  391. is->queue_attachments_req = 0;
  392. }
  393. /* if the queue are full, no need to read more */
  394. if (ffp->infinite_buffer<1 && !is->seek_req &&
  395. #ifdef FFP_MERGE
  396. (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
  397. #else
  398. (is->audioq.size + is->videoq.size + is->subtitleq.size > ffp->dcc.max_buffer_size
  399. #endif
  400. //内部逻辑为queue->nb_packets > min_frames
  401. || ( stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq, MIN_FRAMES)
  402. && stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq, MIN_FRAMES)
  403. && stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq, MIN_FRAMES)))) {
  404. if (!is->eof) {
  405. ffp_toggle_buffering(ffp, 0);
  406. }
  407. /* wait 10 ms */
  408. SDL_LockMutex(wait_mutex);
  409. SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
  410. SDL_UnlockMutex(wait_mutex);
  411. //进入到下一次循环
  412. continue;
  413. }
  414. //处理播放结束
  415. if ((!is->paused || completed) &&
  416. (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
  417. (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
  418. if (ffp->loop != 1 && (!ffp->loop || --ffp->loop)) {
  419. stream_seek(is, ffp->start_time != AV_NOPTS_VALUE ? ffp->start_time : 0, 0, 0);
  420. } else if (ffp->autoexit) {
  421. ret = AVERROR_EOF;
  422. goto fail;
  423. } else {
  424. ffp_statistic_l(ffp);
  425. if (completed) {
  426. av_log(ffp, AV_LOG_INFO, "ffp_toggle_buffering: eof\n");
  427. SDL_LockMutex(wait_mutex);
  428. // infinite wait may block shutdown
  429. while(!is->abort_request && !is->seek_req)
  430. SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 100);
  431. SDL_UnlockMutex(wait_mutex);
  432. if (!is->abort_request)
  433. continue;
  434. } else {
  435. completed = 1;
  436. ffp->auto_resume = 0;
  437. // TODO: 0 it's a bit early to notify complete here
  438. ffp_toggle_buffering(ffp, 0);
  439. toggle_pause(ffp, 1);
  440. if (ffp->error) {
  441. av_log(ffp, AV_LOG_INFO, "ffp_toggle_buffering: error: %d\n", ffp->error);
  442. ffp_notify_msg1(ffp, FFP_MSG_ERROR);
  443. } else {
  444. av_log(ffp, AV_LOG_INFO, "ffp_toggle_buffering: completed: OK\n");
  445. ffp_notify_msg1(ffp, FFP_MSG_COMPLETED);
  446. }
  447. }
  448. }
  449. }
  450. pkt->flags = 0;
  451. //读帧,读到这个pkt包里面?
  452. //0 if OK, < 0 on error or end of file
  453. ret = av_read_frame(ic, pkt);
  454. if (ret < 0) {
  455. int pb_eof = 0;
  456. int pb_error = 0;
  457. //EOF表示:end of file
  458. if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
  459. ffp_check_buffering_l(ffp);
  460. pb_eof = 1;
  461. // check error later
  462. }
  463. if (ic->pb && ic->pb->error) {
  464. pb_eof = 1;
  465. pb_error = ic->pb->error;
  466. }
  467. if (ret == AVERROR_EXIT) {
  468. pb_eof = 1;
  469. pb_error = AVERROR_EXIT;
  470. }
  471. if (pb_eof) {
  472. if (is->video_stream >= 0)
  473. packet_queue_put_nullpacket(&is->videoq, is->video_stream);
  474. if (is->audio_stream >= 0)
  475. packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
  476. if (is->subtitle_stream >= 0)
  477. packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
  478. is->eof = 1;
  479. }
  480. if (pb_error) {
  481. if (is->video_stream >= 0)
  482. packet_queue_put_nullpacket(&is->videoq, is->video_stream);
  483. if (is->audio_stream >= 0)
  484. packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
  485. if (is->subtitle_stream >= 0)
  486. packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
  487. is->eof = 1;
  488. ffp->error = pb_error;
  489. av_log(ffp, AV_LOG_ERROR, "av_read_frame error: %s\n", ffp_get_error_string(ffp->error));
  490. // break;
  491. } else {
  492. ffp->error = 0;
  493. }
  494. if (is->eof) {
  495. ffp_toggle_buffering(ffp, 0);
  496. SDL_Delay(100);
  497. }
  498. SDL_LockMutex(wait_mutex);
  499. SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
  500. SDL_UnlockMutex(wait_mutex);
  501. ffp_statistic_l(ffp);
  502. continue;
  503. } else {
  504. is->eof = 0;
  505. }
  506. //flush_pkt是用来做什么的?
  507. if (pkt->flags & AV_PKT_FLAG_DISCONTINUITY) {
  508. if (is->audio_stream >= 0) {
  509. packet_queue_put(&is->audioq, &flush_pkt);
  510. }
  511. if (is->subtitle_stream >= 0) {
  512. packet_queue_put(&is->subtitleq, &flush_pkt);
  513. }
  514. if (is->video_stream >= 0) {
  515. packet_queue_put(&is->videoq, &flush_pkt);
  516. }
  517. }
  518. /* check if packet is in play range specified by user, then queue, otherwise discard */
  519. stream_start_time = ic->streams[pkt->stream_index]->start_time;
  520. pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
  521. pkt_in_play_range = ffp->duration == AV_NOPTS_VALUE ||
  522. (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
  523. av_q2d(ic->streams[pkt->stream_index]->time_base) -
  524. (double)(ffp->start_time != AV_NOPTS_VALUE ? ffp->start_time : 0) / 1000000
  525. <= ((double)ffp->duration / 1000000);
  526. if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
  527. packet_queue_put(&is->audioq, pkt);
  528. } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
  529. && !(is->video_st && (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))) {
  530. packet_queue_put(&is->videoq, pkt);
  531. } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
  532. packet_queue_put(&is->subtitleq, pkt);
  533. } else {
  534. av_packet_unref(pkt);
  535. }
  536. ffp_statistic_l(ffp);
  537. if (ffp->ijkmeta_delay_init && !init_ijkmeta &&
  538. (ffp->first_video_frame_rendered || !is->video_st) && (ffp->first_audio_frame_rendered || !is->audio_st)) {
  539. ijkmeta_set_avformat_context_l(ffp->meta, ic);
  540. init_ijkmeta = 1;
  541. }
  542. if (ffp->packet_buffering) {
  543. io_tick_counter = SDL_GetTickHR();
  544. if ((!ffp->first_video_frame_rendered && is->video_st) || (!ffp->first_audio_frame_rendered && is->audio_st)) {
  545. if (abs((int)(io_tick_counter - prev_io_tick_counter)) > FAST_BUFFERING_CHECK_PER_MILLISECONDS) {
  546. prev_io_tick_counter = io_tick_counter;
  547. ffp->dcc.current_high_water_mark_in_ms = ffp->dcc.first_high_water_mark_in_ms;
  548. ffp_check_buffering_l(ffp);
  549. }
  550. } else {
  551. if (abs((int)(io_tick_counter - prev_io_tick_counter)) > BUFFERING_CHECK_PER_MILLISECONDS) {
  552. prev_io_tick_counter = io_tick_counter;
  553. ffp_check_buffering_l(ffp);
  554. }
  555. }
  556. }
  557. }
  558. ret = 0;
  559. fail:
  560. if (ic && !is->ic)
  561. avformat_close_input(&ic);
  562. if (!ffp->prepared || !is->abort_request) {
  563. ffp->last_error = last_error;
  564. ffp_notify_msg2(ffp, FFP_MSG_ERROR, last_error);
  565. }
  566. SDL_DestroyMutex(wait_mutex);
  567. return 0;
  568. }

小结:

总结一下,读取线程(read_thread)大致做的事情是:

  1. 通过ffmpeg进行协议探测、封装格式探测等操作(可能涉及网络请求),为创建解码器做准备。
  2. 创建video,audio,subtitle解码器并开启相应的解码线程。
  3. 在for循环中不断调用av_read_frame(),从ffmpeg内部维护的网络包缓存中取出已下载好的AVPacket,并放入相应的队列中,供稍后解码线程取出解码。