ffmpeg实现视频切割
流程
打开输入——>打开输出——>根据输入来创建流——>拷贝流设置——>循环读帧——>判断时间点是否到达切割点,并做设置——>设置pts和dts——>写入——>善后
代码
/*
*最简单的视频切割
*本程序实现把一个视频切割为2个视频,不涉及编解码,最难理解的地方在于pts和dts的计算,要好好看看
*不够完美的地方在于没有按照关键帧切割,所以会在切割点花屏,以后改善
*注:只处理一个视频流和一个音频流,若流多了,估计会crash
*/
#include "stdafx.h"
#ifdef __cplusplus
extern"C"
{
#endif
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libswresample\swresample.h"
#include "libavutil\fifo.h"
#include "libavutil/audio_fifo.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
//#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
//#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
//#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
/*
 * Split the input media file (argv[1]) into two output files,
 * "test1.<ext>" and "test2.<ext>" (<ext> = argv[2]), cutting at
 * splitTime seconds.  Pure stream copy (remux) -- no decoding or
 * encoding -- so the cut is not keyframe-aligned and the second file
 * may show artifacts until the next keyframe.
 *
 * NOTE(review): handles at most one video and one audio stream; any
 * additional input streams are skipped (the original wrote them
 * through an uninitialized output context and crashed).
 * NOTE(review): assumes the output stream index mirrors the input
 * index, which holds because output streams are created in input
 * order -- confirm if more streams are ever added.
 *
 * Returns 0 on success (and also after best-effort cleanup on most
 * late failures), -1 on early argument/open failures.
 */
int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *ifmt_ctx = NULL, *ofmt1_ctx = NULL, *ofmt2_ctx = NULL;
	AVStream *out1_vstream = NULL, *out1_astream = NULL;
	AVStream *out2_vstream = NULL, *out2_astream = NULL;
	/* Large enough for "testN." plus any reasonable extension; the
	 * original 10-byte buffers overflowed for extensions longer than
	 * three characters. */
	char str_out1_filename[256];
	char str_out2_filename[256];
	int inVideo_StreamIndex = -1, inAudio_StreamIndex = -1;
	int ret;
	/* int64_t matches AVPacket::pts/dts; the original ints could
	 * overflow on long inputs or fine-grained time bases. */
	int64_t splitPtsV = 0, splitDtsV = 0;  /* last video pts/dts before the cut */
	int64_t splitPtsA = 0, splitDtsA = 0;  /* last audio pts/dts before the cut */
	const double splitTime = 30.0;         /* cut point, in seconds */
	AVPacket pkt;

	if (argc < 3)
	{
		printf("no input file!\n");
		return -1;
	}
	/* NOTE(review): %s with _TCHAR* argv assumes an ANSI (non-UNICODE)
	 * build -- confirm the project character set. */
	snprintf(str_out1_filename, sizeof(str_out1_filename), "test1.%s", argv[2]);
	snprintf(str_out2_filename, sizeof(str_out2_filename), "test2.%s", argv[2]);
	av_register_all();
	if ((ret = avformat_open_input(&ifmt_ctx, argv[1], NULL, NULL)) < 0)
	{
		printf("can not open the input file format context!\n");
		return -1;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0)
	{
		printf("can not find the input stream info!\n");
		goto end;
	}
	avformat_alloc_output_context2(&ofmt1_ctx, NULL, NULL, str_out1_filename);
	if (!ofmt1_ctx)
	{
		printf("Could not create output1 context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	avformat_alloc_output_context2(&ofmt2_ctx, NULL, NULL, str_out2_filename);
	if (!ofmt2_ctx)
	{
		printf("Could not create output2 context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	/* Create one output stream per selected input stream, in both
	 * outputs, copying the codec parameters (stream copy). */
	for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++)
	{
		AVCodecContext *in_codec = ifmt_ctx->streams[i]->codec;
		if (in_codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			inVideo_StreamIndex = (int)i;
			out1_vstream = avformat_new_stream(ofmt1_ctx, NULL);
			out2_vstream = avformat_new_stream(ofmt2_ctx, NULL);
			/* The decoder is opened but never used for decoding;
			 * kept for parity with the original behavior. */
			if (0 > avcodec_open2(in_codec, avcodec_find_decoder(in_codec->codec_id), NULL))
			{
				printf("can not find or open video decoder!\n");
				goto end;
			}
			if (!out1_vstream)
			{
				printf("Failed allocating output1 video stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			if (avcodec_copy_context(out1_vstream->codec, in_codec) < 0)
			{
				printf("Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out1_vstream->codec->codec_tag = 0;
			if (ofmt1_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out1_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			if (!out2_vstream)
			{
				printf("Failed allocating output2 video stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			if (avcodec_copy_context(out2_vstream->codec, in_codec) < 0)
			{
				printf("Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out2_vstream->codec->codec_tag = 0;
			if (ofmt2_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out2_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
		}
		else if (in_codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			inAudio_StreamIndex = (int)i;
			out1_astream = avformat_new_stream(ofmt1_ctx, NULL);
			out2_astream = avformat_new_stream(ofmt2_ctx, NULL);
			if (!out1_astream)
			{
				printf("Failed allocating output1 audio stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			if (avcodec_copy_context(out1_astream->codec, in_codec) < 0)
			{
				printf("Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out1_astream->codec->codec_tag = 0;
			if (ofmt1_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out1_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			if (!out2_astream)
			{
				printf("Failed allocating output2 audio stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			if (avcodec_copy_context(out2_astream->codec, in_codec) < 0)
			{
				printf("Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out2_astream->codec->codec_tag = 0;
			if (ofmt2_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out2_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
		}
	}
	//Dump Format------------------
	printf("\n==============Input Video=============\n");
	av_dump_format(ifmt_ctx, 0, argv[1], 0);
	printf("\n==============Output1============\n");
	av_dump_format(ofmt1_ctx, 0, str_out1_filename, 1);
	printf("\n==============Output2============\n");
	/* Fixed: the original dumped output2 with output1's filename. */
	av_dump_format(ofmt2_ctx, 0, str_out2_filename, 1);
	printf("\n======================================\n");
	//open output1 file
	if (!(ofmt1_ctx->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&ofmt1_ctx->pb, str_out1_filename, AVIO_FLAG_WRITE) < 0)
		{
			printf("Could not open output file '%s'", str_out1_filename);
			goto end;
		}
	}
	//open output2 file
	if (!(ofmt2_ctx->oformat->flags & AVFMT_NOFILE))
	{
		if (avio_open(&ofmt2_ctx->pb, str_out2_filename, AVIO_FLAG_WRITE) < 0)
		{
			printf("Could not open output file '%s'", str_out2_filename);
			goto end;
		}
	}
	//write out 1 file header
	if (avformat_write_header(ofmt1_ctx, NULL) < 0)
	{
		printf("Error occurred when opening video output file\n");
		goto end;
	}
	//write out 2 file header
	if (avformat_write_header(ofmt2_ctx, NULL) < 0)
	{
		printf("Error occurred when opening video output file\n");
		goto end;
	}
	/* Demux loop: route each packet to output1 before the cut point
	 * and to output2 after it, re-zeroing output2's timestamps by
	 * subtracting the last pre-cut pts/dts. */
	while (1)
	{
		AVFormatContext *ofmt_ctx = NULL;  /* destination; NULL = skip packet */
		AVStream *in_stream, *out_stream = NULL;
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
		{
			break;
		}
		in_stream = ifmt_ctx->streams[pkt.stream_index];
		if (pkt.stream_index == inVideo_StreamIndex)
		{
			/* NOTE(review): assumes pkt.pts is valid (not
			 * AV_NOPTS_VALUE) -- confirm for the container in use. */
			double time = pkt.pts * ((double)in_stream->time_base.num / (double)in_stream->time_base.den);
			if (time <= splitTime)
			{
				splitPtsV = pkt.pts;
				splitDtsV = pkt.dts;
				out_stream = ofmt1_ctx->streams[pkt.stream_index];
				ofmt_ctx = ofmt1_ctx;
			}
			else
			{
				pkt.pts -= splitPtsV;
				pkt.dts -= splitDtsV;
				out_stream = ofmt2_ctx->streams[pkt.stream_index];
				ofmt_ctx = ofmt2_ctx;
			}
		}
		else if (pkt.stream_index == inAudio_StreamIndex)
		{
			double time = pkt.pts * ((double)in_stream->time_base.num / (double)in_stream->time_base.den);
			if (time <= splitTime)
			{
				splitPtsA = pkt.pts;
				splitDtsA = pkt.dts;
				out_stream = ofmt1_ctx->streams[pkt.stream_index];
				ofmt_ctx = ofmt1_ctx;
			}
			else
			{
				pkt.pts -= splitPtsA;
				pkt.dts -= splitDtsA;
				out_stream = ofmt2_ctx->streams[pkt.stream_index];
				ofmt_ctx = ofmt2_ctx;
			}
		}
		if (!ofmt_ctx)
		{
			/* Packet from a stream we did not map (e.g. subtitles):
			 * drop it instead of dereferencing uninitialized
			 * pointers as the original did. */
			av_free_packet(&pkt);
			continue;
		}
		/* Convert timestamps from the input stream's time base to the
		 * output stream's time base. */
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//write into file
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0)
		{
			printf("Error muxing packet\n");
			av_free_packet(&pkt);  /* was leaked on this path */
			break;
		}
		av_free_packet(&pkt);
	}
	av_write_trailer(ofmt1_ctx);
	av_write_trailer(ofmt2_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output1 */
	if (ofmt1_ctx && !(ofmt1_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt1_ctx->pb);
	avformat_free_context(ofmt1_ctx);
	/* close output2 */
	if (ofmt2_ctx && !(ofmt2_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt2_ctx->pb);
	avformat_free_context(ofmt2_ctx);
	return 0;
}
ffmpeg实现视频合并
流程
打开输入——>打开输出——>根据输入来创建流——>拷贝流设置——>循环读帧——>判断第一个读完,改输入为第二个——>设置pts和dts——>写入——>善后
代码
/*
*最简单的视频合并
*本程序实现把2个视频合并为一个视频,不涉及编解码,但是对视频源有要求,必须是相同的参数
*着重理解第二个视频开始的时候的pts和dts计算
*注:只处理一个视频流和一个音频流,若流多了,估计会crash
*/
#include "stdafx.h"
#ifdef __cplusplus
extern"C"
{
#endif
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/avutil.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libswresample\swresample.h"
#include "libavutil\fifo.h"
#include "libavutil/audio_fifo.h"
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
//#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")
//#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
//#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif
/* The two demuxer contexts (first and second input) and the single muxer
 * context for the concatenated output; populated by open_input()/open_output(). */
AVFormatContext *in1_fmtctx = NULL, *in2_fmtctx = NULL, *out_fmtctx = NULL;
/* Output streams created from the first input's video/audio streams. */
AVStream *out_video_stream = NULL, *out_audio_stream = NULL;
/* Stream indexes of the video/audio streams in the first input
 * (-1 until found); the code assumes the second input matches them. */
int video_stream_index = -1, audio_stream_index = -1;
/*
 * Open both input files and probe their stream info, filling the
 * global in1_fmtctx / in2_fmtctx.
 *
 * Returns 0 on success, a negative AVERROR code on failure (contexts
 * already opened are left for the caller's cleanup path to close).
 */
int open_input(const char * in1_name, const char * in2_name)
{
	int ret;
	if ((ret = avformat_open_input(&in1_fmtctx, in1_name, NULL, NULL)) < 0)
	{
		printf("can not open the first input context!\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(in1_fmtctx, NULL)) < 0)
	{
		printf("can not find the first input stream info!\n");
		return ret;
	}
	if ((ret = avformat_open_input(&in2_fmtctx, in2_name, NULL, NULL)) < 0)
	{
		/* was mislabeled "first" in the original */
		printf("can not open the second input context!\n");
		return ret;
	}
	if ((ret = avformat_find_stream_info(in2_fmtctx, NULL)) < 0)
	{
		printf("can not find the second input stream info!\n");
		return ret;
	}
	/* The original fell off the end of this non-void function whose
	 * return value the caller tests -- undefined behavior. */
	return 0;
}
/*
 * Create the output context for out_name, mirror the first input's
 * video/audio streams into it (stream copy), open the output file and
 * write the container header.
 *
 * Records the input's video/audio stream indexes in the globals
 * video_stream_index / audio_stream_index.
 *
 * Returns 0 on success, a negative AVERROR code on failure.
 */
int open_output(const char * out_name)
{
	int ret;
	if ((ret = avformat_alloc_output_context2(&out_fmtctx, NULL, NULL, out_name)) < 0)
	{
		printf("can not alloc context for output!\n");
		return ret;
	}
	//new stream for output
	for (unsigned int i = 0; i < in1_fmtctx->nb_streams; i++)
	{
		if (in1_fmtctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
		{
			video_stream_index = (int)i;
			out_video_stream = avformat_new_stream(out_fmtctx, NULL);
			if (!out_video_stream)
			{
				printf("Failed allocating output video stream\n");
				return AVERROR_UNKNOWN;
			}
			if ((ret = avcodec_copy_context(out_video_stream->codec, in1_fmtctx->streams[i]->codec)) < 0)
			{
				printf("can not copy the video codec context!\n");
				return ret;
			}
			out_video_stream->codec->codec_tag = 0;
			if (out_fmtctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out_video_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
		}
		else if (in1_fmtctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
		{
			audio_stream_index = (int)i;
			out_audio_stream = avformat_new_stream(out_fmtctx, NULL);
			if (!out_audio_stream)
			{
				/* messages below were copy-pasted from the video
				 * branch in the original */
				printf("Failed allocating output audio stream\n");
				return AVERROR_UNKNOWN;
			}
			if ((ret = avcodec_copy_context(out_audio_stream->codec, in1_fmtctx->streams[i]->codec)) < 0)
			{
				printf("can not copy the audio codec context!\n");
				return ret;
			}
			out_audio_stream->codec->codec_tag = 0;
			if (out_fmtctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				out_audio_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
		}
	}
	//open output file
	if (!(out_fmtctx->oformat->flags & AVFMT_NOFILE))
	{
		if ((ret = avio_open(&out_fmtctx->pb, out_name, AVIO_FLAG_WRITE)) < 0)
		{
			printf("can not open the output file handle!\n");
			return ret;
		}
	}
	//write output file header
	if ((ret = avformat_write_header(out_fmtctx, NULL)) < 0)
	{
		printf("Error occurred when opening video output file\n");
		return ret;
	}
	/* The original fell off the end of this non-void function whose
	 * return value the caller tests -- undefined behavior. */
	return 0;
}
/*
 * Concatenate two media files (argv[1], argv[2]) into "combine.<ext>"
 * (<ext> = argv[3]) by stream copy.  Both inputs must share identical
 * codec parameters since no transcoding is performed.
 *
 * The second file's timestamps are offset by the first file's final
 * pts/dts so playback is continuous.
 *
 * NOTE(review): handles one video and one audio stream and assumes the
 * second input's stream indexes match the first's.
 */
int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *input_ctx = NULL;
	AVPacket pkt;
	/* Last pts/dts seen in the first file, later used as the offset
	 * for the second file.  int64_t matches AVPacket::pts/dts, and
	 * zero-initialization keeps the values defined even if a stream
	 * never yields a packet (they were uninitialized ints before --
	 * undefined behavior). */
	int64_t pts_v = 0, pts_a = 0, dts_v = 0, dts_a = 0;
	/* Large enough for "combine." plus any extension; the original
	 * 20-byte buffer could overflow via sprintf. */
	char out_name[256];
	if (argc < 4)
	{
		printf("no input file!\n");
		return -1;
	}
	snprintf(out_name, sizeof(out_name), "combine.%s", argv[3]);
	av_register_all();
	if (0 > open_input(argv[1], argv[2]))
	{
		goto end;
	}
	if (0 > open_output(out_name))
	{
		goto end;
	}
	input_ctx = in1_fmtctx;
	while (1)
	{
		if (0 > av_read_frame(input_ctx, &pkt))
		{
			if (input_ctx == in1_fmtctx)
			{
				/* First file exhausted: turn the last pts values into
				 * wall-clock durations per stream. */
				float videoDuraTime, audioDuraTime;
				videoDuraTime = ((float)input_ctx->streams[video_stream_index]->time_base.num /
					(float)input_ctx->streams[video_stream_index]->time_base.den) * ((float)pts_v);
				audioDuraTime = ((float)input_ctx->streams[audio_stream_index]->time_base.num /
					(float)input_ctx->streams[audio_stream_index]->time_base.den) * ((float)pts_a);
				/* Pad the shorter stream's offset up to the longer
				 * one so both streams of file 2 start in sync. */
				if (audioDuraTime > videoDuraTime)
				{
					dts_v = pts_v = (int64_t)(audioDuraTime / ((float)input_ctx->streams[video_stream_index]->time_base.num /
						(float)input_ctx->streams[video_stream_index]->time_base.den));
					dts_a++;
					pts_a++;
				}
				else
				{
					dts_a = pts_a = (int64_t)(videoDuraTime / ((float)input_ctx->streams[audio_stream_index]->time_base.num /
						(float)input_ctx->streams[audio_stream_index]->time_base.den));
					dts_v++;
					pts_v++;
				}
				input_ctx = in2_fmtctx;
				continue;
			}
			break;  /* second file exhausted: done */
		}
		if (pkt.stream_index == video_stream_index)
		{
			if (input_ctx == in2_fmtctx)
			{
				/* Shift file-2 timestamps past the end of file 1. */
				pkt.pts += pts_v;
				pkt.dts += dts_v;
			}
			else
			{
				/* Track the running end of file 1. */
				pts_v = pkt.pts;
				dts_v = pkt.dts;
			}
		}
		else if (pkt.stream_index == audio_stream_index)
		{
			if (input_ctx == in2_fmtctx)
			{
				pkt.pts += pts_a;
				pkt.dts += dts_a;
			}
			else
			{
				pts_a = pkt.pts;
				dts_a = pkt.dts;
			}
		}
		/* Convert timestamps to the output stream's time base. */
		pkt.pts = av_rescale_q_rnd(pkt.pts, input_ctx->streams[pkt.stream_index]->time_base,
			out_fmtctx->streams[pkt.stream_index]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, input_ctx->streams[pkt.stream_index]->time_base,
			out_fmtctx->streams[pkt.stream_index]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		/* Rescale duration too, for consistency with pts/dts (the
		 * original left it in the input time base). */
		pkt.duration = av_rescale_q(pkt.duration, input_ctx->streams[pkt.stream_index]->time_base,
			out_fmtctx->streams[pkt.stream_index]->time_base);
		pkt.pos = -1;
		if (av_interleaved_write_frame(out_fmtctx, &pkt) < 0)
		{
			/* Keep going: one bad packet should not abort the merge. */
			printf("Error muxing packet\n");
		}
		av_free_packet(&pkt);
	}
	av_write_trailer(out_fmtctx);
end:
	avformat_close_input(&in1_fmtctx);
	avformat_close_input(&in2_fmtctx);
	/* close output */
	if (out_fmtctx && !(out_fmtctx->oformat->flags & AVFMT_NOFILE))
		avio_close(out_fmtctx->pb);
	avformat_free_context(out_fmtctx);
	return 0;
}