本文主要是介绍FFmpeg AAC文件和H264文件合成MP4/FLV文件,希望对大家解决编程问题提供一定的参考价值,需要的开发者们随着小编来一起学习吧!
使用FFmpeg库把AAC文件和H264文件合成MP4/FLV文件,FFmpeg版本为4.4.2-0。
需要aac和h264测试文件的,可以从我上传的MP4文件中用ffmpeg提取,命令如下:
ffmpeg -i <input.mp4> -map 0:v -c:v copy <output.h264> -map 0:a -c:a copy <output.aac>
代码如下:
#include <inttypes.h>
#include <stdio.h>

#include "libavformat/avformat.h"

// 打开输入文件并查找流信息
/*
 * Open an input file and probe its stream information.
 *
 * filename  path of the media file to demux
 * ifmt_ctx  receives the allocated AVFormatContext (left NULL on failure)
 *
 * Returns 0 on success, -1 on error (a message is printed to stderr).
 */
int open_input_file(const char *filename, AVFormatContext **ifmt_ctx)
{
    // Open and demux the container.
    if (avformat_open_input(ifmt_ctx, filename, 0, 0) < 0) {
        fprintf(stderr, "open %s file failed\n", filename);
        return -1;
    }

    // Probe a few packets so every stream's codec parameters get filled in.
    if (avformat_find_stream_info(*ifmt_ctx, 0) < 0) {
        fprintf(stderr, "avformat_find_stream_info failed\n");
        return -1;
    }

    return 0;
}

// Create an output stream of a given media type
/*
 * Create one output stream of the requested media type, copying codec
 * parameters from the first matching stream of the input.
 *
 * ofmt_ctx   output (muxer) context to add the stream to
 * ifmt_ctx   input (demuxer) context to copy parameters from
 * type       AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO
 * index_in   receives the matching input stream index
 * index_out  receives the newly created output stream index
 *
 * Returns 0 on success, -1 on error or if no stream of that type exists.
 */
int create_output_stream(AVFormatContext *ofmt_ctx, AVFormatContext *ifmt_ctx, enum AVMediaType type,
                         int *index_in, int *index_out)
{
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
        // Skip streams that are not of the requested type.
        if (ifmt_ctx->streams[i]->codecpar->codec_type != type)
            continue;

        AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "avformat_new_stream failed\n");
            return -1;
        }

        *index_in = i;
        *index_out = out_stream->index;

        // Copy the input stream's codec parameters (stream copy, no re-encode).
        if (avcodec_parameters_copy(out_stream->codecpar, ifmt_ctx->streams[i]->codecpar) < 0) {
            fprintf(stderr, "avcodec_parameters_copy failed\n");
            return -1;
        }

        /*
         * Clear the codec tag so the muxer chooses the tag appropriate for
         * the output container (MP4/FLV fourccs differ from the input's).
         * FFmpeg's own remuxing example does this for every copied stream,
         * not only audio.
         *
         * BUGFIX: the original code did
         *     ofmt_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
         * AV_CODEC_FLAG_GLOBAL_HEADER is an AVCodecContext flag; OR-ing it
         * into AVFormatContext.flags sets an unrelated AVFMT_FLAG_* bit.
         * When stream-copying there is no encoder to flag at all — the
         * codec extradata (global header) is carried over verbatim by
         * avcodec_parameters_copy() above — so the line is simply removed.
         */
        out_stream->codecpar->codec_tag = 0;

        return 0;
    }

    // No stream of the requested type was found in the input.
    return -1;
}

// Read one packet from the input and write it to the output
/*
 * Read one packet from ifmt_ctx and, if it belongs to stream
 * stream_index_in, rescale its timestamps and write it to ofmt_ctx as
 * stream stream_index_out. Packets from other streams are dropped.
 *
 * frame_index  running counter used to synthesize PTS for inputs that
 *              carry no timestamps (raw H.264 elementary streams)
 * cur_pts      updated with the packet's input-time-base PTS so the
 *              caller can interleave audio and video by timestamp
 *
 * Returns 0 on success, -2 on end of input, -1 on error.
 */
int rw_frame(AVFormatContext *ifmt_ctx, AVFormatContext *ofmt_ctx,
             int stream_index_in, int stream_index_out, int *frame_index, int64_t *cur_pts)
{
    int ret;
    AVPacket pkt;
    AVStream *in_stream, *out_stream;

    if ((ret = av_read_frame(ifmt_ctx, &pkt)) < 0) {
        if (ret == AVERROR_EOF) {
            // End of file: signal the caller to stop this stream.
            return -2;
        } else {
            fprintf(stderr, "av_read_frame failed\n");
            return -1;
        }
    }

    in_stream = ifmt_ctx->streams[pkt.stream_index];
    out_stream = ofmt_ctx->streams[stream_index_out];

    if (pkt.stream_index == stream_index_in) {
        // Raw elementary streams have no timestamps: derive PTS/DTS from
        // the frame rate and a running frame counter.
        if (pkt.pts == AV_NOPTS_VALUE) {
            AVRational time_base = in_stream->time_base;
            // Duration of one frame expressed in AV_TIME_BASE (µs) units.
            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
            pkt.pts = (double)(*frame_index * calc_duration) / (double)(av_q2d(time_base) * AV_TIME_BASE);
            pkt.dts = pkt.pts;
            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base) * AV_TIME_BASE);
            (*frame_index)++;
        }
        *cur_pts = pkt.pts;

        // Convert timestamps from the input to the output time base.
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,
                                   (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,
                                   (enum AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index_out;

        /*
         * BUGFIX: pkt.pts is int64_t; "%ld" only matches it on LP64.
         * On ILP32/LLP64 targets it is undefined behavior — use the
         * portable PRId64 macro from <inttypes.h>.
         */
        if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            printf("Write 1 video Packet. size:%d pts:%" PRId64 "\n", pkt.size, pkt.pts);
        } else if (in_stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            printf("Write 1 audio Packet. size:%d pts:%" PRId64 "\n", pkt.size, pkt.pts);
        }

        // av_interleaved_write_frame() takes ownership of the packet's data
        // and resets it, so the unref below is safe on every path.
        if ((ret = av_interleaved_write_frame(ofmt_ctx, &pkt)) < 0) {
            fprintf(stderr, "av_interleaved_write_frame failed\n");
            av_packet_unref(&pkt);
            return -1;
        }
    }

    av_packet_unref(&pkt);
    return 0;
}

int main(int argc, char *argv[])
{
    int ret = -1, value = -1;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;                    // shared PTS-synthesis counter for rw_frame()
    int64_t cur_pts_v = 0, cur_pts_a = 0;   // last input PTS seen per stream
    int writing_v = 1, writing_a = 1;       // cleared when a stream hits EOF
    const AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;

    /*
     * BUGFIX: the program uses argv[1]..argv[3], so argc must be at least 4
     * (the original tested argc < 3, letting argc == 3 fall through with a
     * missing output name). Also read argv only AFTER the check — the
     * original dereferenced argv[1..3] first, which indexes past the end of
     * argv when fewer arguments are given (undefined behavior).
     */
    if (argc < 4) {
        fprintf(stderr, "Usage: %s <h264 filename> <aac filename> <output filename>\n", argv[0]);
        return -1;
    }
    const char *in_filename_v = argv[1];
    const char *in_filename_a = argv[2];
    const char *out_filename = argv[3];

    // Open the video and audio inputs.
    if (open_input_file(in_filename_v, &ifmt_ctx_v) < 0)
        goto end;
    if (open_input_file(in_filename_a, &ifmt_ctx_a) < 0)
        goto end;

    // Allocate the output context; the muxer is guessed from the file name.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "avformat_alloc_output_context2 failed\n");
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    // Create one output stream per input (video, then audio).
    if (create_output_stream(ofmt_ctx, ifmt_ctx_v, AVMEDIA_TYPE_VIDEO, &videoindex_v, &videoindex_out) < 0)
        goto end;
    if (create_output_stream(ofmt_ctx, ifmt_ctx_a, AVMEDIA_TYPE_AUDIO, &audioindex_a, &audioindex_out) < 0)
        goto end;

    // Open the output file unless the muxer does its own I/O.
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE)) {
            fprintf(stderr, "open %s file failed\n", out_filename);
            goto end;
        }
    }

    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        fprintf(stderr, "avformat_write_header failed\n");
        goto end;
    }

    // Interleave by timestamp: always advance whichever stream is behind.
    while (writing_v || writing_a) {
        if (writing_v &&
            (!writing_a ||
             av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
                           cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)) {
            value = rw_frame(ifmt_ctx_v, ofmt_ctx, videoindex_v, videoindex_out,
                             &frame_index, &cur_pts_v);
            if (value == -2)
                writing_v = 0;      // video reached EOF
            else if (value < 0)
                goto end;
        } else {
            value = rw_frame(ifmt_ctx_a, ofmt_ctx, audioindex_a, audioindex_out,
                             &frame_index, &cur_pts_a);
            if (value == -2)
                writing_a = 0;      // audio reached EOF
            else if (value < 0)
                goto end;
        }
    }

    // Finalize the container (writes MP4 moov / FLV metadata).
    av_write_trailer(ofmt_ctx);
    ret = 0;

end:
    if (ifmt_ctx_v)
        avformat_close_input(&ifmt_ctx_v);
    if (ifmt_ctx_a)
        avformat_close_input(&ifmt_ctx_a);
    // ofmt non-NULL implies ofmt_ctx was allocated successfully.
    if (ofmt && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    if (ofmt_ctx)
        avformat_free_context(ofmt_ctx);
    return ret;
}
参考博客链接:FFMPEG库实现mp4/flv文件(H264+AAC)的封装与分离_ffmpeg mp4 flv-CSDN博客
这篇关于FFmpeg AAC文件和H264文件合成MP4/FLV文件的文章就介绍到这儿,希望我们推荐的文章对编程师们有所帮助!