This article walks through two pieces of FFmpeg: the AVPacket structure and the av_read_frame() function. It is intended as a reference for developers working with the FFmpeg demuxing API.
The AVPacket structure holds compressed (encoded) data. When decoding, an AVPacket instance is typically produced by the demuxer and fed into the decoder; when encoding, it is typically produced by the encoder and fed into the muxer. The structure is defined as follows:
typedef struct AVPacket {
    /**
     * A reference to the reference-counted buffer where the packet data is
     * stored.
     * May be NULL, then the packet data is not reference-counted.
     */
    AVBufferRef *buf;
    /**
     * Presentation timestamp in AVStream->time_base units; the time at which
     * the decompressed packet will be presented to the user.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     * pts MUST be larger or equal to dts as presentation cannot happen before
     * decompression, unless one wants to view hex dumps. Some formats misuse
     * the terms dts and pts/cts to mean something different. Such timestamps
     * must be converted to true pts/dts before they are stored in AVPacket.
     */
    int64_t pts;        // presentation timestamp
    /**
     * Decompression timestamp in AVStream->time_base units; the time at which
     * the packet is decompressed.
     * Can be AV_NOPTS_VALUE if it is not stored in the file.
     */
    int64_t dts;        // decoding timestamp
    uint8_t *data;      // compressed data carried by this packet; the buffer it points to holds the coded bitstream
    int size;           // size of the compressed data
    int stream_index;   // index of the stream this packet belongs to
    /**
     * A combination of AV_PKT_FLAG values
     */
    int flags;
    /**
     * Additional packet data that can be provided by the container.
     * Packet can contain several types of side information.
     */
    struct {
        uint8_t *data;
        int size;
        enum AVPacketSideDataType type;
    } *side_data;
    int side_data_elems;
    /**
     * Duration of this packet in AVStream->time_base units, 0 if unknown.
     * Equals next_pts - this_pts in presentation order.
     */
    int duration;
#if FF_API_DESTRUCT_PACKET
    attribute_deprecated
    void (*destruct)(struct AVPacket *);
    attribute_deprecated
    void *priv;
#endif
    int64_t pos;        ///< byte position in stream, -1 if unknown
    /**
     * Time difference in AVStream->time_base units from the pts of this
     * packet to the point at which the output from the decoder has converged
     * independent from the availability of previous frames. That is, the
     * frames are virtually identical no matter if decoding started from
     * the very first frame or from this keyframe.
     * Is AV_NOPTS_VALUE if unknown.
     * This field is not the display duration of the current packet.
     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
     * set.
     *
     * The purpose of this field is to allow seeking in streams that have no
     * keyframes in the conventional sense. It corresponds to the
     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
     * essential for some types of subtitle streams to ensure that all
     * subtitles are correctly displayed after seeking.
     */
    int64_t convergence_duration;
} AVPacket;
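Once a packet has been obtained from a demuxer (for example with av_read_frame(), discussed below), the fields above can be interpreted directly. The following is a minimal sketch, assuming the same FFmpeg 2.x-era API as the rest of this article; dump_packets and fmt_ctx (an already opened AVFormatContext) are hypothetical names, and pts is converted to seconds using the owning stream's time_base, as the field comments indicate.

#include <stdio.h>
#include <libavformat/avformat.h>

static void dump_packets(AVFormatContext *fmt_ctx)   /* fmt_ctx: an already opened demuxer context (placeholder) */
{
    AVPacket packet;
    av_init_packet(&packet);          /* reset the optional fields; data/size are filled by av_read_frame() */

    while (av_read_frame(fmt_ctx, &packet) >= 0) {
        AVStream *st = fmt_ctx->streams[packet.stream_index];
        double pts_sec = (packet.pts == AV_NOPTS_VALUE)
                         ? -1.0
                         : packet.pts * av_q2d(st->time_base);   /* pts is expressed in AVStream->time_base units */

        printf("stream=%d size=%d pts=%.3fs key=%d\n",
               packet.stream_index, packet.size, pts_sec,
               (packet.flags & AV_PKT_FLAG_KEY) ? 1 : 0);

        av_free_packet(&packet);      /* release the buffer av_read_frame() attached to the packet */
    }
}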
In the demo, av_read_frame(pFormatCtx, &packet) is called to read compressed stream data from the file associated with the pFormatCtx context and store it in the AVPacket instance packet. The implementation of av_read_frame() is shown below:
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        ret = s->packet_buffer
              ? read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    for (;;) {
        AVPacketList *pktl = s->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = s->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            if (!(next_pkt->pts == AV_NOPTS_VALUE &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = read_from_packet_buffer(&s->packet_buffer,
                                              &s->packet_buffer_end, pkt);
                goto return_packet;
            }
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                        &s->packet_buffer_end)) < 0)
            return AVERROR(ENOMEM);
    }

return_packet:
    st = s->streams[pkt->stream_index];
    if (st->skip_samples) {
        uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
        if (p) {
            AV_WL32(p, st->skip_samples);
            av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
        }
        st->skip_samples = 0;
    }

    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
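As the first branch shows, when AVFMT_FLAG_GENPTS is not set av_read_frame() simply returns the next packet from the internal buffer or from read_frame_internal(); with the flag set it buffers packets and tries to fill in missing pts values from the dts of later packets on the same stream. Enabling that behaviour from application code is a small change, sketched below for the same FFmpeg generation; read_with_genpts and the filename parameter are illustrative names, not part of the original demo.

#include <libavformat/avformat.h>

/* Minimal sketch: open an input and force the pts-generating branch of av_read_frame(). */
int read_with_genpts(const char *filename)
{
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;
    int ret;

    av_register_all();                                   /* required in this FFmpeg generation */

    if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0)
        return ret;

    fmt_ctx->flags |= AVFMT_FLAG_GENPTS;                 /* checked on every av_read_frame() call */

    while ((ret = av_read_frame(fmt_ctx, &pkt)) >= 0) {
        /* with GENPTS set, packets whose pts was missing in the container
         * may now carry a pts derived from a later packet's dts */
        av_free_packet(&pkt);
    }

    avformat_close_input(&fmt_ctx);
    return ret == AVERROR_EOF ? 0 : ret;
}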
This concludes the walk-through of the AVPacket structure and the av_read_frame() function.