FFmpeg source code: av_read_frame


In FFmpeg, av_read_frame() reads one frame of video, or several frames of audio, of compressed data from the bitstream.

For example, when decoding video, before each video frame can be decoded, av_read_frame() must first be called to obtain one frame's worth of compressed data, which is then handed to the decoder (in H.264, one frame of compressed data typically corresponds to one NAL unit).

The function is declared in libavformat\avformat.h:

/**
 * Return the next frame of a stream.
 * This function returns what is stored in the file, and does not validate
 * that what is there are valid frames for the decoder. It will split what is
 * stored in the file into frames and return one for each call. It will not
 * omit invalid data between valid frames so as to give the decoder the maximum
 * information possible for decoding.
 *
 * On success, the returned packet is reference-counted (pkt->buf is set) and
 * valid indefinitely. The packet must be freed with av_packet_unref() when
 * it is no longer needed. For video, the packet contains exactly one frame.
 * For audio, it contains an integer number of frames if each frame has
 * a known fixed size (e.g. PCM or ADPCM data). If the audio frames have
 * a variable size (e.g. MPEG audio), then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AVStream.time_base units (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B-frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * @return 0 if OK, < 0 on error or end of file. On error, pkt will be blank
 *         (as if it came from av_packet_alloc()).
 *
 * @note pkt will be initialized, so it may be uninitialized, but it must not
 *       contain data that needs to be freed.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt);

Parameters:

s: the AVFormatContext of the input stream

pkt: the output AVPacket
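
To make the usage described above concrete, below is a minimal demuxing-loop sketch built only on the public API. The input path "input.mp4" and the bare-bones error handling are illustrative assumptions, not part of the FFmpeg source discussed in this article.

#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *s = NULL;   /* demuxer context, filled in by avformat_open_input() */
    AVPacket *pkt = NULL;
    int ret;

    /* "input.mp4" is only an example path */
    if ((ret = avformat_open_input(&s, "input.mp4", NULL, NULL)) < 0)
        goto end;
    if ((ret = avformat_find_stream_info(s, NULL)) < 0)
        goto end;

    pkt = av_packet_alloc();
    if (!pkt) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* each successful call returns one compressed packet:
     * exactly one video frame, or one or more audio frames */
    while ((ret = av_read_frame(s, pkt)) >= 0) {
        /* pkt->stream_index identifies the stream; a real program would
         * send the packet to the matching decoder here */
        av_packet_unref(pkt);    /* the returned packet is reference-counted */
    }
    if (ret == AVERROR_EOF)
        ret = 0;                 /* normal end of file */

end:
    av_packet_free(&pkt);
    avformat_close_input(&s);
    return ret < 0 ? 1 : 0;
}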

The definition of av_read_frame() is located in libavformat\utils.c, as shown below:

// Obtain one AVPacket
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        // In the common case this path is taken and read_frame_internal() is called
        ret = s->internal->packet_buffer
              ? ff_packet_list_get(&s->internal->packet_buffer,
                                   &s->internal->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    for (;;) {
        AVPacketList *pktl = s->internal->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                av_assert2(wrap_bits <= 64);
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2ULL << (wrap_bits - 1)) < 0) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2ULL << (wrap_bits - 1))) {
                            // not B-frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = s->internal->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            st = s->streams[next_pkt->stream_index];
            if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = ff_packet_list_get(&s->internal->packet_buffer,
                                         &s->internal->packet_buffer_end, pkt);
                goto return_packet;
            }
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        ret = ff_packet_list_put(&s->internal->packet_buffer,
                                 &s->internal->packet_buffer_end,
                                 pkt, 0);
        if (ret < 0) {
            av_packet_unref(pkt);
            return ret;
        }
    }

return_packet:

    st = s->streams[pkt->stream_index];
    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}

As can be seen from the definition above, av_read_frame() calls read_frame_internal().

read_frame_internal() is an internal function:

static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, got_packet = 0;
    AVDictionary *metadata = NULL;

    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;

        /* read next packet */
        // Read a transport packet from the media file.
        // This is the key call of the whole function.
        ret = ff_read_packet(s, pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                // parse the packet
                if (st->parser && st->need_parsing)
                    parse_packet(s, pkt, st->index, 1);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st = s->streams[pkt->stream_index];

        /* update context if required */
        if (st->internal->need_context_update) {
            if (avcodec_is_open(st->internal->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(st->internal->avctx);
                st->info->found_decoder = 0;
            }

            /* close parser, because it depends on the codec */
            if (st->parser && st->internal->avctx->codec_id != st->codecpar->codec_id) {
                av_parser_close(st->parser);
                st->parser = NULL;
            }

            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* update deprecated public codec context */
            ret = avcodec_parameters_to_context(st->codec, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            st->internal->need_context_update = 0;
        }

        if (pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size, pkt->duration, pkt->flags);

        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, pkt, pkt->stream_index, 0)) < 0)
                return ret;
            st->codecpar->sample_rate = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate = st->internal->avctx->bit_rate;
            st->codecpar->channels = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id = st->internal->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->skip_to_keyframe = 0;
        if (st->skip_to_keyframe) {
            av_packet_unref(pkt);
            got_packet = 0;
        }
    }

    if (!got_packet && s->internal->parse_queue)
        ret = ff_packet_list_get(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    if (ret >= 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int discard_padding = 0;
        if (st->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= st->first_discard_sample &&
                sample < st->last_discard_sample)
                discard_padding = FFMIN(end_sample - st->first_discard_sample, duration);
        }
        if (st->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            st->skip_samples = st->start_skip_samples;
        if (st->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, st->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->skip_samples, discard_padding);
            }
            st->skip_samples = 0;
        }

        if (st->inject_global_side_data) {
            for (i = 0; i < st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            st->inject_global_side_data = 0;
        }
    }

    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    if (metadata) {
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    }

#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
#endif

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    /* A demuxer might have returned EOF because of an IO error, let's
     * propagate this back to the user. */
    if (ret == AVERROR_EOF && s->pb && s->pb->error < 0 && s->pb->error != AVERROR(EAGAIN))
        ret = s->pb->error;

    return ret;
}

The work of read_frame_internal() is carried out mainly by two interfaces:

1. ff_read_packet(), which reads AVPacket data from the corresponding AVInputFormat;

2. parse_packet(), which is called to parse the resulting AVPacket when the media stream requires an AVCodecParser (see the sketch below).
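
Both ff_read_packet() and parse_packet() are libavformat-internal helpers and cannot be called from application code. To illustrate what the parsing step amounts to, the sketch below uses the public AVCodecParser API (av_parser_init() / av_parser_parse2() / av_parser_close()), which is the same machinery that parse_packet() drives internally. The function name split_into_packets and the raw input buffer are made-up placeholders for this illustration, and the AVCodecContext is assumed to have been allocated by the caller for the stream's codec.

#include <libavcodec/avcodec.h>

/* Feed a raw elementary-stream buffer through a parser to find frame
 * boundaries, roughly what parse_packet() does for streams whose
 * need_parsing flag is set. buf/buf_size stand in for data obtained
 * elsewhere (e.g. read from a raw .h264 file). */
static int split_into_packets(AVCodecContext *avctx,
                              const uint8_t *buf, int buf_size)
{
    AVCodecParserContext *parser = av_parser_init(avctx->codec_id);
    int ret = 0;

    if (!parser)
        return AVERROR(EINVAL);   /* no parser available for this codec */

    while (buf_size > 0) {
        uint8_t *frame      = NULL;
        int      frame_size = 0;

        /* consumes some input bytes; once a complete compressed frame has
         * been assembled it is returned via frame/frame_size */
        int used = av_parser_parse2(parser, avctx, &frame, &frame_size,
                                    buf, buf_size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (used < 0) {
            ret = used;
            break;
        }
        buf      += used;
        buf_size -= used;

        if (frame_size > 0) {
            /* one complete frame: the demuxer would wrap it in an AVPacket
             * and hand it back to the caller through av_read_frame() */
        }
    }

    av_parser_close(parser);
    return ret;
}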
