mirror of
https://git.ffmpeg.org/ffmpeg.git
Merge remote-tracking branch 'qatar/master'
* qatar/master: (31 commits)
  cdxl demux: do not create packets with uninitialized data at EOF.
  Replace computations of remaining bits with calls to get_bits_left().
  amrnb/amrwb: Remove get_bits usage.
  cosmetics: reindent
  avformat: do not require a pixel/sample format if there is no decoder
  avformat: do not fill-in audio packet duration in compute_pkt_fields()
  lavf: Use av_get_audio_frame_duration() in get_audio_frame_size()
  dca_parser: parse the sample rate and frame durations
  libspeexdec: do not set AVCodecContext.frame_size
  libopencore-amr: do not set AVCodecContext.frame_size
  alsdec: do not set AVCodecContext.frame_size
  siff: do not set AVCodecContext.frame_size
  amr demuxer: do not set AVCodecContext.frame_size.
  aiffdec: do not set AVCodecContext.frame_size
  mov: do not set AVCodecContext.frame_size
  ape: do not set AVCodecContext.frame_size.
  rdt: remove workaround for infinite loop with aac
  avformat: do not require frame_size in avformat_find_stream_info() for CELT
  avformat: do not require frame_size in avformat_find_stream_info() for MP1/2/3
  avformat: do not require frame_size in avformat_find_stream_info() for AAC
  ...

Conflicts:
    doc/APIchanges
    libavcodec/Makefile
    libavcodec/avcodec.h
    libavcodec/h264.c
    libavcodec/h264_ps.c
    libavcodec/utils.c
    libavcodec/version.h
    libavcodec/x86/dsputil_mmx.c
    libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
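Most of the merged commits revolve around one idea: demuxers and decoders stop publishing AVCodecContext.frame_size, and libavformat derives audio packet durations itself. The get_audio_frame_size() hunk in the diff below captures the new lookup order; the following is a minimal standalone sketch of that order, not code from the tree (the helper name audio_frame_samples is illustrative, and it assumes the 2012-era libavcodec API where av_get_audio_frame_duration() returns the frame duration in samples, or 0 when it cannot be determined):

#include <libavcodec/avcodec.h>

/* Sketch of the post-merge priority order in get_audio_frame_size():
 *  1. when demuxing, trust a non-trivial frame_size if one is set,
 *  2. otherwise ask libavcodec for this packet's duration in samples,
 *  3. when muxing, fall back to frame_size,
 *  4. give up with -1 so the caller leaves the duration unset. */
static int audio_frame_samples(AVCodecContext *enc, int packet_size, int mux)
{
    int samples;

    if (!mux && enc->frame_size > 1)   /* demuxing: frame_size wins */
        return enc->frame_size;

    samples = av_get_audio_frame_duration(enc, packet_size);
    if (samples > 0)
        return samples;

    if (enc->frame_size > 1)           /* muxing fallback */
        return enc->frame_size;

    return -1;
}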
@@ -754,11 +754,11 @@ int av_read_packet(AVFormatContext *s, AVPacket *pkt)

static int determinable_frame_size(AVCodecContext *avctx)
{
    if (avctx->codec_id == CODEC_ID_AAC ||
    if (/*avctx->codec_id == CODEC_ID_AAC ||
        avctx->codec_id == CODEC_ID_MP1 ||
        avctx->codec_id == CODEC_ID_MP2 ||
        avctx->codec_id == CODEC_ID_MP3 ||
        avctx->codec_id == CODEC_ID_CELT)
        avctx->codec_id == CODEC_ID_MP2 ||*/
        avctx->codec_id == CODEC_ID_MP3/* ||
        avctx->codec_id == CODEC_ID_CELT*/)
        return 1;
    return 0;
}
@@ -766,27 +766,22 @@ static int determinable_frame_size(AVCodecContext *avctx)

/**
 * Get the number of samples of an audio frame. Return -1 on error.
 */
static int get_audio_frame_size(AVCodecContext *enc, int size)
static int get_audio_frame_size(AVCodecContext *enc, int size, int mux)
{
    int frame_size;

    if (enc->frame_size <= 1) {
        int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
    /* give frame_size priority if demuxing */
    if (!mux && enc->frame_size > 1)
        return enc->frame_size;

        if (bits_per_sample) {
            if (enc->channels == 0)
                return -1;
            frame_size = (size << 3) / (bits_per_sample * enc->channels);
        } else {
            /* used for example by ADPCM codecs */
            if (enc->bit_rate == 0 || determinable_frame_size(enc))
                return -1;
            frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
        }
    } else {
        frame_size = enc->frame_size;
    }
    return frame_size;
    if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
        return frame_size;

    /* fallback to using frame_size if muxing */
    if (enc->frame_size > 1)
        return enc->frame_size;

    return -1;
}
@@ -822,7 +817,7 @@ static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        frame_size = get_audio_frame_size(st->codec, pkt->size, 0);
        if (frame_size <= 0 || st->codec->sample_rate <= 0)
            break;
        *pnum = frame_size;
@@ -860,11 +855,20 @@ static int is_intra_only(AVCodecContext *enc){
    return 0;
}

static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
{
    if (pktl->next)
        return pktl->next;
    if (pktl == s->parse_queue_end)
        return s->packet_buffer;
    return NULL;
}

static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
{
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
        return;
@@ -872,7 +876,7 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
    st->first_dts= dts - st->cur_dts;
    st->cur_dts= dts;

    for(; pktl; pktl= pktl->next){
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        //FIXME think more about this check
@@ -889,34 +893,36 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
        st->start_time = pts;
}

static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
static void update_initial_durations(AVFormatContext *s, AVStream *st,
                                     int stream_index, int duration)
{
    AVPacketList *pktl= s->packet_buffer;
    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
    int64_t cur_dts= 0;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
            if(pktl->pkt.stream_index == stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                    break;
                cur_dts -= pkt->duration;
                cur_dts -= duration;
            }
        }
        pktl= s->packet_buffer;
        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)
        return;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
        if(pktl->pkt.stream_index != stream_index)
            continue;
        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            pktl->pkt.duration= pkt->duration;
//          if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
            pktl->pkt.duration = duration;
        }else
            break;
        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
@@ -969,8 +975,8 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
        }
    }
    if(pkt->duration != 0 && s->packet_buffer)
        update_initial_durations(s, st, pkt);
    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
        update_initial_durations(s, st, pkt->stream_index, pkt->duration);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
@@ -1029,12 +1035,16 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
        st->last_IP_pts= pkt->pts;
        /* cannot compute PTS if not present (we can compute it only
           by knowing the future */
    } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
        if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
            int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
    } else if (pkt->pts != AV_NOPTS_VALUE ||
               pkt->dts != AV_NOPTS_VALUE ||
               pkt->duration ) {
        int duration = pkt->duration;

        if(pkt->pts != AV_NOPTS_VALUE && duration){
            int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
            int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
            if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                pkt->pts += pkt->duration;
            if(old_diff < new_diff && old_diff < (duration>>3)){
                pkt->pts += duration;
//              av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
            }
        }
@@ -1047,7 +1057,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
        pkt->pts = st->cur_dts;
        pkt->dts = pkt->pts;
        if(pkt->pts != AV_NOPTS_VALUE)
            st->cur_dts = pkt->pts + pkt->duration;
            st->cur_dts = pkt->pts + duration;
    }
}
@@ -1073,161 +1083,215 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
        pkt->convergence_duration = pc->convergence_duration;
}

static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
{
    while (*pkt_buf) {
        AVPacketList *pktl = *pkt_buf;
        *pkt_buf = pktl->next;
        av_free_packet(&pktl->pkt);
        av_freep(&pktl);
    }
    *pkt_buf_end = NULL;
}

/**
 * Parse a packet, add all split parts to parse_queue
 *
 * @param pkt packet to parse, NULL when flushing the parser at end of stream
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
{
    AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
    AVStream *st = s->streams[stream_index];
    uint8_t *data = pkt ? pkt->data : NULL;
    int size = pkt ? pkt->size : 0;
    int ret = 0, got_output = 0;

    if (!pkt) {
        av_init_packet(&flush_pkt);
        pkt = &flush_pkt;
        got_output = 1;
    }

    while (size > 0 || (pkt == &flush_pkt && got_output)) {
        int len;

        av_init_packet(&out_pkt);
        len = av_parser_parse2(st->parser, st->codec,
                               &out_pkt.data, &out_pkt.size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        /* increment read pointer */
        data += len;
        size -= len;

        got_output = !!out_pkt.size;

        if (!out_pkt.size)
            continue;

        /* set the duration */
        out_pkt.duration = 0;
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate > 0) {
                out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                    (AVRational){ 1, st->codec->sample_rate },
                                                    st->time_base,
                                                    AV_ROUND_DOWN);
            }
        } else if (st->codec->time_base.num != 0 &&
                   st->codec->time_base.den != 0) {
            out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
                                                st->codec->time_base,
                                                st->time_base,
                                                AV_ROUND_DOWN);
        }

        out_pkt.stream_index = st->index;
        out_pkt.pts = st->parser->pts;
        out_pkt.dts = st->parser->dts;
        out_pkt.pos = st->parser->pos;

        if (st->parser->key_frame == 1 ||
            (st->parser->key_frame == -1 &&
             st->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt.flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, st->parser, &out_pkt);

        if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
            out_pkt.flags & AV_PKT_FLAG_KEY) {
            int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
            ff_reduce_index(s, st->index);
            av_add_index_entry(st, pos, out_pkt.dts,
                               0, 0, AVINDEX_KEYFRAME);
        }

        if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
            out_pkt.destruct = pkt->destruct;
            pkt->destruct = NULL;
        }
        if ((ret = av_dup_packet(&out_pkt)) < 0)
            goto fail;

        if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
            av_free_packet(&out_pkt);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }


    /* end of the stream => close and free the parser */
    if (pkt == &flush_pkt) {
        av_parser_close(st->parser);
        st->parser = NULL;
    }

fail:
    av_free_packet(pkt);
    return ret;
}

static int read_from_packet_buffer(AVPacketList **pkt_buffer,
                                   AVPacketList **pkt_buffer_end,
                                   AVPacket *pkt)
{
    AVPacketList *pktl;
    av_assert0(*pkt_buffer);
    pktl = *pkt_buffer;
    *pkt = pktl->pkt;
    *pkt_buffer = pktl->next;
    if (!pktl->next)
        *pkt_buffer_end = NULL;
    av_freep(&pktl);
    return 0;
}
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st;
    int len, ret, i;
    int ret = 0, i, got_packet = 0;

    av_init_packet(pkt);

    for(;;) {
        /* select current input stream component */
        st = s->cur_st;
        if (st) {
            if (!st->need_parsing || !st->parser) {
                /* no parsing needed: we just output the packet as is */
                /* raw data support */
                *pkt = st->cur_pkt;
                st->cur_pkt.data= NULL;
                st->cur_pkt.side_data_elems = 0;
                st->cur_pkt.side_data = NULL;
                compute_pkt_fields(s, st, NULL, pkt);
                s->cur_st = NULL;
                if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                    (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
                }
                break;
            } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
                len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                       st->cur_ptr, st->cur_len,
                                       st->cur_pkt.pts, st->cur_pkt.dts,
                                       st->cur_pkt.pos);
                st->cur_pkt.pts = AV_NOPTS_VALUE;
                st->cur_pkt.dts = AV_NOPTS_VALUE;
                /* increment read pointer */
                st->cur_ptr += len;
                st->cur_len -= len;
    while (!got_packet && !s->parse_queue) {
        AVStream *st;
        AVPacket cur_pkt;

                /* return packet if any */
                if (pkt->size) {
                got_packet:
                    pkt->duration = 0;
                    if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                        if (st->codec->sample_rate > 0) {
                            pkt->duration = av_rescale_q_rnd(st->parser->duration,
                                                             (AVRational){ 1, st->codec->sample_rate },
                                                             st->time_base,
                                                             AV_ROUND_DOWN);
                        }
                    } else if (st->codec->time_base.num != 0 &&
                               st->codec->time_base.den != 0) {
                        pkt->duration = av_rescale_q_rnd(st->parser->duration,
                                                         st->codec->time_base,
                                                         st->time_base,
                                                         AV_ROUND_DOWN);
                    }
                    pkt->stream_index = st->index;
                    pkt->pts = st->parser->pts;
                    pkt->dts = st->parser->dts;
                    pkt->pos = st->parser->pos;
                    if (st->parser->key_frame == 1 ||
                        (st->parser->key_frame == -1 &&
                         st->parser->pict_type == AV_PICTURE_TYPE_I))
                        pkt->flags |= AV_PKT_FLAG_KEY;
                    if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                        s->cur_st = NULL;
                        pkt->destruct= st->cur_pkt.destruct;
                        st->cur_pkt.destruct= NULL;
                        st->cur_pkt.data = NULL;
                        assert(st->cur_len == 0);
                    }else{
                        pkt->destruct = NULL;
                    }
                    compute_pkt_fields(s, st, st->parser, pkt);

                    if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                        int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
                        ff_reduce_index(s, st->index);
                        av_add_index_entry(st, pos, pkt->dts,
                                           0, 0, AVINDEX_KEYFRAME);
                    }

                    break;
                }
            } else {
                /* free packet */
                av_free_packet(&st->cur_pkt);
                s->cur_st = NULL;
            }
        } else {
            AVPacket cur_pkt;
            /* read next packet */
            ret = av_read_packet(s, &cur_pkt);
            if (ret < 0) {
                if (ret == AVERROR(EAGAIN))
                    return ret;
                /* return the last frames, if any */
                for(i = 0; i < s->nb_streams; i++) {
                    st = s->streams[i];
                    if (st->parser && st->need_parsing) {
                        av_parser_parse2(st->parser, st->codec,
                                         &pkt->data, &pkt->size,
                                         NULL, 0,
                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE,
                                         AV_NOPTS_VALUE);
                        if (pkt->size)
                            goto got_packet;
                    }
                }
                /* no more packets: really terminate parsing */
        /* read next packet */
        ret = av_read_packet(s, &cur_pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for(i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, NULL, st->index);
            }
            st = s->streams[cur_pkt.stream_index];
            st->cur_pkt= cur_pkt;
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st = s->streams[cur_pkt.stream_index];

            if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
               st->cur_pkt.dts != AV_NOPTS_VALUE &&
               st->cur_pkt.pts < st->cur_pkt.dts){
                av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size);
//              av_free_packet(&st->cur_pkt);
//              return -1;
            }
        if (cur_pkt.pts != AV_NOPTS_VALUE &&
            cur_pkt.dts != AV_NOPTS_VALUE &&
            cur_pkt.pts < cur_pkt.dts) {
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                   cur_pkt.stream_index,
                   cur_pkt.pts,
                   cur_pkt.dts,
                   cur_pkt.size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                   cur_pkt.stream_index,
                   cur_pkt.pts,
                   cur_pkt.dts,
                   cur_pkt.size,
                   cur_pkt.duration,
                   cur_pkt.flags);

            if(s->debug & FF_FDEBUG_TS)
                av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                       st->cur_pkt.stream_index,
                       st->cur_pkt.pts,
                       st->cur_pkt.dts,
                       st->cur_pkt.size,
                       st->cur_pkt.duration,
                       st->cur_pkt.flags);

            s->cur_st = st;
            st->cur_ptr = st->cur_pkt.data;
            st->cur_len = st->cur_pkt.size;
            if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
                st->parser = av_parser_init(st->codec->codec_id);
                if (!st->parser) {
                    av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                           "%s, packets or times may be invalid.\n",
                           avcodec_get_name(st->codec->codec_id));
                    /* no parser available: just output the raw packets */
                    st->need_parsing = AVSTREAM_PARSE_NONE;
                }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                    st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
                }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                    st->parser->flags |= PARSER_FLAG_ONCE;
                }
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codec->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
                st->parser->flags |= PARSER_FLAG_ONCE;
            }
        }

        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            *pkt = cur_pkt;
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
                return ret;
        } else {
            /* free packet */
            av_free_packet(&cur_pkt);
        }
    }

    if (!got_packet && s->parse_queue)
        ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
               pkt->stream_index,
@@ -1237,17 +1301,7 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
               pkt->duration,
               pkt->flags);

    return 0;
}

static int read_from_packet_buffer(AVFormatContext *s, AVPacket *pkt)
{
    AVPacketList *pktl = s->packet_buffer;
    av_assert0(pktl);
    *pkt = pktl->pkt;
    s->packet_buffer = pktl->next;
    av_freep(&pktl);
    return 0;
    return ret;
}

int av_read_frame(AVFormatContext *s, AVPacket *pkt)
@@ -1256,7 +1310,9 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    int eof = 0;

    if (!genpts)
        return s->packet_buffer ? read_from_packet_buffer(s, pkt) :
        return s->packet_buffer ? read_from_packet_buffer(&s->packet_buffer,
                                                          &s->packet_buffer_end,
                                                          pkt) :
                                  read_frame_internal(s, pkt);

    for (;;) {
@@ -1282,7 +1338,8 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
            /* read packet from packet buffer, if there is data */
            if (!(next_pkt->pts == AV_NOPTS_VALUE &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof))
                return read_from_packet_buffer(s, pkt);
                return read_from_packet_buffer(&s->packet_buffer,
                                               &s->packet_buffer_end, pkt);
        }

        ret = read_frame_internal(s, pkt);
@@ -1303,24 +1360,10 @@ int av_read_frame(AVFormatContext *s, AVPacket *pkt)
/* XXX: suppress the packet queue */
static void flush_packet_queue(AVFormatContext *s)
{
    AVPacketList *pktl;
    free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
    free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
    free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);

    for(;;) {
        pktl = s->packet_buffer;
        if (!pktl)
            break;
        s->packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
    while(s->raw_packet_buffer){
        pktl = s->raw_packet_buffer;
        s->raw_packet_buffer = pktl->next;
        av_free_packet(&pktl->pkt);
        av_free(pktl);
    }
    s->packet_buffer_end=
    s->raw_packet_buffer_end= NULL;
    s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
}
@@ -1356,8 +1399,6 @@ void ff_read_frame_flush(AVFormatContext *s)

    flush_packet_queue(s);

    s->cur_st = NULL;

    /* for each stream, reset read state */
    for(i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
@@ -1365,15 +1406,11 @@ void ff_read_frame_flush(AVFormatContext *s)
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser = NULL;
            av_free_packet(&st->cur_pkt);
        }
        st->last_IP_pts = AV_NOPTS_VALUE;
        if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = 0;
        else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
        st->reference_dts = AV_NOPTS_VALUE;
        /* fail safe */
        st->cur_ptr = NULL;
        st->cur_len = 0;

        st->probe_packets = MAX_PROBE_PACKETS;
@@ -1992,8 +2029,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
    int64_t filesize, offset, duration;
    int retry=0;

    ic->cur_st = NULL;

    /* flush packet queue */
    flush_packet_queue(ic);

@@ -2005,7 +2040,6 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
        if (st->parser) {
            av_parser_close(st->parser);
            st->parser= NULL;
            av_free_packet(&st->cur_pkt);
        }
    }
@@ -2107,17 +2141,22 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
        }
    }

static int has_codec_parameters(AVCodecContext *avctx)
static int has_codec_parameters(AVStream *st)
{
    AVCodecContext *avctx = st->codec;
    int val;
    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
        val = avctx->sample_rate && avctx->channels;
        if (!avctx->frame_size && determinable_frame_size(avctx))
            return 0;
        if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
            return 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
        val = avctx->width;
        if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
            return 0;
        break;
    case AVMEDIA_TYPE_DATA:
        if(avctx->codec_id == CODEC_ID_NONE) return 1;
@@ -2142,14 +2181,16 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
    AVFrame picture;
    AVPacket pkt = *avpkt;

    if (!avcodec_is_open(st->codec)) {
    if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
        AVDictionary *thread_opt = NULL;

        codec = st->codec->codec ? st->codec->codec :
                avcodec_find_decoder(st->codec->codec_id);

        if (!codec)
        if (!codec) {
            st->info->found_decoder = -1;
            return -1;
        }

        /* force thread count to 1 since the h264 decoder will not extract SPS
         * and PPS to extradata during multi-threaded decoding */
@@ -2157,13 +2198,20 @@ static int try_decode_frame(AVStream *st, AVPacket *avpkt, AVDictionary **option
        ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
        if (!options)
            av_dict_free(&thread_opt);
        if (ret < 0)
        if (ret < 0) {
            st->info->found_decoder = -1;
            return ret;
        }
    }
        st->info->found_decoder = 1;
    } else if (!st->info->found_decoder)
        st->info->found_decoder = 1;

    if (st->info->found_decoder < 0)
        return -1;

    while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
           ret >= 0 &&
           (!has_codec_parameters(st->codec) ||
           (!has_codec_parameters(st) ||
            !has_decode_delay_been_guessed(st) ||
            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
        got_picture = 0;
@@ -2331,7 +2379,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
                           : &thread_opt);

        //try to just open decoders, in case this is enough to get parameters
        if(!has_codec_parameters(st->codec)){
        if (!has_codec_parameters(st)) {
            if (codec && !st->codec->codec)
                avcodec_open2(st->codec, codec, options ? &options[i]
                              : &thread_opt);
@@ -2358,7 +2406,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
        int fps_analyze_framecount = 20;

        st = ic->streams[i];
        if (!has_codec_parameters(st->codec))
        if (!has_codec_parameters(st))
            break;
        /* if the timebase is coarse (like the usual millisecond precision
           of mkv), we need to analyze more frames to reliably arrive at
@@ -2483,7 +2531,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)

    if (flush_codecs) {
        AVPacket empty_pkt = { 0 };
        int err;
        int err = 0;
        av_init_packet(&empty_pkt);

        ret = -1; /* we could not have all the codec parameters before EOF */
@@ -2491,17 +2539,20 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
            st = ic->streams[i];

            /* flush the decoders */
            do {
                err = try_decode_frame(st, &empty_pkt,
                                       (options && i < orig_nb_streams) ?
                                       &options[i] : NULL);
            } while (err > 0 && !has_codec_parameters(st->codec));
            if (st->info->found_decoder == 1) {
                do {
                    err = try_decode_frame(st, &empty_pkt,
                                           (options && i < orig_nb_streams) ?
                                           &options[i] : NULL);
                } while (err > 0 && !has_codec_parameters(st));

            if (err < 0) {
                av_log(ic, AV_LOG_INFO,
                       "decoding for stream %d failed\n", st->index);
                if (err < 0) {
                    av_log(ic, AV_LOG_INFO,
                           "decoding for stream %d failed\n", st->index);
                }
            }
            if (!has_codec_parameters(st->codec)){

            if (!has_codec_parameters(st)){
                char buf[256];
                avcodec_string(buf, sizeof(buf), st->codec, 0);
                av_log(ic, AV_LOG_WARNING,
@@ -2738,7 +2789,6 @@ void avformat_free_context(AVFormatContext *s)
        st = s->streams[i];
        if (st->parser) {
            av_parser_close(st->parser);
            av_free_packet(&st->cur_pkt);
        }
        if (st->attached_pic.data)
            av_free_packet(&st->attached_pic);
@@ -3193,7 +3243,7 @@ static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
    /* update pts */
    switch (st->codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        frame_size = get_audio_frame_size(st->codec, pkt->size, 1);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
           likely equal to the encoder delay, but it would be better if we
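For callers, the practical effect of the merged changes is that av_read_frame() hands out audio packets whose duration is filled in by libavformat itself (via the parser/parse_queue path above) rather than depending on a demuxer-provided frame_size. A minimal consumer therefore needs no per-codec duration logic. The sketch below is illustrative only, not code from this repository; it assumes the 2012-era API of this tree (av_register_all(), av_free_packet(), int-valued AVPacket.duration expressed in the stream's time_base):

#include <libavformat/avformat.h>
#include <stdio.h>

/* Print the per-packet durations computed by libavformat.
 * Error handling is reduced to the essentials for brevity. */
int main(int argc, char **argv)
{
    AVFormatContext *ic = NULL;
    AVPacket pkt;

    if (argc < 2)
        return 1;

    av_register_all();
    if (avformat_open_input(&ic, argv[1], NULL, NULL) < 0)
        return 1;
    if (avformat_find_stream_info(ic, NULL) < 0)
        return 1;

    while (av_read_frame(ic, &pkt) >= 0) {
        AVStream *st = ic->streams[pkt.stream_index];
        /* pkt.duration is in st->time_base units; 0 means "unknown" */
        printf("stream %d: duration=%d (time_base %d/%d)\n",
               pkt.stream_index, pkt.duration,
               st->time_base.num, st->time_base.den);
        av_free_packet(&pkt);
    }

    avformat_close_input(&ic);
    return 0;
}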