Compare commits

..

1 Commit

Author: Marton Balint
SHA1: 340cea9f22
Message: avdevice/decklink_dec: fix bitrate calculations
    Reviewed-by: Deti Fliegl <deti@fliegl.de>
    Signed-off-by: Marton Balint <cus@passwd.hu>
Date: 2016-06-26 19:17:29 +02:00
47 changed files with 154 additions and 498 deletions


@@ -2,51 +2,7 @@ Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
version <next>:
version 3.1.2:
- cmdutils: remove the current working directory from the DLL search path on win32
- avcodec/rawdec: Fix palette handling with changing palettes
- avcodec/raw: Fix decoding of ilacetest.mov
- avformat/mov: Enable mp3 parsing if a packet needs it
- avformat/hls: Use an array instead of stream offset for stream mapping
- avformat/hls: Sync starting segment across variants on live streams
- avformat/hls: Fix regression with ranged media segments
- avcodec/ffv1enc: Fix assertion failure with non zero bits per sample
- avfilter/af_hdcd: small fix in af_hdcd.c where gain was not being adjusted for "attenuate slowly"
- avformat/oggdec: Fix integer overflow with invalid pts
- ffplay: Fix invalid array index
- avcodec/alacenc: allocate bigger packets (cherry picked from commit 82b84c71b009884c8d041361027718b19922c76d)
- libavcodec/dnxhd: Enable 12-bit DNxHR support.
- lavc/vaapi_encode_h26x: Fix a crash if "." is not the decimal separator.
- jni: Return ENOSYS on unsupported platforms
- lavu/hwcontext_vaapi: Fix compilation if VA_FOURCC_ABGR is not defined.
- avcodec/vp9_parser: Check the input frame sizes for being consistent
- avformat/flvdec: parse keyframe before a\v stream was created add_keyframes_index() when stream created or keyframe parsed
- avformat/flvdec: splitting add_keyframes_index() out from parse_keyframes_index()
- libavformat/rtpdec_asf: zero initialize the AVIOContext struct
- libavutil/opt: Small bugfix in example.
- libx264: Increase x264 opts character limit to 4096
- avcodec/h264_parser: Set sps/pps_ref
- librtmp: Avoid an infiniloop setting connection arguments
- avformat/oggparsevp8: fix pts calculation on pages ending with an invisible frame
- lavc/Makefile: Fix standalone compilation of the svq3 decoder.
- lavf/vplayerdec: Improve auto-detection.
- lavc/mediacodecdec_h264: properly convert extradata to annex-b
- Revert "configure: Enable GCC vectorization on ≥4.9 on x86"
version 3.1.1:
- doc/APIchanges: document the lavu/lavf field moves
- avformat/avformat: Move new field to the end of AVStream
- avformat/utils: update deprecated AVStream->codec when the context is updated
- avutil/frame: Move new field to the end of AVFrame
- libavcodec/exr : fix decoding piz float file.
- avformat/mov: Check sample size
- lavfi: Move new field to the end of AVFilterContext
- lavfi: Move new field to the end of AVFilterLink
- ffplay: Fix usage of private lavfi API
- lavc/mediacodecdec_h264: add missing NAL headers to SPS/PPS buffers
- lavc/pnm_parser: disable parsing for text based PNMs
- YUY2 Lossless Codec decoder
version 3.1:
@@ -92,8 +48,6 @@ version 3.1:
- CUDA CUVID H264/HEVC decoder
- 10-bit depth support in native utvideo decoder
- libutvideo wrapper removed
- YUY2 Lossless Codec decoder
- VideoToolbox H.264 encoder
version 3.0:


@@ -1 +1 @@
3.1.2
3.0.git


@@ -1,15 +0,0 @@
┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 3.1 "Laplace" │
└────────────────────────────────────────┘
The FFmpeg Project proudly presents FFmpeg 3.1 "Laplace", about 4
months after the release of FFmpeg 3.0.
A complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.
We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.


@@ -107,15 +107,6 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
}
}
void init_dynload(void)
{
#ifdef _WIN32
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security pre-caution. */
SetDllDirectory("");
#endif
}
static void (*program_exit)(int ret);
void register_exit(void (*cb)(int ret))


@@ -61,11 +61,6 @@ void register_exit(void (*cb)(int ret));
*/
void exit_program(int ret) av_noreturn;
/**
* Initialize dynamic library loading
*/
void init_dynload(void);
/**
* Initialize the cmdutils option system, in particular
* allocate the *_opts contexts.

configure (vendored)

@@ -4529,7 +4529,7 @@ fi
add_cppflags -D_ISOC99_SOURCE
add_cxxflags -D__STDC_CONSTANT_MACROS
add_cxxflags -std=c++98
add_cxxflags -std=c++11
check_cflags -std=c99
check_cc -D_FILE_OFFSET_BITS=64 <<EOF && add_cppflags -D_FILE_OFFSET_BITS=64
#include <stdlib.h>
@@ -6125,7 +6125,11 @@ elif enabled ccc; then
add_cflags -msg_disable nonstandcast
add_cflags -msg_disable unsupieee
elif enabled gcc; then
check_optflags -fno-tree-vectorize
case $gcc_basever in
4.9*) enabled x86 || check_optflags -fno-tree-vectorize ;;
4.*) check_optflags -fno-tree-vectorize ;;
*) enabled x86 || check_optflags -fno-tree-vectorize ;;
esac
check_cflags -Werror=format-security
check_cflags -Werror=implicit-function-declaration
check_cflags -Werror=missing-prototypes


@@ -15,30 +15,14 @@ libavutil: 2015-08-28
API changes, most recent first:
2016-06-30 - c1c7e0ab - lavf 57.41.100 - avformat.h
Moved codecpar field from AVStream to the end of the struct, so that
the following private fields are in the same location as in FFmpeg 3.0 (lavf 57.25.100).
2016-06-30 - 042fb69d - lavu 55.28.100 - frame.h
Moved hw_frames_ctx field from AVFrame to the end of the struct, so that
the following private fields are in the same location as in FFmpeg 3.0 (lavu 55.17.103).
2016-06-29 - 1a751455 - lavfi 6.47.100 - avfilter.h
Fix accidental ABI breakage in AVFilterContext.
ABI was broken in 8688d3a, lavfi 6.42.100 and released as ffmpeg 3.1.
Because of this, ffmpeg and ffplay built against lavfi>=6.42.100 will not be
compatible with lavfi>=6.47.100. Potentially also affects other users of
libavfilter if they are using one of the affected fields.
-------- 8< --------- FFmpeg 3.1 was cut here -------- 8< ---------
2016-06-26 - 481f320 / 1c9e861 - lavu 55.27.100 / 55.13.0 - hwcontext.h
2016-06-26 - xxxxxxx / 1c9e861 - lavu 55.27.100 / 55.13.0 - hwcontext.h
Add av_hwdevice_ctx_create().
2016-06-26 - b95534b / e47b8bb - lavc 57.48.101 / 57.19.1 - avcodec.h
2016-06-26 - xxxxxxx / e47b8bb - lavc 57.48.101 / 57.19.1 - avcodec.h
Adjust values for JPEG 2000 profiles.
-------- 8< --------- FFmpeg 3.1 was cut here -------- 8< ---------
2016-06-23 - 5d75e46 / db7968b - lavf 57.40.100 / 57.7.0 - avio.h
Add AVIODataMarkerType, write_data_type, ignore_boundary_point and
avio_write_marker.
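
The av_hwdevice_ctx_create() entry above only names the new helper. As a rough illustration of how it is meant to be called (not part of this diff; the VAAPI device type and the helper name are arbitrary choices for the example):

#include <libavutil/hwcontext.h>

/* Illustration only, not from this commit: open a default VAAPI device.
 * On success *device_ref owns a reference to the new AVHWDeviceContext. */
static int open_default_vaapi_device(AVBufferRef **device_ref)
{
    int ret = av_hwdevice_ctx_create(device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                     NULL,  /* default device node */
                                     NULL,  /* no options */
                                     0);
    return ret < 0 ? ret : 0;  /* negative AVERROR code on failure */
}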


@@ -31,7 +31,7 @@ PROJECT_NAME = FFmpeg
# This could be handy for archiving the generated documentation or
# if some version control system is used.
PROJECT_NUMBER = 3.1.2
PROJECT_NUMBER =
# With the PROJECT_LOGO tag one can specify a logo or icon that is included
# in the documentation. The maximum height of the logo should not exceed 55


@@ -4303,8 +4303,6 @@ int main(int argc, char **argv)
int ret;
int64_t ti;
init_dynload();
register_exit(ffmpeg_cleanup);
setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */


@@ -2725,7 +2725,7 @@ static int stream_component_open(VideoState *is, int stream_index)
goto fail;
link = is->out_audio_filter->inputs[0];
sample_rate = link->sample_rate;
nb_channels = avfilter_link_get_channels(link);
nb_channels = link->channels;
channel_layout = link->channel_layout;
}
#else
@@ -2936,7 +2936,7 @@ static int read_thread(void *arg)
AVStream *st = ic->streams[i];
enum AVMediaType type = st->codecpar->codec_type;
st->discard = AVDISCARD_ALL;
if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
if (wanted_stream_spec[type] && st_index[type] == -1)
if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
st_index[type] = i;
}
@@ -3776,8 +3776,6 @@ int main(int argc, char **argv)
char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
char alsa_bufsize[] = "SDL_AUDIO_ALSA_SET_BUFFER_SIZE=1";
init_dynload();
av_log_set_flags(AV_LOG_SKIP_REPEATED);
parse_loglevel(argc, argv, options);


@@ -3241,8 +3241,6 @@ int main(int argc, char **argv)
char *w_name = NULL, *w_args = NULL;
int ret, i;
init_dynload();
av_log_set_flags(AV_LOG_SKIP_REPEATED);
register_exit(ffprobe_cleanup);


@@ -3980,7 +3980,6 @@ int main(int argc, char **argv)
int cfg_parsed;
int ret = EXIT_FAILURE;
init_dynload();
config.filename = av_strdup("/etc/ffserver.conf");


@@ -528,8 +528,7 @@ OBJS-$(CONFIG_SUNRAST_ENCODER) += sunrastenc.o
OBJS-$(CONFIG_SVQ1_DECODER) += svq1dec.o svq1.o svq13.o h263data.o
OBJS-$(CONFIG_SVQ1_ENCODER) += svq1enc.o svq1.o h263data.o \
h263.o ituh263enc.o
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o \
h264_parse.o h264data.o h264_ps.o h2645_parse.o
OBJS-$(CONFIG_SVQ3_DECODER) += svq3.o svq13.o mpegutils.o h264_parse.o h264data.o
OBJS-$(CONFIG_TEXT_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_TEXT_ENCODER) += srtenc.o ass_split.o
OBJS-$(CONFIG_TAK_DECODER) += takdec.o tak.o takdsp.o


@@ -623,7 +623,7 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
else
max_frame_size = s->max_coded_frame_size;
if ((ret = ff_alloc_packet2(avctx, avpkt, 4 * max_frame_size, 0)) < 0)
if ((ret = ff_alloc_packet2(avctx, avpkt, 2 * max_frame_size, 0)) < 0)
return ret;
/* use verbatim mode for compression_level 0 */


@@ -118,6 +118,11 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth)
av_log(ctx->avctx, AV_LOG_ERROR, "bit depth mismatches %d %d\n", ff_dnxhd_cid_table[index].bit_depth, bitdepth);
return AVERROR_INVALIDDATA;
}
if (bitdepth > 10) {
avpriv_request_sample(ctx->avctx, "DNXHR 12-bit");
if (ctx->avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL)
return AVERROR_PATCHWELCOME;
}
ctx->cid_table = &ff_dnxhd_cid_table[index];
av_log(ctx->avctx, AV_LOG_VERBOSE, "Profile cid %d.\n", cid);
@@ -128,7 +133,7 @@ static int dnxhd_init_vlc(DNXHDContext *ctx, uint32_t cid, int bitdepth)
init_vlc(&ctx->ac_vlc, DNXHD_VLC_BITS, 257,
ctx->cid_table->ac_bits, 1, 1,
ctx->cid_table->ac_codes, 2, 2, 0);
init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth > 8 ? 14 : 12,
init_vlc(&ctx->dc_vlc, DNXHD_DC_VLC_BITS, bitdepth + 4,
ctx->cid_table->dc_bits, 1, 1,
ctx->cid_table->dc_codes, 1, 1, 0);
init_vlc(&ctx->run_vlc, DNXHD_VLC_BITS, 62,


@@ -749,9 +749,6 @@ static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize,
uint16_t *tmp = (uint16_t *)td->tmp;
uint8_t *out;
int ret, i, j;
int pixel_half_size;/* 1 for half, 2 for float and uint32 */
EXRChannel *channel;
int tmp_offset;
if (!td->bitmap)
td->bitmap = av_malloc(BITMAP_SIZE);
@@ -784,38 +781,24 @@ static int piz_uncompress(EXRContext *s, const uint8_t *src, int ssize,
ptr = tmp;
for (i = 0; i < s->nb_channels; i++) {
channel = &s->channels[i];
EXRChannel *channel = &s->channels[i];
int size = channel->pixel_type;
if (channel->pixel_type == EXR_HALF)
pixel_half_size = 1;
else
pixel_half_size = 2;
for (j = 0; j < pixel_half_size; j++)
wav_decode(ptr + j, td->xsize, pixel_half_size, td->ysize,
td->xsize * pixel_half_size, maxval);
ptr += td->xsize * td->ysize * pixel_half_size;
for (j = 0; j < size; j++)
wav_decode(ptr + j, td->xsize, size, td->ysize,
td->xsize * size, maxval);
ptr += td->xsize * td->ysize * size;
}
apply_lut(td->lut, tmp, dsize / sizeof(uint16_t));
out = td->uncompressed_data;
for (i = 0; i < td->ysize; i++) {
tmp_offset = 0;
for (i = 0; i < td->ysize; i++)
for (j = 0; j < s->nb_channels; j++) {
uint16_t *in;
EXRChannel *channel = &s->channels[j];
if (channel->pixel_type == EXR_HALF)
pixel_half_size = 1;
else
pixel_half_size = 2;
in = tmp + tmp_offset * td->xsize * td->ysize + i * td->xsize * pixel_half_size;
tmp_offset += pixel_half_size;
memcpy(out, in, td->xsize * 2 * pixel_half_size);
out += td->xsize * 2 * pixel_half_size;
uint16_t *in = tmp + j * td->xsize * td->ysize + i * td->xsize;
memcpy(out, in, td->xsize * 2);
out += td->xsize * 2;
}
}
return 0;
}


@@ -781,12 +781,14 @@ FF_ENABLE_DEPRECATION_WARNINGS
s->colorspace = 1;
s->transparency = 1;
s->chroma_planes = 1;
s->bits_per_raw_sample = 8;
if (!avctx->bits_per_raw_sample)
s->bits_per_raw_sample = 8;
break;
case AV_PIX_FMT_0RGB32:
s->colorspace = 1;
s->chroma_planes = 1;
s->bits_per_raw_sample = 8;
if (!avctx->bits_per_raw_sample)
s->bits_per_raw_sample = 8;
break;
case AV_PIX_FMT_GBRP9:
if (!avctx->bits_per_raw_sample)


@@ -367,26 +367,13 @@ static inline int parse_nal_units(AVCodecParserContext *s,
"non-existing PPS %u referenced\n", pps_id);
goto fail;
}
av_buffer_unref(&p->ps.pps_ref);
av_buffer_unref(&p->ps.sps_ref);
p->ps.pps = NULL;
p->ps.sps = NULL;
p->ps.pps_ref = av_buffer_ref(p->ps.pps_list[pps_id]);
if (!p->ps.pps_ref)
goto fail;
p->ps.pps = (const PPS*)p->ps.pps_ref->data;
p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
if (!p->ps.sps_list[p->ps.pps->sps_id]) {
av_log(avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n", p->ps.pps->sps_id);
goto fail;
}
p->ps.sps_ref = av_buffer_ref(p->ps.sps_list[p->ps.pps->sps_id]);
if (!p->ps.sps_ref)
goto fail;
p->ps.sps = (SPS*)p->ps.sps_ref->data;
p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
sps = p->ps.sps;


@@ -20,18 +20,19 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdlib.h>
#include "libavutil/error.h"
#include "config.h"
#include "jni.h"
#if CONFIG_JNI
#include <errno.h>
#include <jni.h>
#include <pthread.h>
#include "libavutil/log.h"
#include "libavutil/error.h"
#include "ffjni.h"
void *java_vm;
@@ -68,7 +69,7 @@ void *av_jni_get_java_vm(void *log_ctx)
int av_jni_set_java_vm(void *vm, void *log_ctx)
{
return AVERROR(ENOSYS);
return 0;
}
void *av_jni_get_java_vm(void *log_ctx)


@@ -777,8 +777,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
if(x4->x264opts){
const char *p= x4->x264opts;
while(p){
char param[4096]={0}, val[4096]={0};
if(sscanf(p, "%4095[^:=]=%4095[^:]", param, val) == 1){
char param[256]={0}, val[256]={0};
if(sscanf(p, "%255[^:=]=%255[^:]", param, val) == 1){
OPT_STR(param, "1");
}else
OPT_STR(param, val);


@@ -65,58 +65,6 @@ static av_cold int mediacodec_decode_close(AVCodecContext *avctx)
return 0;
}
static int h264_ps_to_nalu(const uint8_t *src, int src_size, uint8_t **out, int *out_size)
{
int i;
int ret = 0;
uint8_t *p = NULL;
static const uint8_t nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
if (!out || !out_size) {
return AVERROR(EINVAL);
}
p = av_malloc(sizeof(nalu_header) + src_size);
if (!p) {
return AVERROR(ENOMEM);
}
*out = p;
*out_size = sizeof(nalu_header) + src_size;
memcpy(p, nalu_header, sizeof(nalu_header));
memcpy(p + sizeof(nalu_header), src, src_size);
/* Escape 0x00, 0x00, 0x0{0-3} pattern */
for (i = 4; i < *out_size; i++) {
if (i < *out_size - 3 &&
p[i + 0] == 0 &&
p[i + 1] == 0 &&
p[i + 2] <= 3) {
uint8_t *new;
*out_size += 1;
new = av_realloc(*out, *out_size);
if (!new) {
ret = AVERROR(ENOMEM);
goto done;
}
*out = p = new;
i = i + 3;
memmove(p + i, p + i - 1, *out_size - i);
p[i - 1] = 0x03;
}
}
done:
if (ret < 0) {
av_freep(out);
*out_size = 0;
}
return ret;
}
static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
{
int i;
@@ -164,20 +112,8 @@ static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
}
if (pps && sps) {
uint8_t *data = NULL;
size_t data_size = 0;
if ((ret = h264_ps_to_nalu(sps->data, sps->data_size, &data, &data_size)) < 0) {
goto done;
}
ff_AMediaFormat_setBuffer(format, "csd-0", (void*)data, data_size);
av_freep(&data);
if ((ret = h264_ps_to_nalu(pps->data, pps->data_size, &data, &data_size)) < 0) {
goto done;
}
ff_AMediaFormat_setBuffer(format, "csd-1", (void*)data, data_size);
av_freep(&data);
ff_AMediaFormat_setBuffer(format, "csd-0", (void*)sps->data, sps->data_size);
ff_AMediaFormat_setBuffer(format, "csd-1", (void*)pps->data, pps->data_size);
} else {
av_log(avctx, AV_LOG_ERROR, "Could not extract PPS/SPS from extradata");
ret = AVERROR_INVALIDDATA;


@@ -66,8 +66,6 @@ retry:
}
#endif
next = END_NOT_FOUND;
} else if (pnmctx.type < 4) {
next = END_NOT_FOUND;
} else {
next = pnmctx.bytestream - pnmctx.bytestream_start
+ av_image_get_buffer_size(avctx->pix_fmt, avctx->width, avctx->height, 1);


@@ -31,7 +31,6 @@
const PixelFormatTag ff_raw_pix_fmt_tags[] = {
{ AV_PIX_FMT_YUV420P, MKTAG('I', '4', '2', '0') }, /* Planar formats */
{ AV_PIX_FMT_YUV420P, MKTAG('I', 'Y', 'U', 'V') },
{ AV_PIX_FMT_YUV420P, MKTAG('y', 'v', '1', '2') },
{ AV_PIX_FMT_YUV420P, MKTAG('Y', 'V', '1', '2') },
{ AV_PIX_FMT_YUV410P, MKTAG('Y', 'U', 'V', '9') },
{ AV_PIX_FMT_YUV410P, MKTAG('Y', 'V', 'U', '9') },


@@ -365,29 +365,20 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
NULL);
int ret;
if (!context->palette)
context->palette = av_buffer_alloc(AVPALETTE_SIZE);
if (!context->palette) {
av_buffer_unref(&frame->buf[0]);
return AVERROR(ENOMEM);
}
ret = av_buffer_make_writable(&context->palette);
if (ret < 0) {
av_buffer_unref(&frame->buf[0]);
return ret;
}
if (pal) {
av_buffer_unref(&context->palette);
context->palette = av_buffer_alloc(AVPALETTE_SIZE);
if (!context->palette) {
av_buffer_unref(&frame->buf[0]);
return AVERROR(ENOMEM);
}
memcpy(context->palette->data, pal, AVPALETTE_SIZE);
frame->palette_has_changed = 1;
} else if (context->is_nut_pal8) {
int vid_size = avctx->width * avctx->height;
int pal_size = avpkt->size - vid_size;
if (avpkt->size > vid_size && pal_size <= AVPALETTE_SIZE) {
if (avpkt->size - vid_size) {
pal = avpkt->data + vid_size;
memcpy(context->palette->data, pal, pal_size);
memcpy(context->palette->data, pal, avpkt->size - vid_size);
frame->palette_has_changed = 1;
}
}


@@ -967,10 +967,10 @@ static const AVCodecDefault vaapi_encode_h264_defaults[] = {
{ "b", "0" },
{ "bf", "2" },
{ "g", "120" },
{ "i_qfactor", "1" },
{ "i_qoffset", "0" },
{ "b_qfactor", "6/5" },
{ "b_qoffset", "0" },
{ "i_qfactor", "1.0" },
{ "i_qoffset", "0.0" },
{ "b_qfactor", "1.2" },
{ "b_qoffset", "0.0" },
{ NULL },
};


@@ -1338,10 +1338,10 @@ static const AVCodecDefault vaapi_encode_h265_defaults[] = {
{ "b", "0" },
{ "bf", "2" },
{ "g", "120" },
{ "i_qfactor", "1" },
{ "i_qoffset", "0" },
{ "b_qfactor", "6/5" },
{ "b_qoffset", "0" },
{ "i_qfactor", "1.0" },
{ "i_qoffset", "0.0" },
{ "b_qfactor", "1.2" },
{ "b_qoffset", "0.0" },
{ NULL },
};


@@ -28,7 +28,6 @@
typedef struct VP9ParseContext {
int n_frames; // 1-8
int size[8];
int marker_size;
int64_t pts;
} VP9ParseContext;
@@ -89,21 +88,6 @@ static int parse(AVCodecParserContext *ctx,
return 0;
}
if (s->n_frames > 0) {
int i;
int size_sum = 0;
for (i = 0; i < s->n_frames ;i++)
size_sum += s->size[i];
size_sum += s->marker_size;
if (size_sum != size) {
av_log(avctx, AV_LOG_ERROR, "Inconsistent input frame sizes %d %d\n",
size_sum, size);
s->n_frames = 0;
}
}
if (s->n_frames > 0) {
*out_data = data;
*out_size = s->size[--s->n_frames];
@@ -147,7 +131,6 @@ static int parse(AVCodecParserContext *ctx,
data += sz; \
size -= sz; \
} \
s->marker_size = size; \
parse_frame(ctx, *out_data, *out_size); \
return s->n_frames > 0 ? *out_size : full_size


@@ -563,15 +563,16 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx)
st->time_base.den = ctx->bmd_tb_den;
st->time_base.num = ctx->bmd_tb_num;
st->codecpar->bit_rate = av_image_get_buffer_size((AVPixelFormat)st->codecpar->format, ctx->bmd_width, ctx->bmd_height, 1) * 1/av_q2d(st->time_base) * 8;
if (cctx->v210) {
st->codecpar->codec_id = AV_CODEC_ID_V210;
st->codecpar->codec_tag = MKTAG('V', '2', '1', '0');
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
} else {
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->format = AV_PIX_FMT_UYVY422;
st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
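
As a worked example of the new bit_rate formulas above (illustration only, assuming a 1920x1080 stream at 25 fps, i.e. time_base.num = 1 and time_base.den = 25; the numbers are not taken from this diff):

/* Illustration only, not from this commit:
 *   UYVY, 16 bits per pixel:
 *       av_rescale(1920 * 1080 * 16, 25, 1)     = 829440000 bit/s
 *   V210, 128 bits per 6 pixels (64/3 bits per pixel):
 *       av_rescale(1920 * 1080 * 64, 25, 1 * 3) = 1105920000 bit/s
 */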


@@ -949,7 +949,6 @@ static int hdcd_envelope(int32_t *samples, int count, int stride, int gain, int
int len = FFMIN(count, target_gain - gain);
/* attenuate slowly */
for (i = 0; i < len; i++) {
++gain;
APPLY_GAIN(*samples, gain);
samples += stride;
}


@@ -344,13 +344,6 @@ struct AVFilterContext {
*/
AVFilterInternal *internal;
struct AVFilterCommand *command_queue;
char *enable_str; ///< enable expression string
void *enable; ///< parsed expression (AVExpr*)
double *var_values; ///< variable values for the enable expression
int is_disabled; ///< the enabled state from the last expression evaluation
/**
* For filters which will create hardware frames, sets the device the
* filter should create them in. All other filters will ignore this field:
@@ -359,6 +352,13 @@ struct AVFilterContext {
* hardware context information.
*/
AVBufferRef *hw_device_ctx;
struct AVFilterCommand *command_queue;
char *enable_str; ///< enable expression string
void *enable; ///< parsed expression (AVExpr*)
double *var_values; ///< variable values for the enable expression
int is_disabled; ///< the enabled state from the last expression evaluation
};
/**
@@ -473,6 +473,12 @@ struct AVFilterLink {
*/
AVRational frame_rate;
/**
* For hwaccel pixel formats, this should be a reference to the
* AVHWFramesContext describing the frames.
*/
AVBufferRef *hw_frames_ctx;
/**
* Buffer partially filled with samples to achieve a fixed/minimum size.
*/
@@ -544,12 +550,6 @@ struct AVFilterLink {
* cleared when a frame is filtered.
*/
int frame_wanted_out;
/**
* For hwaccel pixel formats, this should be a reference to the
* AVHWFramesContext describing the frames.
*/
AVBufferRef *hw_frames_ctx;
};
/**


@@ -30,8 +30,8 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 6
#define LIBAVFILTER_VERSION_MINOR 47
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_MINOR 46
#define LIBAVFILTER_VERSION_MICRO 102
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
LIBAVFILTER_VERSION_MINOR, \


@@ -985,6 +985,17 @@ typedef struct AVStream {
int event_flags;
#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.
/*
* Codec parameters associated with this stream. Allocated and freed by
* libavformat in avformat_new_stream() and avformat_free_context()
* respectively.
*
* - demuxing: filled by libavformat on stream creation or in
* avformat_find_stream_info()
* - muxing: filled by the caller before avformat_write_header()
*/
AVCodecParameters *codecpar;
/*****************************************************************
* All fields below this line are not part of the public API. They
* may not be used outside of libavformat and can be changed and
@@ -1206,17 +1217,6 @@ typedef struct AVStream {
* Must not be accessed in any way by callers.
*/
AVStreamInternal *internal;
/*
* Codec parameters associated with this stream. Allocated and freed by
* libavformat in avformat_new_stream() and avformat_free_context()
* respectively.
*
* - demuxing: filled by libavformat on stream creation or in
* avformat_find_stream_info()
* - muxing: filled by the caller before avformat_write_header()
*/
AVCodecParameters *codecpar;
} AVStream;
AVRational av_stream_get_r_frame_rate(const AVStream *s);
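
The relocated codecpar documentation above spells out the muxing contract: the caller fills the parameters before avformat_write_header(). A minimal sketch of that pattern (an assumption for illustration, not part of this diff; it presumes an already-opened encoder context):

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

/* Sketch only, not from this commit: add a stream to an output context and
 * fill st->codecpar from an opened encoder before the header is written. */
static int add_stream_from_encoder(AVFormatContext *oc, AVCodecContext *enc)
{
    AVStream *st = avformat_new_stream(oc, NULL);
    if (!st)
        return AVERROR(ENOMEM);
    /* codecpar is allocated by avformat_new_stream(); copy the encoder
     * parameters into it, then the caller calls avformat_write_header(). */
    return avcodec_parameters_from_context(st->codecpar, enc);
}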


@@ -61,11 +61,6 @@ typedef struct FLVContext {
int broken_sizes;
int sum_flv_tag_size;
int last_keyframe_stream_index;
int keyframe_count;
int64_t *keyframe_times;
int64_t *keyframe_filepositions;
} FLVContext;
static int probe(AVProbeData *p, int live)
@@ -97,38 +92,8 @@ static int live_flv_probe(AVProbeData *p)
return probe(p, 1);
}
static void add_keyframes_index(AVFormatContext *s)
{
FLVContext *flv = s->priv_data;
AVStream *stream = NULL;
unsigned int i = 0;
if (flv->last_keyframe_stream_index < 0) {
av_log(s, AV_LOG_DEBUG, "keyframe stream hasn't been created\n");
return;
}
av_assert0(flv->last_keyframe_stream_index <= s->nb_streams);
stream = s->streams[flv->last_keyframe_stream_index];
if (stream->nb_index_entries == 0) {
for (i = 0; i < flv->keyframe_count; i++) {
av_add_index_entry(stream, flv->keyframe_filepositions[i],
flv->keyframe_times[i] * 1000, 0, 0, AVINDEX_KEYFRAME);
}
} else
av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
av_freep(&flv->keyframe_times);
av_freep(&flv->keyframe_filepositions);
flv->keyframe_count = 0;
}
}
static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
FLVContext *flv = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return NULL;
@@ -139,8 +104,6 @@ static AVStream *create_stream(AVFormatContext *s, int codec_type)
s->ctx_flags &= ~AVFMTCTX_NOHEADER;
avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
flv->last_keyframe_stream_index = s->nb_streams - 1;
add_keyframes_index(s);
return st;
}
@@ -342,7 +305,8 @@ static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
return length;
}
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t max_pos)
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
AVStream *vstream, int64_t max_pos)
{
FLVContext *flv = s->priv_data;
unsigned int timeslen = 0, fileposlen = 0, i;
@@ -352,12 +316,10 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t m
int ret = AVERROR(ENOSYS);
int64_t initial_pos = avio_tell(ioc);
if (flv->keyframe_count > 0) {
av_log(s, AV_LOG_DEBUG, "keyframes have been paresed\n");
if (vstream->nb_index_entries>0) {
av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
return 0;
}
av_assert0(!flv->keyframe_times);
av_assert0(!flv->keyframe_filepositions);
if (s->flags & AVFMT_FLAG_IGNIDX)
return 0;
@@ -406,16 +368,15 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t m
}
if (timeslen == fileposlen && fileposlen>1 && max_pos <= filepositions[0]) {
for (i = 0; i < FFMIN(2,fileposlen); i++) {
flv->validate_index[i].pos = filepositions[i];
flv->validate_index[i].dts = times[i] * 1000;
flv->validate_count = i + 1;
for (i = 0; i < fileposlen; i++) {
av_add_index_entry(vstream, filepositions[i], times[i] * 1000,
0, 0, AVINDEX_KEYFRAME);
if (i < 2) {
flv->validate_index[i].pos = filepositions[i];
flv->validate_index[i].dts = times[i] * 1000;
flv->validate_count = i + 1;
}
}
flv->keyframe_times = times;
flv->keyframe_filepositions = filepositions;
flv->keyframe_count = timeslen;
times = NULL;
filepositions = NULL;
} else {
invalid:
av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
@@ -457,14 +418,13 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
}
break;
case AMF_DATA_TYPE_OBJECT:
if (key &&
if ((vstream || astream) && key &&
ioc->seekable &&
!strcmp(KEYFRAMES_TAG, key) && depth == 1)
if (parse_keyframes_index(s, ioc,
if (parse_keyframes_index(s, ioc, vstream ? vstream : astream,
max_pos) < 0)
av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");
else
add_keyframes_index(s);
while (avio_tell(ioc) < max_pos - 2 &&
amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
if (amf_parse_object(s, astream, vstream, str_val, max_pos,
@@ -614,7 +574,6 @@ static int amf_parse_object(AVFormatContext *s, AVStream *astream,
static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
FLVContext *flv = s->priv_data;
AMFDataType type;
AVStream *stream, *astream, *vstream;
AVStream av_unused *dstream;
@@ -653,14 +612,10 @@ static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
// the lookup every time it is called.
for (i = 0; i < s->nb_streams; i++) {
stream = s->streams[i];
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
vstream = stream;
flv->last_keyframe_stream_index = i;
} else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
astream = stream;
if (flv->last_keyframe_stream_index == -1)
flv->last_keyframe_stream_index = i;
}
else if (stream->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
dstream = stream;
}
@@ -688,7 +643,6 @@ static int flv_read_header(AVFormatContext *s)
s->start_time = 0;
flv->sum_flv_tag_size = 0;
flv->last_keyframe_stream_index = -1;
return 0;
}
@@ -699,8 +653,6 @@ static int flv_read_close(AVFormatContext *s)
FLVContext *flv = s->priv_data;
for (i=0; i<FLV_STREAM_TYPE_NB; i++)
av_freep(&flv->new_extradata[i]);
av_freep(&flv->keyframe_times);
av_freep(&flv->keyframe_filepositions);
return 0;
}


@@ -98,11 +98,7 @@ struct playlist {
int index;
AVFormatContext *ctx;
AVPacket pkt;
/* main demuxer streams associated with this playlist
* indexed by the subdemuxer stream indexes */
AVStream **main_streams;
int n_main_streams;
int stream_offset;
int finished;
enum PlaylistType type;
@@ -243,7 +239,6 @@ static void free_playlist_list(HLSContext *c)
struct playlist *pls = c->playlists[i];
free_segment_list(pls);
free_init_section_list(pls);
av_freep(&pls->main_streams);
av_freep(&pls->renditions);
av_freep(&pls->id3_buf);
av_dict_free(&pls->id3_initial);
@@ -595,7 +590,7 @@ static void update_options(char **dest, const char *name, void *src)
}
static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
AVDictionary *opts, AVDictionary *opts2, int *is_http)
AVDictionary *opts, AVDictionary *opts2)
{
HLSContext *c = s->priv_data;
AVDictionary *tmp = NULL;
@@ -636,9 +631,6 @@ static int open_url(AVFormatContext *s, AVIOContext **pb, const char *url,
av_dict_free(&tmp);
if (is_http)
*is_http = av_strstart(proto_name, "http", NULL);
return ret;
}
@@ -1080,7 +1072,6 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
{
AVDictionary *opts = NULL;
int ret;
int is_http = 0;
// broker prior HTTP options that should be consistent across requests
av_dict_set(&opts, "user-agent", c->user_agent, 0);
@@ -1100,13 +1091,13 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
seg->url, seg->url_offset, pls->index);
if (seg->key_type == KEY_NONE) {
ret = open_url(pls->parent, &pls->input, seg->url, c->avio_opts, opts, &is_http);
ret = open_url(pls->parent, &pls->input, seg->url, c->avio_opts, opts);
} else if (seg->key_type == KEY_AES_128) {
AVDictionary *opts2 = NULL;
char iv[33], key[33], url[MAX_URL_SIZE];
if (strcmp(seg->key, pls->key_url)) {
AVIOContext *pb;
if (open_url(pls->parent, &pb, seg->key, c->avio_opts, opts, NULL) == 0) {
if (open_url(pls->parent, &pb, seg->key, c->avio_opts, opts) == 0) {
ret = avio_read(pb, pls->key, sizeof(pls->key));
if (ret != sizeof(pls->key)) {
av_log(NULL, AV_LOG_ERROR, "Unable to read key file %s\n",
@@ -1131,7 +1122,7 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
av_dict_set(&opts2, "key", key, 0);
av_dict_set(&opts2, "iv", iv, 0);
ret = open_url(pls->parent, &pls->input, url, opts2, opts, &is_http);
ret = open_url(pls->parent, &pls->input, url, opts2, opts);
av_dict_free(&opts2);
@@ -1149,15 +1140,8 @@ static int open_input(HLSContext *c, struct playlist *pls, struct segment *seg)
/* Seek to the requested position. If this was a HTTP request, the offset
* should already be where want it to, but this allows e.g. local testing
* without a HTTP server.
*
* This is not done for HTTP at all as avio_seek() does internal bookkeeping
* of file offset which is out-of-sync with the actual offset when "offset"
* AVOption is used with http protocol, causing the seek to not be a no-op
* as would be expected. Wrong offset received from the server will not be
* noticed without the call, though.
*/
if (ret == 0 && !is_http && seg->key_type == KEY_NONE && seg->url_offset) {
* without a HTTP server. */
if (ret == 0 && seg->key_type == KEY_NONE && seg->url_offset) {
int64_t seekret = avio_seek(pls->input, seg->url_offset, SEEK_SET);
if (seekret < 0) {
av_log(pls->parent, AV_LOG_ERROR, "Unable to seek to offset %"PRId64" of HLS segment '%s'\n", seg->url_offset, seg->url);
@@ -1253,13 +1237,13 @@ restart:
/* Check that the playlist is still needed before opening a new
* segment. */
if (v->ctx && v->ctx->nb_streams) {
if (v->ctx && v->ctx->nb_streams &&
v->parent->nb_streams >= v->stream_offset + v->ctx->nb_streams) {
v->needed = 0;
for (i = 0; i < v->n_main_streams; i++) {
if (v->main_streams[i]->discard < AVDISCARD_ALL) {
for (i = v->stream_offset; i < v->stream_offset + v->ctx->nb_streams;
i++) {
if (v->parent->streams[i]->discard < AVDISCARD_ALL)
v->needed = 1;
break;
}
}
}
if (!v->needed) {
@@ -1397,8 +1381,8 @@ static void add_metadata_from_renditions(AVFormatContext *s, struct playlist *pl
int rend_idx = 0;
int i;
for (i = 0; i < pls->n_main_streams; i++) {
AVStream *st = pls->main_streams[i];
for (i = 0; i < pls->ctx->nb_streams; i++) {
AVStream *st = s->streams[pls->stream_offset + i];
if (st->codecpar->codec_type != type)
continue;
@@ -1524,8 +1508,7 @@ static int hls_read_header(AVFormatContext *s)
{
void *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb;
HLSContext *c = s->priv_data;
int ret = 0, i, j;
int highest_cur_seq_no = 0;
int ret = 0, i, j, stream_offset = 0;
c->ctx = s;
c->interrupt_callback = &s->interrupt_callback;
@@ -1600,17 +1583,6 @@ static int hls_read_header(AVFormatContext *s)
add_renditions_to_variant(c, var, AVMEDIA_TYPE_SUBTITLE, var->subtitles_group);
}
/* Select the starting segments */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
if (pls->n_segments == 0)
continue;
pls->cur_seq_no = select_cur_seq_no(c, pls);
highest_cur_seq_no = FFMAX(highest_cur_seq_no, pls->cur_seq_no);
}
/* Open the demuxer for each playlist */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
@@ -1627,18 +1599,7 @@ static int hls_read_header(AVFormatContext *s)
pls->index = i;
pls->needed = 1;
pls->parent = s;
/*
* If this is a live stream and this playlist looks like it is one segment
* behind, try to sync it up so that every substream starts at the same
* time position (so e.g. avformat_find_stream_info() will see packets from
* all active streams within the first few seconds). This is not very generic,
* though, as the sequence numbers are technically independent.
*/
if (!pls->finished && pls->cur_seq_no == highest_cur_seq_no - 1 &&
highest_cur_seq_no < pls->start_seq_no + pls->n_segments) {
pls->cur_seq_no = highest_cur_seq_no;
}
pls->cur_seq_no = select_cur_seq_no(c, pls);
pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
if (!pls->read_buffer){
@@ -1664,6 +1625,7 @@ static int hls_read_header(AVFormatContext *s)
}
pls->ctx->pb = &pls->pb;
pls->ctx->io_open = nested_io_open;
pls->stream_offset = stream_offset;
if ((ret = ff_copy_whiteblacklists(pls->ctx, s)) < 0)
goto fail;
@@ -1703,13 +1665,13 @@ static int hls_read_header(AVFormatContext *s)
avpriv_set_pts_info(st, 33, 1, MPEG_TIME_BASE);
else
avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
dynarray_add(&pls->main_streams, &pls->n_main_streams, st);
}
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_AUDIO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_VIDEO);
add_metadata_from_renditions(s, pls, AVMEDIA_TYPE_SUBTITLE);
stream_offset += pls->ctx->nb_streams;
}
/* Create a program for each variant */
@@ -1727,10 +1689,10 @@ static int hls_read_header(AVFormatContext *s)
int is_shared = playlist_in_multiple_variants(c, pls);
int k;
for (k = 0; k < pls->n_main_streams; k++) {
struct AVStream *st = pls->main_streams[k];
for (k = 0; k < pls->ctx->nb_streams; k++) {
struct AVStream *st = s->streams[pls->stream_offset + k];
av_program_add_stream_index(s, i, st->index);
av_program_add_stream_index(s, i, pls->stream_offset + k);
/* Set variant_bitrate for streams unique to this variant */
if (!is_shared && v->bandwidth)
@@ -1909,17 +1871,8 @@ static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
/* If we got a packet, return it */
if (minplaylist >= 0) {
struct playlist *pls = c->playlists[minplaylist];
if (pls->pkt.stream_index >= pls->n_main_streams) {
av_log(s, AV_LOG_ERROR, "stream index inconsistency: index %d, %d main streams, %d subdemuxer streams\n",
pls->pkt.stream_index, pls->n_main_streams, pls->ctx->nb_streams);
av_packet_unref(&pls->pkt);
reset_packet(&pls->pkt);
return AVERROR_BUG;
}
*pkt = pls->pkt;
pkt->stream_index = pls->main_streams[pls->pkt.stream_index]->index;
pkt->stream_index += pls->stream_offset;
reset_packet(&c->playlists[minplaylist]->pkt);
if (pkt->dts != AV_NOPTS_VALUE)
@@ -1951,8 +1904,6 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
HLSContext *c = s->priv_data;
struct playlist *seek_pls = NULL;
int i, seq_no;
int j;
int stream_subdemuxer_index;
int64_t first_timestamp, seek_timestamp, duration;
if ((flags & AVSEEK_FLAG_BYTE) ||
@@ -1976,12 +1927,10 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
/* find the playlist with the specified stream */
for (i = 0; i < c->n_playlists; i++) {
struct playlist *pls = c->playlists[i];
for (j = 0; j < pls->n_main_streams; j++) {
if (pls->main_streams[j] == s->streams[stream_index]) {
seek_pls = pls;
stream_subdemuxer_index = j;
break;
}
if (stream_index >= pls->stream_offset &&
stream_index - pls->stream_offset < pls->ctx->nb_streams) {
seek_pls = pls;
break;
}
}
/* check if the timestamp is valid for the playlist with the
@@ -1991,7 +1940,7 @@ static int hls_read_seek(AVFormatContext *s, int stream_index,
/* set segment now so we do not need to search again below */
seek_pls->cur_seq_no = seq_no;
seek_pls->seek_stream_index = stream_subdemuxer_index;
seek_pls->seek_stream_index = stream_index - seek_pls->stream_offset;
for (i = 0; i < c->n_playlists; i++) {
/* Reset reading */


@@ -193,8 +193,6 @@ static int rtmp_open(URLContext *s, const char *uri, int flags)
if (sep)
p = sep + 1;
else
break;
}
}
if (ctx->playpath) {


@@ -43,7 +43,6 @@
#include "libavutil/sha.h"
#include "libavutil/timecode.h"
#include "libavcodec/ac3tab.h"
#include "libavcodec/mpegaudiodecheader.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
@@ -2844,12 +2843,7 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
sample_size = sc->stsz_sample_size > 0 ? sc->stsz_sample_size : sc->sample_sizes[current_sample];
if (sc->pseudo_stream_id == -1 ||
sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
AVIndexEntry *e;
if (sample_size > 0x3FFFFFFF) {
av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", sample_size);
return;
}
e = &st->index_entries[st->nb_index_entries++];
AVIndexEntry *e = &st->index_entries[st->nb_index_entries++];
e->pos = current_offset;
e->timestamp = current_dts;
e->size = sample_size;
@@ -2974,10 +2968,6 @@ static void mov_build_index(MOVContext *mov, AVStream *st)
av_log(mov->fc, AV_LOG_ERROR, "wrong chunk count %d\n", total);
return;
}
if (size > 0x3FFFFFFF) {
av_log(mov->fc, AV_LOG_ERROR, "Sample size %u is too large\n", size);
return;
}
e = &st->index_entries[st->nb_index_entries++];
e->pos = current_offset;
e->timestamp = current_dts;
@@ -5223,10 +5213,6 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
return ret;
}
#endif
if (st->codecpar->codec_id == AV_CODEC_ID_MP3 && !st->need_parsing && pkt->size > 4) {
if (ff_mpa_check_header(AV_RB32(pkt->data)) < 0)
st->need_parsing = AVSTREAM_PARSE_FULL;
}
}
pkt->stream_index = sc->ffindex;


@@ -162,11 +162,6 @@ ogg_gptopts (AVFormatContext * s, int i, uint64_t gp, int64_t *dts)
if (dts)
*dts = pts;
}
if (pts > INT64_MAX && pts != AV_NOPTS_VALUE) {
// The return type is unsigned, we thus cannot return negative pts
av_log(s, AV_LOG_ERROR, "invalid pts %"PRId64"\n", pts);
pts = AV_NOPTS_VALUE;
}
return pts;
}


@@ -82,11 +82,7 @@ static uint64_t vp8_gptopts(AVFormatContext *s, int idx,
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
int invcnt = !((granule >> 30) & 3);
// If page granule is that of an invisible vp8 frame, its pts will be
// that of the end of the next visible frame. We substract 1 for those
// to prevent messing up pts calculations.
uint64_t pts = (granule >> 32) - invcnt;
uint64_t pts = (granule >> 32);
uint32_t dist = (granule >> 3) & 0x07ffffff;
if (!dist)


@@ -101,7 +101,7 @@ int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
int ret = 0;
if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
AVIOContext pb = { 0 };
AVIOContext pb;
RTSPState *rt = s->priv_data;
AVDictionary *opts = NULL;
int len = strlen(p) * 6 / 8;


@@ -1483,15 +1483,6 @@ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
if (ret < 0)
return ret;
#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
/* update deprecated public codec context */
ret = avcodec_parameters_to_context(st->codec, st->codecpar);
if (ret < 0)
return ret;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
st->internal->need_context_update = 0;
}


@@ -32,8 +32,8 @@
// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium)
// Also please add any ticket numbers that you belive might be affected here
#define LIBAVFORMAT_VERSION_MAJOR 57
#define LIBAVFORMAT_VERSION_MINOR 41
#define LIBAVFORMAT_VERSION_MICRO 100
#define LIBAVFORMAT_VERSION_MINOR 40
#define LIBAVFORMAT_VERSION_MICRO 101
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \


@@ -36,8 +36,8 @@ static int vplayer_probe(AVProbeData *p)
char c;
const unsigned char *ptr = p->buf;
if ((sscanf(ptr, "%*3d:%*2d:%*2d.%*2d%c", &c) == 1 ||
sscanf(ptr, "%*3d:%*2d:%*2d%c", &c) == 1) && strchr(": =", c))
if ((sscanf(ptr, "%*d:%*d:%*d.%*d%c", &c) == 1 ||
sscanf(ptr, "%*d:%*d:%*d%c", &c) == 1) && strchr(": =", c))
return AVPROBE_SCORE_MAX;
return 0;
}


@@ -427,6 +427,12 @@ typedef struct AVFrame {
enum AVChromaLocation chroma_location;
/**
* For hwaccel-format frames, this should be a reference to the
* AVHWFramesContext describing the frame.
*/
AVBufferRef *hw_frames_ctx;
/**
* frame timestamp estimated using various heuristics, in stream time base
* Code outside libavutil should access this field using:
@@ -518,11 +524,6 @@ typedef struct AVFrame {
*/
AVBufferRef *qp_table_buf;
#endif
/**
* For hwaccel-format frames, this should be a reference to the
* AVHWFramesContext describing the frame.
*/
AVBufferRef *hw_frames_ctx;
} AVFrame;
/**


@@ -115,10 +115,8 @@ static struct {
MAP(BGRX, RGB32, BGR0),
MAP(RGBA, RGB32, RGBA),
MAP(RGBX, RGB32, RGB0),
#ifdef VA_FOURCC_ABGR
MAP(ABGR, RGB32, ABGR),
MAP(XBGR, RGB32, 0BGR),
#endif
MAP(ARGB, RGB32, ARGB),
MAP(XRGB, RGB32, 0RGB),
};


@@ -58,7 +58,7 @@
* The following example illustrates an AVOptions-enabled struct:
* @code
* typedef struct test_struct {
* const AVClass *class;
* AVClass *class;
* int int_opt;
* char *str_opt;
* uint8_t *bin_opt;
@@ -96,7 +96,7 @@
* @code
* test_struct *alloc_test_struct(void)
* {
* test_struct *ret = av_mallocz(sizeof(*ret));
* test_struct *ret = av_malloc(sizeof(*ret));
* ret->class = &test_class;
* av_opt_set_defaults(ret);
* return ret;


@@ -64,7 +64,7 @@
*/
#define LIBAVUTIL_VERSION_MAJOR 55
#define LIBAVUTIL_VERSION_MINOR 28
#define LIBAVUTIL_VERSION_MINOR 27
#define LIBAVUTIL_VERSION_MICRO 100
#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \


@@ -89,7 +89,7 @@ static const struct {
#if CONFIG_JPEG2000_DECODER
{ "jpeg2000dsp", checkasm_check_jpeg2000dsp },
#endif
#if CONFIG_PIXBLOCKDSP && !(ARCH_PPC64 && HAVE_BIGENDIAN)
#if CONFIG_PIXBLOCKDSP
{ "pixblockdsp", checkasm_check_pixblockdsp },
#endif
#if CONFIG_V210_ENCODER