Compare commits


153 Commits

Author SHA1 Message Date
James Almer
644296e736 avutil/softfloat: use abort() instead of av_assert0(0)
Fixes compilation of host tool aacps_fixed_tablegen.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: James Almer <jamrial@gmail.com>
(cherry picked from commit 9f4a41bf99)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-27 15:01:22 +01:00
Michael Niedermayer
a353cc44a6 Update for 2.8.3
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-27 14:29:04 +01:00
Michael Niedermayer
4c718691ea avcodec/cabac: Check initial cabac decoder state
Fixes integer overflows
Fixes: 1430e9c43fae47a24c179c7c54f94918/signal_sigsegv_421427_2340_591e9810c7b09efe501ad84638c9e9f8.264

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Found-by: xiedingbao (Ticket4727)
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 8000d484b8)

Conflicts:

	libavcodec/cabac.h
2015-11-27 14:07:03 +01:00
Michael Niedermayer
24c504bd0a avcodec/cabac_functions: Fix "left shift of negative value -31767"
Fixes: 1430e9c43fae47a24c179c7c54f94918/signal_sigsegv_421427_2340_591e9810c7b09efe501ad84638c9e9f8.264

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Found-by: xiedingbao (Ticket4727)
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a1f6b05f52)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-27 14:03:01 +01:00
Michael Niedermayer
6b91701de3 avcodec/h264_slice: Limit max_contexts when slice_context_count is initialized
Fixes out of array access
Fixes: 1430e9c43fae47a24c179c7c54f94918/signal_sigsegv_421427_2049_f2192b6829ab6e0eefcb035329c03c60.264

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4ea4d2f438)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
Martin Storsjö
1290c85c9d rtmpcrypt: Do the xtea decryption in little endian mode
The XTEA algorithm operates on 32 bit numbers, not on byte sequences.
The XTEA implementation in libavutil is written assuming big endian
numbers, while the rtmpe signature encryption assumes little endian.

This fixes rtmpe communication with rtmpe servers that use signature
type 8 (XTEA), e.g. crunchyroll.

CC: libav-stable@libav.org
Signed-off-by: Martin Storsjö <martin@martin.st>
(cherry picked from commit e7728319b92dbb4fb949155e33de7ff5358ddff3)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
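For illustration of the endianness point above, a minimal, self-contained C sketch (not FFmpeg code) showing how the same four bytes give different 32-bit words depending on byte order:

    /* Illustration only: the same 4 bytes interpreted as big- vs
     * little-endian 32-bit words, which is why an XTEA implementation
     * written for one byte order cannot be fed the other directly. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t b[4] = { 0x01, 0x02, 0x03, 0x04 };
        uint32_t be = (uint32_t)b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3];
        uint32_t le = (uint32_t)b[3] << 24 | b[2] << 16 | b[1] << 8 | b[0];
        printf("big endian:    0x%08x\n", (unsigned)be);  /* 0x01020304 */
        printf("little endian: 0x%08x\n", (unsigned)le);  /* 0x04030201 */
        return 0;
    }
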
Michael Niedermayer
b70f7d20e1 avformat/matroskadec: Check subtitle stream before dereferencing
Unrecognized streams are not allocated
Fixes: flicker-1.color1.vp91447030769.08.webm

Found-by: Chris Cunningham <chcunningham@google.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a5034b324c)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
Michael Niedermayer
71a3113333 avcodec/pngdec: Replace assert by request for sample for unsupported TRNS cases
Fixes assertion failure
Fixes: 7f646252a30ee28b583aac1f82e7985e/signal_sigabrt_7ffff6ae7cc9_7353_62fc077bf2f454d39e188c69807193a6.png

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a62178be80)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
Michael Niedermayer
859a6edaed avformat/utils: Do not init parser if probing is unfinished
Fixes assertion failure
Fixes: 136f8b8d47af7892306625e597dee655/signal_sigabrt_7ffff6ae7cc9_8941_ab11bea57c84796418f481f873dc31ba.dvr_ms

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 1ef336e912)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
Michael Niedermayer
2dc1f3a02b avcodec/jpeg2000dec: Fix potential integer overflow with tile dimensions
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 65d3359fb3)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:06:39 +01:00
Michael Niedermayer
9a9dda615b avcodec/jpeg2000: Use av_image_check_size() in ff_jpeg2000_init_component()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 016fd413f9)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:42 +01:00
Michael Niedermayer
b3bc05290a avcodec/wmaprodec: Check for overread in decode_packet()
Fixes assertion failure
Fixes: 0256e92df2df7e933b43a2c70e4c8040/signal_sigabrt_7ffff6ae7cc9_1358_999ac18684788221490757582ce9af84.wma

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 7ad698e24e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:42 +01:00
Michael Niedermayer
093e58228e avcodec/smacker: Check that the data size is a multiple of a sample vector
Fixes out of array access
Fixes: ce19e41f0ef1e52a23edc488faecdb58/asan_heap-oob_2504e97_4202_ffa0df1baed14022b9bfd4f8ac23d0cb.smk

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4a9af07a49)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:42 +01:00
Michael Niedermayer
948d93d3f4 avcodec/takdec: Skip last p2 sample (which is unused)
Fixes out of array read
Fixes: cb3f38b08b4541523974667c7d1eee9e/asan_heap-oob_2659e18_9838_021fd5cd635bf76cede6398cd9ecbcdd.tak

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 08b520636e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
824e72d9b7 avcodec/dxtory: Fix input size check in dxtory_decode_v1_410()
Fixes potential out of array read

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 76b6f4b7d9)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
c7a970a3a0 avcodec/dxtory: Fix input size check in dxtory_decode_v1_420()
Fixes out of array read
Fixes: c50c4aa6cefda71b19a31ea12302980c/asan_heap-oob_12be5fd_7011_33ebd015a74976215934add72b9c8352.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 9caa9414cc)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
6af38b2718 avcodec/error_resilience: avoid accessing previous or next frames tables beyond height
The height of the tables can be rounded up for MBAFF, but this does not imply
that the same is true for the previous frames

Fixes out of array reads
Fixes: c106b36fa36db8ff8f3ed0c82be7bea2/asan_heap-oob_32699f0_6321_467b9a1d7e03d7cfd310b7e65dc53bcc.mov

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a105f52855)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
0ce7baa245 avcodec/dpx: Move need_align to act per line
Fixes out of array read
Fixes: 61cf123c081ee2bb774d307c75bdb99e/asan_heap-oob_1224f76_5546_bee833ffae73f752b489b9eeaac52db7.dpx

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit c8aaae8e0f)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
7f0b58947d avcodec/flashsv: Check size before updating it
Fixes out of array read
Fixes: 3c857d4d90365731524716e6d051e43a/signal_sigsegv_7f4f59bcc29e_1386_20abd2c8e655cb9c75b24368e65fe3b1.flv

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 17705f5d4f)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
f64ffbbdc0 avcodec/ivi: Check image dimensions
Fixes integer overflow
Fixes: 1e32c6c591d940337c20b197ec1c4d3d/asan_heap-oob_4a52e5_8946_0bb0d9e863def56005e49f1d89bdc94d.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit df91aa034b)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
c0748b9954 avcodec/utils: Better check for channels in av_get_audio_frame_duration()
Fixes integer overflow
Fixes: 0c2625f236ced104d402b4a03c0d65c7/asan_generic_274e1ce_5990_9314e7a67c26aecf011b178ade9f217c.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4e16ad2868)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Michael Niedermayer
dbfec68d32 avcodec/jpeg2000dec: Check for duplicate SIZ marker
Fixes: 0231a17345734228011c6f35a64e4594/asan_heap-oob_1d92a72_3218_1213809a9e3affec77e4c191fdfdc0a9.mov

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 44a7f17d0b)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-26 16:05:41 +01:00
Andreas Cadhalpun
34f2d74555 aacsbr: don't call sbr_dequant twice without intermediate read_sbr_data
Doing that doesn't make sense, because the only purpose of sbr_dequant
is to process the data from read_sbr_data.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 1c3e43a627)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
22017f7745 hqx: correct type and size check of info_offset
It is used as size argument of ff_canopus_parse_info_tag, which uses it
as size argument to bytestream2_init, which only supports sizes up to
INT_MAX.
Changing its type to unsigned simplifies the check.

Reviewed-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 1ed7fcd42a)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
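A minimal sketch of the range check described above, assuming a hypothetical unsigned info_offset that is later passed to an API taking a signed int size (names are illustrative, not the actual hqx code):

    #include <limits.h>
    #include <stdint.h>

    /* With an unsigned type, a single comparison against INT_MAX is enough
     * before handing the value to an int-based size argument. */
    static int check_info_size(uint32_t info_offset)
    {
        if (info_offset > INT_MAX)
            return -1;   /* would not fit into the int size parameter */
        return 0;
    }
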
Andreas Cadhalpun
b372ad819e mxfdec: check edit_rate also for physical_track
Previously only the edit_rate of material_track was checked.
If it's negative, it causes assertion failures in av_rescale_rnd.

Reviewed-by: Tim Nicholson <nichot20-at-yahoo.com@ffmpeg.org>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 047bf82c18)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Michael Niedermayer
bdbfc12e7f avcodec/jpeg2000: Change coord to 32bit to support larger than 32k width or height
Fixes: 03e0abe721b1174856d41a1eb5d6a896/signal_sigabrt_7ffff6ae7cc9_3813_e71bf3541abed3ccba031cd5ba0269a4.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 0eb7de1973)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Michael Niedermayer
c3a44a2a55 avcodec/jpeg2000dec: Check SIZ dimensions to be within the supported range
Fixes potential integer overflows
Fixes: 03e0abe721b1174856d41a1eb5d6a896/signal_sigabrt_7ffff6ae7cc9_3813_e71bf3541abed3ccba031cd5ba0269a4.avi

This fix is chosen to be simple to backport; a better solution
for master is planned

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 6ef819c40b)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Michael Niedermayer
d1d48d4319 avcodec/jpeg2000: Check comp coords to be within the supported size
Fixes assertion failure
Fixes: 03e0abe721b1174856d41a1eb5d6a896/signal_sigabrt_7ffff6ae7cc9_3813_e71bf3541abed3ccba031cd5ba0269a4.avi

This fix is chosen to be simple to backport; a better solution
for master is planned

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a1a8cbcb35)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
6b0bc64f54 mpegvideo: clear overread in clear_context
Otherwise the h263p decoder can try to copy overread bytes, even though
the buffer is NULL.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 6a69a175e7)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Michael Niedermayer
983e63b490 avcodec/avrndec: Use the AVFrame format instead of the context
Fixes out of array read
Fixes: 20dd01398dee0f6d83d7e5410a2ae8eb/signal_sigsegv_39eeb1f_4001_62efbdf1c60748dabf1ec310b59525fd.mov

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit ccba8aaff2)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
bf2f7115d9 dds: disable palette flag for compressed images
Having both is not valid and can cause a NULL pointer dereference of
frame->data[1] later.

Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Signed-off-by: Vittorio Giovara <vittorio.giovara@gmail.com>
(cherry picked from commit 0a8bff788b)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
ef699b4135 dds: validate compressed source buffer size
A too small buffer will cause segfaults somewhere below
decompress_texture_thread.

Reviewed-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 9a37d47644)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
a8513826dd dds: validate source buffer size before copying
If it is too small av_image_copy_plane segfaults.

Reviewed-by: Vittorio Giovara <vittorio.giovara@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 1675809d2d)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:46 +01:00
Andreas Cadhalpun
df31acda64 dvdsubdec: validate offset2 similar to offset1
If it is negative, it causes segmentation faults in decode_rle.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit f621749d11)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:45 +01:00
Andreas Cadhalpun
d09fd0736a brstm: reject negative sample rate
A negative sample rate causes assertion failures in av_rescale_rnd.

Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 7b67fe20f6)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:45 +01:00
Andreas Cadhalpun
c3f276b608 aacps: avoid division by zero in stereo_processing
This fixes a SIGFPE crash in the aac_fixed decoder.

Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Reviewed-by: Rostislav Pehlivanov <atomnuker@gmail.com>
(cherry picked from commit ef7fe9851e)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:45 +01:00
Andreas Cadhalpun
510d88ae93 softfloat: assert when the argument of av_sqrt_sf is negative
The correct result can't be expressed in SoftFloat.
Currently it returns a random value from an out of bounds read.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit f3866a14c3)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:45 +01:00
Michael Niedermayer
c4133b25f8 avcodec/takdec: Use memmove, avoid undefined memcpy() use
Fixes: e214333cbd94c91228e624ff39329ce6/asan_generic_4a5159_6412_96cda2530e80607210ab41ccae3d456d.tak

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 7cea3430a5)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-11-26 01:35:41 +01:00
Michael Niedermayer
c9b3451da3 Update Changelog
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 01:17:17 +01:00
Andreas Cadhalpun
46f83b059b aacsbr_fixed: check for envelope scalefactors overflowing
This prevents various values from getting an insanely huge exponent.
If someone knows a cleaner solution, that's welcome!

This is similar to commit 8978c74 for aacsbr.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 0e36a14a42)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:43:01 +01:00
Andreas Cadhalpun
ce2664f5f7 aacdec: don't return frames without data from aac_decode_er_frame
This is similar to commit ec38a1b for aac_decode_frame_int.

Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit d7f29bfa69)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:42:50 +01:00
Michael Niedermayer
8364d607ac avcodec/aacsbr_fixed: Try to initialize sum[0..1] differently to fix build with VS2012
Found-by: Hendrik Leppkes <h.leppkes@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 8a024f6a43)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:42:43 +01:00
Michael Niedermayer
21e42d9b0d avcodec/aacsbr: Use FLOAT_0
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit dcf1cf5d24)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:42:35 +01:00
Andreas Cadhalpun
e10c353ca5 softfloat: handle INT_MIN correctly in av_int2sf
Otherwise v=INT_MIN doesn't get normalized and thus triggers av_assert2
in other functions.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 9ac61e73d0)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
72be96ac55 avutil/softfloat: Include negative numbers in cmp/gt tests
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 955cdc43a3)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
6581e40e1a avutil/softfloat: Fix av_gt_sf() with large exponents try #2
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 05b05a7a84)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
0f9c617979 avutil/softfloat: Add test for av_gt_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 791ea23e57)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
f9998d1994 avutil/softfloat: Extend the av_cmp_sf() test to cover a wider range of exponents
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit ecfb076141)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
7ad4bf4899 avutil/softfloat: Fix overflows in shifts in av_cmp_sf() and av_gt_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit cee3c9d29a)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Michael Niedermayer
43ada90fc5 avutil/softfloat: Add test for av_cmp_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit df2a2117d2)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
Ganesh Ajjanagadde
476ddffccb avutil/common: add FFDIFFSIGN macro
This is of use for defining comparator callbacks. Common approaches like
return x-y are not safe due to the risks of overflow.
Furthermore, the (x > y) - (x < y) trick is optimized to branchless
code.
This also documents this macro accordingly.

Reviewed-by: Michael Niedermayer <michael@niedermayer.cc>
Signed-off-by: Ganesh Ajjanagadde <gajjanagadde@gmail.com>
(cherry picked from commit 265f83fd35)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-12 00:00:57 +01:00
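A minimal sketch of the comparator pattern the macro implements (illustrative macro and names, not the FFmpeg header itself):

    #include <stdint.h>
    #include <stdlib.h>

    /* Branchless sign of (x - y): evaluates to -1, 0 or +1 and cannot
     * overflow, unlike "return x - y". */
    #define DIFFSIGN(x, y) (((x) > (y)) - ((x) < (y)))

    static int cmp_int64(const void *a, const void *b)
    {
        int64_t va = *(const int64_t *)a;
        int64_t vb = *(const int64_t *)b;
        return DIFFSIGN(va, vb);   /* safe qsort() comparator */
    }
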
Michael Niedermayer
b533998d0a avutil/softfloat: Add tests for exponent underflows
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 596dfe7d6c)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:58 +01:00
Michael Niedermayer
acd203fc0d avutil/softfloat: Fix exponent underflow in av_div_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 046218b212)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:54 +01:00
Michael Niedermayer
402c4a9f81 avutil/softfloat: Fix exponent underflow in av_mul_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a1e3303fc0)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:51 +01:00
Michael Niedermayer
6e4bfbe936 avutil/softfloat: Fix typo in av_mul_sf() doxy
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4135a2bfd6)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:48 +01:00
Michael Niedermayer
f38beb47da avutil/softfloat: Correctly set the exponent for 0.0 in av_sqrt_sf()
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 107db5abf3)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:32 +01:00
Michael Niedermayer
efa9128556 avutil/softfloat: FLOAT_0 should use MIN_EXP
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a66b243d52)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 23:32:27 +01:00
Michael Niedermayer
3de8521667 swresample/resample: increase precision for compensation
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 351e625d60)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 18:41:01 +01:00
Rodger Combs
edf5e88eac lavf/mov: add support for sidx fragment indexes
Fixes trac #3842
(cherry picked from commit 4ab5666759)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 02:50:39 +01:00
Michael Niedermayer
8d634be4ce update versions for 2.8.2
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-11 02:50:39 +01:00
Michael Niedermayer
9a6d581076 avformat/mxfenc: Only store user comment related tags when needed
Also support disabling them, as they seem to cause problems for some
users. They are also not allowed in IRT D-10, thus the default for
mxf_d10 is not to write them.

This also decreases the file size when no user comments are stored.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit d9726893f3)

Conflicts:

	libavformat/mxfenc.c
2015-11-11 02:21:32 +01:00
Michael Niedermayer
84f8157662 tests/fate/avformat: Fix fate-lavf
The CMP variable seems to have been inherited from fate-api-seek, which set it to null.

The mxf reference needed a change due to c7e14a279f.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit b83c849e87)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-10 11:20:21 +01:00
Simon Thelen
e5a2f5e74d doc/ffmpeg: Clarify that the sdp_file option requires an rtp output.
Signed-off-by: Simon Thelen <ffmpeg-dev@c-14.de>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit b02201efb5)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-09 23:58:05 +01:00
Simon Thelen
dac3598563 ffmpeg: Don't try and write sdp info if none of the outputs had an rtp format.
Fixes a segfault when trying to write nonexistent rtp information.

Signed-off-by: Simon Thelen <ffmpeg-dev@c-14.de>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 70fb5eadc5)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-09 23:56:29 +01:00
Andreas Cadhalpun
c0cd8747ef apng: use correct size for output buffer
The buffer needs s->bpp bytes, at maximum currently 10.
Assert that s->bpp is not larger.

This fixes a stack buffer overflow.

Reviewed-by: wm4 <nfxjfg@googlemail.com>
Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 3e8e1a660e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-07 14:11:12 +01:00
Andreas Cadhalpun
e217224456 jvdec: avoid unsigned overflow in comparison
The return type of strlen is size_t, i.e. unsigned, so if pd->buf_size
is 3, the right side overflows leading to a wrong result of the
comparison and subsequently a heap buffer overflow.

Reviewed-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit db374790c7)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-07 14:10:41 +01:00
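To illustrate the signed/unsigned pitfall described above, a small self-contained example; the buffer size and magic string are made up, not the actual jvdec code:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        int buf_size = 3;                     /* tiny probe buffer */
        const char magic[] = "made-up magic"; /* hypothetical marker */

        /* strlen() returns size_t (unsigned), so buf_size is converted to
         * size_t and the subtraction wraps to a huge positive value instead
         * of going negative, making the check pass when it should not. */
        if (buf_size - strlen(magic) > 0)
            puts("wrapped: difference looks positive although buf_size is smaller");
        return 0;
    }
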
Michael Niedermayer
56419053bc avcodec/jpeg2000dec: Clip all tile coordinates
Fixes out of array access
Fixes: b877a6b788a25c70e8b1d014f8628549/asan_heap-oob_1da2c3f_2324_5a1b329b0b3c4bb6b1d775660ac56717.r3d

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 43492ff3ab)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-07 03:02:04 +01:00
Michael Niedermayer
11b4822ddb avcodec/microdvddec: Check for string end in 'P' case
Fixes out of array read
Fixes: a9502b60f4cecc19475382aee255f73c/asan_heap-oob_1e87fba_2548_a8ad47f6dde36644fe9cdc444d4632d0.sub

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit c719cd6cf7)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-07 01:10:58 +01:00
Michael Niedermayer
2de2959305 avcodec/dirac_parser: Fix undefined memcpy() use
Fixes: 9d375e415486edd1a0c826f2307d89a4/asan_generic_4a5159_1577_faa333e83dacdd9e4dd322380aeed537.iss

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit daefd8ab2f)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-07 01:10:16 +01:00
Michael Niedermayer
b93a8bd838 avformat/xmv: Discard remainder of packet on error
Fixes infinite loop
Fixes: 9c48ae2680c5f23bca3d20ff0f325fd8/asan_generic_4c254d_1374_993f1e5967dd6f844b8d72f978ce2a6c.pss

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 79c4a338e4)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-06 02:55:48 +01:00
Michael Niedermayer
2817eb514c avformat/xmv: factor return check out of if/else
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 9b6fac11da)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-06 02:55:13 +01:00
Michael Niedermayer
6e085f9a32 avcodec/mpeg12dec: Do not call show_bits() with invalid bits
Fixes assertion failure
Fixes: 63e50545709a6440d3d59f6426d58db9/signal_sigabrt_7ffff6ae7cc9_8189_3272a3010fd98ddf947c662bbde1ac13.ts

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 973c3dba27)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-06 01:17:00 +01:00
Michael Niedermayer
a7bbb7fb88 avcodec/faxcompr: Add missing runs check in decode_uncompressed()
Fixes out of array access
Fixes: 54e488b9da4abbceaf405d6492515697/asan_heap-oob_32769b0_160_a8755eb08ee8f9579348501945a33955.TIF

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit d4a731b84a)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-05 21:50:14 +01:00
Michael Niedermayer
1290037626 libavutil/channel_layout: Check strtol*() for failure
Fixes assertion failure
Fixes: 4f5814bb15d2dda6fc18ef9791b13816/signal_sigabrt_7ffff6ae7cc9_65_7209d160d168b76f311be6cd64a548eb.wv

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit c9bfd6a8c3)

Conflicts:

	libavutil/channel_layout.c
2015-11-05 20:47:15 +01:00
Michael Niedermayer
c7174d5204 avformat/mpegts: Only start probing data streams within probe_packets
Fixes assertion failure
Fixes: 4321db8ac331f5967ebfbfe80ce5eb78/signal_sigabrt_7ffff6ae7cc9_7213_0d6457b9d6897fa7c78507fa5de53510.ts

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 3692d859f4)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-05 18:23:11 +01:00
Michael Niedermayer
ff30907205 avcodec/hevc_ps: Check chroma_format_idc
Fixes out of array access
Fixes: 24d05e8b84676799c735c9e27d97895e/asan_heap-oob_1b70f6a_2955_7c3652a7f370f9f3ef40642bc2c99bb2.bit

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 93f30f825c)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-05 15:17:56 +01:00
Michael Niedermayer
a6ae88bb25 avcodec/ffv1dec: Check for 0 quant tables
Fixes assertion failure
Fixes: 07ec1fc3c1cbf2d3edcd7d9b52ca156c/asan_heap-oob_13624c5_491_ecd4720a03e697ba750b235690656c8f.avi

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 5745cf799a)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-05 01:49:09 +01:00
Michael Niedermayer
4567cba0b8 avcodec/mjpegdec: Reinitialize IDCT on BPP changes
Fixes misaligned access
Fixes: dc9262a469f6f315f74c087a7b3a7f35/signal_sigsegv_2e95bcd_9_9c0f9f4a9ba82aa9b3ab2b91ce4d5277.jpg

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit cc35f6f476)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-04 22:16:38 +01:00
Michael Niedermayer
fdb8842639 avcodec/mjpegdec: Check index in ljpeg_decode_yuv_scan() before using it
Fixes: 04715144ba237443010554be0d05343f/asan_heap-oob_1eafc76_1737_c685b48041a563461839e4e7ab97abb8.jpg
Fixes out of array access

Found-by: Mateusz "j00ru" Jurczyk and Gynvael Coldwind
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit d24888ef19)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-04 19:42:29 +01:00
Tobias Rapp
c6c801d993 avutil/file_open: avoid file handle inheritance on Windows
Avoids inheritance of file handles on Windows systems similar to the
O_CLOEXEC/FD_CLOEXEC flag on Linux.

Fixes file lock issues in Windows applications when a child process
is started with handle inheritance enabled (standard input/output
redirection) while an FFmpeg transcode is running in the parent
process.

Links relevant to the subject:

https://msdn.microsoft.com/en-us/library/w7sa2b22.aspx

Describes the _wsopen() function and the O_NOINHERIT flag. File handles
opened by _wsopen() are inheritable by default.

https://msdn.microsoft.com/en-us/library/windows/desktop/ms682425%28v=vs.85%29.aspx

Describes handle inheritance when creating new processes. Handle
inheritance must be enabled (bInheritHandles = TRUE) e.g. when you want
to pass handles for stdin/stdout via lpStartupInfo.

Signed-off-by: Tobias Rapp <t.rapp@noa-audio.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4746653466)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-11-02 19:15:32 +01:00
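A minimal sketch of opening a file without handle inheritance on Windows, as described above; it assumes the Microsoft CRT's _wsopen() and _O_NOINHERIT flag and is an illustration, not the actual avutil/file_open change:

    #ifdef _WIN32
    #include <fcntl.h>
    #include <io.h>
    #include <share.h>
    #include <sys/stat.h>
    #include <wchar.h>

    /* Open read-only without letting child processes inherit the handle,
     * roughly analogous to O_CLOEXEC on POSIX systems. */
    static int open_noinherit(const wchar_t *name)
    {
        return _wsopen(name, _O_RDONLY | _O_BINARY | _O_NOINHERIT,
                       _SH_DENYNO, _S_IREAD);
    }
    #endif
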
Michael Niedermayer
aa34146e41 avcodec/h264_slice: Disable slice threads if there are multiple access units in a packet
Fixes null pointer dereference
Fixes part of Ticket4977

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 9368d2da3d33cac845f2fdf663df500b53625c5e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-31 22:57:52 +01:00
Lucas de Andrade
fcb8ee98f6 avformat/hls: update cookies on setcookie response
Context cookies must be updated when a playlist response returns a Set-Cookie header.

See: 770dd10504
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 18:12:30 +01:00
Kieran Kunhya
2f5f940bef opusdec: Don't run vector_fmul_scalar on zero length arrays
Fixes crashes on fuzzed files
Fixes Ticket4969 part2

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit b3e5f15b95)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 17:55:31 +01:00
Michael Niedermayer
6ac9d6303f avcodec/opusdec: Fix extra samples read index
Fixes crash
Fixes Ticket4969 part 1

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 07225fa74f)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 17:55:31 +01:00
Michael Niedermayer
81a2ad762b avcodec/ffv1: Initialize vlc_state on allocation
This ensures that they are always set to valid values
Fixes Ticket4939

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a878dfa4f5)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 17:55:31 +01:00
Michael Niedermayer
c8a1324d1e avcodec/ffv1dec: update progress in case of broken pointer chains
Fixes deadlock
Fixes Ticket4932

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 5063a18f56)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 17:55:31 +01:00
Michael Niedermayer
6a0e10ae0d avcodec/ffv1dec: Clear slice coordinates if they are invalid or slice header decoding fails for other reasons
Fixes Ticket4931

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 4c2d4e8700)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-30 17:55:31 +01:00
Martin Storsjö
3f3e12c768 rtsp: Allow $ as interleaved packet indicator before a complete response header
Some RTSP servers ("HiIpcam/V100R003 VodServer/1.0.0") respond to
our keepalive GET_PARAMETER request with a truncated RTSP header
(lacking the final empty line to indicate a complete response
header). Prior to 764ec70149, this worked just fine since we
reacted to the $ as interleaved packet indicator anywhere.

Since $ is a valid character within the response header lines,
764ec70149 changed it to be ignored there. But to keep
compatibility with such broken servers, we need to at least
allow reacting to it at the start of lines.

Fixes ticket #4952.

Signed-off-by: Martin Storsjö <martin@martin.st>
(cherry picked from commit e02dcdf6bb)
2015-10-27 13:40:06 +01:00
Ronald S. Bultje
6616762134 videodsp: don't overread edges in vfix3 emu_edge.
Fixes trac ticket 3226. Also see Andreas' analysis in
https://bugs.debian.org/801745, which was very helpful.
(cherry picked from commit 52f84d82bd)
2015-10-25 01:05:31 +02:00
wm4
96b87d5cfa avformat/mp3dec: improve junk skipping heuristic
Commit 2b3e9bbfb5 caused problems for a
certain API user:

https://code.google.com/p/chromium/issues/detail?id=537725
https://code.google.com/p/chromium/issues/detail?id=542032

The problem seems rather arbitrary, because if there's junk, anything
can happen. In this case, the imperfect junk skipping just caused it to
read different junk, from what I can see.

We can improve the accuracy of junk detection by a lot by checking if 2
consecutive frames use the same configuration. While in theory it might
be completely fine for the 1st frame to have a different format than the
2nd frame, it's exceedingly unlikely, and I can't think of a legitimate
use-case.

This is approximately what mpg123 does for junk skipping. The
set of compared header bits is the same as the libavcodec mp3 parser
uses for similar purposes.
(cherry picked from commit de1b1a7da9)
2015-10-20 12:35:43 +02:00
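A rough sketch of the consecutive-frame check described above; the mask value and helper name are assumptions for illustration, not the actual mp3dec/parser code:

    #include <stdint.h>

    /* Assumed mask keeping only the "configuration" bits of an MPEG audio
     * frame header (sync word, version, layer, sample rate, channel mode);
     * the real set of compared bits lives in the libavcodec mp3 parser. */
    #define SAME_HEADER_MASK 0xFFFE0CC0u

    /* A sync candidate is only trusted when the next frame's header agrees
     * on the configuration bits. */
    static int headers_compatible(uint32_t h1, uint32_t h2)
    {
        return (h1 & SAME_HEADER_MASK) == (h2 & SAME_HEADER_MASK);
    }
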
Marton Balint
e0e28dad90 concatdec: fix file_start_time calculation regression
Fixes ticket #4924.

Found-by: Jaroslav Šnajdr <jsnajdr@gmail.com>
Reviewed-by: Nicolas George <george@nsup.org>
Signed-off-by: Marton Balint <cus@passwd.hu>
(cherry picked from commit df239b7619)
2015-10-17 22:36:33 +02:00
周晓勇
1a67b0f9ae avcodec: loongson optimize h264dsp idct and loop filter with mmi
Change-Id: Ic87fb8f5cd22a502ff9dbbc5a5a8ea97cfc8a1dd
Signed-off-by: ZhouXiaoyong <zhouxiaoyong@loongson.cn>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-16 14:47:56 +02:00
Michael Niedermayer
e3fcd88f08 avcodec/jpeg2000dec: Clear properties in jpeg2000_dec_cleanup() too
Fixes: Ticket4878

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit c980c5e54d)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-16 14:47:56 +02:00
Anssi Hannula
68a6178ef0 avformat/hls: add support for EXT-X-MAP
Without EXT-X-MAP support we miss the first bytes of some streams.

These streams worked by luck before byte-ranged segment support was added in
da7759b357

Fixes ticket #4797.
(cherry picked from commit 9099079488)

Conflicts:
	libavformat/hls.c
2015-10-15 14:27:20 +02:00
Anssi Hannula
d51ddd45b2 avformat/hls: fix segment selection regression on track changes of live streams
Commit ad701326b4 ("avformat/hls: open playlists immediately when
AVDISCARD_ALL is dropped") inadvertently caused first_packet to never be
cleared, causing select_cur_seq_no() to not use the specific code for
live streams.

In practice this means that when the user selects a different audio
track during live stream (i.e. non-VOD) playback, there may be some
additional delay as the code might select an incorrect segment at first,
and we have to wait for video to catch audio (if too late segment was
selected) or to download more following audio segments (if too early
segment was selected).

Fix that by restoring the zeroing of first_packet.
(cherry picked from commit fd74d45d51)
2015-10-15 14:21:29 +02:00
Carl Eugen Hoyos
c2db8ebc08 configure: Require libkvazaar < 0.7.
Fixes ticket #4925.

Reviewed-by: Arttu Ylä-Outinen
2015-10-14 14:15:30 +02:00
Michael Niedermayer
b46efcb293 avcodec/vp8: Do not use num_coeff_partitions in thread/buffer setup
The variable is not a constant and can lead to race conditions

Fixes: repro.webm (not reproducible with FFmpeg alone)

Found-by: Dale Curtis <dalecurtis@google.com>
Tested-by: Dale Curtis <dalecurtis@google.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit dabea74d0e)
2015-10-14 14:15:11 +02:00
Michael Niedermayer
40934e0e9b Update for 2.8.1
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-13 20:44:52 +02:00
Pedro Arthur
bb3b4ad460 swscale: fix ticket #4881
When scaling only a slice of a frame, the output was always written
to the first lines, leaving the rest of the frame black.
(cherry picked from commit 5bd62a1b3c)
2015-10-13 20:11:42 +02:00
Andreas Cadhalpun
13d3749424 doc: fix spelling errors
Reviewed-by: Lou Logan <lou@lrcd.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit 8d6625642d)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-10-12 21:46:54 +02:00
Andreas Cadhalpun
173053a125 hls: only seek if there is an offset
If there is no #EXT-X-BYTERANGE specified, there is no need to seek.
Seeking fails anyway for rtmp, because this protocol does not support
url_seek.

This fixes CNN.m3u from trac ticket 4797 (i.e. Debian bug #798189).

Reviewed-by: wm4 <nfxjfg@googlemail.com>
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
(cherry picked from commit f9f0b4c08e)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-10-12 21:46:54 +02:00
Alexandra Hájková
8118fdf8bb asfdec: add more checks for size left in asf packet buffer
Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
(cherry picked from commit c0a49077ea)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-10-12 21:46:54 +02:00
Alexandra Hájková
f235f511a0 asfdec: alloc enough space for storing name in asf_read_metadata_obj
Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
(cherry picked from commit 77cf236689)
Signed-off-by: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
2015-10-12 21:46:54 +02:00
Michael Niedermayer
c149a4afee avcodec/pngdec: Check blend_op.
Fixes CID1322359, CID1322358

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 1e7e4f13f9)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-12 02:23:36 +02:00
赵宇龙
02d8abf0f5 h264_mp4toannexb: fix pps offset fault when there is more than one sps in avcc
The pps offset is used to locate the PPS in spspps_buf; however, the
current calculation is wrong because it is the offset within the original
avctx->extradata. When there is only one SPS in the avcc the value is
correct by coincidence, but it fails for an avcc with multiple SPS.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 6b32d0d86b6bec2b5cb565d6ab4556f8cd66214a)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-12 02:23:36 +02:00
Michael Niedermayer
9579550b2b avcodec/h264_mp4toannexb_bsf: Use av_freep() to free spspps_buf
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 3d126ef188)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-12 02:23:36 +02:00
Michael Niedermayer
7ec05ae969 avformat/avidec: Workaround broken initial frame
Fixes Ticket4851

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 3e2ef00394)
2015-10-09 21:15:15 +02:00
wm4
b9841ba98c avformat/hls: fix some cases of HLS streams which require cookies
Broken by commit ba12ba859a. This only
happens with HLS streams which use encryption and require preserving
cookies sent by the server.

Fixes trac issue #4846.
(cherry picked from commit 26eb294007)
2015-10-09 21:14:23 +02:00
Paul B Mahol
39df4d2475 avcodec/pngdec: reset has_trns after every decode_frame_png()
Fixes #4887.

Signed-off-by: Paul B Mahol <onemda@gmail.com>
(cherry picked from commit 1d0487f77f)
2015-10-09 21:14:02 +02:00
Przemysław Sobala
635831c087 lavf/img2dec: Fix memory leak
Fixes #4886

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 01dd7e025c)
2015-10-09 21:13:49 +02:00
wm4
291a2f1ea7 avcodec/mp3: fix skipping zeros
Commits 43bc5cf9 and c5371f77 add code for skipping initial zeros in mp3
packets. This code forgot to report to the user that data was skipped at
all.

Since audio codecs allow partial packet decoding, the user application
has to rely on the return value. It will remove the data reported as
consumed by the decoder, and feed it to the decoder again. This resulted
in the mp3 frame after the zero region to be decoded over and over
again, until the zero region was finally skipped by the application.

Fix this by including the number of skipped bytes in the number of
consumed bytes returned by the decode call.

Fixes trac ticket #4890.
(cherry picked from commit cb1da9fb8d)
2015-10-09 21:12:30 +02:00
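A minimal sketch of the return-value convention the fix relies on; decode_one_frame() is a stub for illustration, the point being that skipped junk must be counted as consumed:

    #include <stdint.h>

    /* Stub standing in for the real frame decoder; returns bytes consumed. */
    static int decode_one_frame(const uint8_t *buf, int size)
    {
        (void)buf;
        return size;
    }

    static int decode_with_skip(const uint8_t *buf, int buf_size)
    {
        int skipped = 0;
        while (skipped < buf_size && buf[skipped] == 0)
            skipped++;                          /* leading zero junk */

        int used = decode_one_frame(buf + skipped, buf_size - skipped);
        if (used < 0)
            return used;
        return skipped + used;  /* report the junk as consumed, so the caller
                                   does not feed the same bytes again */
    }
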
Clément Bœsch
ee1bcd3436 avformat/srtdec: make sure we probe a number
Fixes regression since 7218352e02: WebVTT
files were matching the SRT probing.

(cherry picked from commit 40d9d6de90)
2015-10-09 10:56:31 +02:00
James Almer
408240267a configure: check for ID3D11VideoContext
Should fix compilation with mingw-w64 trunk, where ID3D11VideoDecoder is available
but ID3D11VideoContext still isn't.

Signed-off-by: James Almer <jamrial@gmail.com>
(cherry picked from commit b8e4df46ab)
2015-10-08 17:08:04 -03:00
wm4
eca7b0dcce avformat/vobsub: compare correct packet stream IDs
The stream ID is essentially an arbitrary number defined by the .idx
file headers. They have to match the IDs in the .sub stream. The vobsub
demuxer assumed the IDs would just start from 0, increasing by 1 for
each stream. This is not correct. In the sample I had, the IDs were
starting from 1, leading to no subtitles being displayed at all.

Fix this by using the correct stream ID.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a47ad06baf)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 15:21:45 +02:00
Clément Bœsch
64b659673a avformat/srtdec: more lenient first line probing
Fixes Ticket #4898
(cherry picked from commit 7218352e02)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 15:21:39 +02:00
Clément Bœsch
1d9d300d65 avformat/srtdec: fix number check for the first character
(cherry picked from commit d161a2a72b)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 15:21:24 +02:00
Shivraj Patil
a931ad554d avcodec/mips: build fix for MSA 64bit
Modified datatype of function argument (pitch from int32_t to ptrdiff_t).

Signed-off-by: Shivraj Patil <shivraj.patil@imgtec.com>
Commit in master: 322e960dbf
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 14:32:05 +02:00
Shivraj Patil
7236080d27 avcodec/mips: build fix for MSA
Modified sps and pps access from old HEVCContext(s) structure to newly introduced HEVCParamSets(ps).

Signed-off-by: Shivraj Patil <shivraj.patil@imgtec.com>
Commit in master: b0732b0214
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 14:31:49 +02:00
Andrey Utkin
6dcd2ebd34 avformat/httpauth: Add space after commas in HTTP/RTSP auth header
This fixes access to Grandstream cameras, which return 401 to ffmpeg
otherwise.
VLC sends the Authorization: header with spaces between parameters, and it
is known to work with Grandstream devices and a broad range of other HTTP
and RTSP servers, so the author considers switching to this behaviour safe.
Just for the record: RFC 2617 (HTTP Auth) does not require the spaces, so
this is not a bug in FFmpeg.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit fdb3283872)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
DHE
83d75c70df libavformat/hlsenc: Use of uninitialized memory unlinking old files
Fixes ticket#4900

Signed-off-by: DHE <git@dehacked.net>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 76e3f8242d)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
818ebcbf5c avcodec/x86/sbrdsp: Fix using uninitialized upper 32bit of noise
Fixes crash
Fixes: flicker-1.scout3d21443372922.28.m4a

Found-by: Dale Curtis <dalecurtis@google.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 1b82b934a1)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
837113ab5f avcodec/ffv1dec: Fix off by 1 error in quant_table_count check
Fixes: invalid_read.nut
Found-by: Paul B Mahol <onemda@gmail.com>

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 2d221d9e06)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
97340bdfa3 avcodec/ffv1dec: Explicitly check read_quant_table() return value
Forwards the error code, avoids potential integer overflow

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 10bbf6cf62)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Jeremy James
61fd5a3072 dnxhddata: correct weight tables
CID 1260 (as evidenced by incorrect decoding of a sample from ticket
4876) seems to use incorrect weight tables. It appears those tables
were not zigzag-scanned.

Apply zigzag on weight tables for new CIDs 1258, 1259, and 1260, and
fix an incorrect chroma table for CID 1256.

Fixes last issue from ticket #4876.

Found-by: Christophe Gisquet <christophe.gisquet@gmail.com>
Signed-off-by: Christophe Gisquet <christophe.gisquet@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 428424fe75)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Christophe Gisquet
f5f9c166a1 dnxhddec: decode and use interlace mb flag
This bit is 1 in some samples, and seems to coincide with interlaced
mbs and CID1260. 2008 specs do not know about it, and maintain qscale
is 11 bits. This looks oversized, but may help larger bitdepths.

Currently, it leads to an obviously incorrect qscale value, meaning
its syntax is shifted by 1. However, reading 11 bits also leads to
obviously incorrect decoding: qscale seems to be 10 bits.

However, as most profiles still have 11bits qscale, the feature is
restricted to the CID1260 profile.

The encoder writes 12 bits of syntax, last and first bits always 0,
which is now somewhat inconsistent with the decoder, but ends up with
the same effect (progressive + reserved bit).

Partially fixes ticket #4876.

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 2801a1352d)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Pedro Arthur
01bf0a178d swscale: fix ticket #4877
(cherry picked from commit a8602dde5e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
3cd1be9702 avcodec/rangecoder: Check e
Fixes hang.nut

Found-by: Paul B Mahol <onemda@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit b2955b6c5a)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
d4b1fe72c2 avcodec/ffv1: separate slice_count from max_slice_count
Fix segfault with too large slice_count
Fixes Ticket4879

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit aa6c43f3fd)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Pedro Arthur
a8d0dcbafa swscale: fix ticket 4850
(cherry picked from commit 77367f61b3)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
2a6103a082 cmdutils: Filter dst/srcw/h
Dimensions / pixel formats for scaling must be set through the -s / pix_fmt options
or the scale / format filters. Otherwise there are mismatches between what is
input/output to the scaler and what the scaler is configured for.

Fixes Ticket4856

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a0af9fd954)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Ganesh Ajjanagadde
3fedd64d4b avutil/log: fix zero length gnu_printf format string warning
This should fix a warning reported by a fate client:
http://fate.ffmpeg.org/report.cgi?time=20150917113121&slot=x86_32-linux-gnu-gcc-4.5.1-have_6regs.
Untested.

Signed-off-by: Ganesh Ajjanagadde <gajjanagadde@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 63cdb6e4a5)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Simon Thelen
9bbcd1cc7b lavf/webvttenc: Require webvtt file to contain exactly one WebVTT stream.
Not requiring this can end up producing hilariously broken files
together with -c:s copy (e.g. a webvtt file containing binary subtitle data).

Signed-off-by: Simon Thelen <ffmpeg-dev@c-14.de>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit b84232694e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
9801c9524a swscale/swscale: Fix "unused variable" warning
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 0ae40c5a70)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
08fc0d771a avcodec/mjpegdec: Fix decoding RGBA RCT LJPEG
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 055e56e9f7)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
Michael Niedermayer
a3db85581e MAINTAINERS: add 2.8, drop 2.2
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-10-08 13:15:42 +02:00
James Almer
ddbb8d5eda doc: mention libavcodec can decode Opus natively
Signed-off-by: James Almer <jamrial@gmail.com>
(cherry picked from commit fd9ac48dc8)
2015-09-20 23:30:50 -03:00
Hendrik Leppkes
41aa6b2095 hevc: properly handle no_rasl_output_flag when removing pictures from the DPB
Fixes ticket #4185.

Reviewed-By: Mickael Raulet <Mickael.Raulet@insa-rennes.fr>
Signed-off-by: Hendrik Leppkes <h.leppkes@gmail.com>
2015-09-20 00:18:57 +02:00
Paul B Mahol
fb0d41932d avfilter/af_ladspa: process all channels for nb_handles > 1
Signed-off-by: Paul B Mahol <onemda@gmail.com>
(cherry picked from commit dc1050a3e8)
2015-09-16 12:50:13 +00:00
Ricardo Constantino
aa46ae8848 configure: add libsoxr to swresample's pkgconfig
Fixes linking in FFMS and f265 at least, when ffmpeg is compiled with
libsoxr.

Signed-off-by: Ricardo Constantino <wiiaboo@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 2641eeeefe)
Signed-off-by: Timothy Gu <timothygu99@gmail.com>
2015-09-09 07:41:33 -07:00
Hendrik Schreiber
c3021738fc lavc: Fix compilation with --disable-everything --enable-parser=mpeg4video.
(cherry picked from commit 9d742d23d2)
2015-09-09 16:23:26 +02:00
Michael Niedermayer
b72c184194 avcodec/h264_sei: Remove "Subtitles with data type 0x%02x" sample request
Suggested-by: Carl and Hendrik
2015-09-08 23:02:00 +02:00
Michael Niedermayer
d86c5f8de8 RELEASE_NOTES based on 2.7
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-08 22:33:04 +02:00
周晓勇
0752e44b1f avcodec: loongson delete invalid simple idct put and add optimization
Change-Id: I23a36c65915f01a1cf20e317c14b8eaaa62958b4
Signed-off-by: ZhouXiaoyong <zhouxiaoyong@loongson.cn>

Fixes Decoding of http://loongnix.org/ftp/multimedia/testing/nanocore_720p_24fps_mpeg4_ac3_short.avi

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit a78656a187)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-08 22:31:44 +02:00
Michael Niedermayer
1d42df7292 Add NOA credits
2015-09-08 22:31:44 +02:00
Gwenole Beauchesne
eaabfe8ef8 vaapi: fix local header include.
Signed-off-by: Gwenole Beauchesne <gwenole.beauchesne@intel.com>
(cherry picked from commit aea611dc3e)
2015-09-07 15:32:56 +02:00
Michael Niedermayer
90d29c3d04 Changelog: Add 2.8
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-06 16:00:01 +02:00
Michael Niedermayer
48211b0c0d set version to 2.8
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-06 15:48:55 +02:00
Ganesh Ajjanagadde
aa661d3672 avfilter/af_asyncts: use llabs for int64_t
long may not be 64 bit on all platforms, so labs on int64_t is unsafe.
This fixes a warning reported in:
http://fate.ffmpeg.org/log.cgi?time=20150905071512&log=compile&slot=i386-darwin-clang-polly-3.7

Signed-off-by: Ganesh Ajjanagadde <gajjanagadde@gmail.com>
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit d74123d03e)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-06 12:11:23 +02:00
Zhang Rui
8cd24f8fe7 avformat/async: replace strerror with av_err2str
Fixes CID1322337

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 929451c5cb)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-06 11:56:54 +02:00
Rostislav Pehlivanov
7e853879ce fate: increase the fuzz of the AAC encoder aref test
The test was almost passing on SunOS without yasm, but a fuzz of 5 wasn't enough.

Signed-off-by: Rostislav Pehlivanov <atomnuker@gmail.com>
2015-09-06 00:27:08 +02:00
Michael Niedermayer
f598ca088e doc/APIchanges: Fill in missing fields and correct one lavu version
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 0acd4e75fd)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-05 18:34:01 +02:00
Michael Niedermayer
2710c14a83 doc/APIchanges: add 2.8 cut line
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
(cherry picked from commit 982e235d76)

Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
2015-09-05 18:33:58 +02:00
James Almer
1a56be9cdc avutil: undo FF_API_CRYPTO_CONTEXT deprecation for 2.8 release
There's no consensus yet on whether this deprecation is desired, so it's removed
from this release for the time being.

Signed-off-by: James Almer <jamrial@gmail.com>
2015-09-05 13:02:29 -03:00
4070 changed files with 80316 additions and 251683 deletions

.gitignore

@@ -1,6 +1,5 @@
*.a
*.o
*.o.*
*.d
*.def
*.dll
@@ -18,9 +17,9 @@
*.so.*
*.swp
*.ver
*-example
*-test
*_g
\#*
.\#*
/.config
/.version
/ffmpeg
@@ -30,6 +29,65 @@
/config.*
/coverage.info
/avversion.h
/doc/*.1
/doc/*.3
/doc/*.html
/doc/*.pod
/doc/config.texi
/doc/avoptions_codec.texi
/doc/avoptions_format.texi
/doc/doxy/html/
/doc/examples/avio_dir_cmd
/doc/examples/avio_reading
/doc/examples/decoding_encoding
/doc/examples/demuxing_decoding
/doc/examples/extract_mvs
/doc/examples/filter_audio
/doc/examples/filtering_audio
/doc/examples/filtering_video
/doc/examples/metadata
/doc/examples/muxing
/doc/examples/pc-uninstalled
/doc/examples/remuxing
/doc/examples/resampling_audio
/doc/examples/scaling_video
/doc/examples/transcode_aac
/doc/examples/transcoding
/doc/fate.txt
/doc/print_options
/lcov/
/src
/mapfile
/libavcodec/*_tablegen
/libavcodec/*_tables.c
/libavcodec/*_tables.h
/libavutil/avconfig.h
/libavutil/ffversion.h
/tests/audiogen
/tests/base64
/tests/checkasm/checkasm
/tests/data/
/tests/pixfmts.mak
/tests/rotozoom
/tests/test_copy.ffmeta
/tests/tiny_psnr
/tests/tiny_ssim
/tests/videogen
/tests/vsynth1/
/tools/aviocat
/tools/ffbisect
/tools/bisect.need
/tools/crypto_bench
/tools/cws2fws
/tools/fourcc2pixfmt
/tools/ffescape
/tools/ffeval
/tools/ffhash
/tools/graph2dot
/tools/ismindex
/tools/pktdumper
/tools/probetest
/tools/qt-faststart
/tools/sidxindex
/tools/trasher
/tools/seek_print
/tools/uncoded_frame
/tools/zmqsend

.travis.yml

@@ -1,26 +0,0 @@
language: c
sudo: false
os:
- linux
- osx
addons:
apt:
packages:
- yasm
- diffutils
compiler:
- clang
- gcc
cache:
directories:
- ffmpeg-samples
before_install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update --all; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install yasm; fi
script:
- mkdir -p ffmpeg-samples
- ./configure --samples=ffmpeg-samples --cc=$CC
- make -j 8
- make fate-rsync
- make check -j 8

CONTRIBUTING.md

@@ -1,4 +0,0 @@
# Note to Github users
Patches should be submitted to the [ffmpeg-devel mailing list](https://ffmpeg.org/mailman/listinfo/ffmpeg-devel) using `git format-patch` or `git send-email`. Github pull requests should be avoided because they are not part of our review process and **will be ignored**.
See [https://ffmpeg.org/developer.html#Contributing](https://ffmpeg.org/developer.html#Contributing) for more information.

Changelog

@@ -1,261 +1,130 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.
version 3.2.2:
- ffserver: Check chunk size
- Avoid using the term "file" and prefer "url" in some docs and comments
- avformat/rtmppkt: Check for packet size mismatches
- zmqsend: Initialize ret to 0
- avcodec/flacdec: Fix undefined shift in decode_subframe()
- avcodec/get_bits: Fix get_sbits_long(0)
- avformat/ffmdec: Check media type for chunks
- avcodec/flacdec: Fix signed integer overflow in decode_subframe_fixed()
- avcodec/flacdsp_template: Fix undefined shift in flac_decorrelate_indep_c
- avformat/oggparsespeex: Check frames_per_packet and packet_size
- avformat/utils: Check start/end before computing duration in update_stream_timings()
- avcodec/flac_parser: Update nb_headers_buffered
- avformat/idroqdec: Check chunk_size for being too large
- avcodec/me_cmp: Fix median_sad size
- avformat/utils: Fix type mismatch
- configure: check for strtoull on msvc
- http: move chunk handling from http_read_stream() to http_buf_read().
- http: make length/offset-related variables unsigned
version 2.8.3
- avcodec/cabac: Check initial cabac decoder state
- avcodec/cabac_functions: Fix "left shift of negative value -31767"
- avcodec/h264_slice: Limit max_contexts when slice_context_count is initialized
- rtmpcrypt: Do the xtea decryption in little endian mode
- avformat/matroskadec: Check subtitle stream before dereferencing
- avcodec/pngdec: Replace assert by request for sample for unsupported TRNS cases
- avformat/utils: Do not init parser if probing is unfinished
- avcodec/jpeg2000dec: Fix potential integer overflow with tile dimensions
- avcodec/jpeg2000: Use av_image_check_size() in ff_jpeg2000_init_component()
- avcodec/wmaprodec: Check for overread in decode_packet()
- avcodec/smacker: Check that the data size is a multiple of a sample vector
- avcodec/takdec: Skip last p2 sample (which is unused)
- avcodec/dxtory: Fix input size check in dxtory_decode_v1_410()
- avcodec/dxtory: Fix input size check in dxtory_decode_v1_420()
- avcodec/error_resilience: avoid accessing previous or next frames tables beyond height
- avcodec/dpx: Move need_align to act per line
- avcodec/flashsv: Check size before updating it
- avcodec/ivi: Check image dimensions
- avcodec/utils: Better check for channels in av_get_audio_frame_duration()
- avcodec/jpeg2000dec: Check for duplicate SIZ marker
- aacsbr: don't call sbr_dequant twice without intermediate read_sbr_data
- hqx: correct type and size check of info_offset
- mxfdec: check edit_rate also for physical_track
- avcodec/jpeg2000: Change coord to 32bit to support larger than 32k width or height
- avcodec/jpeg2000dec: Check SIZ dimensions to be within the supported range
- avcodec/jpeg2000: Check comp coords to be within the supported size
- mpegvideo: clear overread in clear_context
- avcodec/avrndec: Use the AVFrame format instead of the context
- dds: disable palette flag for compressed images
- dds: validate compressed source buffer size
- dds: validate source buffer size before copying
- dvdsubdec: validate offset2 similar to offset1
- brstm: reject negative sample rate
- aacps: avoid division by zero in stereo_processing
- softfloat: assert when the argument of av_sqrt_sf is negative
version 3.2.1:
- avcodec/aac_adtstoasc_bsf: validate and forward extradata if the stream is already ASC
- mss2: only use error correction for matching block counts
- softfloat: decrease MIN_EXP to cover full float range
- libopusdec: default to stereo for invalid number of channels
- flvdec: require need_context_update when changing codec id
- pgssubdec: only set w/h/linesize when allocating data
- sbgdec: prevent NULL pointer access
- rmdec: validate block alignment
- smacker: limit recursion depth of smacker_decode_bigtree
- mxfdec: fix NULL pointer dereference in mxf_read_packet_old
- ffmdec: validate codec parameters
- avformat/mpeg: Adjust vid probe threshold to correct mis-detection
- avcodec/ass_split: Change order of operations in ass_split_section()
- avcodec/rawdec: check for side data before checking its size
- avcodec/avpacket: fix leak on realloc in av_packet_add_side_data()
- avformat/apngenc: use the stream parameters extradata if available
- Revert "apngdec: use side data to pass extradata to the decoder"
- ffprobe: fix crash in case -of is specified with an empty string
- libavcodec/exr : fix channel size calculation for uint32 channel
- exr: fix out-of-bounds read
- libschroedingerdec: fix leaking of framewithpts
- libschroedingerdec: don't produce empty frames
- dds: limit 4 bpp handling to AV_PIX_FMT_PAL8
- mlz: limit next_code to data buffer size
- softfloat: handle -INT_MAX correctly
- filmstripdec: correctly check image dimensions
- pnmdec: make sure v is capped by maxval
- smvjpegdec: make sure cur_frame is not negative
- icodec: correctly check avio_read return value
- icodec: fix leaking pkt on error
- dvbsubdec: fix division by zero in compute_default_clut
- proresdec_lgpl: explicitly check coff[3] against slice_data_size
- escape124: reject codebook size 0
- mpegts: prevent division by zero
- matroskadec: fix NULL pointer dereference in webm_dash_manifest_read_header
- mpegaudio_parser: don't return AVERROR_PATCHWELCOME
- mxfdec: fix NULL pointer dereference
- lzf: update pointer p after realloc
- diracdec: check return code of get_buffer_with_edge
- diracdec: clear slice_params_num_buf on allocation failure
- diracdec: use correct buffer for slice_params_buf realloc
- ppc: pixblockdsp: do unaligned block accesses correctly again
- avformat: close parser if codec changed
- fate: add streamcopy test for apng
- apngdec: use side data to pass extradata to the decoder
- mov: immediately return from mov_fix_index without old index entries
- interplayacm: increase bitstream buffer size by AV_INPUT_BUFFER_PADDING_SIZE
- interplayacm: validate number of channels
- interplayacm: check for too large b
version 2.8.2
- various fixes in the aac_fixed decoder
- various fixes in softfloat
- swresample/resample: increase precision for compensation
- lavf/mov: add support for sidx fragment indexes
- avformat/mxfenc: Only store user comment related tags when needed
- tests/fate/avformat: Fix fate-lavf
- doc/ffmpeg: Clarify that the sdp_file option requires an rtp output.
- ffmpeg: Don't try and write sdp info if none of the outputs had an rtp format.
- apng: use correct size for output buffer
- jvdec: avoid unsigned overflow in comparison
- avcodec/jpeg2000dec: Clip all tile coordinates
- avcodec/microdvddec: Check for string end in 'P' case
- avcodec/dirac_parser: Fix undefined memcpy() use
- avformat/xmv: Discard remainder of packet on error
- avformat/xmv: factor return check out of if/else
- avcodec/mpeg12dec: Do not call show_bits() with invalid bits
- avcodec/faxcompr: Add missing runs check in decode_uncompressed()
- libavutil/channel_layout: Check strtol*() for failure
- avformat/mpegts: Only start probing data streams within probe_packets
- avcodec/hevc_ps: Check chroma_format_idc
- avcodec/ffv1dec: Check for 0 quant tables
- avcodec/mjpegdec: Reinitialize IDCT on BPP changes
- avcodec/mjpegdec: Check index in ljpeg_decode_yuv_scan() before using it
- avutil/file_open: avoid file handle inheritance on Windows
- avcodec/h264_slice: Disable slice threads if there are multiple access units in a packet
- avformat/hls: update cookies on setcookie response
- opusdec: Don't run vector_fmul_scalar on zero length arrays
- avcodec/opusdec: Fix extra samples read index
- avcodec/ffv1: Initialize vlc_state on allocation
- avcodec/ffv1dec: update progress in case of broken pointer chains
- avcodec/ffv1dec: Clear slice coordinates if they are invalid or slice header decoding fails for other reasons
- rtsp: Allow $ as interleaved packet indicator before a complete response header
- videodsp: don't overread edges in vfix3 emu_edge.
- avformat/mp3dec: improve junk skipping heuristic
- concatdec: fix file_start_time calculation regression
- avcodec: loongson optimize h264dsp idct and loop filter with mmi
- avcodec/jpeg2000dec: Clear properties in jpeg2000_dec_cleanup() too
- avformat/hls: add support for EXT-X-MAP
- avformat/hls: fix segment selection regression on track changes of live streams
- configure: Require libkvazaar < 0.7.
- avcodec/vp8: Do not use num_coeff_partitions in thread/buffer setup
version 2.8.1:
- swscale: fix ticket #4881
- doc: fix spelling errors
- configure: make sure LTO does not optimize out the test functions
- fate: add apng encoding/muxing test
- apng: use side data to pass extradata to muxer
- avcodec/mpeg4videodec: Workaround interlaced mpeg4 edge MC bug
- avcodec/mpegvideo: Fix edge emu buffer overlap with interlaced mpeg4
- avcodec/rv40: Test remaining space in loop of get_dimension()
- avcodec/ituh263dec: Avoid spending a long time in slice sync
- avcodec/movtextdec: Add error message for tsmb_size check
- avcodec/movtextdec: Fix tsmb_size check==0 check
- avcodec/movtextdec: Fix potential integer overflow
- ffmpeg: Fix bsf corrupting merged side data
- avcodec/sunrast: Fix input buffer pointer check
- avcodec/tscc: Check side data size before use
- avcodec/rscc: Fix constant
- avcodec/rawdec: Check side data size before use
- avcodec/rscc: Check side data size before use
- avcodec/msvideo1: Check side data size before use
- avcodec/qpeg: Check side data size before use
- avcodec/qtrle: Check side data size before use
- avcodec/msrle: Check side data size before use
- avcodec/kmvc: Check side data size before use
- avcodec/idcinvideo: Check side data size before use
- avcodec/cinepak: Check side data size before use
- avcodec/8bps: Check side data size before use
- avformat/flvdec: Fix regression losing streams
- avformat/hls: Add missing error check for avcodec_parameters_copy()
- avformat/hls: Fix probing mpegts audio streams that use probing
- avformat/hls: Factor copying stream info to a separate function
version 3.2:
- libopenmpt demuxer
- tee protocol
- Changed metadata print option to accept general urls
- Alias muxer for Ogg Video (.ogv)
- VP8 in Ogg muxing
- curves filter doesn't automatically insert points at x=0 and x=1 anymore
- 16-bit support in curves filter and selectivecolor filter
- OpenH264 decoder wrapper
- MediaCodec H.264/HEVC/MPEG-4/VP8/VP9 hwaccel
- True Audio (TTA) muxer
- crystalizer audio filter
- acrusher audio filter
- bitplanenoise video filter
- floating point support in als decoder
- fifo muxer
- maskedclamp filter
- hysteresis filter
- lut2 filter
- yuvtestsrc filter
- CUDA CUVID H.263/VP8/VP9/10 bit HEVC (Dithered) Decoding
- vaguedenoiser filter
- added threads option per filter instance
- weave filter
- gblur filter
- avgblur filter
- sobel and prewitt filter
- MediaCodec HEVC/MPEG-4/VP8/VP9 decoding
- Meridian Lossless Packing (MLP) / TrueHD encoder
- Non-Local Means (nlmeans) denoising filter
- sdl2 output device and ffplay support
- sdl1 output device and sdl1 support removed
- extended mov edit list support
- libfaac encoder removed
- Matroska muxer now writes CRC32 elements by default in all Level 1 elements
- sidedata video and asidedata audio filter
- Changed mapping of rtp MIME type G726 to codec g726le.
version 3.1:
- DXVA2-accelerated HEVC Main10 decoding
- fieldhint filter
- loop video filter and aloop audio filter
- Bob Weaver deinterlacing filter
- firequalizer filter
- datascope filter
- bench and abench filters
- ciescope filter
- protocol blacklisting API
- MediaCodec H264 decoding
- VC-2 HQ RTP payload format (draft v1) depacketizer and packetizer
- VP9 RTP payload format (draft v2) packetizer
- AudioToolbox audio decoders
- AudioToolbox audio encoders
- coreimage filter (GPU based image filtering on OSX)
- libdcadec removed
- bitstream filter for extracting DTS core
- ADPCM IMA DAT4 decoder
- musx demuxer
- aix demuxer
- remap filter
- hash and framehash muxers
- colorspace filter
- hdcd filter
- readvitc filter
- VAAPI-accelerated format conversion and scaling
- libnpp/CUDA-accelerated format conversion and scaling
- Duck TrueMotion 2.0 Real Time decoder
- Wideband Single-bit Data (WSD) demuxer
- VAAPI-accelerated H.264/HEVC/MJPEG encoding
- DTS Express (LBR) decoder
- Generic OpenMAX IL encoder with support for Raspberry Pi
- IFF ANIM demuxer & decoder
- Direct Stream Transfer (DST) decoder
- loudnorm filter
- MTAF demuxer and decoder
- MagicYUV decoder
- OpenExr improvements (tile data and B44/B44A support)
- BitJazz SheerVideo decoder
- CUDA CUVID H264/HEVC decoder
- 10-bit depth support in native utvideo decoder
- libutvideo wrapper removed
- YUY2 Lossless Codec decoder
- VideoToolbox H.264 encoder
version 3.0:
- Common Encryption (CENC) MP4 encoding and decoding support
- DXV decoding
- extrastereo filter
- ocr filter
- alimiter filter
- stereowiden filter
- stereotools filter
- rubberband filter
- tremolo filter
- agate filter
- chromakey filter
- maskedmerge filter
- Screenpresso SPV1 decoding
- chromaprint fingerprinting muxer
- ffplay dynamic volume control
- displace filter
- selectivecolor filter
- extensive native AAC encoder improvements and removal of experimental flag
- ADPCM PSX decoder
- 3dostr, dcstr, fsb, genh, vag, xvag, ads, msf, svag & vpk demuxer
- zscale filter
- wve demuxer
- zero-copy Intel QSV transcoding in ffmpeg
- shuffleframes filter
- SDX2 DPCM decoder
- vibrato filter
- innoHeim/Rsupport Screen Capture Codec decoder
- ADPCM AICA decoder
- Interplay ACM demuxer and audio decoder
- XMA1 & XMA2 decoder
- realtime filter
- anoisesrc audio filter source
- IVR demuxer
- compensationdelay filter
- acompressor filter
- support encoding 16-bit RLE SGI images
- apulsator filter
- sidechaingate audio filter
- mipsdspr1 option has been renamed to mipsdsp
- aemphasis filter
- mips32r5 option has been removed
- mips64r6 option has been removed
- DXVA2-accelerated VP9 decoding
- SOFAlizer: virtual binaural acoustics filter
- VAAPI VP9 hwaccel
- audio high-order multiband parametric equalizer
- automatic bitstream filtering
- showspectrumpic filter
- libstagefright support removed
- spectrumsynth filter
- ahistogram filter
- only seek with the right mouse button in ffplay
- toggle full screen when double-clicking with the left mouse button in ffplay
- afftfilt filter
- convolution filter
- libquvi support removed
- support for dvaudio in wav and avi
- libaacplus and libvo-aacenc support removed
- Cineform HD decoder
- new DCA decoder with full support for DTS-HD extensions
- significant performance improvements in Windows Television (WTV) demuxer
- nnedi deinterlacer
- streamselect video and astreamselect audio filter
- swaprect filter
- metadata video and ametadata audio filter
- SMPTE VC-2 HQ profile support for the Dirac decoder
- SMPTE VC-2 native encoder supporting the HQ profile
- hls: only seek if there is an offset
- asfdec: add more checks for size left in asf packet buffer
- asfdec: alloc enough space for storing name in asf_read_metadata_obj
- avcodec/pngdec: Check blend_op.
- h264_mp4toannexb: fix pps offfset fault when there are more than one sps in avcc
- avcodec/h264_mp4toannexb_bsf: Use av_freep() to free spspps_buf
- avformat/avidec: Workaround broken initial frame
- avformat/hls: fix some cases of HLS streams which require cookies
- avcodec/pngdec: reset has_trns after every decode_frame_png()
- lavf/img2dec: Fix memory leak
- avcodec/mp3: fix skipping zeros
- avformat/srtdec: make sure we probe a number
- configure: check for ID3D11VideoContext
- avformat/vobsub: compare correct packet stream IDs
- avformat/srtdec: more lenient first line probing
- avformat/srtdec: fix number check for the first character
- avcodec/mips: build fix for MSA 64bit
- avcodec/mips: build fix for MSA
- avformat/httpauth: Add space after commas in HTTP/RTSP auth header
- libavformat/hlsenc: Use of uninitialized memory unlinking old files
- avcodec/x86/sbrdsp: Fix using uninitialized upper 32bit of noise
- avcodec/ffv1dec: Fix off by 1 error in quant_table_count check
- avcodec/ffv1dec: Explicitly check read_quant_table() return value
- dnxhddata: correct weight tables
- dnxhddec: decode and use interlace mb flag
- swscale: fix ticket #4877
- avcodec/rangecoder: Check e
- avcodec/ffv1: separate slice_count from max_slice_count
- swscale: fix ticket 4850
- cmdutils: Filter dst/srcw/h
- avutil/log: fix zero length gnu_printf format string warning
- lavf/webvttenc: Require webvtt file to contain exactly one WebVTT stream.
- swscale/swscale: Fix "unused variable" warning
- avcodec/mjpegdec: Fix decoding RGBA RCT LJPEG
- MAINTAINERS: add 2.8, drop 2.2
- doc: mention libavcodec can decode Opus natively
- hevc: properly handle no_rasl_output_flag when removing pictures from the DPB
- avfilter/af_ladspa: process all channels for nb_handles > 1
- configure: add libsoxr to swresample's pkgconfig
- lavc: Fix compilation with --disable-everything --enable-parser=mpeg4video.
version 2.8:
- colorkey video filter
@@ -1057,8 +926,8 @@ version 0.8:
- showinfo filter added
- SMPTE 302M AES3 audio decoder
- Apple Core Audio Format muxer
- 9 bits and 10 bits per sample support in the H.264 decoder
- 9 bits and 10 bits FFV1 encoding / decoding
- 9bit and 10bit per sample support in the H.264 decoder
- 9bit and 10bit FFV1 encoding / decoding
- split filter added
- select filter added
- sdl output device added
@@ -1351,7 +1220,7 @@ version 0.4.9-pre1:
- rate distorted optimal lambda->qp support
- AAC encoding with libfaac
- Sunplus JPEG codec (SP5X) support
- use Lagrange multiplier instead of QP for ratecontrol
- use Lagrange multipler instead of QP for ratecontrol
- Theora/VP3 decoding support
- XA and ADX ADPCM codecs
- export MPEG-2 active display area / pan scan

LICENSE.md

@@ -1,4 +1,4 @@
# License
#FFmpeg:
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
or later (LGPL v2.1+). Read the file `COPYING.LGPLv2.1` for details. Some other
@@ -13,18 +13,16 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are:
- libpostproc
- optional x86 optimization in the files
- `libavcodec/x86/flac_dsp_gpl.asm`
- `libavcodec/x86/idct_mmx.c`
- `libavfilter/x86/vf_removegrain.asm`
- optional x86 optimizations in the files
- `libavcodec/x86/flac_dsp_gpl.asm`
- `libavcodec/x86/idct_mmx.c`
- `libavfilter/x86/vf_removegrain.asm`
- libutvideo encoding/decoding wrappers in
`libavcodec/libutvideo*.cpp`
- the X11 grabber in `libavdevice/x11grab.c`
- the following building and testing tools
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/swresample-test.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the swresample test app in
`libswresample/swresample-test.c`
- the `texi2pod.pl` tool
- the following filters in libavfilter:
- `f_ebur128.c`
- `vf_blackframe.c`
@@ -49,9 +47,9 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_pp.c`
- `vf_pp7.c`
- `vf_pullup.c`
- `vf_repeatfields.c`
- `vf_sab.c`
- `vf_smartblur.c`
- `vf_repeatfields.c`
- `vf_spp.c`
- `vf_stereo3d.c`
- `vf_super2xsai.c`
@@ -75,17 +73,19 @@ There are a handful of files under other licensing terms, namely:
* `tests/reference.pnm` is under the expat license.
## External libraries
external libraries
==================
FFmpeg can be combined with a number of external libraries, which sometimes
affect the licensing of binaries resulting from the combination.
### Compatible libraries
compatible libraries
--------------------
The following libraries are under GPL:
- frei0r
- libcdio
- librubberband
- libutvideo
- libvidstab
- libx264
- libx265
@@ -100,23 +100,14 @@ license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.
### Incompatible libraries
incompatible libraries
----------------------
There are certain libraries you can combine with FFmpeg whose licenses are not
compatible with the GPL and/or the LGPL. If you wish to enable these
libraries, even in circumstances that their license may be incompatible, pass
`--enable-nonfree` to configure. But note that if you enable any of these
libraries the resulting binary will be under a complex license mix that is
more restrictive than the LGPL and that may result in additional obligations.
It is possible that these restrictions cause the resulting binary to be
unredistributable.
The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.
The NVENC library, while its header file is licensed under the compatible MIT
license, requires a proprietary binary blob at run time, and is deemed to be
incompatible with the GPL. We are not certain if it is compatible with the
LGPL, but we require `--enable-nonfree` even with LGPL configurations in case
it is not.
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
are incompatible with the GPLv2 and v3. We do not know for certain if their
licenses are compatible with the LGPL.
If you wish to enable these libraries, pass `--enable-nonfree` to configure.
But note that if you enable any of these libraries the resulting binary will
be under a complex license mix that is more restrictive than the LGPL and that
may result in additional obligations. It is possible that these
restrictions cause the resulting binary to be unredistributeable.

MAINTAINERS

@@ -43,7 +43,8 @@ Miscellaneous Areas
===================
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
build system (configure, makefiles) Diego Biurrun, Mans Rullgard
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Lou Logan
presets Robert Swain
metadata subsystem Aurelien Jacobs
release management Michael Niedermayer
@@ -55,7 +56,7 @@ Communication
website Deby Barbara Lepage
fate.ffmpeg.org Timothy Gu
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
mailing lists Baptiste Coudurier, Lou Logan
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
Launchpad Timothy Gu
@@ -70,7 +71,6 @@ Internal Interfaces:
libavutil/common.h Michael Niedermayer
Other:
aes_ctr.c, aes_ctr.h Eran Kornblau
bprint Nicolas George
bswap.h
des Reimar Doeffinger
@@ -78,7 +78,6 @@ Other:
eval.c, eval.h Michael Niedermayer
float_dsp Loren Merritt
hash Reimar Doeffinger
hwcontext_cuda* Timo Rothenpieler
intfloat* Michael Niedermayer
integer.c, integer.h Michael Niedermayer
lzo Reimar Doeffinger
@@ -89,6 +88,7 @@ Other:
rational.c, rational.h Michael Niedermayer
rc4 Reimar Doeffinger
ripemd.c, ripemd.h James Almer
timecode Clément Bœsch
libavcodec
@@ -116,6 +116,8 @@ Generic Parts:
faandct.c, faandct.h Michael Niedermayer
Golomb coding:
golomb.c, golomb.h Michael Niedermayer
LPC:
lpc.c, lpc.h Justin Ruggles
motion estimation:
motion* Michael Niedermayer
rate control:
@@ -136,37 +138,46 @@ Codecs:
8bps.c Roberto Togni
8svx.c Jaikrishnan Menon
aacenc*, aaccoder.c Rostislav Pehlivanov
aasc.c Kostya Shishkov
ac3* Justin Ruggles
alacenc.c Jaikrishnan Menon
alsdec.c Thilo Borgmann, Umair Khan
alsdec.c Thilo Borgmann
apedec.c Kostya Shishkov
ass* Aurelien Jacobs
asv* Michael Niedermayer
atrac3* Benjamin Larsson
atrac3plus* Maxim Poliakovski
audiotoolbox* Rodger Combs
bgmc.c, bgmc.h Thilo Borgmann
bink.c Kostya Shishkov
binkaudio.c Peter Ross
bmp.c Mans Rullgard, Kostya Shishkov
cavs* Stefan Gehrer
cdxl.c Paul B Mahol
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cinepakenc.c Rl / Aetey G.T. AB
ccaption_dec.c Anshul Maheshwari, Aman Gupta
ccaption_dec.c Anshul Maheshwari
cljr Alex Beregszaszi
cllc.c Derek Buitenhuis
cook.c, cookdata.h Benjamin Larsson
cpia.c Stephan Hilb
crystalhd.c Philip Langdale
cscd.c Reimar Doeffinger
cuvid.c Timo Rothenpieler
dirac* Rostislav Pehlivanov
dca.c Kostya Shishkov, Benjamin Larsson
dnxhd* Baptiste Coudurier
dpcm.c Mike Melanson
dss_sp.c Oleksij Rempel
dss_sp.c Oleksij Rempel, Michael Niedermayer
dv.c Roman Shaposhnik
dvbsubdec.c Anshul Maheshwari
dxa.c Kostya Shishkov
eacmv*, eaidct*, eat* Peter Ross
evrc* Paul B Mahol
exif.c, exif.h Thilo Borgmann
ffv1* Michael Niedermayer
ffwavesynth.c Nicolas George
fifo.c Jan Sebechlebsky
fic.c Derek Buitenhuis
flac* Justin Ruggles
flashsv* Benjamin Larsson
flicvideo.c Mike Melanson
g722.c Martin Storsjo
g726.c Roman Shaposhnik
@@ -174,13 +185,18 @@ Codecs:
h261* Michael Niedermayer
h263* Michael Niedermayer
h264* Loren Merritt, Michael Niedermayer
hap* Tom Butterworth
huffyuv* Michael Niedermayer, Christophe Gisquet
idcinvideo.c Mike Melanson
imc* Benjamin Larsson
indeo2* Kostya Shishkov
indeo5* Kostya Shishkov
interplayvideo.c Mike Melanson
jni*, ffjni* Matthieu Bouron
ivi* Kostya Shishkov
jacosub* Clément Bœsch
jpeg2000* Nicolas Bertrand
jpeg_ls.c Kostya Shishkov
jvdec.c Peter Ross
kmvc.c Kostya Shishkov
lcl*.c Roberto Togni, Reimar Doeffinger
libcelt_dec.c Nicolas George
libdirac* David Conrad
@@ -189,31 +205,40 @@ Codecs:
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libschroedinger* David Conrad
libspeexdec.c Justin Ruggles
libtheoraenc.c David Conrad
libutvideo* Derek Buitenhuis
libvorbis.c David Conrad
libvpx* James Zern
libx264.c Mans Rullgard, Jason Garrett-Glaser
libx265.c Derek Buitenhuis
libxavs.c Stefan Gehrer
libzvbi-teletextdec.c Marton Balint
loco.c Kostya Shishkov
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
mimic.c Ramiro Polla
mjpeg*.c Michael Niedermayer
mlp* Ramiro Polla, Jai Luthra
mlp* Ramiro Polla
mmvideo.c Peter Ross
mpc* Kostya Shishkov
mpeg12.c, mpeg12data.h Michael Niedermayer
mpegvideo.c, mpegvideo.h Michael Niedermayer
mqc* Nicolas Bertrand
msmpeg4.c, msmpeg4data.h Michael Niedermayer
msrle.c Mike Melanson
msvideo1.c Mike Melanson
nellymoserdec.c Benjamin Larsson
nuv.c Reimar Doeffinger
nvenc* Timo Rothenpieler
nvenc.c Timo Rothenpieler
paf.* Paul B Mahol
pcx.c Ivo van Poorten
pgssubdec.c Reimar Doeffinger
ptx.c Ivo van Poorten
qcelp* Reynaldo H. Verdejo Pinochet
qdm2.c, qdm2data.h Roberto Togni
qdm2.c, qdm2data.h Roberto Togni, Benjamin Larsson
qdrw.c Kostya Shishkov
qpeg.c Kostya Shishkov
qsv* Ivan Uskov
qtrle.c Mike Melanson
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
@@ -222,8 +247,10 @@ Codecs:
rpza.c Roberto Togni
rtjpeg.c, rtjpeg.h Reimar Doeffinger
rv10.c Michael Niedermayer
rv4* Christophe Gisquet
rv3* Kostya Shishkov
rv4* Kostya Shishkov, Christophe Gisquet
s3tc* Ivo van Poorten
smacker.c Kostya Shishkov
smc.c Mike Melanson
smvjpegdec.c Ash Hughes
snow* Michael Niedermayer, Loren Merritt
@@ -232,41 +259,54 @@ Codecs:
sunrast.c Ivo van Poorten
svq3.c Michael Niedermayer
tak* Paul B Mahol
targa.c Kostya Shishkov
tiff.c Kostya Shishkov
truemotion1* Mike Melanson
truemotion2* Kostya Shishkov
truespeech.c Kostya Shishkov
tscc.c Kostya Shishkov
tta.c Alex Beregszaszi, Jaikrishnan Menon
ttaenc.c Paul B Mahol
txd.c Ivo van Poorten
vc1* Christophe Gisquet
vc2* Rostislav Pehlivanov
ulti* Kostya Shishkov
v410*.c Derek Buitenhuis
vb.c Kostya Shishkov
vble.c Derek Buitenhuis
vc1* Kostya Shishkov, Christophe Gisquet
vcr1.c Michael Niedermayer
vda_h264_dec.c Xidorn Quan
videotoolboxenc.c Rick Kern
vima.c Paul B Mahol
vmnc.c Kostya Shishkov
vorbisdec.c Denes Balatoni, David Conrad
vorbisenc.c Oded Shimon
vp3* Mike Melanson
vp5 Aurelien Jacobs
vp6 Aurelien Jacobs
vp8 David Conrad, Ronald Bultje
vp9 Ronald Bultje
vp8 David Conrad, Jason Garrett-Glaser, Ronald Bultje
vp9 Ronald Bultje, Clément Bœsch
vqavideo.c Mike Melanson
wavpack.c Kostya Shishkov
wmaprodec.c Sascha Sommer
wmavoice.c Ronald S. Bultje
wmv2.c Michael Niedermayer
wnv1.c Kostya Shishkov
xan.c Mike Melanson
xbm* Paul B Mahol
xface Stefano Sabatini
xl.c Kostya Shishkov
xvmc.c Ivan Kalvachev
xwd* Paul B Mahol
zerocodec.c Derek Buitenhuis
zmbv* Kostya Shishkov
Hardware acceleration:
crystalhd.c Philip Langdale
dxva2* Hendrik Leppkes, Laurent Aimar
mediacodec* Matthieu Bouron
libstagefright.cpp Mohamed Naufal
vaapi* Gwenole Beauchesne
vaapi_encode* Mark Thompson
vda* Sebastien Zwickert
vdpau* Philip Langdale, Carl Eugen Hoyos
videotoolbox* Rick Kern
videotoolbox* Sebastien Zwickert
libavdevice
@@ -287,7 +327,6 @@ libavdevice
pulse_audio_enc.c Lukasz Marek
qtkit.m Thilo Borgmann
sdl Stefano Sabatini
sdl2.c Josh de Kock
v4l2.c Giorgio Vazzana
vfwcap.c Ramiro Polla
xv.c Lukasz Marek
@@ -298,8 +337,6 @@ libavfilter
Generic parts:
graphdump.c Nicolas George
motion_estimation.c Davinder Singh
Filters:
f_drawgraph.c Paul B Mahol
af_adelay.c Paul B Mahol
@@ -309,14 +346,12 @@ Filters:
af_aphaser.c Paul B Mahol
af_aresample.c Michael Niedermayer
af_astats.c Paul B Mahol
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_biquads.c Paul B Mahol
af_chorus.c Paul B Mahol
af_compand.c Paul B Mahol
af_firequalizer.c Muhammad Faiz
af_hdcd.c Burt P.
af_ladspa.c Paul B Mahol
af_loudnorm.c Kyle Swanson
af_pan.c Nicolas George
af_sidechaincompress.c Paul B Mahol
af_silenceremove.c Paul B Mahol
@@ -324,15 +359,13 @@ Filters:
avf_avectorscope.c Paul B Mahol
avf_showcqt.c Muhammad Faiz
vf_blend.c Paul B Mahol
vf_chromakey.c Timo Rothenpieler
vf_colorchannelmixer.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
vf_colorkey.c Timo Rothenpieler
vf_colorlevels.c Paul B Mahol
vf_coreimage.m Thilo Borgmann
vf_deband.c Paul B Mahol
vf_dejudder.c Nicholas Robbins
vf_delogo.c Jean Delvare (CC <jdelvare@suse.com>)
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
vf_drawbox.c/drawgrid Andrey Utkin
vf_extractplanes.c Paul B Mahol
vf_histogram.c Paul B Mahol
@@ -341,12 +374,9 @@ Filters:
vf_il.c Paul B Mahol
vf_lenscorrection.c Daniel Oberhoff
vf_mergeplanes.c Paul B Mahol
vf_mestimate.c Davinder Singh
vf_minterpolate.c Davinder Singh
vf_neighbor.c Paul B Mahol
vf_psnr.c Paul B Mahol
vf_random.c Paul B Mahol
vf_readvitc.c Tobias Rapp (CC t.rapp at noa-archive dot com)
vf_scale.c Michael Niedermayer
vf_separatefields.c Paul B Mahol
vf_ssim.c Paul B Mahol
@@ -366,7 +396,6 @@ Generic parts:
libavformat/avformat.h Michael Niedermayer
Utility Code:
libavformat/utils.c Michael Niedermayer
Text Subtitles Clément Bœsch
Muxers/Demuxers:
@@ -376,12 +405,13 @@ Muxers/Demuxers:
afc.c Paul B Mahol
aiffdec.c Baptiste Coudurier, Matthieu Bouron
aiffenc.c Baptiste Coudurier, Matthieu Bouron
ape.c Kostya Shishkov
apngdec.c Benoit Fouet
ass* Aurelien Jacobs
astdec.c Paul B Mahol
astenc.c James Almer
avi* Michael Niedermayer
avisynth.c Stephen Hutchinson
avisynth.c AvxSynth Team (avxsynth.testing at gmail dot com)
avr.c Paul B Mahol
bink.c Peter Ross
brstm.c Paul B Mahol
@@ -389,18 +419,20 @@ Muxers/Demuxers:
cdxl.c Paul B Mahol
crc.c Michael Niedermayer
daud.c Reimar Doeffinger
dss.c Oleksij Rempel
dss.c Oleksij Rempel, Michael Niedermayer
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
dxa.c Kostya Shishkov
electronicarts.c Peter Ross
epafdec.c Paul B Mahol
ffm* Baptiste Coudurier
flac* Justin Ruggles
flic.c Mike Melanson
flvdec.c, flvenc.c Michael Niedermayer
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
hls encryption (hlsenc.c) Christian Suloway, Steven Liu
hls encryption (hlsenc.c) Christian Suloway
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
@@ -408,10 +440,10 @@ Muxers/Demuxers:
ipmovie.c Mike Melanson
ircam* Paul B Mahol
iss.c Stefan Gehrer
jacosub* Clément Bœsch
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
libopenmpt.c Josh de Kock
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
lxfdec.c Tomas Härdin
@@ -423,9 +455,9 @@ Muxers/Demuxers:
mgsts.c Paul B Mahol
microdvd* Aurelien Jacobs
mm.c Peter Ross
mov.c Baptiste Coudurier
mov.c Michael Niedermayer, Baptiste Coudurier
movenc.c Baptiste Coudurier, Matthieu Bouron
movenccenc.c Eran Kornblau
mpc.c Kostya Shishkov
mpeg.c Michael Niedermayer
mpegenc.c Michael Niedermayer
mpegts.c Marton Balint
@@ -441,7 +473,6 @@ Muxers/Demuxers:
oggdec.c, oggdec.h David Conrad
oggenc.c Baptiste Coudurier
oggparse*.c David Conrad
oggparsedaala* Rostislav Pehlivanov
oma.c Maxim Poliakovski
paf.c Paul B Mahol
psxstr.c Mike Melanson
@@ -451,7 +482,8 @@ Muxers/Demuxers:
raw.c Michael Niedermayer
rdt.c Ronald S. Bultje
rl2.c Sascha Sommer
rmdec.c, rmenc.c Ronald S. Bultje
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
rtmp* Kostya Shishkov
rtp.c, rtpenc.c Martin Storsjo
rtpdec_ac3.* Gilles Chanteperdrix
rtpdec_dv.* Thomas Volkert
@@ -459,13 +491,15 @@ Muxers/Demuxers:
rtpdec_hevc.*, rtpenc_hevc.* Thomas Volkert
rtpdec_mpa_robust.* Gilles Chanteperdrix
rtpdec_asf.* Ronald S. Bultje
rtpdec_vc2hq.*, rtpenc_vc2hq.* Thomas Volkert
rtpdec_vp9.c Thomas Volkert
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
rtsp.c Luca Barbato
sbgdec.c Nicolas George
sdp.c Martin Storsjo
segafilm.c Mike Melanson
segment.c Stefano Sabatini
siff.c Kostya Shishkov
smacker.c Kostya Shishkov
smjpeg* Paul B Mahol
spdif* Anssi Hannula
srtdec.c Aurelien Jacobs
@@ -480,6 +514,7 @@ Muxers/Demuxers:
webvtt* Matthew J Heaney
westwood.c Mike Melanson
wtv.c Peter Ross
wv.c Kostya Shishkov
wvenc.c Paul B Mahol
Protocols:
@@ -510,17 +545,20 @@ Resamplers:
Operating systems / CPU architectures
=====================================
Alpha Falk Hueffner
MIPS Nedeljko Babic
Alpha Mans Rullgard, Falk Hueffner
ARM Mans Rullgard
AVR32 Mans Rullgard
MIPS Mans Rullgard, Nedeljko Babic
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
Amiga / PowerPC Colin Ward
Linux / PowerPC Luca Barbato
Windows MinGW Alex Beregszaszi, Ramiro Polla
Windows Cygwin Victor Paesa
Windows MSVC Matthew Oliver, Hendrik Leppkes
Windows ICL Matthew Oliver
ADI/Blackfin DSP Marc Hoffman
Sparc Roman Shaposhnik
OS/2 KO Myung-Hun
x86 Michael Niedermayer
Releases
@@ -530,6 +568,7 @@ Releases
2.7 Michael Niedermayer
2.6 Michael Niedermayer
2.5 Michael Niedermayer
2.4 Michael Niedermayer
If you want to maintain an older release, please contact us
@@ -539,6 +578,7 @@ GnuPG Fingerprints of maintainers and contributors
Alexander Strasser 1C96 78B7 83CB 8AA7 9AF5 D1EB A7D8 A57B A876 E58F
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
Anton Khirnov 6D0C 6625 56F8 65D1 E5F5 814B B50A 1241 C067 07AB
Ash Hughes 694D 43D2 D180 C7C7 6421 ABD3 A641 D0B7 623D 6029
Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
@@ -546,16 +586,17 @@ Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
Justin Ruggles 3136 ECC0 C10D 6C04 5F43 CA29 FCBE CD2A 3787 1EBF
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Luca Barbato 6677 4209 213C 8843 5B67 29E7 E84C 78C2 84E9 0E34
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E

Makefile

@@ -4,7 +4,6 @@ include config.mak
vpath %.c $(SRC_PATH)
vpath %.cpp $(SRC_PATH)
vpath %.h $(SRC_PATH)
vpath %.inc $(SRC_PATH)
vpath %.m $(SRC_PATH)
vpath %.S $(SRC_PATH)
vpath %.asm $(SRC_PATH)
@@ -30,18 +29,15 @@ $(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog) += cmdutils.o))
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_opencl.o))
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffmpeg-$(CONFIG_LIBMFX) += ffmpeg_qsv.o
OBJS-ffmpeg-$(CONFIG_VAAPI) += ffmpeg_vaapi.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_CUVID) += ffmpeg_cuvid.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffserver += ffserver_config.o
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher uncoded_frame
TOOLS-$(CONFIG_ZLIB) += cws2fws
@@ -61,8 +57,7 @@ FFLIBS := avutil
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README
SKIPHEADERS = cmdutils_common_opts.h \
compat/w32pthreads.h
SKIPHEADERS = cmdutils_common_opts.h compat/w32pthreads.h
include $(SRC_PATH)/common.mak
@@ -89,7 +84,7 @@ SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MSA-OBJS \
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
define RESET
@@ -180,15 +175,11 @@ clean::
$(RM) $(CLEANSUFFIXES)
$(RM) $(CLEANSUFFIXES:%=tools/%)
$(RM) -r coverage-html
$(RM) -rf coverage.info coverage.info.in lcov
$(RM) -rf coverage.info lcov
distclean::
$(RM) $(DISTCLEANSUFFIXES)
$(RM) config.* .config libavutil/avconfig.h .version mapfile avversion.h version.h libavutil/ffversion.h libavcodec/codec_names.h libavcodec/bsf_list.c libavformat/protocol_list.c
ifeq ($(SRC_LINK),src)
$(RM) src
endif
$(RM) -rf doc/examples/pc-uninstalled
$(RM) config.* .config libavutil/avconfig.h .version avversion.h version.h libavutil/ffversion.h libavcodec/codec_names.h
config:
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)

README.md

@@ -16,12 +16,12 @@ such as audio, video, subtitles and related metadata.
## Tools
* [ffmpeg](https://ffmpeg.org/ffmpeg.html) is a command line toolbox to
* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
manipulate, convert and stream multimedia content.
* [ffplay](https://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](https://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
multimedia content.
* [ffserver](https://ffmpeg.org/ffserver.html) is a multimedia streaming server
* [ffserver](http://ffmpeg.org/ffserver.html) is a multimedia streaming server
for live broadcasts.
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
@@ -29,8 +29,8 @@ such as audio, video, subtitles and related metadata.
The offline documentation is available in the **doc/** directory.
The online documentation is available in the main [website](https://ffmpeg.org)
and in the [wiki](https://trac.ffmpeg.org).
The online documentation is available in the main [website](http://ffmpeg.org)
and in the [wiki](http://trac.ffmpeg.org).
### Examples
@@ -40,9 +40,3 @@ Coding examples are available in the **doc/examples** directory.
FFmpeg codebase is mainly LGPL-licensed with optional components licensed under
GPL. Please refer to the LICENSE file for detailed information.
## Contributing
Patches should be submitted to the ffmpeg-devel mailing list using
`git format-patch` or `git send-email`. Github pull requests should be
avoided because they are not part of our review process and will be ignored.

VERSION

@@ -1 +1 @@
3.2.2
2.8.3

RELEASE_NOTES

@@ -1,10 +1,10 @@
┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 3.2 "Hypatia" │
│ RELEASE NOTES for FFmpeg 2.8 "Feynman" │
└────────────────────────────────────────┘
The FFmpeg Project proudly presents FFmpeg 3.2 "Hypatia", about 4
months after the release of FFmpeg 3.1.
The FFmpeg Project proudly presents FFmpeg 2.8 "Feynman", about 3
months after the release of FFmpeg 2.7.
A complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.

arch.mak

@@ -5,7 +5,7 @@ OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
OBJS-$(HAVE_MSA) += $(MSA-OBJS) $(MSA-OBJS-yes)
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)

cmdutils.c

@@ -52,7 +52,6 @@
#include "libavutil/opt.h"
#include "libavutil/cpu.h"
#include "libavutil/ffversion.h"
#include "libavutil/version.h"
#include "cmdutils.h"
#if CONFIG_NETWORK
#include "libavformat/network.h"
@@ -61,9 +60,6 @@
#include <sys/time.h>
#include <sys/resource.h>
#endif
#ifdef _WIN32
#include <windows.h>
#endif
static int init_report(const char *env);
@@ -110,15 +106,6 @@ static void log_callback_report(void *ptr, int level, const char *fmt, va_list v
}
}
void init_dynload(void)
{
#ifdef _WIN32
/* Calling SetDllDirectory with the empty string (but not NULL) removes the
* current working directory from the DLL search path as a security pre-caution. */
SetDllDirectory("");
#endif
}
static void (*program_exit)(int ret);
void register_exit(void (*cb)(int ret))
@@ -546,12 +533,7 @@ int opt_default(void *optctx, const char *opt, const char *arg)
#if CONFIG_AVRESAMPLE
const AVClass *rc = avresample_get_class();
#endif
#if CONFIG_SWSCALE
const AVClass *sc = sws_get_class();
#endif
#if CONFIG_SWRESAMPLE
const AVClass *swr_class = swr_get_class();
#endif
const AVClass *sc, *swr_class;
if (!strcmp(opt, "debug") || !strcmp(opt, "fdebug"))
av_log_set_level(AV_LOG_DEBUG);
@@ -575,6 +557,7 @@ int opt_default(void *optctx, const char *opt, const char *arg)
consumed = 1;
}
#if CONFIG_SWSCALE
sc = sws_get_class();
if (!consumed && (o = opt_find(&sc, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
struct SwsContext *sws = sws_alloc_context();
@@ -602,6 +585,7 @@ int opt_default(void *optctx, const char *opt, const char *arg)
}
#endif
#if CONFIG_SWRESAMPLE
swr_class = swr_get_class();
if (!consumed && (o=opt_find(&swr_class, opt, NULL, 0,
AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ))) {
struct SwrContext *swr = swr_alloc();
@@ -1071,8 +1055,7 @@ static int warned_cfg = 0;
LIB##LIBNAME##_VERSION_MAJOR, \
LIB##LIBNAME##_VERSION_MINOR, \
LIB##LIBNAME##_VERSION_MICRO, \
AV_VERSION_MAJOR(version), AV_VERSION_MINOR(version),\
AV_VERSION_MICRO(version)); \
version >> 16, version >> 8 & 0xff, version & 0xff); \
} \
if (flags & SHOW_CONFIG) { \
const char *cfg = libname##_configuration(); \
@@ -1091,15 +1074,15 @@ static int warned_cfg = 0;
static void print_all_libs_info(int flags, int level)
{
PRINT_LIB_INFO(avutil, AVUTIL, flags, level);
PRINT_LIB_INFO(avcodec, AVCODEC, flags, level);
PRINT_LIB_INFO(avformat, AVFORMAT, flags, level);
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
PRINT_LIB_INFO(avutil, AVUTIL, flags, level);
PRINT_LIB_INFO(avcodec, AVCODEC, flags, level);
PRINT_LIB_INFO(avformat, AVFORMAT, flags, level);
PRINT_LIB_INFO(avdevice, AVDEVICE, flags, level);
PRINT_LIB_INFO(avfilter, AVFILTER, flags, level);
PRINT_LIB_INFO(avresample, AVRESAMPLE, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
PRINT_LIB_INFO(swresample, SWRESAMPLE, flags, level);
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
PRINT_LIB_INFO(swscale, SWSCALE, flags, level);
PRINT_LIB_INFO(swresample,SWRESAMPLE, flags, level);
PRINT_LIB_INFO(postproc, POSTPROC, flags, level);
}
static void print_program_info(int flags, int level)
@@ -1336,47 +1319,16 @@ static void print_codec(const AVCodec *c)
printf("%s %s [%s]:\n", encoder ? "Encoder" : "Decoder", c->name,
c->long_name ? c->long_name : "");
printf(" General capabilities: ");
if (c->capabilities & AV_CODEC_CAP_DRAW_HORIZ_BAND)
printf("horizband ");
if (c->capabilities & AV_CODEC_CAP_DR1)
printf("dr1 ");
if (c->capabilities & AV_CODEC_CAP_TRUNCATED)
printf("trunc ");
if (c->capabilities & AV_CODEC_CAP_DELAY)
printf("delay ");
if (c->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME)
printf("small ");
if (c->capabilities & AV_CODEC_CAP_SUBFRAMES)
printf("subframes ");
if (c->capabilities & AV_CODEC_CAP_EXPERIMENTAL)
printf("exp ");
if (c->capabilities & AV_CODEC_CAP_CHANNEL_CONF)
printf("chconf ");
if (c->capabilities & AV_CODEC_CAP_PARAM_CHANGE)
printf("paramchange ");
if (c->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
printf("variable ");
if (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS))
printf("threads ");
if (!c->capabilities)
printf("none");
printf("\n");
if (c->type == AVMEDIA_TYPE_VIDEO ||
c->type == AVMEDIA_TYPE_AUDIO) {
printf(" Threading capabilities: ");
switch (c->capabilities & (AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_AUTO_THREADS)) {
AV_CODEC_CAP_SLICE_THREADS)) {
case AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS: printf("frame and slice"); break;
case AV_CODEC_CAP_FRAME_THREADS: printf("frame"); break;
case AV_CODEC_CAP_SLICE_THREADS: printf("slice"); break;
case AV_CODEC_CAP_AUTO_THREADS : printf("auto"); break;
default: printf("none"); break;
default: printf("no"); break;
}
printf("\n");
}
@@ -1435,7 +1387,7 @@ static int compare_codec_desc(const void *a, const void *b)
const AVCodecDescriptor * const *da = a;
const AVCodecDescriptor * const *db = b;
return (*da)->type != (*db)->type ? FFDIFFSIGN((*da)->type, (*db)->type) :
return (*da)->type != (*db)->type ? (*da)->type - (*db)->type :
strcmp((*da)->name, (*db)->name);
}
@@ -1579,11 +1531,10 @@ int show_encoders(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
{
const AVBitStreamFilter *bsf = NULL;
void *opaque = NULL;
AVBitStreamFilter *bsf = NULL;
printf("Bitstream filters:\n");
while ((bsf = av_bsf_next(&opaque)))
while ((bsf = av_bitstream_filter_next(bsf)))
printf("%s\n", bsf->name);
printf("\n");
return 0;
@@ -1638,7 +1589,7 @@ int show_filters(void *optctx, const char *opt, const char *arg)
( i && (filter->flags & AVFILTER_FLAG_DYNAMIC_OUTPUTS))) ? 'N' : '|';
}
*descr_cur = 0;
printf(" %c%c%c %-17s %-10s %s\n",
printf(" %c%c%c %-16s %-10s %s\n",
filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE ? 'T' : '.',
filter->flags & AVFILTER_FLAG_SLICE_THREADS ? 'S' : '.',
filter->process_command ? 'C' : '.',
@@ -1994,7 +1945,7 @@ AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
codec = s->oformat ? avcodec_find_encoder(codec_id)
: avcodec_find_decoder(codec_id);
switch (st->codecpar->codec_type) {
switch (st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
prefix = 'v';
flags |= AV_OPT_FLAG_VIDEO_PARAM;
@@ -2052,7 +2003,7 @@ AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
return NULL;
}
for (i = 0; i < s->nb_streams; i++)
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codec->codec_id,
s, s->streams[i], NULL);
return opts;
}
@@ -2112,7 +2063,7 @@ static int print_device_sources(AVInputFormat *fmt, AVDictionary *opts)
if (!fmt || !fmt->priv_class || !AV_IS_INPUT_DEVICE(fmt->priv_class->category))
return AVERROR(EINVAL);
printf("Auto-detected sources for %s:\n", fmt->name);
printf("Audo-detected sources for %s:\n", fmt->name);
if (!fmt->get_device_list) {
ret = AVERROR(ENOSYS);
printf("Cannot list sources. Not implemented.\n");
@@ -2142,7 +2093,7 @@ static int print_device_sinks(AVOutputFormat *fmt, AVDictionary *opts)
if (!fmt || !fmt->priv_class || !AV_IS_OUTPUT_DEVICE(fmt->priv_class->category))
return AVERROR(EINVAL);
printf("Auto-detected sinks for %s:\n", fmt->name);
printf("Audo-detected sinks for %s:\n", fmt->name);
if (!fmt->get_device_list) {
ret = AVERROR(ENOSYS);
printf("Cannot list sinks. Not implemented.\n");

cmdutils.h

@@ -19,8 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CMDUTILS_H
#define CMDUTILS_H
#ifndef FFMPEG_CMDUTILS_H
#define FFMPEG_CMDUTILS_H
#include <stdint.h>
@@ -61,11 +61,6 @@ void register_exit(void (*cb)(int ret));
*/
void exit_program(int ret) av_noreturn;
/**
* Initialize dynamic library loading
*/
void init_dynload(void);
/**
* Initialize the cmdutils option system, in particular
* allocate the *_opts contexts.
@@ -450,13 +445,13 @@ int show_devices(void *optctx, const char *opt, const char *arg);
#if CONFIG_AVDEVICE
/**
* Print a listing containing autodetected sinks of the output device.
* Print a listing containing audodetected sinks of the output device.
* Device name with options may be passed as an argument to limit results.
*/
int show_sinks(void *optctx, const char *opt, const char *arg);
/**
* Print a listing containing autodetected sources of the input device.
* Print a listing containing audodetected sources of the input device.
* Device name with options may be passed as an argument to limit results.
*/
int show_sources(void *optctx, const char *opt, const char *arg);

cmdutils_opencl.c

@@ -206,9 +206,7 @@ end:
static int compare_ocl_device_desc(const void *a, const void *b)
{
const OpenCLDeviceBenchmark* va = (const OpenCLDeviceBenchmark*)a;
const OpenCLDeviceBenchmark* vb = (const OpenCLDeviceBenchmark*)b;
return FFDIFFSIGN(va->runtime , vb->runtime);
return ((OpenCLDeviceBenchmark*)a)->runtime - ((OpenCLDeviceBenchmark*)b)->runtime;
}
int opt_opencl_bench(void *optctx, const char *opt, const char *arg)

common.mak

@@ -18,7 +18,7 @@ ifndef SUBDIR
ifndef V
Q = @
ECHO = printf "$(1)\t%s\n" $(2)
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS YASM AR LD STRIP CP WINDRES
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP WINDRES
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
MSG = $@
@@ -32,14 +32,12 @@ endif
ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
# NASM requires -I path terminated with /
IFLAGS := -I. -I$(SRC_LINK)/
IFLAGS := -I. -I$(SRC_PATH)/
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
CFLAGS += $(ECFLAGS)
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
OBJCFLAGS += $(EOBJCFLAGS)
OBJCCFLAGS = $(CPPFLAGS) $(CFLAGS) $(OBJCFLAGS)
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
CXXFLAGS := $(CPPFLAGS) $(CFLAGS) $(CXXFLAGS)
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm
HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
@@ -47,13 +45,12 @@ LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
define COMPILE
$(call $(1)DEP,$(1))
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $<
endef
COMPILE_C = $(call COMPILE,CC)
COMPILE_CXX = $(call COMPILE,CXX)
COMPILE_S = $(call COMPILE,AS)
COMPILE_M = $(call COMPILE,OBJCC)
COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.o: %.c
@@ -63,10 +60,10 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
$(COMPILE_CXX)
%.o: %.m
$(COMPILE_M)
$(COMPILE_C)
%.s: %.c
$(CC) $(CCFLAGS) -S -o $@ $<
$(CC) $(CPPFLAGS) $(CFLAGS) -S -o $@ $<
%.o: %.S
$(COMPILE_S)
@@ -83,7 +80,10 @@ COMPILE_HOSTC = $(call COMPILE,HOSTCC)
%.h.c:
$(Q)echo '#include "$*.h"' >$@
%.c %.h %.ver: TAG = GEN
%.ver: %.v
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
%.c %.h: TAG = GEN
# Dummy rule to stop make trying to rebuild removed or renamed headers
%.h:
@@ -109,8 +109,8 @@ FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)tests/%$(EXESUF))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)%) $(TESTPROGS:%=$(SUBDIR)%-test.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)%-test$(EXESUF))
HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)
HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF))
TOOLS += $(TOOLS-yes)
@@ -147,13 +147,15 @@ $(TOOLOBJS): | tools
OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.o *~ *.h.c *.gcda *.gcno *.map *.ver *.ho *$(DEFAULT_YASMD).asm
CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda *$(DEFAULT_YASMD).asm
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
define RULES
clean::
$(RM) $(HOSTPROGS) $(TESTPROGS) $(TOOLS)
$(RM) $(OBJS) $(OBJS:.o=.d) $(OBJS:.o=$(DEFAULT_YASMD).d)
$(RM) $(HOSTPROGS)
$(RM) $(TOOLS)
endef
$(eval $(RULES))

compat/aix/math.h

@@ -19,8 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_AIX_MATH_H
#define COMPAT_AIX_MATH_H
#ifndef FFMPEG_COMPAT_AIX_MATH_H
#define FFMPEG_COMPAT_AIX_MATH_H
#define class class_in_math_h_causes_problems
@@ -28,4 +28,4 @@
#undef class
#endif /* COMPAT_AIX_MATH_H */
#endif /* FFMPEG_COMPAT_AIX_MATH_H */


@@ -75,149 +75,54 @@ enum {AVS_PLANAR_Y=1<<0,
AVS_PLANAR_B_ALIGNED=AVS_PLANAR_B|AVS_PLANAR_ALIGNED};
// Colorspace properties.
enum {
AVS_CS_YUVA = 1 << 27,
AVS_CS_BGR = 1 << 28,
AVS_CS_YUV = 1 << 29,
AVS_CS_INTERLEAVED = 1 << 30,
AVS_CS_PLANAR = 1 << 31,
enum {AVS_CS_BGR = 1<<28,
AVS_CS_YUV = 1<<29,
AVS_CS_INTERLEAVED = 1<<30,
AVS_CS_PLANAR = 1<<31,
AVS_CS_SHIFT_SUB_WIDTH = 0,
AVS_CS_SHIFT_SUB_HEIGHT = 8,
AVS_CS_SHIFT_SAMPLE_BITS = 16,
AVS_CS_SHIFT_SUB_WIDTH = 0,
AVS_CS_SHIFT_SUB_HEIGHT = 8,
AVS_CS_SHIFT_SAMPLE_BITS = 16,
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
AVS_CS_UPLANEFIRST = 1 << 4, // I420
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
AVS_CS_UPLANEFIRST = 1 << 4, // I420
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_10 = 5 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_12 = 6 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_14 = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_YUVA | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
AVS_CS_PLANAR_FILTER = ~(AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST),
AVS_CS_RGB_TYPE = 1 << 0,
AVS_CS_RGBA_TYPE = 1 << 1,
AVS_CS_GENERIC_YUV420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0 planar
AVS_CS_GENERIC_YUV422 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2 planar
AVS_CS_GENERIC_YUV444 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // 4:4:4 planar
AVS_CS_GENERIC_Y = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV, // Y only (4:0:0)
AVS_CS_GENERIC_RGBP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGB_TYPE, // planar RGB
AVS_CS_GENERIC_RGBAP = AVS_CS_PLANAR | AVS_CS_BGR | AVS_CS_RGBA_TYPE, // planar RGBA
AVS_CS_GENERIC_YUVA420 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // 4:2:0:A planar
AVS_CS_GENERIC_YUVA422 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // 4:2:2:A planar
AVS_CS_GENERIC_YUVA444 = AVS_CS_PLANAR | AVS_CS_YUVA | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1 }; // 4:4:4:A planar
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
AVS_CS_PLANAR_FILTER = ~( AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST )};
// Specific colorformats
enum {
AVS_CS_UNKNOWN = 0,
AVS_CS_BGR24 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR32 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
// AVS_CS_YV12 = 1<<3 Reserved
// AVS_CS_I420 = 1<<4 Reserved
AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED,
AVS_CS_YV24 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_8, // YVU 4:4:4 planar
AVS_CS_YV16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:2 planar
AVS_CS_YV12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_8, // YVU 4:2:0 planar
AVS_CS_YV24 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // YVU 4:4:4 planar
AVS_CS_YV16 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:2 planar
AVS_CS_YV12 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:0 planar
AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar
AVS_CS_IYUV = AVS_CS_I420,
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar
AVS_CS_Y8 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_8, // Y 4:0:0 planar
//-------------------------
// AVS16: new planar constants go live! Experimental PF 160613
// 10-12-14 bit + planar RGB + BGR48/64 160725
AVS_CS_YUV444P10 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_10, // YUV 4:4:4 10bit samples
AVS_CS_YUV422P10 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:2 10bit samples
AVS_CS_YUV420P10 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_10, // YUV 4:2:0 10bit samples
AVS_CS_Y10 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_10, // Y 4:0:0 10bit samples
AVS_CS_YUV444P12 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_12, // YUV 4:4:4 12bit samples
AVS_CS_YUV422P12 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:2 12bit samples
AVS_CS_YUV420P12 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_12, // YUV 4:2:0 12bit samples
AVS_CS_Y12 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_12, // Y 4:0:0 12bit samples
AVS_CS_YUV444P14 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_14, // YUV 4:4:4 14bit samples
AVS_CS_YUV422P14 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:2 14bit samples
AVS_CS_YUV420P14 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_14, // YUV 4:2:0 14bit samples
AVS_CS_Y14 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_14, // Y 4:0:0 14bit samples
AVS_CS_YUV444P16 = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_16, // YUV 4:4:4 16bit samples
AVS_CS_YUV422P16 = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:2 16bit samples
AVS_CS_YUV420P16 = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_16, // YUV 4:2:0 16bit samples
AVS_CS_Y16 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_16, // Y 4:0:0 16bit samples
// 32 bit samples (float)
AVS_CS_YUV444PS = AVS_CS_GENERIC_YUV444 | AVS_CS_SAMPLE_BITS_32, // YUV 4:4:4 32bit samples
AVS_CS_YUV422PS = AVS_CS_GENERIC_YUV422 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:2 32bit samples
AVS_CS_YUV420PS = AVS_CS_GENERIC_YUV420 | AVS_CS_SAMPLE_BITS_32, // YUV 4:2:0 32bit samples
AVS_CS_Y32 = AVS_CS_GENERIC_Y | AVS_CS_SAMPLE_BITS_32, // Y 4:0:0 32bit samples
// RGB packed
AVS_CS_BGR48 = AVS_CS_RGB_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 3x16 bit
AVS_CS_BGR64 = AVS_CS_RGBA_TYPE | AVS_CS_BGR | AVS_CS_INTERLEAVED | AVS_CS_SAMPLE_BITS_16, // BGR 4x16 bit
// no packed 32 bit (float) support for these legacy types
// RGB planar
AVS_CS_RGBP = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_8, // Planar RGB 8 bit samples
AVS_CS_RGBP10 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_10, // Planar RGB 10bit samples
AVS_CS_RGBP12 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_12, // Planar RGB 12bit samples
AVS_CS_RGBP14 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_14, // Planar RGB 14bit samples
AVS_CS_RGBP16 = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_16, // Planar RGB 16bit samples
AVS_CS_RGBPS = AVS_CS_GENERIC_RGBP | AVS_CS_SAMPLE_BITS_32, // Planar RGB 32bit samples
// RGBA planar
AVS_CS_RGBAP = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_8, // Planar RGBA 8 bit samples
AVS_CS_RGBAP10 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_10, // Planar RGBA 10bit samples
AVS_CS_RGBAP12 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_12, // Planar RGBA 12bit samples
AVS_CS_RGBAP14 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_14, // Planar RGBA 14bit samples
AVS_CS_RGBAP16 = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_16, // Planar RGBA 16bit samples
AVS_CS_RGBAPS = AVS_CS_GENERIC_RGBAP | AVS_CS_SAMPLE_BITS_32, // Planar RGBA 32bit samples
// Planar YUVA
AVS_CS_YUVA444 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:4:4 8bit samples
AVS_CS_YUVA422 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:2 8bit samples
AVS_CS_YUVA420 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_8, // YUVA 4:2:0 8bit samples
AVS_CS_YUVA444P10 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:4:4 10bit samples
AVS_CS_YUVA422P10 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:2 10bit samples
AVS_CS_YUVA420P10 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_10, // YUVA 4:2:0 10bit samples
AVS_CS_YUVA444P12 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:4:4 12bit samples
AVS_CS_YUVA422P12 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:2 12bit samples
AVS_CS_YUVA420P12 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_12, // YUVA 4:2:0 12bit samples
AVS_CS_YUVA444P14 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:4:4 14bit samples
AVS_CS_YUVA422P14 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:2 14bit samples
AVS_CS_YUVA420P14 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_14, // YUVA 4:2:0 14bit samples
AVS_CS_YUVA444P16 = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:4:4 16bit samples
AVS_CS_YUVA422P16 = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:2 16bit samples
AVS_CS_YUVA420P16 = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_16, // YUVA 4:2:0 16bit samples
AVS_CS_YUVA444PS = AVS_CS_GENERIC_YUVA444 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:4:4 32bit samples
AVS_CS_YUVA422PS = AVS_CS_GENERIC_YUVA422 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:2 32bit samples
AVS_CS_YUVA420PS = AVS_CS_GENERIC_YUVA420 | AVS_CS_SAMPLE_BITS_32, // YUVA 4:2:0 32bit samples
AVS_CS_Y8 = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 // Y 4:0:0 planar
};
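/*
 * Illustrative helper, not part of avisynth_c.h: a sketch of how the
 * sample-bits field of a pixel_type value can be decoded with the
 * AVS_CS_SAMPLE_BITS_* constants defined above (for code including the newer
 * header shown here). The name example_sample_bits is an assumption; the
 * header itself exposes similar information through avs_bits_per_component().
 */
static int example_sample_bits(int pixel_type)
{
    switch (pixel_type & AVS_CS_SAMPLE_BITS_MASK) {
    case AVS_CS_SAMPLE_BITS_8:  return 8;
    case AVS_CS_SAMPLE_BITS_10: return 10;
    case AVS_CS_SAMPLE_BITS_12: return 12;
    case AVS_CS_SAMPLE_BITS_14: return 14;
    case AVS_CS_SAMPLE_BITS_16: return 16;
    case AVS_CS_SAMPLE_BITS_32: return 32; /* 32-bit float samples */
    default:                    return 0;
    }
}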
enum {
@@ -342,10 +247,10 @@ AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
{ return !!(p->pixel_type&AVS_CS_BGR); }
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
{ return ((p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); }
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
{ return ((p->pixel_type&AVS_CS_BGR32)==AVS_CS_BGR32) && ((p->pixel_type & AVS_CS_SAMPLE_BITS_MASK) == AVS_CS_SAMPLE_BITS_8); }
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
{ return !!(p->pixel_type&AVS_CS_YUV ); }
@@ -353,10 +258,6 @@ AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
AVSC_API(int, avs_is_rgb48)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_rgb64)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yv24)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yv16)(const AVS_VideoInfo * p);
@@ -367,38 +268,6 @@ AVSC_API(int, avs_is_yv411)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y8)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv444p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv422p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv420p16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y16)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv444ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv422ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuv420ps)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y32)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_444)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_422)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_420)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_y)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_yuva)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_planar_rgb)(const AVS_VideoInfo * p);
AVSC_API(int, avs_is_planar_rgba)(const AVS_VideoInfo * p);
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
{ return ((p->image_type & property)==property ); }
@@ -496,12 +365,6 @@ AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
}
#endif
AVSC_API(int, avs_num_components)(const AVS_VideoInfo * p);
AVSC_API(int, avs_component_size)(const AVS_VideoInfo * p);
AVSC_API(int, avs_bits_per_component)(const AVS_VideoInfo * p);
/////////////////////////////////////////////////////////////////////
//
// AVS_VideoFrame
@@ -665,7 +528,7 @@ AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
#endif
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = (short)size; return v; }
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
/////////////////////////////////////////////////////////////////////
//
@@ -898,28 +761,11 @@ struct AVS_Library {
AVSC_DECLARE_FUNC(avs_vsprintf);
AVSC_DECLARE_FUNC(avs_get_error);
AVSC_DECLARE_FUNC(avs_is_rgb48);
AVSC_DECLARE_FUNC(avs_is_rgb64);
AVSC_DECLARE_FUNC(avs_is_yv24);
AVSC_DECLARE_FUNC(avs_is_yv16);
AVSC_DECLARE_FUNC(avs_is_yv12);
AVSC_DECLARE_FUNC(avs_is_yv411);
AVSC_DECLARE_FUNC(avs_is_y8);
AVSC_DECLARE_FUNC(avs_is_yuv444p16);
AVSC_DECLARE_FUNC(avs_is_yuv422p16);
AVSC_DECLARE_FUNC(avs_is_yuv420p16);
AVSC_DECLARE_FUNC(avs_is_y16);
AVSC_DECLARE_FUNC(avs_is_yuv444ps);
AVSC_DECLARE_FUNC(avs_is_yuv422ps);
AVSC_DECLARE_FUNC(avs_is_yuv420ps);
AVSC_DECLARE_FUNC(avs_is_y32);
AVSC_DECLARE_FUNC(avs_is_444);
AVSC_DECLARE_FUNC(avs_is_422);
AVSC_DECLARE_FUNC(avs_is_420);
AVSC_DECLARE_FUNC(avs_is_y);
AVSC_DECLARE_FUNC(avs_is_yuva);
AVSC_DECLARE_FUNC(avs_is_planar_rgb);
AVSC_DECLARE_FUNC(avs_is_planar_rgba);
AVSC_DECLARE_FUNC(avs_is_color_space);
AVSC_DECLARE_FUNC(avs_get_plane_width_subsampling);
@@ -934,11 +780,6 @@ struct AVS_Library {
AVSC_DECLARE_FUNC(avs_get_read_ptr_p);
AVSC_DECLARE_FUNC(avs_is_writable);
AVSC_DECLARE_FUNC(avs_get_write_ptr_p);
AVSC_DECLARE_FUNC(avs_num_components);
AVSC_DECLARE_FUNC(avs_component_size);
AVSC_DECLARE_FUNC(avs_bits_per_component);
};
#undef AVSC_DECLARE_FUNC
@@ -999,28 +840,11 @@ AVSC_INLINE AVS_Library * avs_load_library() {
AVSC_LOAD_FUNC(avs_vsprintf);
AVSC_LOAD_FUNC(avs_get_error);
AVSC_LOAD_FUNC(avs_is_rgb48);
AVSC_LOAD_FUNC(avs_is_rgb64);
AVSC_LOAD_FUNC(avs_is_yv24);
AVSC_LOAD_FUNC(avs_is_yv16);
AVSC_LOAD_FUNC(avs_is_yv12);
AVSC_LOAD_FUNC(avs_is_yv411);
AVSC_LOAD_FUNC(avs_is_y8);
AVSC_LOAD_FUNC(avs_is_yuv444p16);
AVSC_LOAD_FUNC(avs_is_yuv422p16);
AVSC_LOAD_FUNC(avs_is_yuv420p16);
AVSC_LOAD_FUNC(avs_is_y16);
AVSC_LOAD_FUNC(avs_is_yuv444ps);
AVSC_LOAD_FUNC(avs_is_yuv422ps);
AVSC_LOAD_FUNC(avs_is_yuv420ps);
AVSC_LOAD_FUNC(avs_is_y32);
AVSC_LOAD_FUNC(avs_is_444);
AVSC_LOAD_FUNC(avs_is_422);
AVSC_LOAD_FUNC(avs_is_420);
AVSC_LOAD_FUNC(avs_is_y);
AVSC_LOAD_FUNC(avs_is_yuva);
AVSC_LOAD_FUNC(avs_is_planar_rgb);
AVSC_LOAD_FUNC(avs_is_planar_rgba);
AVSC_LOAD_FUNC(avs_is_color_space);
AVSC_LOAD_FUNC(avs_get_plane_width_subsampling);
@@ -1036,12 +860,6 @@ AVSC_INLINE AVS_Library * avs_load_library() {
AVSC_LOAD_FUNC(avs_is_writable);
AVSC_LOAD_FUNC(avs_get_write_ptr_p);
AVSC_LOAD_FUNC(avs_num_components);
AVSC_LOAD_FUNC(avs_component_size);
AVSC_LOAD_FUNC(avs_bits_per_component);
#undef __AVSC_STRINGIFY
#undef AVSC_STRINGIFY
#undef AVSC_LOAD_FUNC


@@ -1,827 +0,0 @@
/*
* This copyright notice applies to this header file only:
*
* Copyright (c) 2010-2016 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the software, and to permit persons to whom the
* software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file cuviddec.h
* NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
* \date 2015-2016
* This file contains constants, structure definitions and function prototypes used for decoding.
*/
#if !defined(__CUDA_VIDEO_H__)
#define __CUDA_VIDEO_H__
#ifndef __cuda_cuda_h__
#include <cuda.h>
#endif // __cuda_cuda_h__
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
#if (CUDA_VERSION >= 3020) && (!defined(CUDA_FORCE_API_VERSION) || (CUDA_FORCE_API_VERSION >= 3020))
#define __CUVID_DEVPTR64
#endif
#endif
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
typedef void *CUvideodecoder;
typedef struct _CUcontextlock_st *CUvideoctxlock;
/**
* \addtogroup VIDEO_DECODER Video Decoder
* @{
*/
/*!
* \enum cudaVideoCodec
* Video Codec Enums
*/
typedef enum cudaVideoCodec_enum {
cudaVideoCodec_MPEG1=0, /**< MPEG1 */
cudaVideoCodec_MPEG2, /**< MPEG2 */
cudaVideoCodec_MPEG4, /**< MPEG4 */
cudaVideoCodec_VC1, /**< VC1 */
cudaVideoCodec_H264, /**< H264 */
cudaVideoCodec_JPEG, /**< JPEG */
cudaVideoCodec_H264_SVC, /**< H264-SVC */
cudaVideoCodec_H264_MVC, /**< H264-MVC */
cudaVideoCodec_HEVC, /**< HEVC */
cudaVideoCodec_VP8, /**< VP8 */
cudaVideoCodec_VP9, /**< VP9 */
cudaVideoCodec_NumCodecs, /**< Max Codecs */
// Uncompressed YUV
cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */
cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */
cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */
cudaVideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), /**< YUYV/YUY2 (4:2:2) */
cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */
} cudaVideoCodec;
/*!
* \enum cudaVideoSurfaceFormat
* Video Surface Formats Enums
*/
typedef enum cudaVideoSurfaceFormat_enum {
cudaVideoSurfaceFormat_NV12=0 /**< NV12 (currently the only supported output format) */
} cudaVideoSurfaceFormat;
/*!
* \enum cudaVideoDeinterlaceMode
* Deinterlacing Modes Enums
*/
typedef enum cudaVideoDeinterlaceMode_enum {
cudaVideoDeinterlaceMode_Weave=0, /**< Weave both fields (no deinterlacing) */
cudaVideoDeinterlaceMode_Bob, /**< Drop one field */
cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */
} cudaVideoDeinterlaceMode;
/*!
* \enum cudaVideoChromaFormat
* Chroma Formats Enums
*/
typedef enum cudaVideoChromaFormat_enum {
cudaVideoChromaFormat_Monochrome=0, /**< MonoChrome */
cudaVideoChromaFormat_420, /**< 4:2:0 */
cudaVideoChromaFormat_422, /**< 4:2:2 */
cudaVideoChromaFormat_444 /**< 4:4:4 */
} cudaVideoChromaFormat;
/*!
* \enum cudaVideoCreateFlags
* Decoder Flags Enums
*/
typedef enum cudaVideoCreateFlags_enum {
cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */
cudaVideoCreate_PreferCUDA = 0x01, /**< Use a CUDA-based decoder if faster than dedicated engines (requires a valid vidLock object for multi-threading) */
cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */
cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */
} cudaVideoCreateFlags;
/*!
* \struct CUVIDDECODECREATEINFO
* Struct used in create decoder
*/
typedef struct _CUVIDDECODECREATEINFO
{
unsigned long ulWidth; /**< Coded Sequence Width */
unsigned long ulHeight; /**< Coded Sequence Height */
unsigned long ulNumDecodeSurfaces; /**< Maximum number of internal decode surfaces */
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
cudaVideoChromaFormat ChromaFormat; /**< cudaVideoChromaFormat_XXX (only 4:2:0 is currently supported) */
unsigned long ulCreationFlags; /**< Decoder creation flags (cudaVideoCreateFlags_XXX) */
unsigned long bitDepthMinus8;
unsigned long Reserved1[4]; /**< Reserved for future use - set to zero */
/**
* area of the frame that should be displayed
*/
struct {
short left;
short top;
short right;
short bottom;
} display_area;
cudaVideoSurfaceFormat OutputFormat; /**< cudaVideoSurfaceFormat_XXX */
cudaVideoDeinterlaceMode DeinterlaceMode; /**< cudaVideoDeinterlaceMode_XXX */
unsigned long ulTargetWidth; /**< Post-processed Output Width (Should be aligned to 2) */
unsigned long ulTargetHeight; /**< Post-processed Output Height (Should be aligned to 2) */
unsigned long ulNumOutputSurfaces; /**< Maximum number of output surfaces simultaneously mapped */
CUvideoctxlock vidLock; /**< If non-NULL, context lock used for synchronizing ownership of the cuda context */
/**
* target rectangle in the output frame (for aspect ratio conversion)
* if a null rectangle is specified, {0,0,ulTargetWidth,ulTargetHeight} will be used
*/
struct {
short left;
short top;
short right;
short bottom;
} target_rect;
unsigned long Reserved2[5]; /**< Reserved for future use - set to zero */
} CUVIDDECODECREATEINFO;
/*!
* \struct CUVIDH264DPBENTRY
* H.264 DPB Entry
*/
typedef struct _CUVIDH264DPBENTRY
{
int PicIdx; /**< picture index of reference frame */
int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */
int is_long_term; /**< 0=short term reference, 1=long term reference */
int not_existing; /**< non-existing reference frame (corresponding PicIdx should be set to -1) */
int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */
int FieldOrderCnt[2]; /**< field order count of top and bottom fields */
} CUVIDH264DPBENTRY;
/*!
* \struct CUVIDH264MVCEXT
* H.264 MVC Picture Parameters Ext
*/
typedef struct _CUVIDH264MVCEXT
{
int num_views_minus1;
int view_id;
unsigned char inter_view_flag;
unsigned char num_inter_view_refs_l0;
unsigned char num_inter_view_refs_l1;
unsigned char MVCReserved8Bits;
int InterViewRefsL0[16];
int InterViewRefsL1[16];
} CUVIDH264MVCEXT;
/*!
* \struct CUVIDH264SVCEXT
* H.264 SVC Picture Parameters Ext
*/
typedef struct _CUVIDH264SVCEXT
{
unsigned char profile_idc;
unsigned char level_idc;
unsigned char DQId;
unsigned char DQIdMax;
unsigned char disable_inter_layer_deblocking_filter_idc;
unsigned char ref_layer_chroma_phase_y_plus1;
signed char inter_layer_slice_alpha_c0_offset_div2;
signed char inter_layer_slice_beta_offset_div2;
unsigned short DPBEntryValidFlag;
unsigned char inter_layer_deblocking_filter_control_present_flag;
unsigned char extended_spatial_scalability_idc;
unsigned char adaptive_tcoeff_level_prediction_flag;
unsigned char slice_header_restriction_flag;
unsigned char chroma_phase_x_plus1_flag;
unsigned char chroma_phase_y_plus1;
unsigned char tcoeff_level_prediction_flag;
unsigned char constrained_intra_resampling_flag;
unsigned char ref_layer_chroma_phase_x_plus1_flag;
unsigned char store_ref_base_pic_flag;
unsigned char Reserved8BitsA;
unsigned char Reserved8BitsB;
// For the 4 scaled_ref_layer_XX fields below,
// if (extended_spatial_scalability_idc == 1), SPS field, G.7.3.2.1.4, add prefix "seq_"
// if (extended_spatial_scalability_idc == 2), SLH field, G.7.3.3.4,
short scaled_ref_layer_left_offset;
short scaled_ref_layer_top_offset;
short scaled_ref_layer_right_offset;
short scaled_ref_layer_bottom_offset;
unsigned short Reserved16Bits;
struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded. Linked list ends at the target layer. */
int bRefBaseLayer; /**< whether to store ref base pic */
} CUVIDH264SVCEXT;
/*!
* \struct CUVIDH264PICPARAMS
* H.264 Picture Parameters
*/
typedef struct _CUVIDH264PICPARAMS
{
// SPS
int log2_max_frame_num_minus4;
int pic_order_cnt_type;
int log2_max_pic_order_cnt_lsb_minus4;
int delta_pic_order_always_zero_flag;
int frame_mbs_only_flag;
int direct_8x8_inference_flag;
int num_ref_frames; // NOTE: shall meet level 4.1 restrictions
unsigned char residual_colour_transform_flag;
unsigned char bit_depth_luma_minus8; // Must be 0 (only 8-bit supported)
unsigned char bit_depth_chroma_minus8; // Must be 0 (only 8-bit supported)
unsigned char qpprime_y_zero_transform_bypass_flag;
// PPS
int entropy_coding_mode_flag;
int pic_order_present_flag;
int num_ref_idx_l0_active_minus1;
int num_ref_idx_l1_active_minus1;
int weighted_pred_flag;
int weighted_bipred_idc;
int pic_init_qp_minus26;
int deblocking_filter_control_present_flag;
int redundant_pic_cnt_present_flag;
int transform_8x8_mode_flag;
int MbaffFrameFlag;
int constrained_intra_pred_flag;
int chroma_qp_index_offset;
int second_chroma_qp_index_offset;
int ref_pic_flag;
int frame_num;
int CurrFieldOrderCnt[2];
// DPB
CUVIDH264DPBENTRY dpb[16]; // List of reference frames within the DPB
// Quantization Matrices (raster-order)
unsigned char WeightScale4x4[6][16];
unsigned char WeightScale8x8[2][64];
// FMO/ASO
unsigned char fmo_aso_enable;
unsigned char num_slice_groups_minus1;
unsigned char slice_group_map_type;
signed char pic_init_qs_minus26;
unsigned int slice_group_change_rate_minus1;
union
{
unsigned long long slice_group_map_addr;
const unsigned char *pMb2SliceGroupMap;
} fmo;
unsigned int Reserved[12];
// SVC/MVC
union
{
CUVIDH264MVCEXT mvcext;
CUVIDH264SVCEXT svcext;
};
} CUVIDH264PICPARAMS;
/*!
* \struct CUVIDMPEG2PICPARAMS
* MPEG-2 Picture Parameters
*/
typedef struct _CUVIDMPEG2PICPARAMS
{
int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
int BackwardRefIdx; // Picture index of backward reference (B-frames)
int picture_coding_type;
int full_pel_forward_vector;
int full_pel_backward_vector;
int f_code[2][2];
int intra_dc_precision;
int frame_pred_frame_dct;
int concealment_motion_vectors;
int q_scale_type;
int intra_vlc_format;
int alternate_scan;
int top_field_first;
// Quantization matrices (raster order)
unsigned char QuantMatrixIntra[64];
unsigned char QuantMatrixInter[64];
} CUVIDMPEG2PICPARAMS;
////////////////////////////////////////////////////////////////////////////////////////////////
//
// MPEG-4 Picture Parameters
//
// MPEG-4 has VOP types instead of Picture types
#define I_VOP 0
#define P_VOP 1
#define B_VOP 2
#define S_VOP 3
/*!
* \struct CUVIDMPEG4PICPARAMS
* MPEG-4 Picture Parameters
*/
typedef struct _CUVIDMPEG4PICPARAMS
{
int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
int BackwardRefIdx; // Picture index of backward reference (B-frames)
// VOL
int video_object_layer_width;
int video_object_layer_height;
int vop_time_increment_bitcount;
int top_field_first;
int resync_marker_disable;
int quant_type;
int quarter_sample;
int short_video_header;
int divx_flags;
// VOP
int vop_coding_type;
int vop_coded;
int vop_rounding_type;
int alternate_vertical_scan_flag;
int interlaced;
int vop_fcode_forward;
int vop_fcode_backward;
int trd[2];
int trb[2];
// Quantization matrices (raster order)
unsigned char QuantMatrixIntra[64];
unsigned char QuantMatrixInter[64];
int gmc_enabled;
} CUVIDMPEG4PICPARAMS;
/*!
* \struct CUVIDVC1PICPARAMS
* VC1 Picture Parameters
*/
typedef struct _CUVIDVC1PICPARAMS
{
int ForwardRefIdx; /**< Picture index of forward reference (P/B-frames) */
int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */
int FrameWidth; /**< Actual frame width */
int FrameHeight; /**< Actual frame height */
// PICTURE
int intra_pic_flag; /**< Set to 1 for I,BI frames */
int ref_pic_flag; /**< Set to 1 for I,P frames */
int progressive_fcm; /**< Progressive frame */
// SEQUENCE
int profile;
int postprocflag;
int pulldown;
int interlace;
int tfcntrflag;
int finterpflag;
int psf;
int multires;
int syncmarker;
int rangered;
int maxbframes;
// ENTRYPOINT
int panscan_flag;
int refdist_flag;
int extended_mv;
int dquant;
int vstransform;
int loopfilter;
int fastuvmc;
int overlap;
int quantizer;
int extended_dmv;
int range_mapy_flag;
int range_mapy;
int range_mapuv_flag;
int range_mapuv;
int rangeredfrm; // range reduction state
} CUVIDVC1PICPARAMS;
/*!
* \struct CUVIDJPEGPICPARAMS
* JPEG Picture Parameters
*/
typedef struct _CUVIDJPEGPICPARAMS
{
int Reserved;
} CUVIDJPEGPICPARAMS;
/*!
* \struct CUVIDHEVCPICPARAMS
* HEVC Picture Parameters
*/
typedef struct _CUVIDHEVCPICPARAMS
{
// sps
int pic_width_in_luma_samples;
int pic_height_in_luma_samples;
unsigned char log2_min_luma_coding_block_size_minus3;
unsigned char log2_diff_max_min_luma_coding_block_size;
unsigned char log2_min_transform_block_size_minus2;
unsigned char log2_diff_max_min_transform_block_size;
unsigned char pcm_enabled_flag;
unsigned char log2_min_pcm_luma_coding_block_size_minus3;
unsigned char log2_diff_max_min_pcm_luma_coding_block_size;
unsigned char pcm_sample_bit_depth_luma_minus1;
unsigned char pcm_sample_bit_depth_chroma_minus1;
unsigned char pcm_loop_filter_disabled_flag;
unsigned char strong_intra_smoothing_enabled_flag;
unsigned char max_transform_hierarchy_depth_intra;
unsigned char max_transform_hierarchy_depth_inter;
unsigned char amp_enabled_flag;
unsigned char separate_colour_plane_flag;
unsigned char log2_max_pic_order_cnt_lsb_minus4;
unsigned char num_short_term_ref_pic_sets;
unsigned char long_term_ref_pics_present_flag;
unsigned char num_long_term_ref_pics_sps;
unsigned char sps_temporal_mvp_enabled_flag;
unsigned char sample_adaptive_offset_enabled_flag;
unsigned char scaling_list_enable_flag;
unsigned char IrapPicFlag;
unsigned char IdrPicFlag;
unsigned char bit_depth_luma_minus8;
unsigned char bit_depth_chroma_minus8;
unsigned char reserved1[14];
// pps
unsigned char dependent_slice_segments_enabled_flag;
unsigned char slice_segment_header_extension_present_flag;
unsigned char sign_data_hiding_enabled_flag;
unsigned char cu_qp_delta_enabled_flag;
unsigned char diff_cu_qp_delta_depth;
signed char init_qp_minus26;
signed char pps_cb_qp_offset;
signed char pps_cr_qp_offset;
unsigned char constrained_intra_pred_flag;
unsigned char weighted_pred_flag;
unsigned char weighted_bipred_flag;
unsigned char transform_skip_enabled_flag;
unsigned char transquant_bypass_enabled_flag;
unsigned char entropy_coding_sync_enabled_flag;
unsigned char log2_parallel_merge_level_minus2;
unsigned char num_extra_slice_header_bits;
unsigned char loop_filter_across_tiles_enabled_flag;
unsigned char loop_filter_across_slices_enabled_flag;
unsigned char output_flag_present_flag;
unsigned char num_ref_idx_l0_default_active_minus1;
unsigned char num_ref_idx_l1_default_active_minus1;
unsigned char lists_modification_present_flag;
unsigned char cabac_init_present_flag;
unsigned char pps_slice_chroma_qp_offsets_present_flag;
unsigned char deblocking_filter_override_enabled_flag;
unsigned char pps_deblocking_filter_disabled_flag;
signed char pps_beta_offset_div2;
signed char pps_tc_offset_div2;
unsigned char tiles_enabled_flag;
unsigned char uniform_spacing_flag;
unsigned char num_tile_columns_minus1;
unsigned char num_tile_rows_minus1;
unsigned short column_width_minus1[21];
unsigned short row_height_minus1[21];
unsigned int reserved3[15];
// RefPicSets
int NumBitsForShortTermRPSInSlice;
int NumDeltaPocsOfRefRpsIdx;
int NumPocTotalCurr;
int NumPocStCurrBefore;
int NumPocStCurrAfter;
int NumPocLtCurr;
int CurrPicOrderCntVal;
int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference)
int PicOrderCntVal[16]; // [refpic]
unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference
unsigned char RefPicSetStCurrBefore[8]; // [0..NumPocStCurrBefore-1] -> refpic (0..15)
unsigned char RefPicSetStCurrAfter[8]; // [0..NumPocStCurrAfter-1] -> refpic (0..15)
unsigned char RefPicSetLtCurr[8]; // [0..NumPocLtCurr-1] -> refpic (0..15)
unsigned char RefPicSetInterLayer0[8];
unsigned char RefPicSetInterLayer1[8];
unsigned int reserved4[12];
// scaling lists (diag order)
unsigned char ScalingList4x4[6][16]; // [matrixId][i]
unsigned char ScalingList8x8[6][64]; // [matrixId][i]
unsigned char ScalingList16x16[6][64]; // [matrixId][i]
unsigned char ScalingList32x32[2][64]; // [matrixId][i]
unsigned char ScalingListDCCoeff16x16[6]; // [matrixId]
unsigned char ScalingListDCCoeff32x32[2]; // [matrixId]
} CUVIDHEVCPICPARAMS;
/*!
* \struct CUVIDVP8PICPARAMS
* VP8 Picture Parameters
*/
typedef struct _CUVIDVP8PICPARAMS
{
int width;
int height;
unsigned int first_partition_size;
//Frame Indexes
unsigned char LastRefIdx;
unsigned char GoldenRefIdx;
unsigned char AltRefIdx;
union {
struct {
unsigned char frame_type : 1; /**< 0 = KEYFRAME, 1 = INTERFRAME */
unsigned char version : 3;
unsigned char show_frame : 1;
unsigned char update_mb_segmentation_data : 1; /**< Must be 0 if segmentation is not enabled */
unsigned char Reserved2Bits : 2;
};
unsigned char wFrameTagFlags;
};
unsigned char Reserved1[4];
unsigned int Reserved2[3];
} CUVIDVP8PICPARAMS;
/*!
* \struct CUVIDVP9PICPARAMS
* VP9 Picture Parameters
*/
typedef struct _CUVIDVP9PICPARAMS
{
unsigned int width;
unsigned int height;
//Frame Indices
unsigned char LastRefIdx;
unsigned char GoldenRefIdx;
unsigned char AltRefIdx;
unsigned char colorSpace;
unsigned short profile : 3;
unsigned short frameContextIdx : 2;
unsigned short frameType : 1;
unsigned short showFrame : 1;
unsigned short errorResilient : 1;
unsigned short frameParallelDecoding : 1;
unsigned short subSamplingX : 1;
unsigned short subSamplingY : 1;
unsigned short intraOnly : 1;
unsigned short allow_high_precision_mv : 1;
unsigned short refreshEntropyProbs : 1;
unsigned short reserved2Bits : 2;
unsigned short reserved16Bits;
unsigned char refFrameSignBias[4];
unsigned char bitDepthMinus8Luma;
unsigned char bitDepthMinus8Chroma;
unsigned char loopFilterLevel;
unsigned char loopFilterSharpness;
unsigned char modeRefLfEnabled;
unsigned char log2_tile_columns;
unsigned char log2_tile_rows;
unsigned char segmentEnabled : 1;
unsigned char segmentMapUpdate : 1;
unsigned char segmentMapTemporalUpdate : 1;
unsigned char segmentFeatureMode : 1;
unsigned char reserved4Bits : 4;
unsigned char segmentFeatureEnable[8][4];
short segmentFeatureData[8][4];
unsigned char mb_segment_tree_probs[7];
unsigned char segment_pred_probs[3];
unsigned char reservedSegment16Bits[2];
int qpYAc;
int qpYDc;
int qpChDc;
int qpChAc;
unsigned int activeRefIdx[3];
unsigned int resetFrameContext;
unsigned int mcomp_filter_type;
unsigned int mbRefLfDelta[4];
unsigned int mbModeLfDelta[2];
unsigned int frameTagSize;
unsigned int offsetToDctParts;
unsigned int reserved128Bits[4];
} CUVIDVP9PICPARAMS;
/*!
* \struct CUVIDPICPARAMS
* Picture Parameters for Decoding
*/
typedef struct _CUVIDPICPARAMS
{
int PicWidthInMbs; /**< Coded Frame Size */
int FrameHeightInMbs; /**< Coded Frame Height */
int CurrPicIdx; /**< Output index of the current picture */
int field_pic_flag; /**< 0=frame picture, 1=field picture */
int bottom_field_flag; /**< 0=top field, 1=bottom field (ignored if field_pic_flag=0) */
int second_field; /**< Second field of a complementary field pair */
// Bitstream data
unsigned int nBitstreamDataLen; /**< Number of bytes in bitstream data buffer */
const unsigned char *pBitstreamData; /**< Ptr to bitstream data for this picture (slice-layer) */
unsigned int nNumSlices; /**< Number of slices in this picture */
const unsigned int *pSliceDataOffsets; /**< nNumSlices entries, contains offset of each slice within the bitstream data buffer */
int ref_pic_flag; /**< This picture is a reference picture */
int intra_pic_flag; /**< This picture is entirely intra coded */
unsigned int Reserved[30]; /**< Reserved for future use */
// Codec-specific data
union {
CUVIDMPEG2PICPARAMS mpeg2; /**< Also used for MPEG-1 */
CUVIDH264PICPARAMS h264;
CUVIDVC1PICPARAMS vc1;
CUVIDMPEG4PICPARAMS mpeg4;
CUVIDJPEGPICPARAMS jpeg;
CUVIDHEVCPICPARAMS hevc;
CUVIDVP8PICPARAMS vp8;
CUVIDVP9PICPARAMS vp9;
unsigned int CodecReserved[1024];
} CodecSpecific;
} CUVIDPICPARAMS;
/*!
* \struct CUVIDPROCPARAMS
* Picture Parameters for Postprocessing
*/
typedef struct _CUVIDPROCPARAMS
{
int progressive_frame; /**< Input is progressive (deinterlace_mode will be ignored) */
int second_field; /**< Output the second field (ignored if deinterlace mode is Weave) */
int top_field_first; /**< Input frame is top field first (1st field is top, 2nd field is bottom) */
int unpaired_field; /**< Input only contains one field (2nd field is invalid) */
// The fields below are used for raw YUV input
unsigned int reserved_flags; /**< Reserved for future use (set to zero) */
unsigned int reserved_zero; /**< Reserved (set to zero) */
unsigned long long raw_input_dptr; /**< Input CUdeviceptr for raw YUV extensions */
unsigned int raw_input_pitch; /**< pitch in bytes of raw YUV input (should be aligned appropriately) */
unsigned int raw_input_format; /**< Reserved for future use (set to zero) */
unsigned long long raw_output_dptr; /**< Reserved for future use (set to zero) */
unsigned int raw_output_pitch; /**< Reserved for future use (set to zero) */
unsigned int Reserved[48];
void *Reserved3[3];
} CUVIDPROCPARAMS;
/**
*
* To minimize decode latency, there should always be at least 2 pictures in the decode
* queue at any time, to make sure that all decode engines are always busy.
*
* Overall data flow:
* - cuvidCreateDecoder(...)
* For each picture:
* - cuvidDecodePicture(N)
* - cuvidMapVideoFrame(N-4)
* - do some processing in cuda
* - cuvidUnmapVideoFrame(N-4)
* - cuvidDecodePicture(N+1)
* - cuvidMapVideoFrame(N-3)
* ...
* - cuvidDestroyDecoder(...)
*
* NOTE:
* - When the cuda context is created from a D3D device, the D3D device must also be created
* with the D3DCREATE_MULTITHREADED flag.
* - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces)
* - cuVidDecodePicture may block the calling thread if there are too many pictures pending
* in the decode queue
*/
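/*
 * Minimal decode-loop sketch of the data flow described above, for an
 * application source file that includes this header (illustration only, not
 * part of the API). get_next_picture() and process_in_cuda() are hypothetical
 * application helpers, DECODE_DELAY mirrors the "N-4" pipelining in the
 * comment, and a 64-bit build is assumed, so cuvidMapVideoFrame resolves to
 * cuvidMapVideoFrame64 with a 64-bit device pointer.
 */
extern void get_next_picture(int n, CUVIDPICPARAMS *pic);                    /* hypothetical demux/parse step */
extern void process_in_cuda(unsigned long long dev_ptr, unsigned int pitch); /* hypothetical CUDA processing */

#define DECODE_DELAY 4

static void example_decode_stream(CUVIDDECODECREATEINFO *create_info, int num_pictures)
{
    CUvideodecoder dec;
    CUVIDPICPARAMS pic;
    CUVIDPROCPARAMS vpp = { 0 };
    unsigned long long dev_ptr;
    unsigned int pitch;
    int n;

    cuvidCreateDecoder(&dec, create_info);

    for (n = 0; n < num_pictures; n++) {
        get_next_picture(n, &pic);
        cuvidDecodePicture(dec, &pic);       /* queue picture N for decoding */

        if (n >= DECODE_DELAY) {
            /* map a picture queued DECODE_DELAY frames ago, which has finished by now */
            cuvidMapVideoFrame(dec, n - DECODE_DELAY, &dev_ptr, &pitch, &vpp);
            process_in_cuda(dev_ptr, pitch);
            cuvidUnmapVideoFrame(dec, dev_ptr);
        }
    }

    cuvidDestroyDecoder(dec);
}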
/**
* \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci)
* Create the decoder object
*/
CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci);
/**
* \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder)
* Destroy the decoder object
*/
CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder);
/**
* \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams)
* Decode a single picture (field or frame)
*/
CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams);
#if !defined(__CUVID_DEVPTR64) || defined(__CUVID_INTERNAL)
/**
* \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
* Post-process and map a video frame for use in cuda
*/
CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx,
unsigned int *pDevPtr, unsigned int *pPitch,
CUVIDPROCPARAMS *pVPP);
/**
* \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr)
* Unmap a previously mapped video frame
*/
CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr);
#endif
#if defined(WIN64) || defined(_WIN64) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
/**
* \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
* map a video frame
*/
CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr,
unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
/**
* \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
* Unmap a previously mapped video frame
*/
CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
#if defined(__CUVID_DEVPTR64) && !defined(__CUVID_INTERNAL)
#define cuvidMapVideoFrame cuvidMapVideoFrame64
#define cuvidUnmapVideoFrame cuvidUnmapVideoFrame64
#endif
#endif
/**
*
* Context-locking: to facilitate multi-threaded implementations, the following 4 functions
* provide a simple mutex-style host synchronization. If a non-NULL context is specified
* in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given
* context before making any cuda calls.
* A multi-threaded application could create a lock associated with a context handle so that
* multiple threads can safely share the same cuda context:
* - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context
* that can be passed to cuvidCtxLockCreate.
* - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section.
*
* NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video
* decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls).
*/
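/*
 * Context-locking sketch for the functions below, for an application source
 * file that includes this header (illustration only, not part of the API).
 * Assumes cuInit() and device selection have already happened; do_cuda_work()
 * stands in for the application's CUDA calls.
 */
extern void do_cuda_work(void);     /* hypothetical */

static CUvideoctxlock example_create_floating_lock(CUdevice dev)
{
    CUcontext ctx;
    CUvideoctxlock lock;

    cuCtxCreate(&ctx, 0, dev);
    cuCtxPopCurrent(&ctx);          /* detach it immediately so it becomes a "floating" context */
    cuvidCtxLockCreate(&lock, ctx); /* the lock now manages pushing/popping the context */
    return lock;
}

static void example_locked_cuda_work(CUvideoctxlock lock)
{
    cuvidCtxLock(lock, 0);          /* acquire the mutex and make the context current */
    do_cuda_work();                 /* all CUDA calls go inside the lock/unlock pair */
    cuvidCtxUnlock(lock, 0);
}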
/**
* \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx)
*/
CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx);
/**
* \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck)
*/
CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck);
/**
* \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags)
*/
CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags);
/**
* \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags)
*/
CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags);
/** @} */ /* End VIDEO_DECODER */
////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__cplusplus)
// Auto-lock helper for C++ applications
class CCtxAutoLock
{
private:
CUvideoctxlock m_ctx;
public:
CCtxAutoLock(CUvideoctxlock ctx);
~CCtxAutoLock();
};
}
#endif /* __cplusplus */
#endif // __CUDA_VIDEO_H__


@@ -1,321 +0,0 @@
/*
* This copyright notice applies to this header file only:
*
* Copyright (c) 2010-2016 NVIDIA Corporation
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the software, and to permit persons to whom the
* software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/**
* \file nvcuvid.h
* NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
* \date 2015-2015
* This file contains the interface constants, structure definitions and function prototypes.
*/
#if !defined(__NVCUVID_H__)
#define __NVCUVID_H__
#include "compat/cuda/cuviddec.h"
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
/*********************************
** Initialization
*********************************/
CUresult CUDAAPI cuvidInit(unsigned int Flags);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// High-level helper APIs for video sources
//
typedef void *CUvideosource;
typedef void *CUvideoparser;
typedef long long CUvideotimestamp;
/**
* \addtogroup VIDEO_PARSER Video Parser
* @{
*/
/*!
* \enum cudaVideoState
* Video Source State
*/
typedef enum {
cudaVideoState_Error = -1, /**< Error state (invalid source) */
cudaVideoState_Stopped = 0, /**< Source is stopped (or reached end-of-stream) */
cudaVideoState_Started = 1 /**< Source is running and delivering data */
} cudaVideoState;
/*!
* \enum cudaAudioCodec
* Audio compression
*/
typedef enum {
cudaAudioCodec_MPEG1=0, /**< MPEG-1 Audio */
cudaAudioCodec_MPEG2, /**< MPEG-2 Audio */
cudaAudioCodec_MP3, /**< MPEG-1 Layer III Audio */
cudaAudioCodec_AC3, /**< Dolby Digital (AC3) Audio */
cudaAudioCodec_LPCM /**< PCM Audio */
} cudaAudioCodec;
/*!
* \struct CUVIDEOFORMAT
* Video format
*/
typedef struct
{
cudaVideoCodec codec; /**< Compression format */
/**
* frame rate = numerator / denominator (for example: 30000/1001)
*/
struct {
unsigned int numerator; /**< frame rate numerator (0 = unspecified or variable frame rate) */
unsigned int denominator; /**< frame rate denominator (0 = unspecified or variable frame rate) */
} frame_rate;
unsigned char progressive_sequence; /**< 0=interlaced, 1=progressive */
unsigned char bit_depth_luma_minus8; /**< high bit depth Luma */
unsigned char bit_depth_chroma_minus8; /**< high bit depth Chroma */
unsigned char reserved1; /**< Reserved for future use */
unsigned int coded_width; /**< coded frame width */
unsigned int coded_height; /**< coded frame height */
/**
* area of the frame that should be displayed
* typical example:
* coded_width = 1920, coded_height = 1088
* display_area = { 0,0,1920,1080 }
*/
struct {
int left; /**< left position of display rect */
int top; /**< top position of display rect */
int right; /**< right position of display rect */
int bottom; /**< bottom position of display rect */
} display_area;
cudaVideoChromaFormat chroma_format; /**< Chroma format */
unsigned int bitrate; /**< video bitrate (bps, 0=unknown) */
/**
* Display Aspect Ratio = x:y (4:3, 16:9, etc)
*/
struct {
int x;
int y;
} display_aspect_ratio;
/**
* Video Signal Description
*/
struct {
unsigned char video_format : 3;
unsigned char video_full_range_flag : 1;
unsigned char reserved_zero_bits : 4;
unsigned char color_primaries;
unsigned char transfer_characteristics;
unsigned char matrix_coefficients;
} video_signal_description;
unsigned int seqhdr_data_length; /**< Additional bytes following (CUVIDEOFORMATEX) */
} CUVIDEOFORMAT;
/*!
* \struct CUVIDEOFORMATEX
* Video format including raw sequence header information
*/
typedef struct
{
CUVIDEOFORMAT format;
unsigned char raw_seqhdr_data[1024];
} CUVIDEOFORMATEX;
/*!
* \struct CUAUDIOFORMAT
* Audio Formats
*/
typedef struct
{
cudaAudioCodec codec; /**< Compression format */
unsigned int channels; /**< number of audio channels */
unsigned int samplespersec; /**< sampling frequency */
unsigned int bitrate; /**< For uncompressed, can also be used to determine bits per sample */
unsigned int reserved1; /**< Reserved for future use */
unsigned int reserved2; /**< Reserved for future use */
} CUAUDIOFORMAT;
/*!
* \enum CUvideopacketflags
* Data packet flags
*/
typedef enum {
CUVID_PKT_ENDOFSTREAM = 0x01, /**< Set when this is the last packet for this stream */
CUVID_PKT_TIMESTAMP = 0x02, /**< Timestamp is valid */
CUVID_PKT_DISCONTINUITY = 0x04 /**< Set when a discontinuity has to be signalled */
} CUvideopacketflags;
/*!
* \struct CUVIDSOURCEDATAPACKET
* Data Packet
*/
typedef struct _CUVIDSOURCEDATAPACKET
{
unsigned long flags; /**< Combination of CUVID_PKT_XXX flags */
unsigned long payload_size; /**< number of bytes in the payload (may be zero if EOS flag is set) */
const unsigned char *payload; /**< Pointer to packet payload data (may be NULL if EOS flag is set) */
CUvideotimestamp timestamp; /**< Presentation timestamp (10MHz clock), only valid if CUVID_PKT_TIMESTAMP flag is set */
} CUVIDSOURCEDATAPACKET;
// Callback for packet delivery
typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *);
/*!
* \struct CUVIDSOURCEPARAMS
* Source Params
*/
typedef struct _CUVIDSOURCEPARAMS
{
unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */
unsigned int uReserved1[7]; /**< Reserved for future use - set to zero */
void *pUserData; /**< Parameter passed in to the data handlers */
PFNVIDSOURCECALLBACK pfnVideoDataHandler; /**< Called to deliver video packets */
PFNVIDSOURCECALLBACK pfnAudioDataHandler; /**< Called to deliver audio packets */
void *pvReserved2[8]; /**< Reserved for future use - set to NULL */
} CUVIDSOURCEPARAMS;
/*!
* \enum CUvideosourceformat_flags
* CUvideosourceformat_flags
*/
typedef enum {
CUVID_FMT_EXTFORMATINFO = 0x100 /**< Return extended format structure (CUVIDEOFORMATEX) */
} CUvideosourceformat_flags;
#if !defined(__APPLE__)
/**
* \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams)
* Create Video Source
*/
CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams)
* Create Video Source
*/
CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj)
* Destroy Video Source
*/
CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj);
/**
* \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state)
* Set Video Source state
*/
CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state);
/**
* \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj)
* Get Video Source state
*/
cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj);
/**
* \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags)
* Get Video Source Format
*/
CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags);
/**
* \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags)
* Set Video Source state
*/
CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags);
#endif
/**
* \struct CUVIDPARSERDISPINFO
*/
typedef struct _CUVIDPARSERDISPINFO
{
int picture_index; /**< */
int progressive_frame; /**< */
int top_field_first; /**< */
int repeat_first_field; /**< Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, -1=unpaired field) */
CUvideotimestamp timestamp; /**< */
} CUVIDPARSERDISPINFO;
//
// Parser callbacks
// The parser will call these synchronously from within cuvidParseVideoData(), whenever a picture is ready to
// be decoded and/or displayed.
//
typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *);
typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *);
typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *);
/**
* \struct CUVIDPARSERPARAMS
*/
typedef struct _CUVIDPARSERPARAMS
{
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
unsigned int ulMaxNumDecodeSurfaces; /**< Max # of decode surfaces (parser will cycle through these) */
unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */
unsigned int ulErrorThreshold; /**< % Error threshold (0-100) for calling pfnDecodePicture (100=always call pfnDecodePicture even if picture bitstream is fully corrupted) */
unsigned int ulMaxDisplayDelay; /**< Max display queue delay (improves pipelining of decode with display) - 0=no delay (recommended values: 2..4) */
unsigned int uReserved1[5]; /**< Reserved for future use - set to 0 */
void *pUserData; /**< User data for callbacks */
PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< Called before decoding frames and/or whenever there is a format change */
PFNVIDDECODECALLBACK pfnDecodePicture; /**< Called when a picture is ready to be decoded (decode order) */
PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< Called whenever a picture is ready to be displayed (display order) */
void *pvReserved2[7]; /**< Reserved for future use - set to NULL */
CUVIDEOFORMATEX *pExtVideoInfo; /**< [Optional] sequence header data from system layer */
} CUVIDPARSERPARAMS;
/**
* \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams)
*/
CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams);
/**
* \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket)
*/
CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket);
/**
* \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj)
*/
CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj);
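/*
 * Parser-setup sketch (illustration only, not part of the API): the three
 * callbacks are invoked synchronously from inside cuvidParseVideoData(), as
 * noted above. The example_* names and field values such as
 * ulMaxNumDecodeSurfaces = 20 are illustrative assumptions.
 */
static int CUDAAPI example_sequence_cb(void *user, CUVIDEOFORMAT *fmt)       { return 1; } /* format known/changed */
static int CUDAAPI example_decode_cb(void *user, CUVIDPICPARAMS *pic)        { return 1; } /* picture ready to decode */
static int CUDAAPI example_display_cb(void *user, CUVIDPARSERDISPINFO *info) { return 1; } /* picture ready to display */

static CUvideoparser example_create_parser(void *user_data)
{
    CUVIDPARSERPARAMS params = { 0 };
    CUvideoparser parser = NULL;

    params.CodecType              = cudaVideoCodec_H264;
    params.ulMaxNumDecodeSurfaces = 20;  /* illustrative */
    params.ulMaxDisplayDelay      = 2;   /* within the recommended 2..4 range */
    params.pUserData              = user_data;
    params.pfnSequenceCallback    = example_sequence_cb;
    params.pfnDecodePicture       = example_decode_cb;
    params.pfnDisplayPicture      = example_display_cb;

    cuvidCreateVideoParser(&parser, &params);
    return parser;
}
/* Compressed data is then fed with cuvidParseVideoData(parser, &pkt);
 * the final packet should carry the CUVID_PKT_ENDOFSTREAM flag. */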
/** @} */ /* END VIDEO_PARSER */
////////////////////////////////////////////////////////////////////////////////////////////////
#if defined(__cplusplus)
}
#endif /* __cplusplus */
#endif // __NVCUVID_H__


@@ -1,42 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_DISPATCH_SEMAPHORE_SEMAPHORE_H
#define COMPAT_DISPATCH_SEMAPHORE_SEMAPHORE_H
#include <dispatch/dispatch.h>
#include <errno.h>
#define sem_t dispatch_semaphore_t
#define sem_post(psem) dispatch_semaphore_signal(*psem)
#define sem_wait(psem) dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER)
#define sem_timedwait(psem, val) dispatch_semaphore_wait(*psem, dispatch_walltime(val, 0))
#define sem_destroy(psem) dispatch_release(*psem)
static inline int compat_sem_init(dispatch_semaphore_t *psem,
int unused, int val)
{
int ret = !!(*psem = dispatch_semaphore_create(val)) - 1;
if (ret < 0)
errno = ENOMEM;
return ret;
}
#define sem_init compat_sem_init
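/*
 * Usage sketch (illustration only, not part of this compat header): with the
 * macros above, POSIX-style semaphore code compiles unchanged on top of
 * libdispatch. The function name is an assumption for the example.
 */
static inline int example_signal_then_wait(void)
{
    sem_t s;                 /* really a dispatch_semaphore_t */
    if (sem_init(&s, 0, 0))  /* compat_sem_init(); errno = ENOMEM on failure */
        return -1;
    sem_post(&s);            /* dispatch_semaphore_signal() */
    sem_wait(&s);            /* dispatch_semaphore_wait(..., DISPATCH_TIME_FOREVER) */
    sem_destroy(&s);         /* dispatch_release() */
    return 0;
}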
#endif /* COMPAT_DISPATCH_SEMAPHORE_SEMAPHORE_H */


@@ -19,8 +19,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_MSVCRT_SNPRINTF_H
#define COMPAT_MSVCRT_SNPRINTF_H
#ifndef COMPAT_SNPRINTF_H
#define COMPAT_SNPRINTF_H
#include <stdarg.h>
#include <stdio.h>
@@ -35,4 +35,4 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap);
#define _snprintf avpriv_snprintf
#define vsnprintf avpriv_vsnprintf
#endif /* COMPAT_MSVCRT_SNPRINTF_H */
#endif /* COMPAT_SNPRINTF_H */

File diff suppressed because it is too large.


@@ -23,8 +23,8 @@
* os2threads to pthreads wrapper
*/
#ifndef COMPAT_OS2THREADS_H
#define COMPAT_OS2THREADS_H
#ifndef AVCODEC_OS2PTHREADS_H
#define AVCODEC_OS2PTHREADS_H
#define INCL_DOS
#include <os2.h>
@@ -32,71 +32,59 @@
#undef __STRICT_ANSI__ /* for _beginthread() */
#include <stdlib.h>
#include <sys/builtin.h>
#include <sys/fmutex.h>
#include "libavutil/attributes.h"
typedef struct {
TID tid;
void *(*start_routine)(void *);
void *arg;
void *result;
} pthread_t;
#include "libavutil/mem.h"
typedef TID pthread_t;
typedef void pthread_attr_t;
typedef HMTX pthread_mutex_t;
typedef void pthread_mutexattr_t;
typedef struct {
HEV event_sem;
HEV ack_sem;
volatile unsigned wait_count;
HEV event_sem;
int wait_count;
} pthread_cond_t;
typedef void pthread_condattr_t;
typedef struct {
volatile int done;
_fmutex mtx;
} pthread_once_t;
#define PTHREAD_ONCE_INIT {0, _FMUTEX_INITIALIZER}
struct thread_arg {
void *(*start_routine)(void *);
void *arg;
};
static void thread_entry(void *arg)
{
pthread_t *thread = arg;
struct thread_arg *thread_arg = arg;
thread->result = thread->start_routine(thread->arg);
thread_arg->start_routine(thread_arg->arg);
av_free(thread_arg);
}
static av_always_inline int pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void*),
void *arg)
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void*), void *arg)
{
thread->start_routine = start_routine;
thread->arg = arg;
thread->result = NULL;
struct thread_arg *thread_arg;
thread->tid = _beginthread(thread_entry, NULL, 1024 * 1024, thread);
thread_arg = av_mallocz(sizeof(struct thread_arg));
if (!thread_arg)
return ENOMEM;
thread_arg->start_routine = start_routine;
thread_arg->arg = arg;
*thread = _beginthread(thread_entry, NULL, 256 * 1024, thread_arg);
return 0;
}
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
{
DosWaitThread(&thread.tid, DCWW_WAIT);
if (value_ptr)
*value_ptr = thread.result;
DosWaitThread((PTID)&thread, DCWW_WAIT);
return 0;
}
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *attr)
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
DosCreateMutexSem(NULL, (PHMTX)mutex, 0, FALSE);
@@ -124,11 +112,9 @@ static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
return 0;
}
static av_always_inline int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr)
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
{
DosCreateEventSem(NULL, &cond->event_sem, DCE_POSTONE, FALSE);
DosCreateEventSem(NULL, &cond->ack_sem, DCE_POSTONE, FALSE);
cond->wait_count = 0;
@@ -138,16 +124,16 @@ static av_always_inline int pthread_cond_init(pthread_cond_t *cond,
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
{
DosCloseEventSem(cond->event_sem);
DosCloseEventSem(cond->ack_sem);
return 0;
}
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
{
if (!__atomic_cmpxchg32(&cond->wait_count, 0, 0)) {
if (cond->wait_count > 0) {
DosPostEventSem(cond->event_sem);
DosWaitEventSem(cond->ack_sem, SEM_INDEFINITE_WAIT);
cond->wait_count--;
}
return 0;
@@ -155,47 +141,26 @@ static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
{
while (!__atomic_cmpxchg32(&cond->wait_count, 0, 0))
pthread_cond_signal(cond);
while (cond->wait_count > 0) {
DosPostEventSem(cond->event_sem);
cond->wait_count--;
}
return 0;
}
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond,
pthread_mutex_t *mutex)
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
__atomic_increment(&cond->wait_count);
cond->wait_count++;
pthread_mutex_unlock(mutex);
DosWaitEventSem(cond->event_sem, SEM_INDEFINITE_WAIT);
__atomic_decrement(&cond->wait_count);
DosPostEventSem(cond->ack_sem);
pthread_mutex_lock(mutex);
return 0;
}
static av_always_inline int pthread_once(pthread_once_t *once_control,
void (*init_routine)(void))
{
if (!once_control->done)
{
_fmutex_request(&once_control->mtx, 0);
if (!once_control->done)
{
init_routine();
once_control->done = 1;
}
_fmutex_release(&once_control->mtx);
}
return 0;
}
#endif /* COMPAT_OS2THREADS_H */
#endif /* AVCODEC_OS2PTHREADS_H */
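Both sides of the diff above expose the same pthreads-shaped surface, so callers use the wrapper exactly like the real API. A minimal sketch (illustrative only; the include path is assumed from the header guard):

#include "compat/os2threads.h"

static pthread_mutex_t lock;
static pthread_cond_t  cond;
static int             done;

static void *worker(void *arg)
{
    pthread_mutex_lock(&lock);
    done = 1;
    pthread_cond_signal(&cond);      /* posts the event semaphore under the hood */
    pthread_mutex_unlock(&lock);
    return NULL;
}

static int run(void)
{
    pthread_t t;

    pthread_mutex_init(&lock, NULL);
    pthread_cond_init(&cond, NULL);

    if (pthread_create(&t, NULL, worker, NULL))
        return -1;

    pthread_mutex_lock(&lock);
    while (!done)                    /* wait in a loop, as with real pthreads */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    return pthread_join(t, NULL);    /* DosWaitThread() behind the scenes */
}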


@@ -1,352 +0,0 @@
#!/usr/bin/env perl
# make_sunver.pl
#
# Copyright (C) 2010, 2011, 2012, 2013
# Free Software Foundation, Inc.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.GPLv3. If not see
# <http://www.gnu.org/licenses/>.
# This script takes at least two arguments, a GNU style version script and
# a list of object and archive files, and generates a corresponding Sun
# style version script as follows:
#
# Each glob pattern, C++ mangled pattern or literal in the input script is
# matched against all global symbols in the input objects, emitting those
# that matched (or nothing if no match was found).
# A comment with the original pattern and its type is left in the output
# file to make it easy to understand the matches.
#
# It uses elfdump when present (native), GNU readelf otherwise.
# It depends on the GNU version of c++filt, since it must understand the
# GNU mangling style.
use FileHandle;
use IPC::Open2;
# Enforce C locale.
$ENV{'LC_ALL'} = "C";
$ENV{'LANG'} = "C";
# Input version script, GNU style.
my $symvers = shift;
##########
# Get all the symbols from the library, match them, and add them to a hash.
my %sym_hash = ();
# List of objects and archives to process.
my @OBJECTS = ();
# List of shared objects to omit from processing.
my @SHAREDOBJS = ();
# Filter out those input archives that have corresponding shared objects to
# avoid adding all symbols matched in the archive to the output map.
foreach $file (@ARGV) {
if (($so = $file) =~ s/\.a$/.so/ && -e $so) {
printf STDERR "omitted $file -> $so\n";
push (@SHAREDOBJS, $so);
} else {
push (@OBJECTS, $file);
}
}
# We need to detect and ignore hidden symbols. Solaris nm can only detect
# this in the harder to parse default output format, and GNU nm not at all,
# so use elfdump -s in the native case and GNU readelf -s otherwise.
# GNU objdump -t cannot be used since it produces a variable number of
# columns.
# The path to elfdump.
my $elfdump = "/usr/ccs/bin/elfdump";
if (-f $elfdump) {
open ELFDUMP,$elfdump.' -s '.(join ' ',@OBJECTS).'|' or die $!;
my $skip_arsym = 0;
while (<ELFDUMP>) {
chomp;
# Ignore empty lines.
if (/^$/) {
# End of archive symbol table, stop skipping.
$skip_arsym = 0 if $skip_arsym;
next;
}
# Keep skipping until end of archive symbol table.
next if ($skip_arsym);
# Ignore object name header for individual objects and archives.
next if (/:$/);
# Ignore table header lines.
next if (/^Symbol Table Section:/);
next if (/index.*value.*size/);
# Start of archive symbol table: start skipping.
if (/^Symbol Table: \(archive/) {
$skip_arsym = 1;
next;
}
# Split table.
(undef, undef, undef, undef, $bind, $oth, undef, $shndx, $name) = split;
# Error out for unknown input.
die "unknown input line:\n$_" unless defined($bind);
# Ignore local symbols.
next if ($bind eq "LOCL");
# Ignore hidden symbols.
next if ($oth eq "H");
# Ignore undefined symbols.
next if ($shndx eq "UNDEF");
# Error out for unhandled cases.
if ($bind !~ /^(GLOB|WEAK)/ or $oth ne "D") {
die "unhandled symbol:\n$_";
}
# Remember symbol.
$sym_hash{$name}++;
}
close ELFDUMP or die "$elfdump error";
} else {
open READELF, 'readelf -s -W '.(join ' ',@OBJECTS).'|' or die $!;
# Process each symbol.
while (<READELF>) {
chomp;
# Ignore empty lines.
next if (/^$/);
# Ignore object name header.
next if (/^File: .*$/);
# Ignore table header lines.
next if (/^Symbol table.*contains.*:/);
next if (/Num:.*Value.*Size/);
# Split table.
(undef, undef, undef, undef, $bind, $vis, $ndx, $name) = split;
# Error out for unknown input.
die "unknown input line:\n$_" unless defined($bind);
# Ignore local symbols.
next if ($bind eq "LOCAL");
# Ignore hidden symbols.
next if ($vis eq "HIDDEN");
# Ignore undefined symbols.
next if ($ndx eq "UND");
# Error out for unhandled cases.
if ($bind !~ /^(GLOBAL|WEAK)/ or $vis ne "DEFAULT") {
die "unhandled symbol:\n$_";
}
# Remember symbol.
$sym_hash{$name}++;
}
close READELF or die "readelf error";
}
##########
# The various types of glob patterns.
#
# A glob pattern that is to be applied to the demangled name: 'cxx'.
# A glob patterns that applies directly to the name in the .o files: 'glob'.
# This pattern is ignored; used for local variables (usually just '*'): 'ign'.
# The type of the current pattern.
my $glob = 'glob';
# We're currently inside `extern "C++"', which Sun ld doesn't understand.
my $in_extern = 0;
# The c++filt command to use. This *must* be GNU c++filt; the Sun Studio
# c++filt doesn't handle the GNU mangling style.
my $cxxfilt = $ENV{'CXXFILT'} || "c++filt";
# The current version name.
my $current_version = "";
# Was there any attempt to match a symbol to this version?
my $matches_attempted;
# The number of versions which matched this symbol.
my $matched_symbols;
open F,$symvers or die $!;
# Print information about generating this file
print "# This file was generated by make_sunver.pl. DO NOT EDIT!\n";
print "# It was generated by:\n";
printf "# %s %s %s\n", $0, $symvers, (join ' ',@ARGV);
printf "# Omitted archives with corresponding shared libraries: %s\n",
(join ' ', @SHAREDOBJS) if $#SHAREDOBJS >= 0;
print "#\n\n";
print "\$mapfile_version 2\n";
while (<F>) {
# Lines of the form '};'
if (/^([ \t]*)(\}[ \t]*;[ \t]*)$/) {
$glob = 'glob';
if ($in_extern) {
$in_extern--;
print "$1##$2\n";
} else {
print;
}
next;
}
# Lines of the form '} SOME_VERSION_NAME_1.0;'
if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) {
$glob = 'glob';
# We tried to match symbols against this version, but none matched.
# Emit dummy hidden symbol to avoid marking this version WEAK.
if ($matches_attempted && $matched_symbols == 0) {
print " hidden:\n";
print " .force_WEAK_off_$current_version = DATA S0x0 V0x0;\n";
}
print; next;
}
# Comment and blank lines
if (/^[ \t]*\#/) { print; next; }
if (/^[ \t]*$/) { print; next; }
# Lines of the form '{'
if (/^([ \t]*){$/) {
if ($in_extern) {
print "$1##{\n";
} else {
print;
}
next;
}
# Lines of the form 'SOME_VERSION_NAME_1.1 {'
if (/^([A-Z0-9_.]+)[ \t]+{$/) {
# Record version name.
$current_version = $1;
# Reset match attempts, #matched symbols for this version.
$matches_attempted = 0;
$matched_symbols = 0;
print "SYMBOL_VERSION $1 {\n";
next;
}
# Ignore 'global:'
if (/^[ \t]*global:$/) { print; next; }
# After 'local:', globs should be ignored, they won't be exported.
if (/^[ \t]*local:$/) {
$glob = 'ign';
print;
next;
}
# After 'extern "C++"', globs are C++ patterns
if (/^([ \t]*)(extern \"C\+\+\"[ \t]*)$/) {
$in_extern++;
$glob = 'cxx';
# Need to comment, Sun ld cannot handle this.
print "$1##$2\n"; next;
}
# Chomp newline now we're done with passing through the input file.
chomp;
# Catch globs. Note that '{}' is not allowed in globs by this script,
# so only '*' and '[]' are available.
if (/^([ \t]*)([^ \t;{}#]+);?[ \t]*$/) {
my $ws = $1;
my $ptn = $2;
# Turn the glob into a regex by replacing '*' with '.*', '?' with '.'.
# Keep $ptn so we can still print the original form.
($pattern = $ptn) =~ s/\*/\.\*/g;
$pattern =~ s/\?/\./g;
if ($glob eq 'ign') {
# We're in a local: * section; just continue.
print "$_\n";
next;
}
# Print the glob commented for human readers.
print "$ws##$ptn ($glob)\n";
# We tried to match a symbol to this version.
$matches_attempted++;
if ($glob eq 'glob') {
my %ptn_syms = ();
# Match ptn against symbols in %sym_hash.
foreach my $sym (keys %sym_hash) {
# Maybe it matches one of the patterns based on the symbol in
# the .o file.
$ptn_syms{$sym}++ if ($sym =~ /^$pattern$/);
}
foreach my $sym (sort keys(%ptn_syms)) {
$matched_symbols++;
print "$ws$sym;\n";
}
} elsif ($glob eq 'cxx') {
my %dem_syms = ();
# Verify that we're actually using GNU c++filt. Other versions
# most likely cannot handle GNU style symbol mangling.
my $cxxout = `$cxxfilt --version 2>&1`;
$cxxout =~ m/GNU/ or die "$0 requires GNU c++filt to function";
# Talk to c++filt through a pair of file descriptors.
# Need to start a fresh instance per pattern, otherwise the
# process grows to 500+ MB.
my $pid = open2(*FILTIN, *FILTOUT, $cxxfilt) or die $!;
# Match ptn against symbols in %sym_hash.
foreach my $sym (keys %sym_hash) {
# No? Well, maybe its demangled form matches one of those
# patterns.
printf FILTOUT "%s\n",$sym;
my $dem = <FILTIN>;
chomp $dem;
$dem_syms{$sym}++ if ($dem =~ /^$pattern$/);
}
close FILTOUT or die "c++filt error";
close FILTIN or die "c++filt error";
# Need to wait for the c++filt process to avoid lots of zombies.
waitpid $pid, 0;
foreach my $sym (sort keys(%dem_syms)) {
$matched_symbols++;
print "$ws$sym;\n";
}
} else {
# No? Well, then ignore it.
}
next;
}
# Important sanity check. This script can't handle lots of formats
# that GNU ld can, so be sure to error out if one is seen!
die "strange line `$_'";
}
close F;


@@ -16,8 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_TMS470_MATH_H
#define COMPAT_TMS470_MATH_H
#ifndef FFMPEG_COMPAT_TMS470_MATH_H
#define FFMPEG_COMPAT_TMS470_MATH_H
#include_next <math.h>
@@ -27,4 +27,4 @@
#define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 }))
#define NAN (*(const float*)((const unsigned []){ 0x7fc00000 }))
#endif /* COMPAT_TMS470_MATH_H */
#endif /* FFMPEG_COMPAT_TMS470_MATH_H */


@@ -19,9 +19,6 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef COMPAT_VA_COPY_H
#define COMPAT_VA_COPY_H
#include <stdarg.h>
#if !defined(va_copy) && defined(_MSC_VER)
@@ -30,5 +27,3 @@
#if !defined(va_copy) && defined(__GNUC__) && __GNUC__ < 3
#define va_copy(dst, src) __va_copy(dst, src)
#endif
#endif /* COMPAT_VA_COPY_H */


@@ -26,8 +26,8 @@
* w32threads to pthreads wrapper
*/
#ifndef COMPAT_W32PTHREADS_H
#define COMPAT_W32PTHREADS_H
#ifndef FFMPEG_COMPAT_W32PTHREADS_H
#define FFMPEG_COMPAT_W32PTHREADS_H
/* Build up a pthread-like API using underlying Windows API. Have only static
* methods so as to not conflict with a potentially linked in pthread-win32
@@ -39,11 +39,6 @@
#include <windows.h>
#include <process.h>
#if _WIN32_WINNT < 0x0600 && defined(__MINGW32__)
#undef MemoryBarrier
#define MemoryBarrier __sync_synchronize
#endif
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/internal.h"
@@ -87,29 +82,19 @@ static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
{
thread->func = start_routine;
thread->arg = arg;
#if HAVE_WINRT
thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread,
0, NULL);
#else
thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
0, NULL);
#endif
return !thread->handle;
}
static av_unused int pthread_join(pthread_t thread, void **value_ptr)
static av_unused void pthread_join(pthread_t thread, void **value_ptr)
{
DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
if (ret != WAIT_OBJECT_0) {
if (ret == WAIT_ABANDONED)
return EINVAL;
else
return EDEADLK;
}
if (ret != WAIT_OBJECT_0)
return;
if (value_ptr)
*value_ptr = thread.ret;
CloseHandle(thread.handle);
return 0;
}
static inline int pthread_mutex_init(pthread_mutex_t *m, void* attr)
@@ -134,19 +119,6 @@ static inline int pthread_mutex_unlock(pthread_mutex_t *m)
}
#if _WIN32_WINNT >= 0x0600
typedef INIT_ONCE pthread_once_t;
#define PTHREAD_ONCE_INIT INIT_ONCE_STATIC_INIT
static av_unused int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
BOOL pending = FALSE;
InitOnceBeginInitialize(once_control, 0, &pending, NULL);
if (pending)
init_routine();
InitOnceComplete(once_control, 0, NULL);
return 0;
}
static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
{
InitializeConditionVariable(cond);
@@ -154,15 +126,14 @@ static inline int pthread_cond_init(pthread_cond_t *cond, const void *unused_att
}
/* native condition variables do not destroy */
static inline int pthread_cond_destroy(pthread_cond_t *cond)
static inline void pthread_cond_destroy(pthread_cond_t *cond)
{
return 0;
return;
}
static inline int pthread_cond_broadcast(pthread_cond_t *cond)
static inline void pthread_cond_broadcast(pthread_cond_t *cond)
{
WakeAllConditionVariable(cond);
return 0;
}
static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
@@ -171,77 +142,14 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
return 0;
}
static inline int pthread_cond_signal(pthread_cond_t *cond)
static inline void pthread_cond_signal(pthread_cond_t *cond)
{
WakeConditionVariable(cond);
return 0;
}
#else // _WIN32_WINNT < 0x0600
/* atomic init state of dynamically loaded functions */
static LONG w32thread_init_state = 0;
static av_unused void w32thread_init(void);
/* for pre-Windows 6.0 platforms, define INIT_ONCE struct,
* compatible to the one used in the native API */
typedef union pthread_once_t {
void * Ptr; ///< For the Windows 6.0+ native functions
LONG state; ///< For the pre-Windows 6.0 compat code
} pthread_once_t;
#define PTHREAD_ONCE_INIT {0}
/* function pointers to init once API on windows 6.0+ kernels */
static BOOL (WINAPI *initonce_begin)(pthread_once_t *lpInitOnce, DWORD dwFlags, BOOL *fPending, void **lpContext);
static BOOL (WINAPI *initonce_complete)(pthread_once_t *lpInitOnce, DWORD dwFlags, void *lpContext);
/* pre-Windows 6.0 compat using a spin-lock */
static inline void w32thread_once_fallback(LONG volatile *state, void (*init_routine)(void))
{
switch (InterlockedCompareExchange(state, 1, 0)) {
/* Initial run */
case 0:
init_routine();
InterlockedExchange(state, 2);
break;
/* Another thread is running init */
case 1:
while (1) {
MemoryBarrier();
if (*state == 2)
break;
Sleep(0);
}
break;
/* Initialization complete */
case 2:
break;
}
}
static av_unused int pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
w32thread_once_fallback(&w32thread_init_state, w32thread_init);
/* Use native functions on Windows 6.0+ */
if (initonce_begin && initonce_complete) {
BOOL pending = FALSE;
initonce_begin(once_control, 0, &pending, NULL);
if (pending)
init_routine();
initonce_complete(once_control, 0, NULL);
return 0;
}
w32thread_once_fallback(&once_control->state, init_routine);
return 0;
}
/* for pre-Windows 6.0 platforms we need to define and use our own condition
* variable and api */
typedef struct win32_cond_t {
pthread_mutex_t mtx_broadcast;
pthread_mutex_t mtx_waiter_count;
@@ -261,9 +169,6 @@ static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
{
win32_cond_t *win32_cond = NULL;
w32thread_once_fallback(&w32thread_init_state, w32thread_init);
if (cond_init) {
cond_init(cond);
return 0;
@@ -286,12 +191,12 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
return 0;
}
static av_unused int pthread_cond_destroy(pthread_cond_t *cond)
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
{
win32_cond_t *win32_cond = cond->Ptr;
/* native condition variables do not destroy */
if (cond_init)
return 0;
return;
/* non native condition variables */
CloseHandle(win32_cond->semaphore);
@@ -300,17 +205,16 @@ static av_unused int pthread_cond_destroy(pthread_cond_t *cond)
pthread_mutex_destroy(&win32_cond->mtx_broadcast);
av_freep(&win32_cond);
cond->Ptr = NULL;
return 0;
}
static av_unused int pthread_cond_broadcast(pthread_cond_t *cond)
static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
{
win32_cond_t *win32_cond = cond->Ptr;
int have_waiter;
if (cond_broadcast) {
cond_broadcast(cond);
return 0;
return;
}
/* non native condition variables */
@@ -332,7 +236,6 @@ static av_unused int pthread_cond_broadcast(pthread_cond_t *cond)
} else
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
return 0;
}
static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
@@ -367,13 +270,13 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu
return pthread_mutex_lock(mutex);
}
static av_unused int pthread_cond_signal(pthread_cond_t *cond)
static av_unused void pthread_cond_signal(pthread_cond_t *cond)
{
win32_cond_t *win32_cond = cond->Ptr;
int have_waiter;
if (cond_signal) {
cond_signal(cond);
return 0;
return;
}
pthread_mutex_lock(&win32_cond->mtx_broadcast);
@@ -390,7 +293,6 @@ static av_unused int pthread_cond_signal(pthread_cond_t *cond)
}
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
return 0;
}
#endif
@@ -407,12 +309,8 @@ static av_unused void w32thread_init(void)
(void*)GetProcAddress(kernel_dll, "WakeConditionVariable");
cond_wait =
(void*)GetProcAddress(kernel_dll, "SleepConditionVariableCS");
initonce_begin =
(void*)GetProcAddress(kernel_dll, "InitOnceBeginInitialize");
initonce_complete =
(void*)GetProcAddress(kernel_dll, "InitOnceComplete");
#endif
}
#endif /* COMPAT_W32PTHREADS_H */
#endif /* FFMPEG_COMPAT_W32PTHREADS_H */
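For reference, the three-state pattern used by w32thread_once_fallback() above can be read as the following standalone sketch (illustrative only; the real code additionally issues MemoryBarrier() while spinning):

#include <windows.h>

static volatile LONG once_state = 0;   /* 0 = not run, 1 = running, 2 = done */

static void run_once(void (*init_routine)(void))
{
    switch (InterlockedCompareExchange(&once_state, 1, 0)) {
    case 0:                            /* we won the race: run the initializer */
        init_routine();
        InterlockedExchange(&once_state, 2);
        break;
    case 1:                            /* another thread is initializing: spin */
        while (once_state != 2)
            Sleep(0);
        break;
    default:                           /* already initialized, nothing to do */
        break;
    }
}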

configure vendored (1503 lines changed)

File diff suppressed because it is too large

doc/.gitignore vendored (9 lines changed)

@@ -1,9 +0,0 @@
/*.1
/*.3
/*.html
/*.pod
/config.texi
/avoptions_codec.texi
/avoptions_format.texi
/fate.txt
/print_options


@@ -2,302 +2,19 @@ Never assume the API of libav* to be stable unless at least 1 month has passed
since the last major version increase or the API was added.
The last version increases were:
libavcodec: 2015-08-28
libavdevice: 2015-08-28
libavfilter: 2015-08-28
libavformat: 2015-08-28
libavresample: 2015-08-28
libpostproc: 2015-08-28
libswresample: 2015-08-28
libswscale: 2015-08-28
libavutil: 2015-08-28
libavcodec: 2014-08-09
libavdevice: 2014-08-09
libavfilter: 2014-08-09
libavformat: 2014-08-09
libavresample: 2014-08-09
libpostproc: 2014-08-09
libswresample: 2014-08-09
libswscale: 2014-08-09
libavutil: 2014-08-09
API changes, most recent first:
-------- 8< --------- FFmpeg 3.2 was cut here -------- 8< ---------
2016-10-24 - 73ead47 - lavf 57.55.100 - avformat.h
Add AV_DISPOSITION_TIMED_THUMBNAILS
2016-10-24 - a246fef - lavf 57.54.100 - avformat.h
Add avformat_init_output() and AVSTREAM_INIT_IN_ macros
2016-10-22 - f5495c9 - lavu 55.33.100 - avassert.h
Add av_assert0_fpu() / av_assert2_fpu()
2016-10-07 - 3f9137c / 32c8359 - lavc 57.61.100 / 57.24.0 - avcodec.h
Decoders now export the frame timestamp as AVFrame.pts. It was
previously exported as AVFrame.pkt_pts, which is now deprecated.
Note: When decoding, AVFrame.pts uses the stream/packet timebase,
and not the codec timebase.
2016-09-28 - eba0414 - lavu 55.32.100 / 55.16.0 - hwcontext.h hwcontext_qsv.h
Add AV_HWDEVICE_TYPE_QSV and a new installed header with QSV-specific
hwcontext definitions.
2016-09-26 - 32c25f0 - lavc 57.59.100 / 57.23.0 - avcodec.h
AVCodecContext.hw_frames_ctx now may be used by decoders.
2016-09-27 - f0b6f72 - lavf 57.51.100 - avformat.h
Add av_stream_get_codec_timebase()
2016-09-27 - 23c0779 - lswr 2.2.100 - swresample.h
Add swr_build_matrix().
2016-09-23 - 30d3e36 - lavc 57.58.100 - avcodec.h
Add AV_CODEC_CAP_AVOID_PROBING codec capability flag.
2016-09-14 - ae1dd0c - lavf 57.49.100 - avformat.h
Add avformat_transfer_internal_stream_timing_info helper to help with stream
copy.
2016-08-29 - 4493390 - lavfi 6.58.100 - avfilter.h
Add AVFilterContext.nb_threads.
2016-08-15 - c3c4c72 - lavc 57.53.100 - avcodec.h
Add trailing_padding to AVCodecContext to match the corresponding
field in AVCodecParameters.
2016-08-15 - b746ed7 - lavc 57.52.100 - avcodec.h
Add a new API for chained BSF filters and passthrough (null) BSF --
av_bsf_list_alloc(), av_bsf_list_free(), av_bsf_list_append(),
av_bsf_list_append2(), av_bsf_list_finalize(), av_bsf_list_parse_str()
and av_bsf_get_null_filter().
2016-08-04 - 82a33c8 - lavf 57.46.100 - avformat.h
Add av_get_frame_filename2()
2016-07-09 - 775389f / 90f469a - lavc 57.50.100 / 57.20.0 - avcodec.h
Add FF_PROFILE_H264_MULTIVIEW_HIGH and FF_PROFILE_H264_STEREO_HIGH.
2016-06-30 - c1c7e0ab - lavf 57.41.100 - avformat.h
Moved codecpar field from AVStream to the end of the struct, so that
the following private fields are in the same location as in FFmpeg 3.0 (lavf 57.25.100).
2016-06-30 - 042fb69d - lavu 55.28.100 - frame.h
Moved hw_frames_ctx field from AVFrame to the end of the struct, so that
the following private fields are in the same location as in FFmpeg 3.0 (lavu 55.17.103).
2016-06-29 - 1a751455 - lavfi 6.47.100 - avfilter.h
Fix accidental ABI breakage in AVFilterContext.
ABI was broken in 8688d3a, lavfi 6.42.100 and released as ffmpeg 3.1.
Because of this, ffmpeg and ffplay built against lavfi>=6.42.100 will not be
compatible with lavfi>=6.47.100. Potentially also affects other users of
libavfilter if they are using one of the affected fields.
-------- 8< --------- FFmpeg 3.1 was cut here -------- 8< ---------
2016-06-26 - 481f320 / 1c9e861 - lavu 55.27.100 / 55.13.0 - hwcontext.h
Add av_hwdevice_ctx_create().
2016-06-26 - b95534b / e47b8bb - lavc 57.48.101 / 57.19.1 - avcodec.h
Adjust values for JPEG 2000 profiles.
2016-06-23 - 5d75e46 / db7968b - lavf 57.40.100 / 57.7.0 - avio.h
Add AVIODataMarkerType, write_data_type, ignore_boundary_point and
avio_write_marker.
2016-06-23 - abb3cc4 / 0c4468d - lavu 55.26.100 / 55.12.0 - opt.h
Add av_stereo3d_type_name() and av_stereo3d_from_name().
2016-06-22 - 3689efe / c46db38 - lavu 55.25.100 / 55.11.0 - hwcontext_dxva2.h
Add new installed header with DXVA2-specific hwcontext definitions.
2016-04-27 - fb91871 - lavu 55.23.100 - log.h
Add a new function av_log_format_line2() which returns number of bytes
written to the target buffer.
2016-04-21 - 7fc329e - lavc 57.37.100 - avcodec.h
Add a new audio/video encoding and decoding API with decoupled input
and output -- avcodec_send_packet(), avcodec_receive_frame(),
avcodec_send_frame() and avcodec_receive_packet().
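    A hedged sketch of the receive loop this entry implies, based only on the
    function names listed above (error handling shortened):

        #include <errno.h>
        #include <libavcodec/avcodec.h>

        static int decode_packet(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
        {
            int ret = avcodec_send_packet(dec, pkt);    /* pkt == NULL starts flushing */
            if (ret < 0)
                return ret;

            for (;;) {
                ret = avcodec_receive_frame(dec, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    return 0;                           /* need more input / fully drained */
                if (ret < 0)
                    return ret;                         /* a real decoding error */

                /* ... use frame here; frame->pts is in the stream/packet time base ... */
                av_frame_unref(frame);
            }
        }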
2016-04-17 - af9cac1 / 33d1898 - lavc 57.35.100 / 57.15.0 - avcodec.h
Add a new bitstream filtering API working with AVPackets.
Deprecate the old bitstream filtering API.
2016-04-14 - 8688d3a / 07a844f - lavfi 6.42.100 / 6.3.0 - avfilter.h
Add AVFilterContext.hw_device_ctx.
2016-04-14 - 28abb21 / 551c677 - lavu 55.22.100 / 55.9.0 - hwcontext_vaapi.h
Add new installed header with VAAPI-specific hwcontext definitions.
2016-04-14 - afccfaf / b1f01e8 - lavu 55.21.100 / 55.7.0 - hwcontext.h
Add AVHWFramesConstraints and associated API.
2016-04-11 - 6f69f7a / 9200514 - lavf 57.33.100 / 57.5.0 - avformat.h
Add AVStream.codecpar, deprecate AVStream.codec.
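    A short sketch of the codecpar-based pattern that replaces direct use of the
    deprecated AVStream.codec; avcodec_parameters_to_context() belongs to the
    AVCodecParameters API noted further down this list (illustrative only):

        #include <libavcodec/avcodec.h>
        #include <libavformat/avformat.h>

        static AVCodecContext *open_decoder_for_stream(AVStream *st)
        {
            AVCodec *codec = avcodec_find_decoder(st->codecpar->codec_id);
            AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;

            if (!ctx)
                return NULL;
            if (avcodec_parameters_to_context(ctx, st->codecpar) < 0 ||
                avcodec_open2(ctx, codec, NULL) < 0) {
                avcodec_free_context(&ctx);
                return NULL;
            }
            return ctx;
        }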
2016-04-02 - e8a9b64 - lavu 55.20.100 - base64.h
Add AV_BASE64_DECODE_SIZE(x) macro.
2016-xx-xx - lavc 57.33.100 / 57.14.0 - avcodec.h
f9b1cf1 / 998e1b8 - Add AVCodecParameters and its related API.
e6053b3 / a806834 - Add av_get_audio_frame_duration2().
2016-03-11 - 6d8ab35 - lavf/lavc 57.28.101
Add requirement to bitstream filtering API that returned packets with
size == 0 and side_data_elems == 0 are to be skipped by the caller.
2016-03-04 - 9362973 - lavf 57.28.100
Add protocol blacklisting API
2016-02-28 - 4dd4d53 - lavc 57.27.101
Validate AVFrame returned by get_buffer2 to have required
planes not NULL and unused planes set to NULL as crashes
and buffer overflow are possible with certain streams if
that is not the case.
2016-02-26 - 30e7685 - lavc 57.27.100 - avcodec.h
"flags2" decoding option now allows the flag "ass_ro_flush_noop" preventing
the reset of the ASS ReadOrder field on flush. This affects the content of
AVSubtitles.rects[N]->ass when "sub_text_format" is set to "ass" (see
previous entry).
2016-02-26 - 2941282 - lavc 57.26.100 - avcodec.h
Add a "sub_text_format" subtitles decoding option allowing the values "ass"
(recommended) and "ass_with_timings" (not recommended, deprecated, default).
The default value for this option will change to "ass" at the next major
libavcodec version bump.
The current default is "ass_with_timings" for compatibility. This means that
all subtitles text decoders currently still output ASS with timings printed
as strings in the AVSubtitles.rects[N]->ass fields.
Setting "sub_text_format" to "ass" allows a better timing accuracy (ASS
timing is limited to a 1/100 time base, so this is relevant for any subtitles
format needing a bigger one), ease timing adjustments, and prevents the need
of removing the timing from the decoded string yourself. This form is also
known as "the Matroska form". The timing information (start time, duration)
can be found in the AVSubtitles fields.
2016-02-24 - 7e49cdd / 7b3214d0 - lavc 57.25.100 / 57.13.0 - avcodec.h
Add AVCodecContext.hw_frames_ctx.
2016-02-24 - 1042402 / b3dd30d - lavfi 6.36.100 / 6.2.0 - avfilter.h
avfilter.h - Add AVFilterLink.hw_frames_ctx.
buffersrc.h - Add AVBufferSrcParameters and functions for handling it.
2016-02-23 - 14f7a3d - lavc 57.25.100
Add AV_PKT_DATA_MPEGTS_STREAM_ID for exporting the MPEGTS stream ID.
2016-02-18 - 08acab8 - lavu 55.18.100 - audio_fifo.h
Add av_audio_fifo_peek_at().
2016-xx-xx - lavu 55.18.100 / 55.6.0
26abd51 / 721a4ef buffer.h - Add av_buffer_pool_init2().
1a70878 / 89923e4 hwcontext.h - Add a new installed header hwcontext.h with a new API
for handling hwaccel frames.
6992276 / ad884d1 hwcontext_cuda.h - Add a new installed header hwcontext_cuda.h with
CUDA-specific hwcontext definitions.
d779d8d / a001ce3 hwcontext_vdpau.h - Add a new installed header hwcontext_vdpau.h with
VDPAU-specific hwcontext definitions.
63c3e35 / 7bc780c pixfmt.h - Add AV_PIX_FMT_CUDA.
-------- 8< --------- FFmpeg 3.0 was cut here -------- 8< ---------
2016-02-10 - bc9a596 / 9f61abc - lavf 57.25.100 / 57.3.0 - avformat.h
Add AVFormatContext.opaque, io_open and io_close, allowing custom IO
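    A sketch of what a custom opener might look like; the io_open callback
    signature here is written from memory and should be checked against
    avformat.h before use:

        #include <libavformat/avformat.h>

        /* Hypothetical example: log every URL that lavf opens on our behalf. */
        static int log_io_open(AVFormatContext *s, AVIOContext **pb,
                               const char *url, int flags, AVDictionary **options)
        {
            av_log(s, AV_LOG_INFO, "opening %s\n", url);
            return avio_open2(pb, url, flags, &s->interrupt_callback, options);
        }

        /* usage, before avformat_open_input() or before writing the header:
         *     fmt_ctx->io_open = log_io_open; */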
2016-02-01 - 1dba837 - lavf 57.24.100 - avformat.h, avio.h
Add protocol_whitelist to AVFormatContext, AVIOContext
2016-01-31 - 66e9d2f - lavu 55.17.100 - frame.h
Add AV_FRAME_DATA_GOP_TIMECODE for exporting MPEG1/2 GOP timecodes.
2016-01-01 - 5e8b053 / 2c68113 - lavc 57.21.100 / 57.12.0 - avcodec.h
Add AVCodecDescriptor.profiles and avcodec_profile_name().
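    A one-line illustration of the new helper, assuming the usual H.264 profile
    constant from avcodec.h:

        #include <stdio.h>
        #include <libavcodec/avcodec.h>

        static void print_h264_high_profile_name(void)
        {
            const char *name = avcodec_profile_name(AV_CODEC_ID_H264, FF_PROFILE_H264_HIGH);
            printf("profile: %s\n", name ? name : "unknown");
        }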
2015-12-28 - 1f9139b - lavf 57.21.100 - avformat.h
Add automatic bitstream filtering; add av_apply_bitstream_filters()
2015-12-22 - 39a09e9 - lavfi 6.21.101 - avfilter.h
Deprecate avfilter_link_set_closed().
Applications are not supposed to mess with links,
they should close the sinks.
2015-12-17 - lavc 57.18.100 / 57.11.0 - avcodec.h dirac.h
xxxxxxx - Add av_packet_add_side_data().
xxxxxxx - Add AVCodecContext.coded_side_data.
xxxxxxx - Add AVCPBProperties API.
xxxxxxx - Add a new public header dirac.h containing
av_dirac_parse_sequence_header()
2015-12-11 - 676a93f - lavf 57.20.100 - avformat.h
Add av_program_add_stream_index()
2015-11-29 - 93fb4a4 - lavc 57.16.101 - avcodec.h
Deprecate rtp_callback without replacement, i.e. it won't be possible to
get image slices before the full frame is encoded any more. The libavformat
rtpenc muxer can still be used for RFC-2190 packetization.
2015-11-22 - fe20e34 - lavc 57.16.100 - avcodec.h
Add AV_PKT_DATA_FALLBACK_TRACK for making fallback associations between
streams.
2015-11-22 - ad317c9 - lavf 57.19.100 - avformat.h
Add av_stream_new_side_data().
2015-11-22 - e12f403 - lavu 55.8.100 - xtea.h
Add av_xtea_le_init and av_xtea_le_crypt
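    A sketch of the little-endian variants; the argument layout is assumed to
    mirror av_xtea_init()/av_xtea_crypt() and should be checked against xtea.h:

        #include <stdint.h>
        #include <libavutil/xtea.h>

        static void xtea_le_encrypt_one_block(const uint8_t key[16], uint8_t block[8])
        {
            AVXTEA ctx;

            av_xtea_le_init(&ctx, key);
            /* one 8-byte block, in place, no IV (ECB), forward direction */
            av_xtea_le_crypt(&ctx, block, block, 1, NULL, 0);
        }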
2015-11-18 - lavu 55.7.100 - mem.h
Add av_fast_mallocz()
2015-10-29 - lavc 57.12.100 / 57.8.0 - avcodec.h
xxxxxx - Deprecate av_free_packet(). Use av_packet_unref() as replacement,
it resets the packet in a more consistent way.
xxxxxx - Deprecate av_dup_packet(), it is a no-op for most cases.
Use av_packet_ref() to make a non-refcounted AVPacket refcounted.
xxxxxx - Add av_packet_alloc(), av_packet_clone(), av_packet_free().
They match the AVFrame functions with the same name.
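    A sketch of the replacement patterns this entry describes: av_packet_unref()
    instead of av_free_packet(), av_packet_ref() instead of av_dup_packet(), plus
    the new alloc/free pair (illustrative only):

        #include <errno.h>
        #include <libavcodec/avcodec.h>

        static int packet_lifecycle_demo(const AVPacket *src)
        {
            AVPacket *pkt = av_packet_alloc();
            int ret;

            if (!pkt)
                return AVERROR(ENOMEM);

            ret = av_packet_ref(pkt, src);   /* refcounted copy instead of av_dup_packet() */
            if (ret < 0) {
                av_packet_free(&pkt);
                return ret;
            }

            /* ... use pkt ... */

            av_packet_unref(pkt);            /* drop the payload; replaces av_free_packet() */
            av_packet_free(&pkt);            /* free the AVPacket struct itself */
            return 0;
        }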
2015-10-27 - 1e477a9 - lavu 55.5.100 - cpu.h
Add AV_CPU_FLAG_AESNI.
2015-10-22 - ee573b4 / a17a766 - lavc 57.9.100 / 57.5.0 - avcodec.h
Add data and linesize array to AVSubtitleRect, to be used instead of
the ones from the embedded AVPicture.
2015-10-22 - 866a417 / dc923bc - lavc 57.8.100 / 57.0.0 - qsv.h
Add an API for allocating opaque surfaces.
2015-10-15 - 2c2d162 - lavf 57.4.100
Remove the latm demuxer that was a duplicate of the loas demuxer.
2015-10-14 - b994788 / 11c5f43 - lavu 55.4.100 / 55.2.0 - dict.h
Change return type of av_dict_copy() from void to int, so that a proper
error code can be reported.
2015-09-29 - b01891a / 948f3c1 - lavc 57.3.100 / 57.2.0 - avcodec.h
Change type of AVPacket.duration from int to int64_t.
2015-09-17 - 7c46f24 / e3d4784 - lavc 57.3.100 / 57.2.0 - d3d11va.h
Add av_d3d11va_alloc_context(). This function must from now on be used for
allocating AVD3D11VAContext.
2015-09-15 - lavf 57.2.100 - avformat.h
probesize and max_analyze_duration switched to 64bit, both
are only accessible through AVOptions
2015-09-15 - lavf 57.1.100 - avformat.h
bit_rate was changed to 64bit, make sure you update any
printf() or other type sensitive code
2015-09-15 - lavc 57.2.100 - avcodec.h
bit_rate/rc_max_rate/rc_min_rate were changed to 64bit, make sure you update
any printf() or other type sensitive code
2015-09-07 - lavu 55.0.100 / 55.0.0
c734b34 / b8b5d82 - Change type of AVPixFmtDescriptor.flags from uint8_t to uint64_t.
f53569a / 6b3ef7f - Change type of AVComponentDescriptor fields from uint16_t to int
and drop bit packing.
151aa2e / 2268db2 - Add step, offset, and depth to AVComponentDescriptor to replace
the deprecated step_minus1, offset_plus1, and depth_minus1.
-------- 8< --------- FFmpeg 2.8 was cut here -------- 8< ---------
2015-08-27 - 1dd854e1 - lavc 56.58.100 - vaapi.h
@@ -1266,7 +983,7 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
* base -- is now stored in AVBufferRef
* reference, type, buffer_hints -- are unnecessary in the new API
* hwaccel_picture_private, owner, thread_opaque -- should not
have been accessed from outside of lavc
have been acessed from outside of lavc
* qscale_table, qstride, qscale_type, mbskip_table, motion_val,
mb_type, dct_coeff, ref_index -- mpegvideo-specific tables,
which are not exported anymore.
@@ -1303,14 +1020,15 @@ lavd 54.4.100 / 54.0.0, lavfi 3.5.0
Add avresample_set_channel_mapping() for input channel reordering,
duplication, and silencing.
2012-12-29 - lavu 52.13.100 / 52.3.0 - avstring.h
2ce43b3 / d8fd06c - Add av_basename() and av_dirname().
e13d5e9 / c1a02e8 - Add av_pix_fmt_get_chroma_sub_sample and deprecate
avcodec_get_chroma_sub_sample.
2012-12-29 - 2ce43b3 / d8fd06c - lavu 52.13.100 / 52.3.0 - avstring.h
Add av_basename() and av_dirname().
2012-11-11 - 03b0787 / 5980f5d - lavu 52.6.100 / 52.2.0 - audioconvert.h
Rename audioconvert.h to channel_layout.h. audioconvert.h is now deprecated.
2012-11-05 - 7d26be6 / dfde8a3 - lavu 52.5.100 / 52.1.0 - intmath.h
Add av_ctz() for trailing zero bit count
2012-10-21 - e3a91c5 / a893655 - lavu 51.77.100 / 51.45.0 - error.h
Add AVERROR_EXPERIMENTAL

File diff suppressed because it is too large


@@ -38,13 +38,13 @@ DOCS = $(DOCS-yes)
DOC_EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
DOC_EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
@@ -124,12 +124,11 @@ $(DOCS) doc/doxy/html: | doc/
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples
DOXY_INPUT = $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c)
DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) config.mak
DOXY_INPUT = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))
doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT)
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXYGEN) $(DOXY_INPUT)
install-doc: install-html install-man


@@ -18,7 +18,7 @@ comma-separated list of filters, whose parameters follow the filter
name after a '='.
@example
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1:opt2=str2][,filter2] OUTPUT
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
@end example
Below is a description of the currently available bitstream filters,
@@ -67,10 +67,6 @@ the header stored in extradata to the key packets:
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
@end example
@section dca_core
Extract DCA core from DTS-HD streams.
@section h264_mp4toannexb
Convert an H.264 bitstream from length prefixed mode to start code


@@ -9,7 +9,7 @@ V
DBG
Preprocess x86 external assembler files to a .dbg.asm file in the object
directory, which then gets compiled. Helps in developing those assembler
directory, which then gets compiled. Helps developping those assembler
files.
DESTDIR
@@ -25,10 +25,10 @@ all
Default target, builds all the libraries and the executables.
fate
Run the fate test suite, note that you must have installed it.
Run the fate test suite, note you must have installed it
fate-list
List all fate/regression test targets.
Will list all fate/regression test targets
install
Install headers, libraries and programs.
@@ -39,23 +39,26 @@ examples
libavformat/output-example
Build the libavformat basic example.
libavcodec/api-example
Build the libavcodec basic example.
libswscale/swscale-test
Build the swscale self-test (useful also as an example).
Build the swscale self-test (useful also as example).
config
Reconfigure the project with the current configuration.
Reconfigure the project with current configuration.
Useful standard make commands:
make -t <target>
Touch all files that otherwise would be built, this is useful to reduce
unneeded rebuilding when changing headers, but note that you must force rebuilds
Touch all files that otherwise would be build, this is useful to reduce
unneeded rebuilding when changing headers, but note you must force rebuilds
of files that actually need it by hand then.
make -j<num>
Rebuild with multiple jobs at the same time. Faster on multi processor systems.
rebuild with multiple jobs at the same time. Faster on multi processor systems
make -k
Continue build in case of errors, this is useful for the regression tests
sometimes but note that it will still not run all reg tests.
continue build in case of errors, this is useful for the regression tests
sometimes but note it will still not run all reg tests.


@@ -129,7 +129,7 @@ should be @code{1 / frame_rate} and timestamp increments should be
identically 1.
@item g @var{integer} (@emph{encoding,video})
Set the group of picture (GOP) size. Default value is 12.
Set the group of picture size. Default value is 12.
@item ar @var{integer} (@emph{decoding/encoding,audio})
Set audio sampling rate (in Hz).
@@ -257,7 +257,7 @@ Specify how strictly to follow the standards.
Possible values:
@table @samp
@item very
strictly conform to an older more strict version of the spec or reference software
strictly conform to a older more strict version of the spec or reference software
@item strict
strictly conform to all the things in the spec no matter what consequences
@item normal
@@ -456,9 +456,6 @@ Possible values:
@item aspect @var{rational number} (@emph{encoding,video})
Set sample aspect ratio.
@item sar @var{rational number} (@emph{encoding,video})
Set sample aspect ratio. Alias to @var{aspect}.
@item debug @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
Print specific debug info.
@@ -820,17 +817,13 @@ for codecs that support it. See also @file{doc/examples/export_mvs.c}.
Deprecated, use mpegvideo private options instead.
@item threads @var{integer} (@emph{decoding/encoding,video})
Set the number of threads to be used, in case the selected codec
implementation supports multi-threading.
Possible values:
@table @samp
@item auto, 0
automatically select the number of threads to set
@item auto
detect a good number of threads
@end table
Default value is @samp{auto}.
@item me_threshold @var{integer} (@emph{encoding,video})
Set motion estimation threshold.
@@ -1049,93 +1042,9 @@ Possible values:
@item rc_max_vbv_use @var{float} (@emph{encoding,video})
@item rc_min_vbv_use @var{float} (@emph{encoding,video})
@item ticks_per_frame @var{integer} (@emph{decoding/encoding,audio,video})
@item color_primaries @var{integer} (@emph{decoding/encoding,video})
Possible values:
@table @samp
@item bt709
BT.709
@item bt470m
BT.470 M
@item bt470bg
BT.470 BG
@item smpte170m
SMPTE 170 M
@item smpte240m
SMPTE 240 M
@item film
Film
@item bt2020
BT.2020
@item smpte428_1
SMPTE ST 428-1
@item smpte431
SMPTE 431-2
@item smpte432
SMPTE 432-1
@end table
@item color_trc @var{integer} (@emph{decoding/encoding,video})
Possible values:
@table @samp
@item bt709
BT.709
@item gamma22
BT.470 M
@item gamma28
BT.470 BG
@item smpte170m
SMPTE 170 M
@item smpte240m
SMPTE 240 M
@item linear
Linear
@item log
Log
@item log_sqrt
Log square root
@item iec61966_2_4
IEC 61966-2-4
@item bt1361
BT.1361
@item iec61966_2_1
IEC 61966-2-1
@item bt2020_10bit
BT.2020 - 10 bit
@item bt2020_12bit
BT.2020 - 12 bit
@item smpte2084
SMPTE ST 2084
@item smpte428_1
SMPTE ST 428-1
@item arib-std-b67
ARIB STD-B67
@end table
@item colorspace @var{integer} (@emph{decoding/encoding,video})
Possible values:
@table @samp
@item rgb
RGB
@item bt709
BT.709
@item fcc
FCC
@item bt470bg
BT.470 BG
@item smpte170m
SMPTE 170 M
@item smpte240m
SMPTE 240 M
@item ycocg
YCOCG
@item bt2020_ncl
BT.2020 NCL
@item bt2020_cl
BT.2020 CL
@item smpte2085
SMPTE 2085
@end table
@item color_range @var{integer} (@emph{decoding/encoding,video})
If used as input parameter, it serves as a hint to the decoder, which
@@ -1226,7 +1135,7 @@ Set to 1 to disable processing alpha (transparency). This works like the
instead of alpha. Default is 0.
@item codec_whitelist @var{list} (@emph{input})
"," separated list of allowed decoders. By default all are allowed.
"," separated List of allowed decoders. By default all are allowed.
@item dump_separator @var{string} (@emph{input})
Separator used to separate the fields printed on the command line about the


present between the subtitle lines because of double-sized teletext characters.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in
milliseconds. Default value is 30000 which is 30 seconds.
miliseconds. Default value is 30000 which is 30 seconds.
@item txt_transparent
Force transparent background of the generated teletext bitmaps. Default value
is 0 which means an opaque background.
@item txt_opacity
Sets the opacity (0-255) of the teletext background. If
@option{txt_transparent} is not set, it only affects characters between a start
box and an end box, typically subtitles. Default value is 0 if
@option{txt_transparent} is set, 255 otherwise.
is 0 which means an opaque (black) background.
@end table
@c man end SUBTILES DECODERS


@@ -72,7 +72,7 @@ Do not try to resynchronize by looking for a certain optional start code.
Virtual concatenation script demuxer.
This demuxer reads a list of files and other directives from a text file and
demuxes them one after the other, as if all their packets had been muxed
demuxes them one after the other, as if all their packet had been muxed
together.
The timestamps in the files are adjusted so that the first file starts at 0
@@ -104,10 +104,10 @@ All subsequent file-related directives apply to that file.
@item @code{ffconcat version 1.0}
Identify the script type and version. It also sets the @option{safe} option
to 1 if it was -1.
to 1 if it was to its default -1.
To make FFmpeg recognize the format automatically, this directive must
appear exactly as is (no extra space or byte-order-mark) on the very first
appears exactly as is (no extra space or byte-order-mark) on the very first
line of the script.
@item @code{duration @var{dur}}
@@ -192,9 +192,7 @@ component.
If set to 0, any file name is accepted.
The default is 1.
-1 is equivalent to 1 if the format was automatically
The default is -1, it is equivalent to 1 if the format was automatically
probed and 0 otherwise.
@item auto_convert
@@ -206,43 +204,8 @@ Currently, the only conversion is adding the h264_mp4toannexb bitstream
filter to H.264 streams in MP4 format. This is necessary in particular if
there are resolution changes.
@item segment_time_metadata
If set to 1, every packet will contain the @var{lavf.concat.start_time} and the
@var{lavf.concat.duration} packet metadata values which are the start_time and
the duration of the respective file segments in the concatenated output
expressed in microseconds. The duration metadata is only set if it is known
based on the concat file.
The default is 0.
@end table
@subsection Examples
@itemize
@item
Use absolute filenames and include some comments:
@example
# my first filename
file /mnt/share/file-1.wav
# my second filename including whitespace
file '/mnt/share/file 2.wav'
# my third filename including whitespace plus single quote
file '/mnt/share/file 3'\''.wav'
@end example
@item
Allow for input format auto-probing, use safe filenames and set the duration of
the first file:
@example
ffconcat version 1.0
file file-1.wav
duration 20.0
file subdir/file-2.wav
@end example
@end itemize
@section flv
Adobe Flash Video Format demuxer.
@@ -254,6 +217,31 @@ This demuxer is used to demux FLV files and RTMP network streams.
Allocate the streams according to the onMetaData array content.
@end table
@section libgme
The Game Music Emu library is a collection of video game music file emulators.
See @url{http://code.google.com/p/game-music-emu/} for more information.
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.
For very large files, the @option{max_size} option may have to be adjusted.
@section libquvi
Play media from Internet services using the quvi project.
The demuxer accepts a @option{format} option to request a specific quality. It
is by default set to @var{best}.
See @url{http://quvi.sourceforge.net/} for more information.
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
enabled.
@section gif
Animated GIF demuxer.
@@ -428,63 +416,6 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize
@section libgme
The Game Music Emu library is a collection of video game music file emulators.
See @url{http://code.google.com/p/game-music-emu/} for more information.
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.
For very large files, the @option{max_size} option may have to be adjusted.
@section libopenmpt
libopenmpt based module demuxer
See @url{https://lib.openmpt.org/libopenmpt/} for more information.
Some files have multiple subsongs (tracks); this can be set with the @option{subsong}
option.
It accepts the following options:
@table @option
@item subsong
Set the subsong index. This can be either 'all', 'auto', or the index of the
subsong. Subsong indexes start at 0. The default is 'auto'.
The default value is to let libopenmpt choose.
@item layout
Set the channel layout. Valid values are 1, 2, and 4 channel layouts.
The default value is STEREO.
@item sample_rate
Set the sample rate for libopenmpt to output.
Range is from 1000 to INT_MAX. The default value is 48000.
@end table
@section mov/mp4/3gp/QuickTime
QuickTime / MP4 demuxer.
This demuxer accepts the following options:
@table @option
@item enable_drefs
Enable loading of external tracks, disabled by default.
Enabling this can theoretically leak information in some use cases.
@item use_absolute_path
Allows loading of external tracks via absolute paths, disabled by default.
Enabling this poses a security risk. It should only be enabled if the source
is known to be non malicious.
@end table
@section mpegts
MPEG-2 transport stream demuxer.
@@ -511,21 +442,6 @@ to 1 (-1 means automatic setting, 1 means enabled, 0 means
disabled). Default value is -1.
@end table
@section mpjpeg
MJPEG encapsulated in multi-part MIME demuxer.
This demuxer allows reading of MJPEG, where each frame is represented as a part of
multipart/x-mixed-replace stream.
@table @option
@item strict_mime_boundary
Default implementation applies a relaxed standard to multi-part MIME boundary detection,
to prevent regression with numerous existing endpoints not generating a proper MIME
MJPEG stream. Turning this option on by setting it to 1 will result in a stricter check
of the boundary value.
@end table
@section rawvideo
Raw video demuxer.


@@ -28,14 +28,14 @@ this document.
For more detailed legal information about the use of FFmpeg in
external programs read the @file{LICENSE} file in the source tree and
consult @url{https://ffmpeg.org/legal.html}.
consult @url{http://ffmpeg.org/legal.html}.
@section Contributing
There are 3 ways by which code gets into FFmpeg.
There are 3 ways by which code gets into ffmpeg.
@itemize @bullet
@item Submitting patches to the main developer mailing list.
See @ref{Submitting patches} for details.
@item Submitting Patches to the main developer mailing list
see @ref{Submitting patches} for details.
@item Directly committing changes to the main tree.
@item Committing changes to a git clone, for example on github.com or
gitorious.org. And asking us to merge these changes.
@@ -65,9 +65,6 @@ rejected by the git repository.
@item
You should try to limit your code lines to 80 characters; however, do so if
and only if this improves readability.
@item
K&R coding style is used.
@end itemize
The presentation is one inspired by 'indent -i4 -kr -nut'.
@@ -127,10 +124,10 @@ the @samp{inline} keyword;
@samp{//} comments;
@item
designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
@item
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
compound literals (@samp{x = (struct s) @{ 17, 23 @};})
@end itemize
These features are supported by all compilers we care about, so we will not
@@ -159,7 +156,7 @@ GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
All names should be composed with underscores (_), not CamelCase. For example,
@samp{avfilter_get_video_buffer} is an acceptable function name and
@samp{AVFilterGetVideo} is not. The exception from this are type names, like
for example structs and enums; they should always be in CamelCase.
for example structs and enums; they should always be in the CamelCase
There are the following conventions for naming variables and functions:
@@ -246,8 +243,8 @@ For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@section Development Policy
@subsection Patches/Committing
@subheading Licenses for patches must be compatible with FFmpeg.
@enumerate
@item
Contributions should be licensed under the
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
including an "or any later version" clause, or, if you prefer
@@ -260,15 +257,15 @@ preferred.
If you add a new file, give it a proper license header. Do not copy and
paste it from a random place, use an existing file as template.
@subheading You must not commit code which breaks FFmpeg!
This means unfinished code which is enabled and breaks compilation,
or compiles but does not work/breaks the regression tests. Code which
is unfinished but disabled may be permitted under some circumstances, like
missing samples or an implementation with a small subset of features.
Always check the mailing list for any reviewers with issues and test
FATE before you push.
@item
You must not commit code which breaks FFmpeg! (Meaning unfinished but
enabled code which breaks compilation or compiles but does not work or
breaks the regression tests)
You can commit unfinished stuff (for testing etc), but it must be disabled
(#ifdef etc) by default so it does not interfere with other developers'
work.
@subheading Keep the main commit message short with an extended description below.
@item
The commit message should have a short first line in the form of
a @samp{topic: short description} as a header, separated by a newline
from the body consisting of an explanation of why the change is necessary.
@@ -276,24 +273,30 @@ If the commit fixes a known bug on the bug tracker, the commit message
should include its bug ID. Referring to the issue on the bug tracker does
not exempt you from writing an excerpt of the bug in the commit message.
@subheading Testing must be adequate but not excessive.
If it works for you, others, and passes FATE then it should be OK to commit
it, provided it fits the other committing criteria. You should not worry about
over-testing things. If your code has problems (portability, triggers
compiler bugs, unusual environment etc) they will be reported and eventually
fixed.
@item
You do not have to over-test things. If it works for you, and you think it
should work for others, then commit. If your code has problems
(portability, triggers compiler bugs, unusual environment etc) they will be
reported and eventually fixed.
@subheading Do not commit unrelated changes together.
They should be split into self-contained pieces. Also do not forget
that if part B depends on part A, but A does not depend on B, then A can
and should be committed first and separate from B. Keeping changes well
split into self-contained parts makes reviewing and understanding them on
the commit log mailing list easier. This also helps in case of debugging
later on.
@item
Do not commit unrelated changes together, split them into self-contained
pieces. Also do not forget that if part B depends on part A, but A does not
depend on B, then A can and should be committed first and separate from B.
Keeping changes well split into self-contained parts makes reviewing and
understanding them on the commit log mailing list easier. This also helps
in case of debugging later on.
Also if you have doubts about splitting or not splitting, do not hesitate to
ask/discuss it on the developer mailing list.
@subheading Ask before you change the build system (configure, etc).
@item
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove functionality from the code. Just improve!
Note: Redundant code can be removed.
@item
Do not commit changes to the build system (Makefiles, configure script)
which change behavior, defaults etc, without asking first. The same
applies to compiler warning fixes, trivial looking fixes and to code
@@ -302,7 +305,7 @@ the way we do. Send your changes as patches to the ffmpeg-devel mailing
list, and if the code maintainers say OK, you may commit. This does not
apply to files you wrote and/or maintain.
@subheading Cosmetic changes should be kept in separate patches.
@item
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes, such commits will be rejected and removed. Every
developer has his own indentation style, you should not change it. Of course
@@ -316,7 +319,7 @@ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
then either do NOT change the indentation of the inner part within (do not
move it to the right)! or do so in a separate commit
@subheading Commit messages should always be filled out properly.
@item
Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
@@ -328,31 +331,47 @@ area changed: Short 1 line description
details describing what and why and giving references.
@end example
@subheading Credit the author of the patch.
@item
Make sure the author of the commit is set correctly. (see git commit --author)
If you apply a patch, send an
answer to ffmpeg-devel (or wherever you got the patch from) saying that
you applied the patch.
@subheading Complex patches should refer to discussion surrounding them.
@item
When applying patches that have been discussed (at length) on the mailing
list, reference the thread in the log message.
@subheading Always wait long enough before pushing changes
@item
Do NOT commit to code actively maintained by others without permission.
Send a patch to ffmpeg-devel. If no one answers within a reasonable
time-frame (12h for build failures and security fixes, 3 days small changes,
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
timeframe (12h for build failures and security fixes, 3 days small changes,
1 week for big patches) then commit your patch if you think it is OK.
Also note, the maintainer can simply ask for more time to review!
@subsection Code
@subheading API/ABI changes should be discussed before they are made.
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove widely used functionality or features (redundant code can be removed).
@item
Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
are sent there and reviewed by all the other developers. Bugs and possible
improvements or general questions regarding commits are discussed there. We
expect you to react if problems with your code are uncovered.
@subheading Remember to check if you need to bump versions for libav*.
Depending on the change, you may need to change the version integer.
@item
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
maintainer(s) will review and commit your stuff.
@item
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.
@item
Never write to unallocated memory, never write over the end of arrays,
always check values read from some untrusted source before using them
as array index or other risky things.
@item
Remember to check if you need to bump versions for the specific libav*
parts (libavutil, libavcodec, libavformat) you are changing. You need
to change the version integer.
Incrementing the first component means no backward compatibility to
previous versions (e.g. removal of a function from the public API).
Incrementing the second component means backward compatible change
@@ -362,7 +381,7 @@ Incrementing the third component means a noteworthy binary compatible
change (e.g. encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.
@subheading Warnings for correct code may be disabled if there is no other option.
@item
Compiler warnings indicate potential bugs or code with bad style. If a type of
warning always points to correct and clean code, that warning should
be disabled, not the code changed.
@@ -371,65 +390,16 @@ If it is a bug, the bug has to be fixed. If it is not, the code should
be changed to not generate a warning unless that causes a slowdown
or obfuscates the code.
@subheading Check untrusted input properly.
Never write to unallocated memory, never write over the end of arrays,
always check values read from some untrusted source before using them
as array index or other risky things.
@subsection Documentation/Other
@subheading Subscribe to the ffmpeg-cvslog mailing list.
It is important to do this as the diffs of all commits are sent there and
reviewed by all the other developers. Bugs and possible improvements or
general questions regarding commits are discussed there. We expect you to
react if problems with your code are uncovered.
@subheading Keep the documentation up to date.
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
maintainer(s) will review and commit your stuff.
@subheading Important discussions should be accessible to all.
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.
@subheading Check your entries in MAINTAINERS.
@item
Make sure that no parts of the codebase that you maintain are missing from the
@file{MAINTAINERS} file. If something that you want to maintain is missing add it with
your name after it.
If at some point you no longer want to maintain some code, then please help in
finding a new maintainer and also don't forget to update the @file{MAINTAINERS} file.
If at some point you no longer want to maintain some code, then please help
finding a new maintainer and also don't forget updating the @file{MAINTAINERS} file.
@end enumerate
We think our rules are not too hard. If you have comments, contact us.
@section Code of conduct
Be friendly and respectful towards others and third parties.
Treat others the way you yourself want to be treated.
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
Different opinions and interpretations help the project.
Looking at issues from a different perspective assists development.
Do not assume malice for things that can be attributed to incompetence. Even if
it is malice, it's rarely good to start with that as initial assumption.
Stay friendly even if someone acts contrarily. Everyone has a bad day
once in a while.
If you yourself have a bad day or are angry then try to take a break and reply
once you are calm and without anger if you have to.
Try to help other team members and cooperate if you can.
The goal of software development is to create technical excellence, not for any
individual to be better and "win" against the others. Large software projects
are only possible and successful through teamwork.
If someone struggles do not put them down. Give them a helping hand
instead and point them in the right direction.
Finally, keep in mind the immortal words of Bill and Ted,
"Be excellent to each other."
@anchor{Submitting patches}
@section Submitting patches
@@ -437,7 +407,7 @@ First, read the @ref{Coding Rules} above if you did not yet, in particular
the rules regarding patch submission.
When you submit your patch, please use @code{git format-patch} or
@code{git send-email}. We cannot read other diffs :-).
@code{git send-email}. We cannot read other diffs :-)
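For instance (an illustrative sketch only, assuming your work sits on top of
origin/master and that @command{git send-email} is already configured for your
mail setup), a submission could look like:

@example
# export the commits on top of origin/master as patch files
git format-patch -o outgoing/ origin/master
# mail them to the development list
git send-email --to=ffmpeg-devel@@ffmpeg.org outgoing/*.patch
@end example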
Also please do not submit a patch which contains several unrelated changes.
Split it into separate, self-contained pieces. This does not mean splitting
@@ -460,15 +430,11 @@ Also please if you send several patches, send each patch as a separate mail,
do not attach several unrelated patches to the same mail.
Patches should be posted to the
@uref{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
mailing list. Use @code{git send-email} when possible since it will properly
send patches without requiring extra care. If you cannot, then send patches
as base64-encoded attachments, so your patch is not trashed during
transmission. Also ensure the correct mime type is used
(text/x-diff or text/x-patch or at least text/plain) and that only one
patch is inline or attached per mail.
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show up, its mime type
was likely wrong.
transmission.
Your patch will be reviewed on the mailing list. You will likely be asked
to make some changes and are expected to send in an improved version that
@@ -579,7 +545,7 @@ amounts of memory when fed damaged data.
@item
Did you test your decoder or demuxer against sample files?
Samples may be obtained at @url{https://samples.ffmpeg.org}.
Samples may be obtained at @url{http://samples.ffmpeg.org}.
@item
Does the patch not mix functional and cosmetic changes?
@@ -601,7 +567,7 @@ If the patch fixes a bug, did you provide a verbose analysis of the bug?
If the patch fixes a bug, did you provide enough information, including
a sample, so the bug can be reproduced and the fix can be verified?
Note please do not attach samples >100k to mails but rather provide a
URL, you can upload to ftp://upload.ffmpeg.org.
URL, you can upload to ftp://upload.ffmpeg.org
@item
Did you provide a verbose summary about what the patch does change?
@@ -630,10 +596,10 @@ Lines with similar content should be aligned vertically when doing so
improves readability.
@item
Consider adding a regression test for your code.
Consider to add a regression test for your code.
@item
If you added YASM code please check that things still work with --disable-yasm.
If you added YASM code please check that things still work with --disable-yasm
@item
Make sure you check the return values of functions and return appropriate
@@ -698,6 +664,7 @@ Once you have a working fate test and fate sample, provide in the commit
message or introductory message for the patch series that you post to
the ffmpeg-devel mailing list, a direct link to download the sample media.
@subsection Visualizing Test Coverage
The FFmpeg build system allows visualizing the test coverage in an easy
@@ -745,7 +712,7 @@ FFmpeg maintains a set of @strong{release branches}, which are the
recommended deliverable for system integrators and distributors (such as
Linux distributions, etc.). At regular times, a @strong{release
manager} prepares, tests and publishes tarballs on the
@url{https://ffmpeg.org} website.
@url{http://ffmpeg.org} website.
There are two kinds of releases:
@@ -824,7 +791,7 @@ Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
supplementing files that contain @code{gpg} signatures
@item
Publish the tarballs at @url{https://ffmpeg.org/releases}. Create and
Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
push an annotated tag in the form @code{nX}, with @code{X}
containing the version number.
@@ -836,7 +803,7 @@ with a news entry for the website.
Publish the news entry.
@item
Send an announcement to the mailing list.
Send announcement to the mailing list.
@end enumerate
@bye


@@ -1,21 +1,21 @@
#!/bin/sh
OUT_DIR="${1}"
SRC_PATH="${1}"
DOXYFILE="${2}"
DOXYGEN="${3}"
shift 3
if [ -e "VERSION" ]; then
VERSION=`cat "VERSION"`
if [ -e "$SRC_PATH/VERSION" ]; then
VERSION=`cat "$SRC_PATH/VERSION"`
else
VERSION=`git describe`
VERSION=`cd "$SRC_PATH"; git describe`
fi
$DOXYGEN - <<EOF
@INCLUDE = ${DOXYFILE}
INPUT = $@
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
HTML_TIMESTAMP = NO
PROJECT_NUMBER = $VERSION
OUTPUT_DIRECTORY = $OUT_DIR
EOF

doc/doxy/.gitignore

@@ -1 +0,0 @@
/html/


@@ -30,120 +30,81 @@ follows.
Advanced Audio Coding (AAC) encoder.
This encoder is the default AAC encoder, natively implemented into FFmpeg. Its
quality is on par or better than libfdk_aac at the default bitrate of 128kbps.
This encoder also implements more options, profiles and samplerates than
other encoders (with only the AAC-HE profile pending to be implemented) so this
encoder has become the default and is the recommended choice.
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
low complexity (AAC-LC) profile is supported. To use this encoder, you must set
@option{strict} option to @samp{experimental} or lower.
As this encoder is experimental, unexpected behavior may exist from time to
time. For a more stable AAC encoder, see @ref{libvo-aacenc}. However, be warned
that some users report it produces worse quality.
@c todo @ref{libaacplus}
See also @ref{libfdk-aac-enc,,libfdk_aac} and @ref{libfaac}.
@subsection Options
@table @option
@item b
Set bit rate in bits/s. Setting this automatically activates constant bit rate
(CBR) mode. If this option is unspecified it is set to 128kbps.
(CBR) mode.
@item q
Set quality for variable bit rate (VBR) mode. This option is valid only using
the @command{ffmpeg} command-line tool. For library interface users, use
@option{global_quality}.
@item cutoff
Set cutoff frequency. If unspecified, the encoder will dynamically adjust the
cutoff to improve clarity on low bitrates.
@item stereo_mode
Set stereo encoding mode. Possible values:
@table @samp
@item auto
Automatically selected by the encoder.
@item ms_off
Disable middle/side encoding. This is the default.
@item ms_force
Force middle/side encoding.
@end table
@item aac_coder
Set AAC encoder coding method. Possible values:
@table @samp
@item faac
FAAC-inspired method.
This method is a simplified reimplementation of the method used in FAAC, which
sets thresholds proportional to the band energies, and then decreases all the
thresholds with quantizer steps to find the appropriate quantization with
distortion below threshold band by band.
The quality of this method is comparable to the two loop searching method
described below, but slightly better and slower.
@item anmr
Average noise to mask ratio (ANMR) trellis-based solution.
This has a theoretic best quality out of all the coding methods, but at the
cost of the slowest speed.
@item twoloop
Two loop searching (TLS) method.
This method first sets quantizers depending on band thresholds and then tries
to find an optimal combination by adding or subtracting a specific value from
all quantizers and adjusting some individual quantizer a little. Will tune
itself based on whether @option{aac_is}, @option{aac_ms} and @option{aac_pns}
are enabled.
This is the default choice for a coder.
all quantizers and adjusting some individual quantizer a little.
@item anmr
Average noise to mask ratio (ANMR) trellis-based solution.
This is an experimental coder which currently produces a lower quality, is more
unstable and is slower than the default twoloop coder but has potential.
Currently has no support for the @option{aac_is} or @option{aac_pns} options.
Not currently recommended.
This method produces similar quality with the FAAC method and is the default.
@item fast
Constant quantizer method.
This method sets a constant quantizer for all bands. This is the fastest of all
the methods and has no rate control or support for @option{aac_is} or
@option{aac_pns}.
Not recommended.
the methods, yet produces the worst quality.
@end table
@item aac_ms
Sets mid/side coding mode. The default value of "auto" will automatically use
M/S with bands which will benefit from such coding. Can be forced for all bands
using the value "enable", which is mainly useful for debugging or disabled using
"disable".
@item aac_is
Sets intensity stereo coding tool usage. By default, it's enabled and will
automatically toggle IS for similar pairs of stereo bands if it's beneficial.
Can be disabled for debugging by setting the value to "disable".
@item aac_pns
Uses perceptual noise substitution to replace low entropy high frequency bands
with imperceptible white noise during the decoding process. By default, it's
enabled, but can be disabled for debugging purposes by using "disable".
@item aac_tns
Enables the use of a multitap FIR filter which spans through the high frequency
bands to hide quantization noise during the encoding process and is reverted
by the decoder. As well as decreasing unpleasant artifacts in the high range
this also reduces the entropy in the high bands and allows for more bits to
be used by the mid-low bands. By default it's enabled but can be disabled for
debugging by setting the option to "disable".
@item aac_ltp
Enables the use of the long term prediction extension which increases coding
efficiency in very low bandwidth situations such as encoding of voice or
solo piano music by extending constant harmonic peaks in bands throughout
frames. This option is implied by profile:a aac_ltp and is incompatible with
aac_pred. Use in conjunction with @option{-ar} to decrease the samplerate.
@item aac_pred
Enables the use of a more traditional style of prediction where the spectral
coefficients transmitted are replaced by the difference of the current
coefficients minus the previous "predicted" coefficients. In theory and sometimes
in practice this can improve quality for low to mid bitrate audio.
This option implies the aac_main profile and is incompatible with aac_ltp.
@item profile
Sets the encoding profile, possible values:
@table @samp
@item aac_low
The default, AAC "Low-complexity" profile. It is the most compatible and produces
decent quality.
@item mpeg2_aac_low
Equivalent to @code{-profile:a aac_low -aac_pns 0}. PNS was introduced with the
MPEG4 specifications.
@item aac_ltp
Long term prediction profile, is enabled by and will enable the @option{aac_ltp}
option. Introduced in MPEG4.
@item aac_main
Main-type prediction profile, is enabled by and will enable the @option{aac_pred}
option. Introduced in MPEG2.
@end table
If this option is unspecified it is set to @samp{aac_low}.
@end table
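As an illustration only (file names are placeholders, and depending on the
FFmpeg version you may additionally need @code{-strict experimental}), a
simple constant bit rate encode with this encoder could look like:

@example
ffmpeg -i input.wav -c:a aac -b:a 128k output.m4a
@end example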
@section ac3 and ac3_fixed
@@ -612,6 +573,113 @@ and slightly improves compression.
@end table
@anchor{libfaac}
@section libfaac
libfaac AAC (Advanced Audio Coding) encoder wrapper.
Requires the presence of the libfaac headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libfaac --enable-nonfree}.
This encoder is considered to be of higher quality with respect to the
@ref{aacenc,,the native experimental FFmpeg AAC encoder}.
For more information see the libfaac project at
@url{http://www.audiocoding.com/faac.html/}.
@subsection Options
The following shared FFmpeg codec options are recognized.
The following options are supported by the libfaac wrapper. The
@command{faac}-equivalent of the options are listed in parentheses.
@table @option
@item b (@emph{-b})
Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
is not explicitly specified, it is automatically set to a suitable
value depending on the selected profile. @command{faac} bitrate is
expressed in kilobits/s.
Note that libfaac does not support CBR (Constant Bit Rate) but only
ABR (Average Bit Rate).
If VBR mode is enabled this option is ignored.
@item ar (@emph{-R})
Set audio sampling rate (in Hz).
@item ac (@emph{-c})
Set the number of audio channels.
@item cutoff (@emph{-C})
Set cutoff frequency. If not specified (or explicitly set to 0) it
will use a value automatically computed by the library. Default value
is 0.
@item profile
Set audio profile.
The following profiles are recognized:
@table @samp
@item aac_main
Main AAC (Main)
@item aac_low
Low Complexity AAC (LC)
@item aac_ssr
Scalable Sample Rate (SSR)
@item aac_ltp
Long Term Prediction (LTP)
@end table
If not specified it is set to @samp{aac_low}.
@item flags +qscale
Set constant quality VBR (Variable Bit Rate) mode.
@item global_quality
Set quality in VBR mode as an integer number of lambda units.
Only relevant when VBR mode is enabled with @code{flags +qscale}. The
value is converted to QP units by dividing it by @code{FF_QP2LAMBDA},
and used to set the quality value used by libfaac. A reasonable range
for the option value in QP units is [10-500], the higher the value the
higher the quality.
@item q (@emph{-q})
Enable VBR mode when set to a non-negative value, and set constant
quality value as a double floating point value in QP units.
The value sets the quality value used by libfaac. A reasonable range
for the option value is [10-500], the higher the value the higher the
quality.
This option is valid only using the @command{ffmpeg} command-line
tool. For library interface users, use @option{global_quality}.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
container:
@example
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
@end example
@item
Use @command{ffmpeg} to convert an audio file to VBR AAC, using the
LTP AAC profile:
@example
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
@end example
@end itemize
@anchor{libfdk-aac-enc}
@section libfdk_aac
@@ -626,10 +694,9 @@ configuration. You need to explicitly configure the build with
so if you allow the use of GPL, you should configure with
@code{--enable-gpl --enable-nonfree --enable-libfdk-aac}.
This encoder is considered to produce output on par or worse at 128kbps to the
@ref{aacenc,,the native FFmpeg AAC encoder} but can often produce better
sounding audio at identical or lower bitrates and has support for the
AAC-HE profiles.
This encoder is considered to be of higher quality with respect to
both @ref{aacenc,,the native experimental FFmpeg AAC encoder} and
@ref{libfaac}.
VBR encoding, enabled through the @option{vbr} or @option{flags
+qscale} options, is experimental and only works with some
@@ -971,6 +1038,31 @@ Set MPEG audio original flag when set to 1. The default value is 0
@end table
@anchor{libvo-aacenc}
@section libvo-aacenc
VisualOn AAC encoder.
Requires the presence of the libvo-aacenc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libvo-aacenc --enable-version3}.
This encoder is considered to be worse than the
@ref{aacenc,,native experimental FFmpeg AAC encoder}, according to
multiple sources.
@subsection Options
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
channels. It is also CBR-only.
@table @option
@item b
Set bit rate in bits/s.
@end table
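A minimal illustrative command (assuming a build configured with
@code{--enable-libvo-aacenc --enable-version3}; file names are placeholders)
could be:

@example
ffmpeg -i input.wav -c:a libvo_aacenc -b:a 128k output.m4a
@end example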
@section libvo-amrwbenc
VisualOn Adaptive Multi-Rate Wideband encoder.
@@ -1033,7 +1125,7 @@ kilobits/s.
@item vbr (@emph{vbr}, @emph{hard-cbr}, and @emph{cvbr})
Set VBR mode. The FFmpeg @option{vbr} option has the following
valid arguments, with the @command{opusenc} equivalent options
valid arguments, with the their @command{opusenc} equivalent options
in parentheses:
@table @samp
@@ -1080,17 +1172,6 @@ following: 4000, 6000, 8000, 12000, or 20000, corresponding to
narrowband, mediumband, wideband, super wideband, and fullband
respectively. The default is 0 (cutoff disabled).
@item mapping_family (@emph{mapping_family})
Set channel mapping family to be used by the encoder. The default value of -1
uses mapping family 0 for mono and stereo inputs, and mapping family 1
otherwise. The default also disables the surround masking and LFE bandwidth
optimizations in libopus, and requires that the input contains 8 channels or
fewer.
Other values include 0 for mono and stereo, 1 for surround sound with masking
and LFE bandwidth optimizations, and 255 for independent streams with an
unspecified channel layout.
@end table
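For illustration (file names are placeholders, and the build must include
libopus), a typical VBR encode could look like:

@example
ffmpeg -i input.wav -c:a libopus -b:a 96k -vbr on output.opus
@end example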
@section libvorbis
@@ -1261,72 +1342,6 @@ disabled
A description of some of the currently available video encoders
follows.
@section libopenh264
Cisco libopenh264 H.264/MPEG-4 AVC encoder wrapper.
This encoder requires the presence of the libopenh264 headers and
library during configuration. You need to explicitly configure the
build with @code{--enable-libopenh264}. The library is detected using
@command{pkg-config}.
For more information about the library see
@url{http://www.openh264.org}.
@subsection Options
The following FFmpeg global options affect the configurations of the
libopenh264 encoder.
@table @option
@item b
Set the bitrate (as a number of bits per second).
@item g
Set the GOP size.
@item maxrate
Set the max bitrate (as a number of bits per second).
@item flags +global_header
Set global header in the bitstream.
@item slices
Set the number of slices, used in parallelized encoding. Default value
is 0. This is only used when @option{slice_mode} is set to
@samp{fixed}.
@item slice_mode
Set slice mode. Can assume one of the following possible values:
@table @samp
@item fixed
a fixed number of slices
@item rowmb
one slice per row of macroblocks
@item auto
automatic number of slices according to number of threads
@item dyn
dynamic slicing
@end table
Default value is @samp{auto}.
@item loopfilter
Enable loop filter, if set to 1 (automatically enabled). To disable
set a value of 0.
@item profile
Set profile restrictions. If set to the value of @samp{main} enable
CABAC (set the @code{SEncParamExt.iEntropyCodingModeFlag} flag to 1).
@item max_nal_size
Set maximum NAL size in bytes.
@item allow_skip_frames
Allow skipping frames to hit the target bitrate if set to 1.
@end table
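As a sketch only (assuming a build configured with
@code{--enable-libopenh264}; file names and values are placeholders), the
options above could be used as follows:

@example
ffmpeg -i input.mp4 -c:v libopenh264 -b:v 2M -maxrate 2M -g 60 output.mp4
@end example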
@section jpeg2000
The native jpeg 2000 encoder is lossy by default, the @code{-q:v}
@@ -1491,12 +1506,6 @@ follows: @code{(minrate * 100 / bitrate)}.
@item crf (@emph{end-usage=cq}, @emph{cq-level})
@item tune (@emph{tune})
@table @samp
@item psnr (@emph{psnr})
@item ssim (@emph{ssim})
@end table
@item quality, deadline (@emph{deadline})
@table @samp
@item best
@@ -1773,7 +1782,7 @@ Enable CAVLC and disable CABAC. It generates the same effect as
@end table
@item cmp
Set full pixel motion estimation comparison algorithm. Possible values:
Set full pixel motion estimation comparation algorithm. Possible values:
@table @samp
@item chroma
@@ -2002,10 +2011,6 @@ For example to specify libx264 encoding options with @command{ffmpeg}:
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
@end example
@item a53cc @var{boolean}
Import closed captions (which must be in ATSC-compatible format) into the output.
Only the mpeg2 and h264 decoders provide these. Default is 1 (on).
@item x264-params (N.A.)
Override the x264 configuration using a :-separated list of key=value
parameters.
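For example (illustrative only; the keys shown are plain x264 parameters and
the file names are placeholders):

@example
ffmpeg -i input.mp4 -c:v libx264 -x264-params level=30:bframes=0:weightp=0 out.mkv
@end example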
@@ -2334,165 +2339,15 @@ configuration. You need to explicitly configure the build with
@item b
Set target video bitrate in bit/s and enable rate control.
@item threads
Set number of encoding threads.
@item kvazaar-params
Set kvazaar parameters as a list of @var{name}=@var{value} pairs separated
by commas (,). See kvazaar documentation for a list of options.
@end table
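A minimal illustrative invocation (assuming a build configured with
@code{--enable-libkvazaar}; file names are placeholders) could be:

@example
ffmpeg -i input.mp4 -c:v libkvazaar -b:v 3M output.mkv
@end example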
@section QSV encoders
The family of Intel QuickSync Video encoders (MPEG-2, H.264 and HEVC)
The ratecontrol method is selected as follows:
@itemize @bullet
@item
When @option{global_quality} is specified, a quality-based mode is used.
Specifically this means either
@itemize @minus
@item
@var{CQP} - constant quantizer scale, when the @option{qscale} codec flag is
also set (the @option{-qscale} ffmpeg option).
@item
@var{LA_ICQ} - intelligent constant quality with lookahead, when the
@option{look_ahead} option is also set.
@item
@var{ICQ} -- intelligent constant quality otherwise.
@end itemize
@item
Otherwise, a bitrate-based mode is used. For all of those, you should specify at
least the desired average bitrate with the @option{b} option.
@itemize @minus
@item
@var{LA} - VBR with lookahead, when the @option{look_ahead} option is specified.
@item
@var{VCM} - video conferencing mode, when the @option{vcm} option is set.
@item
@var{CBR} - constant bitrate, when @option{maxrate} is specified and equal to
the average bitrate.
@item
@var{VBR} - variable bitrate, when @option{maxrate} is specified, but is higher
than the average bitrate.
@item
@var{AVBR} - average VBR mode, when @option{maxrate} is not specified. This mode
is further configured by the @option{avbr_accuracy} and
@option{avbr_convergence} options.
@end itemize
@end itemize
Note that depending on your system, a different mode than the one you specified
may be selected by the encoder. Set the verbosity level to @var{verbose} or
higher to see the actual settings used by the QSV runtime.
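To illustrate the selection described above (a sketch only; file names and
values are placeholders for your setup), @var{CBR} is picked when
@option{maxrate} equals the average bitrate, while specifying only
@option{global_quality} selects @var{ICQ}:

@example
# CBR: maxrate equal to the average bitrate
ffmpeg -i input.mp4 -c:v h264_qsv -b:v 4M -maxrate 4M output.mp4
# ICQ: quality-based mode
ffmpeg -i input.mp4 -c:v h264_qsv -global_quality 25 output.mp4
@end example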
Additional libavcodec global options are mapped to MSDK options as follows:
@itemize
@item
@option{g/gop_size} -> @option{GopPicSize}
@item
@option{bf/max_b_frames}+1 -> @option{GopRefDist}
@item
@option{rc_init_occupancy/rc_initial_buffer_occupancy} ->
@option{InitialDelayInKB}
@item
@option{slices} -> @option{NumSlice}
@item
@option{refs} -> @option{NumRefFrame}
@item
@option{b_strategy/b_frame_strategy} -> @option{BRefType}
@item
@option{cgop/CLOSED_GOP} codec flag -> @option{GopOptFlag}
@item
For the @var{CQP} mode, the @option{i_qfactor/i_qoffset} and
@option{b_qfactor/b_qoffset} set the difference between @var{QPP} and @var{QPI},
and @var{QPP} and @var{QPB} respectively.
@item
Setting the @option{coder} option to the value @var{vlc} will make the H.264
encoder use CAVLC instead of CABAC.
@end itemize
@section vc2
SMPTE VC-2 (previously BBC Dirac Pro). This codec was primarily aimed at
professional broadcasting, but since it supports yuv420, yuv422 and yuv444 at
8 (limited range or full range), 10 or 12 bits, it is also suitable for
other tasks which require low overhead and low compression (like screen
recording).
@subsection Options
@table @option
@item b
Sets target video bitrate. Usually that's around 1:6 of the uncompressed
video bitrate (e.g. for 1920x1080 50fps yuv422p10 that's around 400Mbps). Higher
values (close to the uncompressed bitrate) turn on lossless compression mode.
@item field_order
Enables field coding when set (e.g. to tt - top field first) for interlaced
inputs. Should increase compression with interlaced content as it splits the
fields and encodes each separately.
@item wavelet_depth
Sets the total amount of wavelet transforms to apply, between 1 and 5 (default).
Lower values reduce compression and quality. Less capable decoders may not be
able to handle values of @option{wavelet_depth} over 3.
@item wavelet_type
Sets the transform type. Currently only @var{5_3} (LeGall) and @var{9_7}
(Deslauriers-Dubuc)
are implemented, with 9_7 being the one with better compression and thus
is the default.
@item slice_width
@item slice_height
Sets the slice size for each slice. Larger values result in better compression.
For compatibility with other more limited decoders use @option{slice_width} of
32 and @option{slice_height} of 8.
@item tolerance
Sets the undershoot tolerance of the rate control system in percent. This is
to prevent an expensive search from being run.
@item qm
Sets the quantization matrix preset to use by default or when @option{wavelet_depth}
is set to 5
@itemize @minus
@item
@var{default}
Uses the default quantization matrix from the specifications, extended with
values for the fifth level. This provides a good balance between keeping detail
and omitting artifacts.
@item
@var{flat}
Use a completely zeroed out quantization matrix. This increases PSNR but might
reduce perception. Use in bogus benchmarks.
@item
@var{color}
Reduces detail but attempts to preserve color at extremely low bitrates.
@end itemize
@end table
@c man end VIDEO ENCODERS
@chapter Subtitles Encoders


@@ -76,7 +76,7 @@ EMFILE POSIX ++++++ Too many open files
EMLINK POSIX ++++++ Too many links
EMSGSIZE POSIX +++..+ Message too long
EMULTIHOP POSIX ++4... Multihop attempted
ENAMETOOLONG POSIX - ++++++ File name too long
ENAMETOOLONG POSIX - ++++++ Filen ame too long
ENAVAIL +..... No XENIX semaphores available
ENEEDAUTH .++... Need authenticator
ENETDOWN POSIX +++..+ Network is down


@@ -1,18 +0,0 @@
/avio_dir_cmd
/avio_reading
/decoding_encoding
/demuxing_decoding
/extract_mvs
/filter_audio
/filtering_audio
/filtering_video
/http_multiclient
/metadata
/muxing
/pc-uninstalled
/qsvdec
/remuxing
/resampling_audio
/scaling_video
/transcode_aac
/transcoding


@@ -25,9 +25,9 @@
* libavcodec API use example.
*
* @example decoding_encoding.c
* Note that libavcodec only handles codecs (MPEG, MPEG-4, etc...),
* not file formats (AVI, VOB, MP4, MOV, MKV, MXF, FLV, MPEG-TS, MPEG-PS, etc...).
* See library 'libavformat' for the format handling
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
* format handling
*/
#include <math.h>
@@ -211,7 +211,7 @@ static void audio_encode_example(const char *filename)
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
av_free_packet(&pkt);
}
}
@@ -225,7 +225,7 @@ static void audio_encode_example(const char *filename)
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
av_free_packet(&pkt);
}
}
fclose(f);
@@ -253,7 +253,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
printf("Decode audio file %s to %s\n", filename, outfilename);
/* find the MPEG audio decoder */
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "Codec not found\n");
@@ -356,7 +356,7 @@ static void video_encode_example(const char *filename, int codec_id)
printf("Encode video file %s\n", filename);
/* find the video encoder */
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
@@ -454,7 +454,7 @@ static void video_encode_example(const char *filename, int codec_id)
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
av_free_packet(&pkt);
}
}
@@ -471,11 +471,11 @@ static void video_encode_example(const char *filename, int codec_id)
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_packet_unref(&pkt);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real MPEG file */
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
@@ -543,12 +543,12 @@ static void video_decode_example(const char *outfilename, const char *filename)
av_init_packet(&avpkt);
/* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
printf("Decode video file %s to %s\n", filename, outfilename);
/* find the MPEG-1 video decoder */
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
if (!codec) {
fprintf(stderr, "Codec not found\n");
@@ -613,9 +613,9 @@ static void video_decode_example(const char *outfilename, const char *filename)
exit(1);
}
/* Some codecs, such as MPEG, transmit the I- and P-frame with a
/* some codecs, such as MPEG, transmit the I and P frame with a
latency of one frame. You must do the following to have a
chance to get the last frame of the video. */
chance to get the last frame of the video */
avpkt.data = NULL;
avpkt.size = 0;
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);


@@ -55,11 +55,17 @@ static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
/* Enable or disable frame reference counting. You are not supposed to support
* both paths in your application but pick the one most appropriate to your
* needs. Look for the use of refcount in this example to see what are the
* differences of API usage between them. */
static int refcount = 0;
/* The different ways of decoding and managing data memory. You are not
* supposed to support all the modes in your application but pick the one most
* appropriate to your needs. Look for the use of api_mode in this example to
* see what are the differences of API usage between them */
enum {
API_MODE_OLD = 0, /* old method, deprecated */
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
};
static int api_mode = API_MODE_OLD;
static int decode_packet(int *got_frame, int cached)
{
@@ -93,9 +99,10 @@ static int decode_packet(int *got_frame, int cached)
return -1;
}
printf("video_frame%s n:%d coded_n:%d\n",
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number);
video_frame_count++, frame->coded_picture_number,
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
@@ -138,19 +145,20 @@ static int decode_packet(int *got_frame, int cached)
}
}
/* If we use frame reference counting, we own the data and need
/* If we use the new API with reference counting, we own the data and need
* to de-reference it when we don't use it anymore */
if (*got_frame && refcount)
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
av_frame_unref(frame);
return decoded;
}
static int open_codec_context(int *stream_idx,
AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret, stream_index;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
@@ -164,31 +172,18 @@ static int open_codec_context(int *stream_idx,
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
fprintf(stderr, "Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
return ret;
}
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
if (api_mode == API_MODE_NEW_API_REF_COUNT)
av_dict_set(&opts, "refcounted_frames", "1", 0);
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
@@ -233,19 +228,28 @@ int main (int argc, char **argv)
int ret = 0, got_frame;
if (argc != 4 && argc != 5) {
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
"input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n\n"
"If the -refcount option is specified, the program use the\n"
"reference counting frame system which allows keeping a copy of\n"
"the data for longer than one decode call.\n"
"the data for longer than one decode call. If unset, it's using\n"
"the classic old method.\n"
"\n", argv[0]);
exit(1);
}
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
refcount = 1;
if (argc == 5) {
const char *mode = argv[1] + strlen("-refcount=");
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
else {
fprintf(stderr, "unknow mode '%s'\n", mode);
exit(1);
}
argv++;
}
src_filename = argv[1];
@@ -267,8 +271,9 @@ int main (int argc, char **argv)
exit(1);
}
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
@@ -290,8 +295,9 @@ int main (int argc, char **argv)
video_dst_bufsize = ret;
}
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
@@ -309,7 +315,12 @@ int main (int argc, char **argv)
goto end;
}
frame = av_frame_alloc();
/* When using the new API, you need to use the libavutil/frame.h API, while
* the classic frame management is available in libavcodec */
if (api_mode == API_MODE_OLD)
frame = avcodec_alloc_frame();
else
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
@@ -336,7 +347,7 @@ int main (int argc, char **argv)
pkt.data += ret;
pkt.size -= ret;
} while (pkt.size > 0);
av_packet_unref(&orig_pkt);
av_free_packet(&orig_pkt);
}
/* flush cached frames */
@@ -379,14 +390,17 @@ int main (int argc, char **argv)
}
end:
avcodec_free_context(&video_dec_ctx);
avcodec_free_context(&audio_dec_ctx);
avcodec_close(video_dec_ctx);
avcodec_close(audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_frame_free(&frame);
if (api_mode == API_MODE_OLD)
avcodec_free_frame(&frame);
else
av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;


@@ -167,7 +167,7 @@ int main(int argc, char **argv)
pkt.data += ret;
pkt.size -= ret;
} while (pkt.size > 0);
av_packet_unref(&orig_pkt);
av_free_packet(&orig_pkt);
}
/* flush cached frames */


@@ -33,6 +33,7 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
@@ -65,7 +66,7 @@ static int open_input_file(const char *filename)
/* select the audio stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
return ret;
}
audio_stream_index = ret;
@@ -273,10 +274,10 @@ int main(int argc, char **argv)
}
if (packet.size <= 0)
av_packet_unref(&packet0);
av_free_packet(&packet0);
} else {
/* discard non-wanted packets */
av_packet_unref(&packet0);
av_free_packet(&packet0);
}
}
end:


@@ -33,6 +33,7 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
@@ -262,7 +263,7 @@ int main(int argc, char **argv)
av_frame_unref(frame);
}
}
av_packet_unref(&packet);
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);


@@ -33,12 +33,12 @@
#include <libavutil/opt.h>
#include <unistd.h>
static void process_client(AVIOContext *client, const char *in_uri)
void process_client(AVIOContext *client, const char *in_uri)
{
AVIOContext *input = NULL;
uint8_t buf[1024];
int ret, n, reply_code;
uint8_t *resource = NULL;
char *resource = NULL;
while ((ret = avio_handshake(client)) > 0) {
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
// check for strlen(resource) is necessary, because av_opt_get()
@@ -97,11 +97,11 @@ end:
int main(int argc, char **argv)
{
av_log_set_level(AV_LOG_TRACE);
AVDictionary *options = NULL;
AVIOContext *client = NULL, *server = NULL;
const char *in_uri, *out_uri;
int ret, pid;
av_log_set_level(AV_LOG_TRACE);
if (argc < 3) {
printf("usage: %s input http://hostname[:port]\n"
"API example program to serve http to multiple clients.\n"


@@ -52,7 +52,6 @@
// a wrapper around a single output AVStream
typedef struct OutputStream {
AVStream *st;
AVCodecContext *enc;
/* pts of the next frame that will be generated */
int64_t next_pts;
@@ -105,18 +104,13 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
exit(1);
}
ost->st = avformat_new_stream(oc, NULL);
ost->st = avformat_new_stream(oc, *codec);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
ost->st->id = oc->nb_streams-1;
c = avcodec_alloc_context3(*codec);
if (!c) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
ost->enc = c;
c = ost->st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
@@ -161,7 +155,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B-frames */
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
@@ -219,7 +213,7 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
int ret;
AVDictionary *opt = NULL;
c = ost->enc;
c = ost->st->codec;
/* open it */
av_dict_copy(&opt, opt_arg, 0);
@@ -246,13 +240,6 @@ static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
c->sample_rate, nb_samples);
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
/* create resampler context */
ost->swr_ctx = swr_alloc();
if (!ost->swr_ctx) {
@@ -284,13 +271,13 @@ static AVFrame *get_audio_frame(OutputStream *ost)
int16_t *q = (int16_t*)frame->data[0];
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
for (j = 0; j <frame->nb_samples; j++) {
v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->enc->channels; i++)
for (i = 0; i < ost->st->codec->channels; i++)
*q++ = v;
ost->t += ost->tincr;
ost->tincr += ost->tincr2;
@@ -316,7 +303,7 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
int dst_nb_samples;
av_init_packet(&pkt);
c = ost->enc;
c = ost->st->codec;
frame = get_audio_frame(ost);
@@ -396,7 +383,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
int ret;
AVCodecContext *c = ost->enc;
AVCodecContext *c = ost->st->codec;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
@@ -427,13 +414,6 @@ static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, A
exit(1);
}
}
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
if (ret < 0) {
fprintf(stderr, "Could not copy the stream parameters\n");
exit(1);
}
}
/* Prepare a dummy image. */
@@ -468,10 +448,10 @@ static void fill_yuv_image(AVFrame *pict, int frame_index,
static AVFrame *get_video_frame(OutputStream *ost)
{
AVCodecContext *c = ost->enc;
AVCodecContext *c = ost->st->codec;
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, c->time_base,
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
@@ -513,25 +493,44 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
AVCodecContext *c;
AVFrame *frame;
int got_packet = 0;
AVPacket pkt = { 0 };
c = ost->enc;
c = ost->st->codec;
frame = get_video_frame(ost);
av_init_packet(&pkt);
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* a hack to avoid data copy with some raw video muxers */
AVPacket pkt;
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (!frame)
return 1;
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ost->st->index;
pkt.data = (uint8_t *)frame;
pkt.size = sizeof(AVPicture);
pkt.pts = pkt.dts = frame->pts;
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
AVPacket pkt = { 0 };
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
} else {
ret = 0;
}
}
if (ret < 0) {
@@ -544,7 +543,7 @@ static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_free_context(&ost->enc);
avcodec_close(ost->st->codec);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
@@ -565,7 +564,6 @@ int main(int argc, char **argv)
int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0;
AVDictionary *opt = NULL;
int i;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
@@ -582,9 +580,8 @@ int main(int argc, char **argv)
}
filename = argv[1];
for (i = 2; i+1 < argc; i+=2) {
if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
if (argc > 3 && !strcmp(argv[2], "-flags")) {
av_dict_set(&opt, argv[2]+1, argv[3], 0);
}
/* allocate the output media context */
@@ -642,8 +639,8 @@ int main(int argc, char **argv)
while (encode_video || encode_audio) {
/* select the stream to encode */
if (encode_video &&
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
encode_video = !write_video_frame(oc, &video_st);
} else {
encode_audio = !write_audio_frame(oc, &audio_st);


@@ -116,6 +116,15 @@ fail:
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
DecodeContext *decode = pthis;
if (decode->surfaces)
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
av_freep(&decode->surfaces);
av_freep(&decode->surface_ids);
av_freep(&decode->surface_used);
decode->nb_surfaces = 0;
return MFX_ERR_NONE;
}
@@ -135,16 +144,6 @@ static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
return MFX_ERR_NONE;
}
static void free_surfaces(DecodeContext *decode)
{
if (decode->surfaces)
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
av_freep(&decode->surfaces);
av_freep(&decode->surface_ids);
av_freep(&decode->surface_used);
decode->nb_surfaces = 0;
}
static void free_buffer(void *opaque, uint8_t *data)
{
int *used = opaque;
@@ -352,7 +351,7 @@ int main(int argc, char **argv)
for (i = 0; i < input_ctx->nb_streams; i++) {
AVStream *st = input_ctx->streams[i];
if (st->codecpar->codec_id == AV_CODEC_ID_H264 && !video_st)
if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
video_st = st;
else
st->discard = AVDISCARD_ALL;
@@ -404,16 +403,16 @@ int main(int argc, char **argv)
goto finish;
}
decoder_ctx->codec_id = AV_CODEC_ID_H264;
if (video_st->codecpar->extradata_size) {
decoder_ctx->extradata = av_mallocz(video_st->codecpar->extradata_size +
if (video_st->codec->extradata_size) {
decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
AV_INPUT_BUFFER_PADDING_SIZE);
if (!decoder_ctx->extradata) {
ret = AVERROR(ENOMEM);
goto finish;
}
memcpy(decoder_ctx->extradata, video_st->codecpar->extradata,
video_st->codecpar->extradata_size);
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
memcpy(decoder_ctx->extradata, video_st->codec->extradata,
video_st->codec->extradata_size);
decoder_ctx->extradata_size = video_st->codec->extradata_size;
}
decoder_ctx->refcounted_frames = 1;
@@ -468,12 +467,6 @@ finish:
av_frame_free(&frame);
if (decoder_ctx)
av_freep(&decoder_ctx->hwaccel_context);
avcodec_free_context(&decoder_ctx);
free_surfaces(&decode);
if (decode.mfx_session)
MFXClose(decode.mfx_session);
if (decode.va_dpy)
@@ -481,6 +474,10 @@ finish:
if (dpy)
XCloseDisplay(dpy);
if (decoder_ctx)
av_freep(&decoder_ctx->hwaccel_context);
avcodec_free_context(&decoder_ctx);
avio_close(output_ctx);
return ret;


@@ -143,7 +143,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Error muxing packet\n");
break;
}
av_packet_unref(&pkt);
av_free_packet(&pkt);
}
av_write_trailer(ofmt_ctx);


@@ -62,7 +62,6 @@ static int open_input_file(const char *filename,
AVFormatContext **input_format_context,
AVCodecContext **input_codec_context)
{
AVCodecContext *avctx;
AVCodec *input_codec;
int error;
@@ -92,39 +91,23 @@ static int open_input_file(const char *filename,
}
/** Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context);
return AVERROR_EXIT;
}
/** allocate a new decoding context */
avctx = avcodec_alloc_context3(input_codec);
if (!avctx) {
fprintf(stderr, "Could not allocate a decoding context\n");
avformat_close_input(input_format_context);
return AVERROR(ENOMEM);
}
/** initialize the stream parameters with demuxer information */
error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
if (error < 0) {
avformat_close_input(input_format_context);
avcodec_free_context(&avctx);
return error;
}
/** Open the decoder for the audio stream to use it later. */
if ((error = avcodec_open2(avctx, input_codec, NULL)) < 0) {
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
input_codec, NULL)) < 0) {
fprintf(stderr, "Could not open input codec (error '%s')\n",
get_error_text(error));
avcodec_free_context(&avctx);
avformat_close_input(input_format_context);
return error;
}
/** Save the decoder context for easier access later. */
*input_codec_context = avctx;
*input_codec_context = (*input_format_context)->streams[0]->codec;
return 0;
}
@@ -139,7 +122,6 @@ static int open_output_file(const char *filename,
AVFormatContext **output_format_context,
AVCodecContext **output_codec_context)
{
AVCodecContext *avctx = NULL;
AVIOContext *output_io_context = NULL;
AVStream *stream = NULL;
AVCodec *output_codec = NULL;
@@ -179,31 +161,27 @@ static int open_output_file(const char *filename,
}
/** Create a new audio stream in the output file container. */
if (!(stream = avformat_new_stream(*output_format_context, NULL))) {
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
fprintf(stderr, "Could not create new stream\n");
error = AVERROR(ENOMEM);
goto cleanup;
}
avctx = avcodec_alloc_context3(output_codec);
if (!avctx) {
fprintf(stderr, "Could not allocate an encoding context\n");
error = AVERROR(ENOMEM);
goto cleanup;
}
/** Save the encoder context for easier access later. */
*output_codec_context = stream->codec;
/**
* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion.
*/
avctx->channels = OUTPUT_CHANNELS;
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
avctx->sample_rate = input_codec_context->sample_rate;
avctx->sample_fmt = output_codec->sample_fmts[0];
avctx->bit_rate = OUTPUT_BIT_RATE;
(*output_codec_context)->channels = OUTPUT_CHANNELS;
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
(*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
/** Allow the use of the experimental AAC encoder */
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
(*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
/** Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate;
@@ -214,28 +192,18 @@ static int open_output_file(const char *filename,
* Mark the encoder so that it behaves accordingly.
*/
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
(*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/** Open the encoder for the audio stream to use it later. */
if ((error = avcodec_open2(avctx, output_codec, NULL)) < 0) {
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
fprintf(stderr, "Could not open output codec (error '%s')\n",
get_error_text(error));
goto cleanup;
}
error = avcodec_parameters_from_context(stream->codecpar, avctx);
if (error < 0) {
fprintf(stderr, "Could not initialize stream parameters\n");
goto cleanup;
}
/** Save the encoder context for easier access later. */
*output_codec_context = avctx;
return 0;
cleanup:
avcodec_free_context(&avctx);
avio_closep(&(*output_format_context)->pb);
avformat_free_context(*output_format_context);
*output_format_context = NULL;
@@ -364,7 +332,7 @@ static int decode_audio_frame(AVFrame *frame,
data_present, &input_packet)) < 0) {
fprintf(stderr, "Could not decode frame (error '%s')\n",
get_error_text(error));
av_packet_unref(&input_packet);
av_free_packet(&input_packet);
return error;
}
@@ -374,7 +342,7 @@ static int decode_audio_frame(AVFrame *frame,
*/
if (*finished && *data_present)
*finished = 0;
av_packet_unref(&input_packet);
av_free_packet(&input_packet);
return 0;
}
@@ -603,7 +571,7 @@ static int encode_audio_frame(AVFrame *frame,
frame, data_present)) < 0) {
fprintf(stderr, "Could not encode frame (error '%s')\n",
get_error_text(error));
av_packet_unref(&output_packet);
av_free_packet(&output_packet);
return error;
}
@@ -612,11 +580,11 @@ static int encode_audio_frame(AVFrame *frame,
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
get_error_text(error));
av_packet_unref(&output_packet);
av_free_packet(&output_packet);
return error;
}
av_packet_unref(&output_packet);
av_free_packet(&output_packet);
}
return 0;
@@ -788,13 +756,13 @@ cleanup:
av_audio_fifo_free(fifo);
swr_free(&resample_context);
if (output_codec_context)
avcodec_free_context(&output_codec_context);
avcodec_close(output_codec_context);
if (output_format_context) {
avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);
}
if (input_codec_context)
avcodec_free_context(&input_codec_context);
avcodec_close(input_codec_context);
if (input_format_context)
avformat_close_input(&input_format_context);
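The hunk above shows the two generations of decoder setup in the transcode_aac example: a separately allocated AVCodecContext on one side, the long-deprecated AVStream.codec field on the other. A minimal sketch of the allocated-context pattern, assuming an already opened AVFormatContext whose first stream is the audio stream (open_audio_decoder is an illustrative name, not taken from the example):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch only: open a decoder for stream 0 without touching the
 * deprecated AVStream.codec field. */
static int open_audio_decoder(AVFormatContext *fmt_ctx, AVCodecContext **dec_ctx)
{
    AVStream *stream = fmt_ctx->streams[0];
    AVCodec *codec   = avcodec_find_decoder(stream->codecpar->codec_id);
    AVCodecContext *avctx;
    int error;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    if (!(avctx = avcodec_alloc_context3(codec)))
        return AVERROR(ENOMEM);
    /* Copy the demuxer-provided stream parameters into the fresh context. */
    if ((error = avcodec_parameters_to_context(avctx, stream->codecpar)) < 0 ||
        (error = avcodec_open2(avctx, codec, NULL)) < 0) {
        avcodec_free_context(&avctx);
        return error;
    }
    *dec_ctx = avctx;
    return 0;
}

The encoder side is symmetric: allocate a context for the encoder, configure and open it, then export the settings to the stream with avcodec_parameters_from_context(), as the hunk above does.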


@@ -31,6 +31,7 @@
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
@@ -128,10 +129,7 @@ static int open_output_file(const char *filename)
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
if (encoder->pix_fmts)
enc_ctx->pix_fmt = encoder->pix_fmts[0];
else
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
enc_ctx->pix_fmt = encoder->pix_fmts[0];
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
} else {
@@ -539,7 +537,7 @@ int main(int argc, char **argv)
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
av_free_packet(&packet);
}
/* flush filters and encoders */
@@ -563,7 +561,7 @@ int main(int argc, char **argv)
av_write_trailer(ofmt_ctx);
end:
av_packet_unref(&packet);
av_free_packet(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
avcodec_close(ifmt_ctx->streams[i]->codec);
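Both examples also trade av_packet_unref() against the older av_free_packet(); the former is simply the non-deprecated name for releasing a packet's payload. A hedged, self-contained sketch of a demuxing loop using it (it only counts packets):

#include <stdint.h>
#include <libavformat/avformat.h>

/* Sketch: read every packet from an already opened AVFormatContext. */
static int count_packets(AVFormatContext *fmt_ctx, int64_t *count)
{
    AVPacket packet;
    int ret;

    *count = 0;
    while ((ret = av_read_frame(fmt_ctx, &packet)) >= 0) {
        (*count)++;
        av_packet_unref(&packet); /* av_free_packet() in the older API */
    }
    return ret == AVERROR_EOF ? 0 : ret;
}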


@@ -147,7 +147,7 @@ exec /usr/bin/pkg-config "$@@"
Try a @code{make distclean} in the ffmpeg source directory before the build.
If this does not help see
(@url{https://ffmpeg.org/bugreports.html}).
(@url{http://ffmpeg.org/bugreports.html}).
@section How do I encode single pictures into movies?
@@ -311,18 +311,18 @@ invoking ffmpeg with several @option{-i} options.
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called to
@emph{merge} them, and can be done using the
@url{ffmpeg-filters.html#amerge, @code{amerge}} filter.
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
@item
For audio, to play one on top of the other: this is called to @emph{mix}
them, and can be done by first merging them into a single stream and then
using the @url{ffmpeg-filters.html#pan, @code{pan}} filter to mix
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
the channels at will.
@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@end itemize
@@ -333,19 +333,19 @@ There are several solutions, depending on the exact circumstances.
@subsection Concatenating using the concat @emph{filter}
FFmpeg has a @url{ffmpeg-filters.html#concat,
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
@code{concat}} filter designed specifically for that, with examples in the
documentation. This operation is recommended if you need to re-encode.
@subsection Concatenating using the concat @emph{demuxer}
FFmpeg has a @url{ffmpeg-formats.html#concat,
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
your format doesn't support file level concatenation.
@subsection Concatenating using the concat @emph{protocol} (file level)
FFmpeg has a @url{ffmpeg-protocols.html#concat,
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.
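The concat hunks above only retarget URLs, but since the three concat mechanisms are easy to mix up, here is a hedged example of the demuxer route (file names are invented):

# contents of list.txt (one "file" directive per input to join):
#   file 'part1.mp4'
#   file 'part2.mp4'
ffmpeg -f concat -i list.txt -c copy joined.mp4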
@@ -485,7 +485,7 @@ scaling adjusts the SAR to keep the DAR constant.
If you want to stretch, or “unstretch”, the image, you need to override the
information with the
@url{ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
Do not forget to examine carefully the original video to check whether the
stretching comes from the image or from the aspect ratio information.
@@ -589,7 +589,7 @@ see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in
@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm?
see @url{https://www.ffmpeg.org/~michael/}
see @url{http://www.ffmpeg.org/~michael/}
@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?


@@ -12,7 +12,7 @@
@chapter Synopsis
ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_url}@} ... @{[@var{output_file_options}] @file{output_url}@} ...
ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_file}@} ... @{[@var{output_file_options}] @file{output_file}@} ...
@chapter Description
@c man begin DESCRIPTION
@@ -24,10 +24,10 @@ rates and resize video on the fly with a high quality polyphase filter.
@command{ffmpeg} reads from an arbitrary number of input "files" (which can be regular
files, pipes, network streams, grabbing devices, etc.), specified by the
@code{-i} option, and writes to an arbitrary number of output "files", which are
specified by a plain output url. Anything found on the command line which
cannot be interpreted as an option is considered to be an output url.
specified by a plain output filename. Anything found on the command line which
cannot be interpreted as an option is considered to be an output filename.
Each input or output url can, in principle, contain any number of streams of
Each input or output file can, in principle, contain any number of streams of
different types (video/audio/subtitle/attachment/data). The allowed number and/or
types of streams may be limited by the container format. Selecting which
streams from which inputs will go into which output is either done automatically
@@ -223,7 +223,7 @@ with the highest resolution, for audio, it is the stream with the most channels,
subtitles, it is the first subtitle stream. In the case where several streams of
the same type rate equally, the stream with the lowest index is chosen.
You can disable some of those defaults by using the @code{-vn/-an/-sn/-dn} options. For
You can disable some of those defaults by using the @code{-vn/-an/-sn} options. For
full manual control, use the @code{-map} option, which disables the defaults just
described.
@@ -243,8 +243,8 @@ Force input or output file format. The format is normally auto detected for inpu
files and guessed from the file extension for output files, so this option is not
needed in most cases.
@item -i @var{url} (@emph{input})
input file url
@item -i @var{filename} (@emph{input})
input file name
@item -y (@emph{global})
Overwrite output files without asking.
@@ -253,10 +253,6 @@ Overwrite output files without asking.
Do not overwrite output files, and exit immediately if a specified
output file already exists.
@item -stream_loop @var{number} (@emph{input})
Set number of times input stream shall be looped. Loop 0 means no loop,
loop -1 means infinite loop.
@item -c[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
@itemx -codec[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
Select an encoder (when used before an output file) or a decoder (when used
@@ -281,7 +277,7 @@ libx264, and the 138th audio, which will be encoded with libvorbis.
When used as an input option (before @code{-i}), limit the @var{duration} of
data read from the input file.
When used as an output option (before an output url), stop writing the
When used as an output option (before an output filename), stop writing the
output after its duration reaches @var{duration}.
@var{duration} must be a time duration specification,
@@ -297,9 +293,7 @@ see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1)
-to and -t are mutually exclusive and -t has priority.
@item -fs @var{limit_size} (@emph{output})
Set the file size limit, expressed in bytes. No further chunk of bytes is written
after the limit is exceeded. The size of the output file is slightly more than the
requested file size.
Set the file size limit, expressed in bytes.
@item -ss @var{position} (@emph{input/output})
When used as an input option (before @code{-i}), seeks in this input file to
@@ -310,7 +304,7 @@ extra segment between the seek point and @var{position} will be decoded and
discarded. When doing stream copy or when @option{-noaccurate_seek} is used, it
will be preserved.
When used as an output option (before an output url), decodes but discards
When used as an output option (before an output filename), decodes but discards
input until the timestamps reach @var{position}.
@var{position} must be a time duration specification,
@@ -341,8 +335,8 @@ see @ref{date syntax,,the Date section in the ffmpeg-utils(1) manual,ffmpeg-util
Set a metadata key/value pair.
An optional @var{metadata_specifier} may be given to set metadata
on streams, chapters or programs. See @code{-map_metadata}
documentation for details.
on streams or chapters. See @code{-map_metadata} documentation for
details.
This option overrides metadata set with @code{-map_metadata}. It is
also possible to delete metadata by using an empty value.
@@ -357,11 +351,6 @@ To set the language of the first audio stream:
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
@end example
@item -program [title=@var{title}:][program_num=@var{program_num}:]st=@var{stream}[:st=@var{stream}...] (@emph{output})
Creates a program with the specified @var{title}, @var{program_num} and adds the specified
@var{stream}(s) to it.
@item -target @var{type} (@emph{output})
Specify target file type (@code{vcd}, @code{svcd}, @code{dvd}, @code{dv},
@code{dv50}). @var{type} may be prefixed with @code{pal-}, @code{ntsc-} or
@@ -682,16 +671,6 @@ Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
@item dxva2
Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
@item qsv
Use the Intel QuickSync Video acceleration for video transcoding.
Unlike most other values, this option does not enable accelerated decoding (that
is used automatically whenever a qsv decoder is selected), but accelerated
transcoding, without copying the frames into the system memory.
For it to work, both the decoder and the encoder must support QSV acceleration
and no filters must be used.
@end table
This option has no effect if the selected hwaccel is not available or not
@@ -718,20 +697,6 @@ is not specified, the value of the @var{DISPLAY} environment variable is used
@item dxva2
For DXVA2, this option should contain the number of the display adapter to use.
If this option is not specified, the default adapter is used.
@item qsv
For QSV, this option corresponds to the values of MFX_IMPL_* . Allowed values
are:
@table @option
@item auto
@item sw
@item hw
@item auto_any
@item hw_any
@item hw2
@item hw3
@item hw4
@end table
@end table
@item -hwaccels
@@ -1008,7 +973,7 @@ Dump each input packet to stderr.
@item -hex (@emph{global})
When dumping packets, also dump the payload.
@item -re (@emph{input})
Read input at native frame rate. Mainly used to simulate a grab device,
Read input at native frame rate. Mainly used to simulate a grab device.
or live input stream (e.g. when reading from a file). Should not be used
with actual grab devices or live input streams (where it can cause packet
loss).
@@ -1129,7 +1094,7 @@ may be reassigned to a different value.
For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
an output mpegts file:
@example
ffmpeg -i inurl -streamid 0:33 -streamid 1:36 out.ts
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
@end example
@item -bsf[:@var{stream_specifier}] @var{bitstream_filters} (@emph{output,per-stream})
@@ -1268,26 +1233,9 @@ Discard all frames excepts keyframes.
Discard all frames.
@end table
@item -abort_on @var{flags} (@emph{global})
Stop and abort on various conditions. The following flags are available:
@table @option
@item empty_output
No packets were passed to the muxer, the output is empty.
@end table
@item -xerror (@emph{global})
Stop and exit on error
@item -max_muxing_queue_size @var{packets} (@emph{output,per-stream})
When transcoding audio and/or video streams, ffmpeg will not begin writing into
the output until it has one packet for each such stream. While waiting for that
to happen, packets for other streams are buffered. This option sets the size of
this buffer, in packets, for the matching output stream.
The default value of this option should be high enough for most uses, so only
touch this option if you are sure that you need it.
@end table
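Among the option hunks above are -stream_loop and -abort_on, which exist only in newer FFmpeg releases. A hedged example combining them (file names are invented):

# Loop the input stream twice; abort instead of leaving an empty output
# if no packet ever reaches the muxer.
ffmpeg -abort_on empty_output -stream_loop 2 -i input.mp4 -c copy looped.mp4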
As a special exception, you can use a bitmap subtitle stream as input: it


@@ -12,7 +12,7 @@
@chapter Synopsis
ffplay [@var{options}] [@file{input_url}]
ffplay [@var{options}] [@file{input_file}]
@chapter Description
@c man begin DESCRIPTION
@@ -106,8 +106,8 @@ the input audio.
Use the option "-filters" to show all the available filters (including
sources and sinks).
@item -i @var{input_url}
Read @var{input_url}.
@item -i @var{input_file}
Read @var{input_file}.
@end table
@section Advanced options
@@ -197,15 +197,6 @@ Toggle full screen.
@item p, SPC
Pause.
@item m
Toggle mute.
@item 9, 0
Decrease and increase volume respectively.
@item /, *
Decrease and increase volume respectively.
@item a
Cycle audio channel in the current program.
@@ -238,12 +229,9 @@ Seek to the previous/next chapter.
or if there are no chapters
Seek backward/forward 10 minutes.
@item right mouse click
@item mouse click
Seek to percentage in file corresponding to fraction of width.
@item left mouse double-click
Toggle full screen.
@end table
@c man end


@@ -12,7 +12,7 @@
@chapter Synopsis
ffprobe [@var{options}] [@file{input_url}]
ffprobe [@var{options}] [@file{input_file}]
@chapter Description
@c man begin DESCRIPTION
@@ -24,8 +24,8 @@ For example it can be used to check the format of the container used
by a multimedia stream and the format and type of each media stream
contained in it.
If a url is specified in input, ffprobe will try to open and
probe the url content. If the url cannot be opened or recognized as
If a filename is specified in input, ffprobe will try to open and
probe the file content. If the file cannot be opened or recognized as
a multimedia file, a positive exit code is returned.
ffprobe may be employed both as a standalone application or in
@@ -245,7 +245,7 @@ continue reading from that.
Each interval is specified by two optional parts, separated by "%".
The first part specifies the interval start position. It is
interpreted as an absolute position, or as a relative offset from the
interpreted as an abolute position, or as a relative offset from the
current position if it is preceded by the "+" character. If this first
part is not specified, no seeking will be performed when reading this
interval.
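The -read_intervals grammar sketched above is easiest to see with an example (hedged, based on the option's documented interval syntax):

# No start part before "%", so no seek is performed; "+20" ends the
# interval 20 seconds after reading starts.
ffprobe -read_intervals "%+20" -show_packets input.mkv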
@@ -332,8 +332,8 @@ with name "PIXEL_FORMAT".
Force bitexact output, useful to produce output which is not dependent
on the specific build.
@item -i @var{input_url}
Read @var{input_url}.
@item -i @var{input_file}
Read @var{input_file}.
@end table
@c man end


@@ -129,7 +129,6 @@
<xsd:complexType name="frameSideDataType">
<xsd:attribute name="side_data_type" type="xsd:string"/>
<xsd:attribute name="side_data_size" type="xsd:int" />
<xsd:attribute name="timecode" type="xsd:string"/>
</xsd:complexType>
<xsd:complexType name="subtitleType">
@@ -166,7 +165,6 @@
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
</xsd:complexType>
<xsd:complexType name="streamType">
@@ -202,7 +200,6 @@
<xsd:attribute name="color_transfer" type="xsd:string"/>
<xsd:attribute name="color_primaries" type="xsd:string"/>
<xsd:attribute name="chroma_location" type="xsd:string"/>
<xsd:attribute name="field_order" type="xsd:string"/>
<xsd:attribute name="timecode" type="xsd:string"/>
<xsd:attribute name="refs" type="xsd:int"/>


@@ -317,7 +317,7 @@ StartSendOnKey
#AVPresetVideo baseline
#AVOptionVideo flags +global_header
#
#AudioCodec aac
#AudioCodec libfaac
#AudioBitRate 32
#AudioChannels 2
#AudioSampleRate 22050


@@ -176,10 +176,10 @@ loglevel will be used. If multiple loglevel parameters are given, using
Show nothing at all; be silent.
@item panic, 0
Only show fatal errors which could lead the process to crash, such as
an assertion failure. This is not currently used for anything.
and assert failure. This is not currently used for anything.
@item fatal, 8
Only show fatal errors. These are errors after which the process absolutely
cannot continue.
cannot continue after.
@item error, 16
Show all errors, including ones which can be recovered from.
@item warning, 24
@@ -195,13 +195,13 @@ Show everything, including debugging information.
@item trace, 56
@end table
By default the program logs to stderr. If coloring is supported by the
By default the program logs to stderr, if coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
The use of the environment variable @env{NO_COLOR} is deprecated and
will be dropped in a future FFmpeg version.
will be dropped in a following FFmpeg version.
@item -report
Dump full command line and console output to a file named


@@ -232,8 +232,7 @@ Frame scheduling
one of its inputs, repeatedly until at least one frame has been pushed.
Return values:
if request_frame could produce a frame, or at least make progress
towards producing a frame, it should return 0;
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.
@@ -245,18 +244,20 @@ Frame scheduling
push_one_frame();
return 0;
}
input = input_where_a_frame_is_most_needed();
ret = ff_request_frame(input);
if (ret == AVERROR_EOF) {
process_eof_on_input();
} else if (ret < 0) {
return ret;
while (!frame_pushed) {
input = input_where_a_frame_is_most_needed();
ret = ff_request_frame(input);
if (ret == AVERROR_EOF) {
process_eof_on_input();
} else if (ret < 0) {
return ret;
}
}
return 0;
Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them to its input, and as a reaction,
the filter_frame method possibly will be called and do the work.
the filter_frame method will be called and do the work.
Legacy API
==========

File diff suppressed because it is too large.


@@ -61,10 +61,6 @@ Reduce the latency introduced by optional buffering
Only write platform-, build- and time-independent data.
This ensures that file and data checksums are reproducible and match between
platforms. Its primary use is for regression testing.
@item shortest
Stop muxing at the end of the shortest stream.
It may be needed to increase max_interleave_delta to avoid flushing the longer
streams before EOF.
@end table
@item seek2any @var{integer} (@emph{input})
@@ -151,7 +147,7 @@ a packet for each stream, regardless of the maximum timestamp
difference between the buffered packets.
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
Use wallclock as timestamps if set to 1. Default is 0.
Use wallclock as timestamps.
@item avoid_negative_ts @var{integer} (@emph{output})
@@ -199,7 +195,7 @@ delayed bt the time duration specified in @var{offset}. Default value
is @code{0} (meaning that no offset is applied).
@item format_whitelist @var{list} (@emph{input})
"," separated list of allowed demuxers. By default all are allowed.
"," separated List of allowed demuxers. By default all are allowed.
@item dump_separator @var{string} (@emph{input})
Separator used to separate the fields printed on the command line about the

View File

@@ -53,6 +53,14 @@ instructions for installing the libraries.
Then pass @code{--enable-libopencore-amrnb} and/or
@code{--enable-libopencore-amrwb} to configure to enable them.
@subsection VisualOn AAC encoder library
FFmpeg can make use of the VisualOn AACenc library for AAC encoding.
Go to @url{http://sourceforge.net/projects/opencore-amr/} and follow the
instructions for installing the library.
Then pass @code{--enable-libvo-aacenc} to configure to enable it.
@subsection VisualOn AMR-WB encoder library
FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding.
@@ -103,19 +111,12 @@ enable it.
@section OpenH264
FFmpeg can make use of the OpenH264 library for H.264 encoding and decoding.
FFmpeg can make use of the OpenH264 library for H.264 encoding.
Go to @url{http://www.openh264.org/} and follow the instructions for
installing the library. Then pass @code{--enable-libopenh264} to configure to
enable it.
For decoding, this library is much more limited than the built-in decoder
in libavcodec; currently, this library lacks support for decoding B-frames
and some other main/high profile features. (It currently only supports
constrained baseline profile and CABAC.) Using it is mostly useful for
testing and for taking advantage of Cisco's patent portfolio license
(@url{http://www.openh264.org/BINARY_LICENSE.txt}).
@section x264
FFmpeg can make use of the x264 library for H.264 encoding.
@@ -172,6 +173,12 @@ Go to @url{http://sourceforge.net/projects/zapping/} and follow the instructions
installing the library. Then pass @code{--enable-libzvbi} to configure to
enable it.
@float NOTE
libzvbi is licensed under the GNU General Public License Version 2 or later
(see @url{http://www.gnu.org/licenses/old-licenses/gpl-2.0.html} for details),
you must upgrade FFmpeg's license to GPL in order to use it.
@end float
@section AviSynth
FFmpeg can read AviSynth scripts as input. To enable support, pass
@@ -216,7 +223,6 @@ library:
@multitable @columnfractions .4 .1 .1 .4
@item Name @tab Encoding @tab Decoding @tab Comments
@item 3dostr @tab @tab X
@item 4xm @tab @tab X
@tab 4X Technologies format, used in some games.
@item 8088flex TMV @tab @tab X
@@ -235,14 +241,10 @@ library:
@tab Multimedia format used in game Heart Of Darkness.
@item Apple HTTP Live Streaming @tab @tab X
@item Artworx Data Format @tab @tab X
@item Interplay ACM @tab @tab X
@tab Audio only format used in some Interplay games.
@item ADP @tab @tab X
@tab Audio format used on the Nintendo Gamecube.
@item AFC @tab @tab X
@tab Audio format used on the Nintendo Gamecube.
@item ADS/SS2 @tab @tab X
@tab Audio format used on the PS2.
@item APNG @tab X @tab X
@item ASF @tab X @tab X
@item AST @tab X @tab X
@@ -283,7 +285,6 @@ library:
@item CD+G @tab @tab X
@tab Video format used by CD+G karaoke disks
@item Phantom Cine @tab @tab X
@item Cineform HD @tab @tab X
@item Commodore CDXL @tab @tab X
@tab Amiga CD video format
@item Core Audio Format @tab X @tab X
@@ -295,7 +296,6 @@ library:
@tab Audio format used in some games by CRYO Interactive Entertainment.
@item D-Cinema audio @tab X @tab X
@item Deluxe Paint Animation @tab @tab X
@item DCSTR @tab @tab X
@item DFA @tab @tab X
@tab This format is used in Chronomaster game
@item DirectDraw Surface @tab @tab X
@@ -322,8 +322,6 @@ library:
@item G.723.1 @tab X @tab X
@item G.729 BIT @tab X @tab X
@item G.729 raw @tab @tab X
@item GENH @tab @tab X
@tab Audio format for various games.
@item GIF Animation @tab X @tab X
@item GXF @tab X @tab X
@tab General eXchange Format SMPTE 360M, used by Thomson Grass Valley
@@ -346,7 +344,6 @@ library:
@tab A format generated by IndigoVision 8000 video server.
@item IVF (On2) @tab X @tab X
@tab A format used by libvpx
@item Internet Video Recording @tab @tab X
@item IRCAM @tab X @tab X
@item LATM @tab X @tab X
@item LMLM4 @tab @tab X
@@ -383,8 +380,6 @@ library:
@tab also known as DVB Transport Stream
@item MPEG-4 @tab X @tab X
@tab MPEG-4 is a variant of QuickTime.
@item MSF @tab @tab X
@tab Audio format used on the PS3.
@item Mirillis FIC video @tab @tab X
@tab No cursor rendering.
@item MIME multipart JPEG @tab X @tab
@@ -468,7 +463,6 @@ library:
@item Redirector @tab @tab X
@item RedSpark @tab @tab X
@item Renderware TeXture Dictionary @tab @tab X
@item Resolume DXV @tab @tab X
@item RL2 @tab @tab X
@tab Audio and video format used in some games by Entertainment Software Partners.
@item RPL/ARMovie @tab @tab X
@@ -501,22 +495,16 @@ library:
@item SoX native format @tab X @tab X
@item SUN AU format @tab X @tab X
@item SUP raw PGS subtitles @tab @tab X
@item SVAG @tab @tab X
@tab Audio format used in Konami PS2 games.
@item TDSC @tab @tab X
@item Text files @tab @tab X
@item THP @tab @tab X
@tab Used on the Nintendo GameCube.
@item Tiertex Limited SEQ @tab @tab X
@tab Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
@item True Audio @tab X @tab X
@item VAG @tab @tab X
@tab Audio format used in many Sony PS2 games.
@item True Audio @tab @tab X
@item VC-1 test bitstream @tab X @tab X
@item Vidvox Hap @tab X @tab X
@item Vivo @tab @tab X
@item VPK @tab @tab X
@tab Audio format used in Sony PS games.
@item WAV @tab X @tab X
@item WavPack @tab X @tab X
@item WebM @tab X @tab X
@@ -527,12 +515,8 @@ library:
@tab Multimedia format used in Westwood Studios games.
@item Westwood Studios VQA @tab @tab X
@tab Multimedia format used in Westwood Studios games.
@item Wideband Single-bit Data (WSD) @tab @tab X
@item WVE @tab @tab X
@item XMV @tab @tab X
@tab Microsoft video container used in Xbox games.
@item XVAG @tab @tab X
@tab Audio format used on the PS3.
@item xWMA @tab @tab X
@tab Microsoft audio container used by XAudio 2.
@item eXtended BINary text (XBIN) @tab @tab X
@@ -651,7 +635,6 @@ following image formats are supported:
@item Bethesda VID video @tab @tab X
@tab Used in some games from Bethesda Softworks.
@item Bink Video @tab @tab X
@item BitJazz SheerVideo @tab @tab X
@item Bitmap Brothers JV video @tab @tab X
@item y41p Brooktree uncompressed 4:1:1 12-bit @tab X @tab X
@item Brute Force & Ignorance @tab @tab X
@@ -686,8 +669,6 @@ following image formats are supported:
@tab fourcc: DUCK
@item Duck TrueMotion 2.0 @tab @tab X
@tab fourcc: TM20
@item Duck TrueMotion 2.0 RT @tab @tab X
@tab fourcc: TR20
@item DV (Digital Video) @tab X @tab X
@item Dxtory capture format @tab @tab X
@item Feeble Files/ScummVM DXA @tab @tab X
@@ -754,7 +735,6 @@ following image formats are supported:
@item LucasArts SANM/Smush @tab @tab X
@tab Used in LucasArts games / SMUSH animations.
@item lossless MJPEG @tab X @tab X
@item MagicYUV Video @tab @tab X
@item Microsoft ATC Screen @tab @tab X
@tab Also known as Microsoft Screen 3.
@item Microsoft Expression Encoder Screen @tab @tab X
@@ -819,7 +799,6 @@ following image formats are supported:
@tab Texture dictionaries used by the Renderware Engine.
@item RL2 video @tab @tab X
@tab used in some games by Entertainment Software Partners
@item Screenpresso @tab @tab X
@item Sierra VMD video @tab @tab X
@tab Used in Sierra VMD files.
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
@@ -886,13 +865,12 @@ following image formats are supported:
@item Name @tab Encoding @tab Decoding @tab Comments
@item 8SVX exponential @tab @tab X
@item 8SVX fibonacci @tab @tab X
@item AAC @tab EX @tab X
@tab encoding supported through internal encoder and external library libfdk-aac
@item AAC+ @tab E @tab IX
@tab encoding supported through external library libfdk-aac
@tab encoding supported through external library libaacplus
@item AAC @tab E @tab X
@tab encoding supported through external library libfaac and libvo-aacenc
@item AC-3 @tab IX @tab IX
@item ADPCM 4X Movie @tab @tab X
@item APDCM Yamaha AICA @tab @tab X
@item ADPCM CDROM XA @tab @tab X
@item ADPCM Creative Technology @tab @tab X
@tab 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
@@ -928,7 +906,6 @@ following image formats are supported:
@item ADPCM Nintendo Gamecube AFC @tab @tab X
@item ADPCM Nintendo Gamecube DTK @tab @tab X
@item ADPCM Nintendo THP @tab @tab X
@item APDCM Playstation @tab @tab X
@item ADPCM QT IMA @tab X @tab X
@item ADPCM SEGA CRI ADX @tab X @tab X
@tab Used in Sega Dreamcast games.
@@ -936,7 +913,7 @@ following image formats are supported:
@item ADPCM Sound Blaster Pro 2-bit @tab @tab X
@item ADPCM Sound Blaster Pro 2.6-bit @tab @tab X
@item ADPCM Sound Blaster Pro 4-bit @tab @tab X
@item ADPCM VIMA @tab @tab X
@item ADPCM VIMA
@tab Used in LucasArts SMUSH animations.
@item ADPCM Westwood Studios IMA @tab @tab X
@tab Used in Westwood Studios games like Command and Conquer.
@@ -962,13 +939,11 @@ following image formats are supported:
@item COOK @tab @tab X
@tab All versions except 5.1 are supported.
@item DCA (DTS Coherent Acoustics) @tab X @tab X
@tab supported extensions: XCh, XXCH, X96, XBR, XLL, LBR (partially)
@tab supported extensions: XCh, XLL (partially)
@item DPCM id RoQ @tab X @tab X
@tab Used in Quake III, Jedi Knight 2 and other computer games.
@item DPCM Interplay @tab @tab X
@tab Used in various Interplay computer games.
@item DPCM Squareroot-Delta-Exact @tab @tab X
@tab Used in various games.
@item DPCM Sierra Online @tab @tab X
@tab Used in Sierra Online game audio files.
@item DPCM Sol @tab @tab X
@@ -979,12 +954,11 @@ following image formats are supported:
@item DSD (Direct Stream Digitial), least significant bit first, planar @tab @tab X
@item DSD (Direct Stream Digitial), most significant bit first, planar @tab @tab X
@item DSP Group TrueSpeech @tab @tab X
@item DST (Direct Stream Transfer) @tab @tab X
@item DV audio @tab @tab X
@item Enhanced AC-3 @tab X @tab X
@item EVRC (Enhanced Variable Rate Codec) @tab @tab X
@item FLAC (Free Lossless Audio Codec) @tab X @tab IX
@item G.723.1 @tab X @tab X
@item G.723.1 @tab X @tab X
@item G.729 @tab @tab X
@item GSM @tab E @tab X
@tab encoding supported through external library libgsm
@@ -994,10 +968,9 @@ following image formats are supported:
@item iLBC (Internet Low Bitrate Codec) @tab E @tab E
@tab encoding and decoding supported through external library libilbc
@item IMC (Intel Music Coder) @tab @tab X
@item Interplay ACM @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 3:1 @tab @tab X
@item MACE (Macintosh Audio Compression/Expansion) 6:1 @tab @tab X
@item MLP (Meridian Lossless Packing) @tab X @tab X
@item MLP (Meridian Lossless Packing) @tab @tab X
@tab Used in DVD-Audio discs.
@item Monkey's Audio @tab @tab X
@item MP1 (MPEG audio layer 1) @tab @tab IX
@@ -1064,7 +1037,7 @@ following image formats are supported:
@tab supported through external library libspeex
@item TAK (Tom's lossless Audio Kompressor) @tab @tab X
@item True Audio (TTA) @tab X @tab X
@item TrueHD @tab X @tab X
@item TrueHD @tab @tab X
@tab Used in HD-DVD and Blu-Ray discs.
@item TwinVQ (VQF flavor) @tab @tab X
@item VIMA @tab @tab X
@@ -1079,8 +1052,6 @@ following image formats are supported:
@item Windows Media Audio Lossless @tab @tab X
@item Windows Media Audio Pro @tab @tab X
@item Windows Media Audio Voice @tab @tab X
@item Xbox Media Audio 1 @tab @tab X
@item Xbox Media Audio 2 @tab @tab X
@end multitable
@code{X} means that encoding (resp. decoding) is supported.
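The general.texi hunks above add and remove entries for several external libraries; as a hedged reminder, each of them is switched on at configure time with the flags quoted in the text, for example:

# libzvbi is GPL-2+, so enabling it requires building FFmpeg as GPL.
./configure --enable-libopenh264 \
            --enable-libopencore-amrnb --enable-libopencore-amrwb \
            --enable-gpl --enable-libzvbi
make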


@@ -65,21 +65,6 @@ git clone git@@source.ffmpeg.org:ffmpeg <target>
This will put the FFmpeg sources into the directory @var{<target>} and let
you push back your changes to the remote repository.
@example
git clone gil@@ffmpeg.org:ffmpeg-web <target>
@end example
This will put the source of the FFmpeg website into the directory
@var{<target>} and let you push back your changes to the remote repository.
(Note that @var{gil} stands for GItoLite and is not a typo of @var{git}.)
If you don't have write-access to the ffmpeg-web repository, you can
create patches after making a read-only ffmpeg-web clone:
@example
git clone git://ffmpeg.org/ffmpeg-web <target>
@end example
Make sure that you do not have Windows line endings in your checkouts,
otherwise you may experience spurious compilation failures. One way to
achieve this is to run
@@ -408,7 +393,7 @@ with @option{--dry-run} first. And then inspecting the commits listed with
@command{git log -p 1234567..987654}. The @command{git status} command
may help in finding local changes that have been forgotten to be added.
Next let the code pass through a full run of our test suite.
Next let the code pass through a full run of our testsuite.
@itemize
@item @command{make distclean}
@@ -418,7 +403,7 @@ Next let the code pass through a full run of our test suite.
@end itemize
Make sure all your changes have been checked before pushing them, the
test suite only checks against regressions and that only to some extend. It does
testsuite only checks against regressions and that only to some extend. It does
obviously not check newly added features/code to be working unless you have
added a test for that (which is recommended).
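The hunk above only respells "test suite", but the full pre-push run it refers to looks roughly like this (hedged; FATE needs a locally available sample set):

make distclean
./configure            # with your usual options
make -j8
make fate SAMPLES=/path/to/fate-suite    # regression test suite (FATE)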


@@ -121,7 +121,7 @@ Specify the audio device by its index. Overrides anything given in the input fil
@item -pixel_format <FORMAT>
Request the video device to use a specific pixel format.
If the specified format is not supported, a list of available formats is given
and the first one in this list is used instead. Available pixel formats are:
und the first one in this list is used instead. Available pixel formats are:
@code{monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
yuv420p, nv12, yuyv422, gray}
@@ -218,8 +218,7 @@ On Windows, you need to run the IDL files through @command{widl}.
DeckLink is very picky about the formats it supports. Pixel format is
uyvy422 or v210, framerate and video size must be determined for your device with
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
of channels can be 2, 8 or 16. Note that all audio channels are bundled in one single
audio track.
of channels can be 2, 8 or 16.
@subsection Options
@@ -237,46 +236,6 @@ Defaults to @option{false}.
If set to @samp{1}, video is captured in 10 bit v210 instead
of uyvy422. Not all Blackmagic devices support this option.
@item teletext_lines
If set to nonzero, an additional teletext stream will be captured from the
vertical ancillary data. This option is a bitmask of the VBI lines checked,
specifically lines 6 to 22, and lines 318 to 335. Line 6 is the LSB in the mask.
Selected lines which do not contain teletext information will be ignored. You
can use the special @option{all} constant to select all possible lines, or
@option{standard} to skip lines 6, 318 and 319, which are not compatible with all
receivers. Capturing teletext only works for SD PAL sources in 8 bit mode.
To use this option, ffmpeg needs to be compiled with @code{--enable-libzvbi}.
@item channels
Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp{16}.
Defaults to @samp{2}.
@item duplex_mode
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
Defaults to @samp{unset}.
@item video_input
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
@samp{optical_sdi}, @samp{component}, @samp{composite} or @samp{s_video}.
Defaults to @samp{unset}.
@item audio_input
Sets the audio input source. Must be @samp{unset}, @samp{embedded},
@samp{aes_ebu}, @samp{analog}, @samp{analog_xlr}, @samp{analog_rca} or
@samp{microphone}. Defaults to @samp{unset}.
@item video_pts
Sets the video packet timestamp source. Must be @samp{video}, @samp{audio},
@samp{reference} or @samp{wallclock}. Defaults to @samp{video}.
@item audio_pts
Sets the audio packet timestamp source. Must be @samp{video}, @samp{audio},
@samp{reference} or @samp{wallclock}. Defaults to @samp{audio}.
@item draw_bars
If set to @samp{true}, color bars are drawn in the event of a signal loss.
Defaults to @samp{true}.
@end table
@subsection Examples
@@ -307,12 +266,6 @@ Capture video clip at 1080i50 10 bit:
ffmpeg -bm_v210 1 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
@end example
@item
Capture video clip at 1080i50 with 16 audio channels:
@example
ffmpeg -channels 16 -f decklink -i 'UltraStudio Mini Recorder@@11' -acodec copy -vcodec copy output.avi
@end example
@end itemize
@section dshow
@@ -682,7 +635,7 @@ is an exact value. For HDV, it is not frame exact, since HDV does
not have a fixed frame size.
@item dvguid
Select the capture device by specifying its GUID. Capturing will only
Select the capture device by specifying it's GUID. Capturing will only
be performed from the specified device and fails if no device with the
given GUID is found. This is useful to select the input if multiple
devices are connected at the same time.
@@ -1345,7 +1298,7 @@ ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@table @option
@item draw_mouse
Specify whether to draw the mouse pointer. A value of @code{0} specifies
Specify whether to draw the mouse pointer. A value of @code{0} specify
not to draw the pointer. Default value is @code{1}.
@item follow_mouse
@@ -1399,13 +1352,17 @@ Set the video frame size. Default value is @code{vga}.
Use the MIT-SHM extension for shared memory. Default value is @code{1}.
It may be necessary to disable it for remote displays (legacy x11grab
only).
@item grab_x
@item grab_y
Set the grabbing region coordinates. They are expressed as offset from
the top left corner of the X11 window and correspond to the
@var{x_offset} and @var{y_offset} parameters in the device name. The
default value for both options is 0.
@end table
@subsection @var{grab_x} @var{grab_y} AVOption
The syntax is:
@example
-grab_x @var{x_offset} -grab_y @var{y_offset}
@end example
Set the grabbing region coordinates. They are expressed as offset from the top left
corner of the X11 window. The default value is 0.
@c man end INPUT DEVICES
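The x11grab hunk above moves the grabbing offsets between the device name and the grab_x/grab_y AVOptions; the two command forms below should be equivalent (hedged example):

# Offsets encoded in the device name ...
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
# ... or passed as grab_x/grab_y options.
ffmpeg -f x11grab -framerate 25 -video_size cif -grab_x 10 -grab_y 20 -i :0.0 out.mpg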


@@ -1,6 +1,8 @@
FFmpeg's bug/feature request tracker manual
=================================================
NOTE: This is a draft.
Overview:
---------
@@ -20,9 +22,9 @@ a mail for every change to every issue.
(the above does all work already after light testing)
The subscription URL for the ffmpeg-trac list is:
https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
http(s)://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
https://trac.ffmpeg.org
http(s)://trac.ffmpeg.org
Type:
-----
@@ -40,16 +42,12 @@ feature request / enhancement
where the current implementation cannot be considered wrong.
license violation
Ticket to keep track of (L)GPL violations of ffmpeg by others.
ticket to keep track of (L)GPL violations of ffmpeg by others
sponsoring request
Developer requests for hardware, software, specifications, money,
refunds, etc.
task
A task/reminder such as setting up a FATE client, adding filters to
Trac, etc.
Priority:
---------
critical
@@ -68,8 +66,7 @@ important
don't exist in a past revision or another branch.
normal
Default setting. Use this if the bug does not match the other
priorities or if you are unsure of what priority to choose.
minor
Bugs about things like spelling errors, "mp2" instead of
@@ -166,23 +163,14 @@ Component:
avcodec
issues in libavcodec/*
avdevice
issues in libavdevice/*
avfilter
issues in libavfilter/*
avformat
issues in libavformat/*
avutil
issues in libavutil/*
build system
issues in or related to configure/Makefile
documentation
issues in or related to doc/*
regression test
issues in tests/*
ffmpeg
issues in or related to ffmpeg.c
@@ -196,23 +184,11 @@ ffprobe
ffserver
issues in or related to ffserver.c
postproc
issues in libpostproc/*
build system
issues in or related to configure/Makefile
swresample
issues in libswresample/*
swscale
issues in libswscale/*
regression
bugs which were not present in a past revision
trac
issues related to our issue tracker
undetermined
default component; choose this if unsure
website
issues related to the website
wiki
issues related to the wiki


@@ -1,98 +0,0 @@
CONTEXT
=======
The FFmpeg project merges all the changes from the Libav project
(https://libav.org) since the origin of the fork (around 2011).
With the exceptions of some commits due to technical/political disagreements or
issues, the changes are merged on a more or less regular schedule (daily for
years thanks to Michael, but more sparse nowadays).
WHY
===
The majority of the active developers believe the project needs to keep this
policy for various reasons.
The most important one is that we don't want our users to have to choose
between two distributors of libraries of the exact same name in order to have a
different set of features and bugfixes. By taking the responsibility of
unifying the two codebases, we allow users to benefit from the changes from the
two teams.
Today, FFmpeg has a much larger user database (we are distributed by every
major distribution), so we consider this mission a priority.
A different approach to the merge could have been to pick the changes we are
interested in and drop most of the cosmetics and other less important changes.
Unfortunately, this makes the following picks much harder, especially since the
Libav project is involved in various deep API changes. As a result, we decide
to virtually take everything done there.
Any Libav developer is of course welcome anytime to contribute directly to the
FFmpeg tree. Of course, we fully understand and are forced to accept that very
few Libav developers are interested in doing so, but we still want to recognize
their work. This leads us to create merge commits for every single one from
Libav. The original commit appears totally unchanged with full authorship in
our history (and the conflict are solved in the merge one). That way, not a
single thing from Libav will be lost in the future in case some reunification
happens, or that project disappears one way or another.
DOWNSIDES
=========
Of course, there are many downsides to this approach.
- It causes a non negligible merge commits pollution. We make sure there are
not several level of merges entangled (we do a 1:1 merge/commit), but it's
still a non-linear history.
- Many duplicated work. For instance, we added libavresample in our tree to
keep compatibility with Libav when our libswresample was already covering the
exact same purpose. The same thing happened for various elements such as the
ProRes support (but differences in features, bugs, licenses, ...). There are
many work to do to unify them, and any help is very much welcome.
- So much manpower from both FFmpeg and Libav is lost because of this mess. We
know it, and we don't know how to fix it. It takes incredible time to do
these merges, so we have even less time to work on things we personally care
about. The bad vibes also do not help with keeping our developers motivated.
- There is a growing technical risk factor with the merges due to the codebase
differing more and more.
MERGE GUIDELINES
================
The following gives developer guidelines on how to proceed when merging Libav commits.
Before starting, you can reduce the risk of errors on merge conflicts by using
a different merge conflict style:
$ git config --global merge.conflictstyle diff3
tools/libav-merge-next-commit is a script to help merging the next commit in
the queue. It assumes a remote named libav. It has two modes: merge, and noop.
The noop mode creates a merge with no change to the HEAD. You can pass a hash
as extra argument to reference a justification (it is common that we already
have the change done in FFmpeg).
Also see tools/murge, you can copy and paste a 3 way conflict into its stdin
and it will display colored diffs. Any arguments to murge (like ones to suppress
whitespace differences) are passed into colordiff.
TODO/FIXME/UNMERGED
===================
Stuff that didn't reach the codebase:
-------------------------------------
- HEVC DSP and x86 MC SIMD improvements from Libav (see https://ffmpeg.org/pipermail/ffmpeg-devel/2015-December/184777.html)
- QSV HWContext integration (04b17ff and 130e1f1), QSV scaling filter (62c58c5)
Collateral damage that needs work locally:
------------------------------------------
- Merge proresdec2.c and proresdec_lgpl.c
- Merge proresenc_anatoliy.c and proresenc_kostya.c
- Remove ADVANCED_PARSER in libavcodec/hevc_parser.c
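The doc/libav-merge.txt hunk above describes the merge tooling; in shell terms the workflow amounts roughly to the following (hedged sketch; substitute the real Libav repository URL for <libav-git-url>):

git config --global merge.conflictstyle diff3      # clearer 3-way conflict markers
git remote add libav <libav-git-url>
git fetch libav
tools/libav-merge-next-commit merge                # merge the next queued Libav commit
tools/libav-merge-next-commit noop <hash>          # record a no-op merge, citing <hash> as justification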


@@ -37,61 +37,6 @@ ID3v2.3 and ID3v2.4) are supported. The default is version 4.
@end table
@anchor{asf}
@section asf
Advanced Systems Format muxer.
Note that Windows Media Audio (wma) and Windows Media Video (wmv) use this
muxer too.
@subsection Options
It accepts the following options:
@table @option
@item packet_size
Set the muxer packet size. By tuning this setting you may reduce data
fragmentation or muxer overhead depending on your source. Default value is
3200, minimum is 100, maximum is 64k.
@end table
@anchor{chromaprint}
@section chromaprint
Chromaprint fingerprinter
This muxer feeds audio data to the Chromaprint library, which generates
a fingerprint for the provided audio data. It takes a single signed
native-endian 16-bit raw audio stream.
@subsection Options
@table @option
@item silence_threshold
Threshold for detecting silence, ranges from 0 to 32767. -1 for default
(required for use with the AcoustID service).
@item algorithm
Algorithm index to fingerprint with.
@item fp_format
Format to output the fingerprint as. Accepts the following options:
@table @samp
@item raw
Binary raw fingerprint
@item compressed
Binary compressed fingerprint
@item base64
Base64 compressed fingerprint
@end table
@end table
@anchor{crc}
@section crc
@@ -129,27 +74,6 @@ and the input video converted to MPEG-2 video, use the command:
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
@end example
@section flv
Adobe Flash Video Format muxer.
This muxer accepts the following options:
@table @option
@item flvflags @var{flags}
Possible values:
@table @samp
@item aac_seq_header_detect
Place AAC sequence header based on audio stream data.
@item no_sequence_end
Disable sequence end tag.
@end table
@end table
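The flv muxer hunk above documents the flvflags option; a hedged example of passing it on the command line (assumes the input already carries FLV-compatible codecs):

ffmpeg -i input.mp4 -c copy -f flv -flvflags no_sequence_end output.flv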
@anchor{framecrc}
@section framecrc
@@ -195,70 +119,30 @@ ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
See also the @ref{crc} muxer.
@anchor{framehash}
@section framehash
Per-packet hash testing format.
This muxer computes and prints a cryptographic hash for each audio
and video packet. This can be used for packet-by-packet equality
checks without having to individually do a binary comparison on each.
By default audio frames are converted to signed 16-bit raw audio and
video frames to raw video before computing the hash, but the output
of explicit conversions to other codecs can also be used. It uses the
SHA-256 cryptographic hash function by default, but supports several
other algorithms.
The output of the muxer consists of a line for each audio and video
packet of the form:
@example
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, @var{hash}
@end example
@var{hash} is a hexadecimal number representing the computed hash
for the packet.
@table @option
@item hash @var{algorithm}
Use the cryptographic hash function specified by the string @var{algorithm}.
Supported values include @code{MD5}, @code{murmur3}, @code{RIPEMD128},
@code{RIPEMD160}, @code{RIPEMD256}, @code{RIPEMD320}, @code{SHA160},
@code{SHA224}, @code{SHA256} (default), @code{SHA512/224}, @code{SHA512/256},
@code{SHA384}, @code{SHA512}, @code{CRC32} and @code{adler32}.
@end table
@subsection Examples
To compute the SHA-256 hash of the audio and video frames in @file{INPUT},
converted to raw audio and video packets, and store it in the file
@file{out.sha256}:
@example
ffmpeg -i INPUT -f framehash out.sha256
@end example
To print the information to stdout, using the MD5 hash function, use
the command:
@example
ffmpeg -i INPUT -f framehash -hash md5 -
@end example
See also the @ref{hash} muxer.
@anchor{framemd5}
@section framemd5
Per-packet MD5 testing format.
This is a variant of the @ref{framehash} muxer. Unlike that muxer,
it defaults to using the MD5 hash function.
This muxer computes and prints the MD5 hash for each audio
and video packet. By default audio frames are converted to signed
16-bit raw audio and video frames to raw video before computing the
hash.
The output of the muxer consists of a line for each audio and video
packet of the form:
@example
@var{stream_index}, @var{packet_dts}, @var{packet_pts}, @var{packet_duration}, @var{packet_size}, @var{MD5}
@end example
@var{MD5} is a hexadecimal number representing the computed MD5 hash
for the packet.
@subsection Examples
To compute the MD5 hash of the audio and video frames in @file{INPUT},
converted to raw audio and video packets, and store it in the file
@file{out.md5}:
For example to compute the MD5 of the audio and video frames in
@file{INPUT}, converted to raw audio and video packets, and store it
in the file @file{out.md5}:
@example
ffmpeg -i INPUT -f framemd5 out.md5
@end example
@@ -268,7 +152,7 @@ To print the information to stdout, use the command:
ffmpeg -i INPUT -f framemd5 -
@end example
See also the @ref{framehash} and @ref{md5} muxers.
See also the @ref{md5} muxer.
@anchor{gif}
@section gif
@@ -295,59 +179,14 @@ the loops:
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
@end example
Note 1: if you wish to extract the frames into separate GIF files, you need to
Note 1: if you wish to extract the frames in separate GIF files, you need to
force the @ref{image2} muxer:
@example
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
@end example
Note 2: the GIF format has a very large time base: the delay between two frames
can therefore not be smaller than one centi second.
@anchor{hash}
@section hash
Hash testing format.
This muxer computes and prints a cryptographic hash of all the input
audio and video frames. This can be used for equality checks without
having to do a complete binary comparison.
By default audio frames are converted to signed 16-bit raw audio and
video frames to raw video before computing the hash, but the output
of explicit conversions to other codecs can also be used. Timestamps
are ignored. It uses the SHA-256 cryptographic hash function by default,
but supports several other algorithms.
The output of the muxer consists of a single line of the form:
@var{algo}=@var{hash}, where @var{algo} is a short string representing
the hash function used, and @var{hash} is a hexadecimal number
representing the computed hash.
@table @option
@item hash @var{algorithm}
Use the cryptographic hash function specified by the string @var{algorithm}.
Supported values include @code{MD5}, @code{murmur3}, @code{RIPEMD128},
@code{RIPEMD160}, @code{RIPEMD256}, @code{RIPEMD320}, @code{SHA160},
@code{SHA224}, @code{SHA256} (default), @code{SHA512/224}, @code{SHA512/256},
@code{SHA384}, @code{SHA512}, @code{CRC32} and @code{adler32}.
@end table
@subsection Examples
To compute the SHA-256 hash of the input converted to raw audio and
video, and store it in the file @file{out.sha256}:
@example
ffmpeg -i INPUT -f hash out.sha256
@end example
To print an MD5 hash to stdout use the command:
@example
ffmpeg -i INPUT -f hash -hash md5 -
@end example
See also the @ref{framehash} muxer.
Note 2: the GIF format has a very small time base: the delay between two frames
can not be smaller than one centi second.
@anchor{hls}
@section hls
@@ -378,15 +217,8 @@ segmentation.
This muxer supports the following options:
@table @option
@item hls_init_time @var{seconds}
Set the initial target segment length in seconds. Default value is @var{0}.
Segment will be cut on the next key frame after this time has passed on the first m3u8 list.
After the initial playlist is filled @command{ffmpeg} will cut segments
at duration equal to @code{hls_time}
@item hls_time @var{seconds}
Set the target segment length in seconds. Default value is 2.
Segment will be cut on the next key frame after this time has passed.
Set the segment length in seconds. Default value is 2.
@item hls_list_size @var{size}
Set the maximum number of playlist entries. If set to 0 the list file
@@ -423,34 +255,14 @@ which can be cyclic, for example if the @option{wrap} option is
specified.
@item hls_segment_filename @var{filename}
Set the segment filename. Unless @code{hls_flags single_file} is set,
@var{filename} is used as a string format with the segment number:
Set the segment filename. Unless hls_flags single_file is set @var{filename}
is used as a string format with the segment number:
@example
ffmpeg -i in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
@end example
This example will produce the playlist, @file{out.m3u8}, and segment files:
@file{file000.ts}, @file{file001.ts}, @file{file002.ts}, etc.
@item use_localtime
Use strftime on @var{filename} to expand the segment filename with localtime.
The segment number (%d) is not available in this mode.
@example
ffmpeg -i in.nut -use_localtime 1 -hls_segment_filename 'file-%Y%m%d-%s.ts' out.m3u8
@end example
This example will produce the playlist, @file{out.m3u8}, and segment files:
@file{file-20160215-1455569023.ts}, @file{file-20160215-1455569024.ts}, etc.
@item use_localtime_mkdir
Used together with -use_localtime, it will create up to one subdirectory which
is expanded in @var{filename}.
@example
ffmpeg -i in.nut -use_localtime 1 -use_localtime_mkdir 1 -hls_segment_filename '%Y%m%d/file-%Y%m%d-%s.ts' out.m3u8
@end example
This example will create a directory 201560215 (if it does not exist), and then
produce the playlist, @file{out.m3u8}, and segment files:
@file{201560215/file-20160215-1455569023.ts}, @file{201560215/file-20160215-1455569024.ts}, etc.
@item hls_key_info_file @var{key_info_file}
Use the information in @var{key_info_file} for segment encryption. The first
line of @var{key_info_file} specifies the key URI written to the playlist. The
@@ -521,49 +333,6 @@ Will produce the playlist, @file{out.m3u8}, and a single segment file,
@item hls_flags delete_segments
Segment files removed from the playlist are deleted after a period of time
equal to the duration of the segment plus the duration of the playlist.
@item hls_flags append_list
Append new segments into the end of old segment list,
and remove the @code{#EXT-X-ENDLIST} from the old segment list.
@item hls_flags round_durations
Round the duration info in the playlist file segment info to integer
values, instead of using floating point.
@item hls_flags discont_starts
Add the @code{#EXT-X-DISCONTINUITY} tag to the playlist, before the
first segment's information.
@item hls_flags omit_endlist
Do not append the @code{EXT-X-ENDLIST} tag at the end of the playlist.
@item hls_flags split_by_time
Allow segments to start on frames other than keyframes. This improves
behavior on some players when the time between keyframes is inconsistent,
but may make things worse on others, and can cause some oddities during
seeking. This flag should be used with the @code{hls_time} option.
@item hls_flags program_date_time
Generate @code{EXT-X-PROGRAM-DATE-TIME} tags.
@item hls_playlist_type event
Emit @code{#EXT-X-PLAYLIST-TYPE:EVENT} in the m3u8 header. Forces
@option{hls_list_size} to 0; the playlist can only be appended to.
@item hls_playlist_type vod
Emit @code{#EXT-X-PLAYLIST-TYPE:VOD} in the m3u8 header. Forces
@option{hls_list_size} to 0; the playlist must not change.
@item method
Use the given HTTP method to create the hls files.
@example
ffmpeg -re -i in.ts -f hls -method PUT http://example.com/live/out.m3u8
@end example
This example will upload all the mpegts segment files to the HTTP
server using the HTTP PUT method, and update the m3u8 files every
@code{refresh} times using the same method.
Note that the HTTP server must support the given method for uploading
files.
@end table
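As a rough sketch combining several of the flags described above for a live,
rolling playlist (segment length and list size are illustrative):
@example
ffmpeg -i in.nut -c:v libx264 -c:a aac -hls_time 4 -hls_list_size 5 -hls_flags delete_segments+split_by_time out.m3u8
@end example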
@anchor{ico}
@@ -777,12 +546,16 @@ have no effect if it is not.
MD5 testing format.
This is a variant of the @ref{hash} muxer. Unlike that muxer, it
defaults to using the MD5 hash function.
This muxer computes and prints the MD5 hash of all the input audio
and video frames. By default audio frames are converted to signed
16-bit raw audio and video frames to raw video before computing the
hash.
The output of the muxer consists of a single line of the form:
MD5=@var{MD5}, where @var{MD5} is a hexadecimal number representing
the computed MD5 hash.
@subsection Examples
To compute the MD5 hash of the input converted to raw
audio and video, and store it in the file @file{out.md5}:
@example
ffmpeg -i INPUT -f md5 out.md5
@@ -793,7 +566,7 @@ You can print the MD5 to stdout with the command:
ffmpeg -i INPUT -f md5 -
@end example
See also the @ref{hash} and @ref{framemd5} muxers.
@section mov, mp4, ismv
@@ -884,9 +657,6 @@ the new default-base-is-moof flag instead. This flag is new from
14496-12:2012. This may make the fragments easier to parse in certain
circumstances (avoiding basing track fragment location calculations
on the implicit end of the previous track fragment).
@item -write_tmcd
Specify @code{on} to force writing a timecode track, @code{off} to disable it
and @code{auto} to write a timecode track only for mov and mp4 output (default).
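For instance, a hedged sketch that remuxes while forcing a timecode track:
@example
ffmpeg -i in.mov -c copy -write_tmcd on out.mp4
@end example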
@end table
@subsection Example
@@ -973,41 +743,41 @@ and @code{service_name}. If they are not set the default for
The muxer options are:
@table @option
@item mpegts_original_network_id @var{number}
Set the original_network_id (default 0x0001). This is the unique identifier
of a network in DVB. Its main use is in the unique identification of a
service through the path Original_Network_ID, Transport_Stream_ID.
@item mpegts_transport_stream_id @var{number}
Set the transport_stream_id (default 0x0001). This identifies a
transponder in DVB.
@item mpegts_service_id @var{number}
Set the service_id (default 0x0001), also known as program in DVB.
@item mpegts_service_type @var{number}
Set the program service_type (default @var{digital_tv}); see below for a
list of predefined values.
@item mpegts_pmt_start_pid @var{number}
Set the first PID for PMT (default 0x1000, max 0x1f00).
@item mpegts_start_pid @var{number}
Set the first PID for data packets (default 0x0100, max 0x0f00).
@item mpegts_m2ts_mode @var{number}
Enable m2ts mode if set to 1. Default value is -1, which disables m2ts mode.
@item muxrate @var{number}
Set a constant muxrate (default VBR).
@item pcr_period @var{number}
Override the default PCR retransmission time (default 20ms), ignored
if variable muxrate is selected.
@item pat_period @var{number}
Maximum time in seconds between PAT/PMT tables.
@item sdt_period @var{number}
Maximum time in seconds between SDT tables.
@item pes_payload_size @var{number}
Set minimum PES packet payload in bytes.
@item mpegts_flags @var{flags}
Set flags (see below).
@item mpegts_copyts @var{number}
Preserve original timestamps, if value is set to 1. Default value is -1, which
results in shifting timestamps so that they start from 0.
@item tables_version @var{number}
Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
This option allows updating stream structure so that standard consumer may
detect the change. To do so, reopen output AVFormatContext (in case of API
@@ -1023,7 +793,7 @@ ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
@end example
@end table
Option @option{mpegts_service_type} accepts the following values:
@table @option
@item hex_value
@@ -1044,7 +814,7 @@ Advanced Codec Digital SDTV service.
Advanced Codec Digital HDTV service.
@end table
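As a sketch, one of the service types above can be selected by name or by its
numeric value (the value below is illustrative):
@example
ffmpeg -i in.mkv -c copy -f mpegts -mpegts_service_type 0x19 out.ts
@end example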
Option @option{mpegts_flags} may take a set of such flags:
@table @option
@item resend_headers
@@ -1053,8 +823,6 @@ Reemit PAT/PMT before writing the next packet.
Use LATM packetization for AAC.
@item pat_pmt_at_frames
Reemit PAT and PMT at each video frame.
@item system_b
Conform to System B (DVB) instead of System A (ATSC).
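A hedged example setting two of these flags on a transport stream output:
@example
ffmpeg -i in.mkv -c copy -f mpegts -mpegts_flags resend_headers+pat_pmt_at_frames out.ts
@end example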
@end table
@subsection Example
@@ -1192,12 +960,6 @@ implementation for HLS segmentation.
The segment muxer supports the following options:
@table @option
@item increment_tc @var{1|0}
If set to @code{1}, increment the timecode between each segment. If this is
selected, the input needs to have a timecode in the first video stream.
Default value is @code{0}.
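A minimal sketch, assuming the first video stream of @file{in.mov} carries a
timecode:
@example
ffmpeg -i in.mov -c copy -f segment -segment_time 10 -increment_tc 1 out%03d.mov
@end example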
@item reference_stream @var{specifier}
Set the reference stream, as specified by the string @var{specifier}.
If @var{specifier} is set to @code{auto}, the reference is chosen
@@ -1301,28 +1063,6 @@ to create files at 12:00 o'clock, 12:15, 12:30, etc.
Default value is "0".
@item segment_clocktime_offset @var{duration}
Delay the segment splitting times with the specified duration when using
@option{segment_atclocktime}.
For example with @option{segment_time} set to "900" and
@option{segment_clocktime_offset} set to "300" this makes it possible to
create files at 12:05, 12:20, 12:35, etc.
Default value is "0".
@item segment_clocktime_wrap_duration @var{duration}
Force the segmenter to only start a new segment if a packet reaches the muxer
within the specified duration after the segmenting clock time. This way you
can make the segmenter more resilient to backward local time jumps, such as
leap seconds or transition to standard time from daylight savings time.
Assuming that the delay between the packets of your source is less than 0.5
second you can detect a leap second by specifying 0.5 as the duration.
Default is the maximum possible duration which means starting a new segment
regardless of the elapsed time since the last clock time.
@item segment_time_delta @var{delta}
Specify the accuracy time when selecting the start time for a
segment, expressed as a duration specification. Default value is "0".
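Putting the clock-time options together, a hedged sketch that splits on
15-minute boundaries shifted by 5 minutes:
@example
ffmpeg -i in.ts -c copy -f segment -segment_atclocktime 1 -segment_time 900 -segment_clocktime_offset 300 out%03d.ts
@end example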
@@ -1385,11 +1125,6 @@ muxers/codecs. It is set to @code{0} by default.
@item initial_offset @var{offset}
Specify timestamp offset to apply to the output packet timestamps. The
argument must be a time duration specification, and defaults to 0.
@item write_empty_segments @var{1|0}
If enabled, write an empty segment if there are no packets during the period a
segment would usually span. Otherwise, the segment will be filled with the next
packet written. Defaults to @code{0}.
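A rough sketch combining the two options above on a sparse input (all values
are illustrative):
@example
ffmpeg -i in.mkv -c copy -f segment -segment_time 10 -initial_offset 5 -write_empty_segments 1 out%03d.mkv
@end example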
@end table
@subsection Examples
@@ -1437,9 +1172,9 @@ ffmpeg -i in.mkv -codec copy -map 0 -f segment -segment_list out.csv -segment_fr
@item
Convert the @file{in.mkv} to TS segments using the @code{libx264}
and @code{aac} encoders:
@example
ffmpeg -i in.mkv -map 0 -codec:v libx264 -codec:a aac -f ssegment -segment_list out.list out%03d.ts
@end example
@item
@@ -1473,101 +1208,6 @@ Specify whether to remove all fragments when finished. Default 0 (do not remove)
@end table
@section fifo
The fifo pseudo-muxer allows the separation of encoding and muxing by using
a first-in-first-out queue and running the actual muxer in a separate thread. This
is especially useful in combination with the @ref{tee} muxer and can be used to
send data to several destinations with different reliability/writing speed/latency.
API users should be aware that callback functions (interrupt_callback,
io_open and io_close) used within its AVFormatContext must be thread-safe.
The behavior of the fifo muxer if the queue fills up or if the output fails is
selectable:
@itemize @bullet
@item
output can be transparently restarted with configurable delay between retries
based on real time or time of the processed stream.
@item
encoding can be blocked during temporary failure, or continue transparently
dropping packets in case fifo queue fills up.
@end itemize
@table @option
@item fifo_format
Specify the format name. Useful if it cannot be guessed from the
output name suffix.
@item queue_size
Specify size of the queue (number of packets). Default value is 60.
@item format_opts
Specify format options for the underlying muxer. Muxer options can be specified
as a list of @var{key}=@var{value} pairs separated by ':'.
@item drop_pkts_on_overflow @var{bool}
If set to 1 (true), in case the fifo queue fills up, packets will be dropped
rather than blocking the encoder. This makes it possible to continue streaming without
delaying the input, at the cost of omitting part of the stream. By default
this option is set to 0 (false), so in such cases the encoder will be blocked
until the muxer processes some of the packets and none of them is lost.
@item attempt_recovery @var{bool}
If failure occurs, attempt to recover the output. This is especially useful
when used with network output, since it makes it possible to restart streaming transparently.
By default this option is set to 0 (false).
@item max_recovery_attempts
Sets maximum number of successive unsuccessful recovery attempts after which
the output fails permanently. By default this option is set to 0 (unlimited).
@item recovery_wait_time @var{duration}
Waiting time before the next recovery attempt after previous unsuccessful
recovery attempt. Default value is 5 seconds.
@item recovery_wait_streamtime @var{bool}
If set to 0 (false), the real time is used when waiting for the recovery
attempt (i.e. the recovery will be attempted after at least
recovery_wait_time seconds).
If set to 1 (true), the time of the processed stream is taken into account
instead (i.e. the recovery will be attempted after at least @var{recovery_wait_time}
seconds of the stream is omitted).
By default, this option is set to 0 (false).
@item recover_any_error @var{bool}
If set to 1 (true), recovery will be attempted regardless of type of the error
causing the failure. By default this option is set to 0 (false) and in case of
certain (usually permanent) errors the recovery is not attempted even when
@var{attempt_recovery} is set to 1.
@item restart_with_keyframe @var{bool}
Specify whether to wait for the keyframe after recovering from
queue overflow or failure. This option is set to 0 (false) by default.
@end table
@subsection Examples
@itemize
@item
Stream something to rtmp server, continue processing the stream at real-time
rate even in case of temporary failure (network outage) and attempt to recover
streaming every second indefinitely.
@example
ffmpeg -re -i ... -c:v libx264 -c:a aac -f fifo -fifo_format flv -map 0:v -map 0:a
-drop_pkts_on_overflow 1 -attempt_recovery 1 -recovery_wait_time 1 rtmp://example.com/live/stream_name
@end example
@end itemize
@anchor{tee}
@section tee
The tee muxer can be used to write the same data to several files or any
@@ -1612,14 +1252,7 @@ Several bitstream filters can be specified, separated by ",".
@item select
Select the streams that should be mapped to the slave output,
specified by a stream specifier. If not specified, this defaults to
all the input streams. You may use multiple stream specifiers
separated by commas (@code{,}) e.g.: @code{a:0,v}
@item onfail
Specify behaviour on output failure. This can be set to either @code{abort} (which is
default) or @code{ignore}. @code{abort} will cause whole process to fail in case of failure
on this slave output. @code{ignore} will ignore failure on this output, so other outputs
will continue without being affected.
@end table
@subsection Examples
@@ -1633,14 +1266,6 @@ ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
"archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
@end example
@item
As above, but continue streaming even if output to local file fails
(for example local drive fills up):
@example
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
"[onfail=ignore]archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
@end example
@item
Use @command{ffmpeg} to encode the input, and send the output
to three different destinations. The @code{dump_extra} bitstream

View File

@@ -34,7 +34,7 @@ NUT has some variants signaled by using the flags field in its main header.
The BROADCAST variant provides a secondary time reference to facilitate
detecting endpoint latency and network delays.
It assumes all the endpoint clocks are synchronized.
To be used in real-time scenarios.
@section PIPE

View File

@@ -7,7 +7,7 @@ If you plan to do non-x86 architecture specific optimizations (SIMD normally),
then take a look in the x86/ directory, as most important functions are
already optimized for MMX.
If you want to do x86 optimizations then you can either try to fine-tune the
stuff in the x86 directory or find some other functions in the C source to
optimize, but there aren't many left.
@@ -163,7 +163,7 @@ general x86 registers (e.g. eax) as well as XMM registers. This last one is
particularly important on Win64, where xmm6-15 are callee-save, and not
restoring their contents leads to undefined results. In external asm (e.g.
yasm), you do this by using:
cglobal function_name, num_args, num_regs, num_xmm_regs
In inline asm, you specify clobbered registers at the end of your asm:
__asm__(".." ::: "%eax").
If gcc is not set to support sse (-msse) it will not accept xmm registers

View File

@@ -1,10 +0,0 @@
Patchwork states
NEW: Initial state of new patches
Accepted: The patch was pushed to the main master repository
Rejected: The patch has been rejected
Withdrawn: The patch was withdrawn by the author
Not Applicable: The patch does not apply to the main master repository
Superseded: A newer version of the patch has been posted
Changes Requested: The patch has been or is under review and changes have been requested
RFC: The patch is not intended to be applied but only for comments

View File

@@ -63,7 +63,7 @@ bash ./configure
@section Darwin (Mac OS X, iPhone)
The toolchain provided with Xcode is sufficient to build the basic
unaccelerated code.
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
@url{https://github.com/FFmpeg/gas-preprocessor} or
@@ -107,13 +107,8 @@ Notes:
@itemize
@item Building for the MSYS environment is discouraged, MSYS2 provides a full
MinGW-w64 environment through @file{mingw64_shell.bat} or
@file{mingw32_shell.bat} that should be used instead of the environment
provided by @file{msys2_shell.bat}.
@item Building using MSYS2 can be sped up by disabling implicit rules in the
Makefile by calling @code{make -r} instead of plain @code{make}. This
speed up is close to non-existent for normal one-off builds and is only
noticeable when running make for a second time (for example during
@code{make install}).
@@ -127,25 +122,6 @@ libavformat) as DLLs.
@end itemize
@subsection Native Windows compilation using MSYS2
The MSYS2 MinGW-w64 environment provides ready to use toolchains and dependencies
through @command{pacman}.
Make sure to use @file{mingw64_shell.bat} or @file{mingw32_shell.bat} to have
the correct MinGW-w64 environment. The default install provides shortcuts to
them under @command{MinGW-w64 Win64 Shell} and @command{MinGW-w64 Win32 Shell}.
@example
# normal msys2 packages
pacman -S make pkgconf diffutils
# mingw-w64 packages and toolchains
pacman -S mingw-w64-x86_64-yasm mingw-w64-x86_64-gcc mingw-w64-x86_64-SDL
@end example
To target 32 bits replace @code{x86_64} with @code{i686} in the command above.
@section Microsoft Visual C++ or Intel C++ Compiler for Windows
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
@@ -173,7 +149,7 @@ earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your
Next, make sure any other headers and libs you want to use, such as zlib, are
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
and @code{INCLUDE} environment variables to include the @strong{Windows-style}
paths to these directories. Alternatively, you can try to use the
@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC
2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too.
@@ -314,7 +290,7 @@ These library packages are only available from
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
@example
yasm, libSDL-devel, libgsm-devel, libmp3lame-devel,
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
@end example

View File

@@ -1,22 +1,3 @@
@chapter Protocol Options
@c man begin PROTOCOL OPTIONS
The libavformat library provides some generic global options, which
can be set on all the protocols. In addition each protocol may support
so-called private options, which are specific for that component.
The list of supported options follows:
@table @option
@item protocol_whitelist @var{list} (@emph{input})
Set a ","-separated list of allowed protocols. "ALL" matches all protocols. Protocols
prefixed by "-" are disabled.
All protocols are allowed by default, but protocols used by another
protocol (nested protocols) are restricted to a per-protocol subset.
@end table
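For example, a hedged sketch that restricts an HLS input to a small set of
protocols:
@example
ffmpeg -protocol_whitelist file,http,https,tcp,tls -i input.m3u8 -c copy out.mp4
@end example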
@c man end PROTOCOL OPTIONS
@chapter Protocols
@c man begin PROTOCOLS
@@ -36,14 +17,6 @@ particular protocol using the option
The option "-protocols" of the ff* tools will display the list of
supported protocols.
All protocols accept the following options:
@table @option
@item rw_timeout
Maximum time to wait for (network) read/write operations to complete,
in microseconds.
@end table
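As a sketch, giving up on stalled network reads after five seconds (the value
is in microseconds):
@example
ffmpeg -rw_timeout 5000000 -i http://example.com/live.ts -c copy out.ts
@end example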
A description of the currently available protocols follows.
@section async
@@ -224,17 +197,6 @@ it, unless special care is taken (tests, customized server configuration
etc.). Different FTP servers behave in different way during seek
operation. ff* tools may produce incomplete content due to server limitations.
This protocol accepts the following options:
@table @option
@item follow
If set to 1, the protocol will retry reading at the end of the file, allowing
reading files that still are being written. In order for this to terminate,
you either need to use the rw_timeout option, or use the interrupt callback
(for API users).
@end table
@section gopher
Gopher protocol.
@@ -276,10 +238,7 @@ value is -1.
If set to 1 use chunked Transfer-Encoding for posts, default is 1.
@item content_type
Set a specific content type for the POST messages or for listen mode.
@item http_proxy
Set the HTTP proxy to tunnel through, e.g. @code{http://example.com:1234}.
@item headers
Set custom HTTP headers, can override built in default headers. The
@@ -291,28 +250,16 @@ Use persistent connections if set to 1, default is 0.
@item post_data
Set custom HTTP post data.
@item user_agent
Override the User-Agent header. If not specified the protocol will use a
string describing the libavformat build. ("Lavf/<version>")
@item user-agent
This option is deprecated; use @option{user_agent} instead.
@item timeout
Set timeout in microseconds of socket I/O operations used by the underlying low level
operation. By default it is set to -1, which means that the timeout is
not specified.
@item reconnect_at_eof
If set, EOF is treated like an error and causes reconnection; this is useful
for live / endless streams.
@item reconnect_streamed
If set, even streamed/non-seekable streams will be reconnected on errors.
@item reconnect_delay_max
Set the maximum delay in seconds after which to give up reconnecting.
@item mime_type
Export the MIME type.
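A hedged sketch that keeps reading a live HTTP stream across short outages,
using the reconnect options described above (URL is illustrative):
@example
ffmpeg -reconnect_at_eof 1 -reconnect_streamed 1 -reconnect_delay_max 10 -i http://example.com/live.ts -c copy out.ts
@end example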
@@ -357,7 +304,7 @@ autodetection in the future.
If set to 1 enables experimental HTTP server. This can be used to send data when
used as an output option, or read data from a client with HTTP POST when used as
an input option.
If set to 2 enables experimental multi-client HTTP server. This is not yet implemented
in ffmpeg.c or ffserver.c and thus must not be used as a command line option.
@example
# Server side (sending):
@@ -697,7 +644,7 @@ This protocol accepts the following options.
@table @option
@item timeout
Set timeout in milliseconds of socket I/O operations used by the underlying
low level operation. By default it is set to -1, which means that the timeout
is not specified.
@@ -1161,15 +1108,6 @@ Play an AVI file directly from a TAR archive:
subfile,,start,183241728,end,366490624,,:archive.tar
@end example
@section tee
Writes the output to multiple protocols. The individual outputs are separated
by |
@example
tee:file://path/to/local/this.avi|file://path/to/local/that.avi
@end example
@section tcp
Transmission Control Protocol.
@@ -1196,12 +1134,6 @@ than this time interval, raise error.
@item listen_timeout=@var{milliseconds}
Set listen timeout, expressed in milliseconds.
@item recv_buffer_size=@var{bytes}
Set the receive buffer size, expressed in bytes.
@item send_buffer_size=@var{bytes}
Set the send buffer size, expressed in bytes.
@end table
The following example shows how to set up a listening TCP connection
@@ -1296,14 +1228,6 @@ Set the UDP maximum socket buffer size in bytes. This is used to set either
the receive or send buffer size, depending on what the socket is used for.
Default is 64KB. See also @var{fifo_size}.
@item bitrate=@var{bitrate}
If set to nonzero, the output will have the specified constant bitrate if the
input has enough packets to sustain it.
@item burst_bits=@var{bits}
When using @var{bitrate} this specifies the maximum number of bits in
packet bursts.
@item localport=@var{port}
Override the local UDP port to bind with.
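A sketch of a constant-bitrate UDP output using the options above (address and
rates are illustrative):
@example
ffmpeg -re -i in.ts -c copy -f mpegts "udp://239.0.0.1:1234?bitrate=4000000&burst_bits=8192"
@end example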

View File

@@ -66,8 +66,8 @@ Set rematrix volume. Default value is 1.0.
@item rematrix_maxval
Set maximum output value for rematrixing.
This can be used to prevent clipping vs. preventing volume reduction.
A value of 1.0 prevents clipping.
@item flags, swr_flags
Set flags used by the converter. Default value is 0.
@@ -94,13 +94,13 @@ select triangular dither
@item triangular_hp
select triangular dither with high pass
@item lipshitz
select Lipshitz noise shaping dither.
@item shibata
select Shibata noise shaping dither.
@item low_shibata
select low Shibata noise shaping dither.
@item high_shibata
select high Shibata noise shaping dither.
@item f_weighted
select f-weighted noise shaping dither
@item modified_e_weighted
@@ -120,8 +120,8 @@ select the native SW Resampler; filter options precision and cheby are not
applicable in this case.
@item soxr
select the SoX Resampler (where available); compensation, and filter options
filter_size, phase_shift, exact_rational, filter_type & kaiser_beta, are not
applicable in this case.
@end table
@item filter_size
@@ -132,12 +132,7 @@ For swr only, set resampling phase shift, default value is 10, and must be in
the interval [0,30].
@item linear_interp
Use linear interpolation if set to 1, default value is 0.
@item exact_rational
For swr only, when enabled, try to use exact phase_count based on input and
output sample rate. However, if it is larger than @code{1 << phase_shift},
the phase_count will be @code{1 << phase_shift} as fallback. Default is disabled.
@item cutoff
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
@@ -219,13 +214,13 @@ It accepts the following values:
@item cubic
select cubic
@item blackman_nuttall
select Blackman Nuttall windowed sinc
@item kaiser
select Kaiser windowed sinc
@end table
@item kaiser_beta
For swr only, set Kaiser window beta value. Must be a double float value in the
interval [2,16], default value is 9.
@item output_sample_bits

View File

@@ -46,7 +46,7 @@ Select Gaussian rescaling algorithm.
Select sinc rescaling algorithm.
@item lanczos
Select Lanczos rescaling algorithm.
@item spline
Select natural bicubic spline rescaling algorithm.
@@ -91,7 +91,6 @@ Select source range.
@item dst_range
Select destination range.
@anchor{sws_params}
@item param0, param1
Set scaling algorithm parameters. The specified values are specific of
some scaling algorithms and ignored by others. The specified values

View File

@@ -527,7 +527,7 @@ Wavelet Transform:
==================
Snow supports 2 wavelet transforms, the symmetric biorthogonal 5/3 integer
transform and an integer approximation of the symmetric biorthogonal 9/7
daubechies wavelet.
2D IDWT (inverse discrete wavelet transform)

View File

@@ -10,12 +10,12 @@ Current (simplified) Architecture:
/ \
special converter [Input to YUV converter]
| |
| (8-bit YUV 4:4:4 / 4:2:2 / 4:2:0 / 4:0:0 )
| |
| v
| Horizontal scaler
| |
| (15-bit YUV 4:4:4 / 4:2:2 / 4:2:0 / 4:1:1 / 4:0:0 )
| |
| v
| Vertical scaler and output converter

View File

@@ -22,7 +22,7 @@ EOT
my $TEMPLATE_HEADER2 = $ENV{"FFMPEG_HEADER2"} || <<EOT;
</head>
<body>
<div class="container">
EOT
my $TEMPLATE_FOOTER = $ENV{"FFMPEG_FOOTER"} || <<EOT;

View File

@@ -174,7 +174,7 @@ EOT
<link rel="stylesheet" type="text/css" href="style.min.css">
</head>
<body>
<div class="container">
<h1>
EOT

View File

@@ -869,7 +869,7 @@ Return 1 if @var{x} is lesser than or equal to @var{y}, 0 otherwise.
Return the maximum between @var{x} and @var{y}.
@item min(x, y)
Return the minimum between @var{x} and @var{y}.
@item mod(x, y)
Compute the remainder of division of @var{x} by @var{y}.

View File

@@ -3,8 +3,8 @@ libavfilter.
Foreword: just like everything else in FFmpeg, libavfilter is monolithic, which
means that it is highly recommended that you submit your filters to the FFmpeg
development mailing-list and make sure that they are applied. Otherwise, your filters
are likely to have a very short lifetime due to more or less regular internal API
changes, and a limited distribution, review, and testing.
Bootstrap
@@ -64,7 +64,7 @@ filter, so you can update the boilerplate with your credits.
Doxy
----
Next chunk is the Doxygen about the file. See https://ffmpeg.org/doxygen/trunk/.
Detail here what the filter is, does, and add some references if you feel like
it.
@@ -73,11 +73,11 @@ Context
Skip the headers and scroll down to the definition of FoobarContext. This is
your local state context. It is already filled with 0 when you get it so do not
worry about uninitialized reads into this context. This is where you put all
"global" information that you need; typically the variables storing the user options.
You'll notice the first field "const AVClass *class"; it's the only field you
need to keep assuming you have a context. There is some magic you don't need to
care about around this field, just let it be (in the first position) for now.
Options
-------
@@ -87,7 +87,7 @@ options. For example, -vf foobar=mode=colormix:high=0.4:low=0.1. Most options
have the following pattern:
name, description, offset, type, default value, minimum value, maximum value, flags
- name is the option name, keep it simple and lowercase
- descriptions are short, in lowercase, without period, and describe what they
do, for example "set the foo of the bar"
- offset is the offset of the field in your local context, see the OFFSET()
@@ -99,7 +99,7 @@ have the following pattern:
- min and max values define the range of available values, inclusive
- flags are AVOption generic flags. See AV_OPT_FLAG_* definitions
When in doubt, just look at the other AVOption definitions all around the codebase,
there are tons of examples.
Class
@@ -146,14 +146,14 @@ we won't cover this here since vf_foobar is just a simple 1:1 filter.
uninit()
~~~~~~~~
Similarly, there is the uninit() callback, doing what the name suggests. Free
everything you allocated here.
query_formats()
~~~~~~~~~~~~~~~
This follows the init() and is used for the format negotiation. Basically
you specify here what pixel format(s) (gray, rgb 32, yuv 4:2:0, ...) you accept
for your inputs, and what you can output. All pixel formats are defined in
libavutil/pixfmt.h. If you don't change the pixel format between the input and
the output, you just have to define a pixel formats array and call
@@ -182,7 +182,7 @@ will update outlink->w and outlink->h.
filter_frame()
~~~~~~~~~~~~~~
This is the callback you are waiting for from the beginning: it is where you
process the received frames. Along with the frame, you get the input link from
where the frame comes from.
@@ -317,7 +317,7 @@ Adding timeline support
feature to add. In the most simple case, you just have to add
AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC to the AVFilter.flags. You can typically
do this when your filter does not need to save the previous context frames, or
basically if your filter just alters whatever goes in and doesn't need
previous/future information. See for instance commit 86cb986ce that adds
timeline support to the fieldorder filter.

1539
ffmpeg.c

File diff suppressed because it is too large Load Diff

View File

@@ -64,9 +64,6 @@ enum HWAccelID {
HWACCEL_DXVA2,
HWACCEL_VDA,
HWACCEL_VIDEOTOOLBOX,
HWACCEL_QSV,
HWACCEL_VAAPI,
HWACCEL_CUVID,
};
typedef struct HWAccel {
@@ -115,7 +112,6 @@ typedef struct OptionsContext {
/* input options */
int64_t input_ts_offset;
int loop;
int rate_emu;
int accurate_seek;
int thread_queue_size;
@@ -128,8 +124,6 @@ typedef struct OptionsContext {
int nb_hwaccels;
SpecifierOpt *hwaccel_devices;
int nb_hwaccel_devices;
SpecifierOpt *hwaccel_output_formats;
int nb_hwaccel_output_formats;
SpecifierOpt *autorotate;
int nb_autorotate;
@@ -212,8 +206,6 @@ typedef struct OptionsContext {
int nb_pass;
SpecifierOpt *passlogfiles;
int nb_passlogfiles;
SpecifierOpt *max_muxing_queue_size;
int nb_max_muxing_queue_size;
SpecifierOpt *guess_layout_max;
int nb_guess_layout_max;
SpecifierOpt *apad;
@@ -222,8 +214,6 @@ typedef struct OptionsContext {
int nb_discard;
SpecifierOpt *disposition;
int nb_disposition;
SpecifierOpt *program;
int nb_program;
} OptionsContext;
typedef struct InputFilter {
@@ -283,12 +273,9 @@ typedef struct InputStream {
int64_t filter_in_rescale_delta_last;
int64_t min_pts; /* pts with the smallest value in a current stream */
int64_t max_pts; /* pts with the higher value in a current stream */
int64_t nb_samples; /* number of samples in the last decoded audio frame before looping */
double ts_scale;
int saw_first_ts;
int showed_multi_packet_warning;
AVDictionary *decoder_opts;
AVRational framerate; /* framerate forced with -r */
int top_field_first;
@@ -330,7 +317,6 @@ typedef struct InputStream {
/* hwaccel options */
enum HWAccelID hwaccel_id;
char *hwaccel_device;
enum AVPixelFormat hwaccel_output_format;
/* hwaccel context */
enum HWAccelID active_hwaccel_id;
@@ -340,7 +326,6 @@ typedef struct InputStream {
int (*hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame);
enum AVPixelFormat hwaccel_pix_fmt;
enum AVPixelFormat hwaccel_retrieved_pix_fmt;
AVBufferRef *hw_frames_ctx;
/* stats */
// combined size of all the packets read
@@ -350,9 +335,6 @@ typedef struct InputStream {
// number of frames/samples retrieved from the decoder
uint64_t frames_decoded;
uint64_t samples_decoded;
int64_t *dts_buffer;
int nb_dts_buffer;
} InputStream;
typedef struct InputFile {
@@ -360,12 +342,7 @@ typedef struct InputFile {
int eof_reached; /* true if eof reached */
int eagain; /* true if last read attempt returned EAGAIN */
int ist_index; /* index of first stream in input_streams */
int loop; /* set number of times input stream should be looped */
int64_t duration; /* actual duration of the longest stream in a file
at the moment when looping happens */
AVRational time_base; /* time base of the duration */
int64_t input_ts_offset;
int64_t ts_offset;
int64_t last_ts;
int64_t start_time; /* user-specified start time in AV_TIME_BASE or AV_NOPTS_VALUE */
@@ -395,8 +372,6 @@ enum forced_keyframes_const {
FKF_NB
};
#define ABORT_ON_FLAG_EMPTY_OUTPUT (1 << 0)
extern const char *const forced_keyframes_const_names[];
typedef enum {
@@ -420,25 +395,17 @@ typedef struct OutputStream {
int64_t first_pts;
/* dts of the last packet sent to the muxer */
int64_t last_mux_dts;
int nb_bitstream_filters;
uint8_t *bsf_extradata_updated;
AVBSFContext **bsf_ctx;
AVBitStreamFilterContext *bitstream_filters;
AVCodecContext *enc_ctx;
AVCodecParameters *ref_par; /* associated input codec parameters with encoders options applied */
AVCodec *enc;
int64_t max_frames;
AVFrame *filtered_frame;
AVFrame *last_frame;
int last_dropped;
int last_nb0_frames[3];
void *hwaccel_ctx;
/* video only */
AVRational frame_rate;
int is_cfr;
int force_fps;
int top_field_first;
int rotate_overridden;
@@ -469,16 +436,11 @@ typedef struct OutputStream {
AVDictionary *sws_dict;
AVDictionary *swr_opts;
AVDictionary *resample_opts;
AVDictionary *bsf_args;
char *apad;
OSTFinished finished; /* no more packets should be written for this stream */
int unavailable; /* true if the stream is unavailable (possibly temporarily) */
int stream_copy;
// init_output_stream() has been called for this stream
// The encoder and the bitstream filters have been initialized and the stream
// parameters are set in the AVStream.
int initialized;
const char *attachment_filename;
int copy_initial_nonkeyframes;
int copy_prior_start;
@@ -487,7 +449,6 @@ typedef struct OutputStream {
int keep_pix_fmt;
AVCodecParserContext *parser;
AVCodecContext *parser_avctx;
/* stats */
// combined size of all the packets written
@@ -501,11 +462,6 @@ typedef struct OutputStream {
/* packet quality factor */
int quality;
int max_muxing_queue_size;
/* the packets are buffered here until the muxer is ready to be initialized */
AVFifoBuffer *muxing_queue;
/* packet picture type */
int pict_type;
@@ -522,8 +478,6 @@ typedef struct OutputFile {
uint64_t limit_filesize; /* filesize limit expressed in bytes */
int shortest;
int header_written;
} OutputFile;
extern InputStream **input_streams;
@@ -560,21 +514,19 @@ extern int start_at_zero;
extern int copy_tb;
extern int debug_ts;
extern int exit_on_error;
extern int abort_on_flags;
extern int print_stats;
extern int qp_hist;
extern int stdin_interaction;
extern int frame_bits_per_raw_sample;
extern AVIOContext *progress_avio;
extern float max_error_rate;
extern int vdpau_api_ver;
extern char *videotoolbox_pixfmt;
extern const AVIOInterruptCB int_cb;
extern const OptionDef options[];
extern const HWAccel hwaccels[];
extern int hwaccel_lax_profile_check;
extern AVBufferRef *hw_device_ctx;
void term_init(void);
@@ -596,8 +548,7 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec);
int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
int filtergraph_is_simple(FilterGraph *fg);
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost);
int init_complex_filtergraph(FilterGraph *fg);
int ffmpeg_parse_options(int argc, char **argv);
@@ -606,11 +557,5 @@ int vdpau_init(AVCodecContext *s);
int dxva2_init(AVCodecContext *s);
int vda_init(AVCodecContext *s);
int videotoolbox_init(AVCodecContext *s);
int qsv_init(AVCodecContext *s);
int qsv_transcode_init(OutputStream *ost);
int vaapi_decode_init(AVCodecContext *avctx);
int vaapi_device_init(const char *device);
int cuvid_init(AVCodecContext *s);
int cuvid_transcode_init(OutputStream *ost);
#endif /* FFMPEG_H */

View File

@@ -1,155 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/hwcontext.h"
#include "ffmpeg.h"
typedef struct CUVIDContext {
AVBufferRef *hw_frames_ctx;
} CUVIDContext;
static void cuvid_uninit(AVCodecContext *avctx)
{
InputStream *ist = avctx->opaque;
CUVIDContext *ctx = ist->hwaccel_ctx;
if (ctx) {
av_buffer_unref(&ctx->hw_frames_ctx);
av_freep(&ctx);
}
av_buffer_unref(&ist->hw_frames_ctx);
ist->hwaccel_ctx = 0;
ist->hwaccel_uninit = 0;
}
int cuvid_init(AVCodecContext *avctx)
{
InputStream *ist = avctx->opaque;
CUVIDContext *ctx = ist->hwaccel_ctx;
av_log(NULL, AV_LOG_TRACE, "Initializing cuvid hwaccel\n");
if (!ctx) {
av_log(NULL, AV_LOG_ERROR, "CUVID transcoding is not initialized. "
"-hwaccel cuvid should only be used for one-to-one CUVID transcoding "
"with no (software) filters.\n");
return AVERROR(EINVAL);
}
return 0;
}
int cuvid_transcode_init(OutputStream *ost)
{
InputStream *ist;
const enum AVPixelFormat *pix_fmt;
AVHWFramesContext *hwframe_ctx;
AVBufferRef *device_ref = NULL;
CUVIDContext *ctx = NULL;
int ret = 0;
av_log(NULL, AV_LOG_TRACE, "Initializing cuvid transcoding\n");
if (ost->source_index < 0)
return 0;
ist = input_streams[ost->source_index];
/* check if the encoder supports CUVID */
if (!ost->enc->pix_fmts)
goto cancel;
for (pix_fmt = ost->enc->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++)
if (*pix_fmt == AV_PIX_FMT_CUDA)
break;
if (*pix_fmt == AV_PIX_FMT_NONE)
goto cancel;
/* check if the decoder supports CUVID */
if (ist->hwaccel_id != HWACCEL_CUVID || !ist->dec || !ist->dec->pix_fmts)
goto cancel;
for (pix_fmt = ist->dec->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++)
if (*pix_fmt == AV_PIX_FMT_CUDA)
break;
if (*pix_fmt == AV_PIX_FMT_NONE)
goto cancel;
av_log(NULL, AV_LOG_VERBOSE, "Setting up CUVID transcoding\n");
if (ist->hwaccel_ctx) {
ctx = ist->hwaccel_ctx;
} else {
ctx = av_mallocz(sizeof(*ctx));
if (!ctx) {
ret = AVERROR(ENOMEM);
goto error;
}
}
if (!ctx->hw_frames_ctx) {
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_CUDA,
ist->hwaccel_device, NULL, 0);
if (ret < 0)
goto error;
ctx->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
if (!ctx->hw_frames_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
av_buffer_unref(&device_ref);
ist->hw_frames_ctx = av_buffer_ref(ctx->hw_frames_ctx);
if (!ist->hw_frames_ctx) {
av_log(NULL, AV_LOG_ERROR, "av_buffer_ref failed\n");
ret = AVERROR(ENOMEM);
goto error;
}
ist->hwaccel_ctx = ctx;
ist->resample_pix_fmt = AV_PIX_FMT_CUDA;
ist->hwaccel_uninit = cuvid_uninit;
/* This is a bit hacky, av_hwframe_ctx_init is called by the cuvid decoder
* once it has probed the necessary format information. But as filters/nvenc
* need to know the format/sw_format, set them here so they are happy.
* This is fine as long as CUVID doesn't add another supported pix_fmt.
*/
hwframe_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
hwframe_ctx->format = AV_PIX_FMT_CUDA;
hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
}
return 0;
error:
av_freep(&ctx);
av_buffer_unref(&device_ref);
return ret;
cancel:
if (ist->hwaccel_id == HWACCEL_CUVID) {
av_log(NULL, AV_LOG_ERROR, "CUVID hwaccel requested, but impossible to achieve.\n");
return AVERROR(EINVAL);
}
return 0;
}

View File

@@ -40,9 +40,6 @@
#include "libavutil/imgutils.h"
#include "libavutil/pixfmt.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_dxva2.h"
/* define all the GUIDs used directly here,
to avoid problems with inconsistent dxva2api.h versions in mingw-w64 and different MSVC version */
#include <initguid.h>
@@ -56,11 +53,12 @@ DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0
DEFINE_GUID(DXVA2_ModeVC1_D, 0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeVC1_D2010, 0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main, 0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main10,0x107af0e0, 0xef1a,0x4d19,0xab,0xa8,0x67,0xa1,0x63,0x07,0x3d,0x13);
DEFINE_GUID(DXVA2_ModeVP9_VLD_Profile0, 0x463707f8, 0xa1d0,0x4585,0x87,0x6d,0x83,0xaa,0x6d,0x60,0xb8,0x9e);
DEFINE_GUID(DXVA2_NoEncrypt, 0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(GUID_NULL, 0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);
typedef IDirect3D9* WINAPI pDirect3DCreate9(UINT);
typedef HRESULT WINAPI pCreateDeviceManager9(UINT *, IDirect3DDeviceManager9 **);
typedef struct dxva2_mode {
const GUID *guid;
enum AVCodecID codec;
@@ -85,27 +83,67 @@ static const dxva2_mode dxva2_modes[] = {
/* HEVC/H.265 */
{ &DXVA2_ModeHEVC_VLD_Main, AV_CODEC_ID_HEVC },
{ &DXVA2_ModeHEVC_VLD_Main10,AV_CODEC_ID_HEVC },
/* VP8/9 */
{ &DXVA2_ModeVP9_VLD_Profile0, AV_CODEC_ID_VP9 },
{ NULL, 0 },
};
typedef struct surface_info {
int used;
uint64_t age;
} surface_info;
typedef struct DXVA2Context {
HMODULE d3dlib;
HMODULE dxva2lib;
HANDLE deviceHandle;
IDirect3D9 *d3d9;
IDirect3DDevice9 *d3d9device;
IDirect3DDeviceManager9 *d3d9devmgr;
IDirectXVideoDecoderService *decoder_service;
IDirectXVideoDecoder *decoder;
GUID decoder_guid;
DXVA2_ConfigPictureDecode decoder_config;
IDirectXVideoDecoderService *decoder_service;
LPDIRECT3DSURFACE9 *surfaces;
surface_info *surface_infos;
uint32_t num_surfaces;
uint64_t surface_age;
AVFrame *tmp_frame;
AVBufferRef *hw_device_ctx;
AVBufferRef *hw_frames_ctx;
} DXVA2Context;
typedef struct DXVA2SurfaceWrapper {
DXVA2Context *ctx;
LPDIRECT3DSURFACE9 surface;
IDirectXVideoDecoder *decoder;
} DXVA2SurfaceWrapper;
static void dxva2_destroy_decoder(AVCodecContext *s)
{
InputStream *ist = s->opaque;
DXVA2Context *ctx = ist->hwaccel_ctx;
int i;
if (ctx->surfaces) {
for (i = 0; i < ctx->num_surfaces; i++) {
if (ctx->surfaces[i])
IDirect3DSurface9_Release(ctx->surfaces[i]);
}
}
av_freep(&ctx->surfaces);
av_freep(&ctx->surface_infos);
ctx->num_surfaces = 0;
ctx->surface_age = 0;
if (ctx->decoder) {
IDirectXVideoDecoder_Release(ctx->decoder);
ctx->decoder = NULL;
}
}
static void dxva2_uninit(AVCodecContext *s)
{
InputStream *ist = s->opaque;
@@ -115,11 +153,29 @@ static void dxva2_uninit(AVCodecContext *s)
ist->hwaccel_get_buffer = NULL;
ist->hwaccel_retrieve_data = NULL;
if (ctx->decoder)
dxva2_destroy_decoder(s);
if (ctx->decoder_service)
IDirectXVideoDecoderService_Release(ctx->decoder_service);
av_buffer_unref(&ctx->hw_frames_ctx);
av_buffer_unref(&ctx->hw_device_ctx);
if (ctx->d3d9devmgr && ctx->deviceHandle != INVALID_HANDLE_VALUE)
IDirect3DDeviceManager9_CloseDeviceHandle(ctx->d3d9devmgr, ctx->deviceHandle);
if (ctx->d3d9devmgr)
IDirect3DDeviceManager9_Release(ctx->d3d9devmgr);
if (ctx->d3d9device)
IDirect3DDevice9_Release(ctx->d3d9device);
if (ctx->d3d9)
IDirect3D9_Release(ctx->d3d9);
if (ctx->d3dlib)
FreeLibrary(ctx->d3dlib);
if (ctx->dxva2lib)
FreeLibrary(ctx->dxva2lib);
av_frame_free(&ctx->tmp_frame);
@@ -127,34 +183,119 @@ static void dxva2_uninit(AVCodecContext *s)
av_freep(&s->hwaccel_context);
}
static void dxva2_release_buffer(void *opaque, uint8_t *data)
{
DXVA2SurfaceWrapper *w = opaque;
DXVA2Context *ctx = w->ctx;
int i;
for (i = 0; i < ctx->num_surfaces; i++) {
if (ctx->surfaces[i] == w->surface) {
ctx->surface_infos[i].used = 0;
break;
}
}
IDirect3DSurface9_Release(w->surface);
IDirectXVideoDecoder_Release(w->decoder);
av_free(w);
}
static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
InputStream *ist = s->opaque;
DXVA2Context *ctx = ist->hwaccel_ctx;
int i, old_unused = -1;
LPDIRECT3DSURFACE9 surface;
DXVA2SurfaceWrapper *w = NULL;
return av_hwframe_get_buffer(ctx->hw_frames_ctx, frame, 0);
av_assert0(frame->format == AV_PIX_FMT_DXVA2_VLD);
for (i = 0; i < ctx->num_surfaces; i++) {
surface_info *info = &ctx->surface_infos[i];
if (!info->used && (old_unused == -1 || info->age < ctx->surface_infos[old_unused].age))
old_unused = i;
}
if (old_unused == -1) {
av_log(NULL, AV_LOG_ERROR, "No free DXVA2 surface!\n");
return AVERROR(ENOMEM);
}
i = old_unused;
surface = ctx->surfaces[i];
w = av_mallocz(sizeof(*w));
if (!w)
return AVERROR(ENOMEM);
frame->buf[0] = av_buffer_create((uint8_t*)surface, 0,
dxva2_release_buffer, w,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0]) {
av_free(w);
return AVERROR(ENOMEM);
}
w->ctx = ctx;
w->surface = surface;
IDirect3DSurface9_AddRef(w->surface);
w->decoder = ctx->decoder;
IDirectXVideoDecoder_AddRef(w->decoder);
ctx->surface_infos[i].used = 1;
ctx->surface_infos[i].age = ctx->surface_age++;
frame->data[3] = (uint8_t *)surface;
return 0;
}
static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
LPDIRECT3DSURFACE9 surface = (LPDIRECT3DSURFACE9)frame->data[3];
InputStream *ist = s->opaque;
DXVA2Context *ctx = ist->hwaccel_ctx;
D3DSURFACE_DESC surfaceDesc;
D3DLOCKED_RECT LockedRect;
HRESULT hr;
int ret;
ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0);
IDirect3DSurface9_GetDesc(surface, &surfaceDesc);
ctx->tmp_frame->width = frame->width;
ctx->tmp_frame->height = frame->height;
ctx->tmp_frame->format = AV_PIX_FMT_NV12;
ret = av_frame_get_buffer(ctx->tmp_frame, 32);
if (ret < 0)
return ret;
ret = av_frame_copy_props(ctx->tmp_frame, frame);
if (ret < 0) {
av_frame_unref(ctx->tmp_frame);
return ret;
hr = IDirect3DSurface9_LockRect(surface, &LockedRect, NULL, D3DLOCK_READONLY);
if (FAILED(hr)) {
av_log(NULL, AV_LOG_ERROR, "Unable to lock DXVA2 surface\n");
return AVERROR_UNKNOWN;
}
av_image_copy_plane(ctx->tmp_frame->data[0], ctx->tmp_frame->linesize[0],
(uint8_t*)LockedRect.pBits,
LockedRect.Pitch, frame->width, frame->height);
av_image_copy_plane(ctx->tmp_frame->data[1], ctx->tmp_frame->linesize[1],
(uint8_t*)LockedRect.pBits + LockedRect.Pitch * surfaceDesc.Height,
LockedRect.Pitch, frame->width, frame->height / 2);
IDirect3DSurface9_UnlockRect(surface);
ret = av_frame_copy_props(ctx->tmp_frame, frame);
if (ret < 0)
goto fail;
av_frame_unref(frame);
av_frame_move_ref(frame, ctx->tmp_frame);
return 0;
fail:
av_frame_unref(ctx->tmp_frame);
return ret;
}
static int dxva2_alloc(AVCodecContext *s)
@@ -162,40 +303,94 @@ static int dxva2_alloc(AVCodecContext *s)
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
DXVA2Context *ctx;
HANDLE device_handle;
pDirect3DCreate9 *createD3D = NULL;
pCreateDeviceManager9 *createDeviceManager = NULL;
HRESULT hr;
AVHWDeviceContext *device_ctx;
AVDXVA2DeviceContext *device_hwctx;
int ret;
D3DPRESENT_PARAMETERS d3dpp = {0};
D3DDISPLAYMODE d3ddm;
unsigned resetToken = 0;
UINT adapter = D3DADAPTER_DEFAULT;
ctx = av_mallocz(sizeof(*ctx));
if (!ctx)
return AVERROR(ENOMEM);
ctx->deviceHandle = INVALID_HANDLE_VALUE;
ist->hwaccel_ctx = ctx;
ist->hwaccel_uninit = dxva2_uninit;
ist->hwaccel_get_buffer = dxva2_get_buffer;
ist->hwaccel_retrieve_data = dxva2_retrieve_data;
ret = av_hwdevice_ctx_create(&ctx->hw_device_ctx, AV_HWDEVICE_TYPE_DXVA2,
ist->hwaccel_device, NULL, 0);
if (ret < 0)
ctx->d3dlib = LoadLibrary("d3d9.dll");
if (!ctx->d3dlib) {
av_log(NULL, loglevel, "Failed to load D3D9 library\n");
goto fail;
device_ctx = (AVHWDeviceContext*)ctx->hw_device_ctx->data;
device_hwctx = device_ctx->hwctx;
hr = IDirect3DDeviceManager9_OpenDeviceHandle(device_hwctx->devmgr,
&device_handle);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to open a device handle\n");
}
ctx->dxva2lib = LoadLibrary("dxva2.dll");
if (!ctx->dxva2lib) {
av_log(NULL, loglevel, "Failed to load DXVA2 library\n");
goto fail;
}
hr = IDirect3DDeviceManager9_GetVideoService(device_hwctx->devmgr, device_handle,
&IID_IDirectXVideoDecoderService,
(void **)&ctx->decoder_service);
IDirect3DDeviceManager9_CloseDeviceHandle(device_hwctx->devmgr, device_handle);
createD3D = (pDirect3DCreate9 *)GetProcAddress(ctx->d3dlib, "Direct3DCreate9");
if (!createD3D) {
av_log(NULL, loglevel, "Failed to locate Direct3DCreate9\n");
goto fail;
}
createDeviceManager = (pCreateDeviceManager9 *)GetProcAddress(ctx->dxva2lib, "DXVA2CreateDirect3DDeviceManager9");
if (!createDeviceManager) {
av_log(NULL, loglevel, "Failed to locate DXVA2CreateDirect3DDeviceManager9\n");
goto fail;
}
ctx->d3d9 = createD3D(D3D_SDK_VERSION);
if (!ctx->d3d9) {
av_log(NULL, loglevel, "Failed to create IDirect3D object\n");
goto fail;
}
if (ist->hwaccel_device) {
adapter = atoi(ist->hwaccel_device);
av_log(NULL, AV_LOG_INFO, "Using HWAccel device %d\n", adapter);
}
IDirect3D9_GetAdapterDisplayMode(ctx->d3d9, adapter, &d3ddm);
d3dpp.Windowed = TRUE;
d3dpp.BackBufferWidth = 640;
d3dpp.BackBufferHeight = 480;
d3dpp.BackBufferCount = 0;
d3dpp.BackBufferFormat = d3ddm.Format;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dpp.Flags = D3DPRESENTFLAG_VIDEO;
hr = IDirect3D9_CreateDevice(ctx->d3d9, adapter, D3DDEVTYPE_HAL, GetDesktopWindow(),
D3DCREATE_SOFTWARE_VERTEXPROCESSING | D3DCREATE_MULTITHREADED | D3DCREATE_FPU_PRESERVE,
&d3dpp, &ctx->d3d9device);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to create Direct3D device\n");
goto fail;
}
hr = createDeviceManager(&resetToken, &ctx->d3d9devmgr);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to create Direct3D device manager\n");
goto fail;
}
hr = IDirect3DDeviceManager9_ResetDevice(ctx->d3d9devmgr, ctx->d3d9device, resetToken);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to bind Direct3D device to device manager\n");
goto fail;
}
hr = IDirect3DDeviceManager9_OpenDeviceHandle(ctx->d3d9devmgr, &ctx->deviceHandle);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to open device handle\n");
goto fail;
}
hr = IDirect3DDeviceManager9_GetVideoService(ctx->d3d9devmgr, ctx->deviceHandle, &IID_IDirectXVideoDecoderService, (void **)&ctx->decoder_service);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to create IDirectXVideoDecoderService\n");
goto fail;
@@ -271,17 +466,13 @@ static int dxva2_create_decoder(AVCodecContext *s)
GUID *guid_list = NULL;
unsigned guid_count = 0, i, j;
GUID device_guid = GUID_NULL;
const D3DFORMAT surface_format = (s->sw_pix_fmt == AV_PIX_FMT_YUV420P10) ? MKTAG('P','0','1','0') : MKTAG('N','V','1','2');
D3DFORMAT target_format = 0;
DXVA2_VideoDesc desc = { 0 };
DXVA2_ConfigPictureDecode config;
HRESULT hr;
int surface_alignment, num_surfaces;
int surface_alignment;
int ret;
AVDXVA2FramesContext *frames_hwctx;
AVHWFramesContext *frames_ctx;
hr = IDirectXVideoDecoderService_GetDecoderDeviceGuids(ctx->decoder_service, &guid_count, &guid_list);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to retrieve decoder device GUIDs\n");
@@ -308,7 +499,7 @@ static int dxva2_create_decoder(AVCodecContext *s)
}
for (j = 0; j < target_count; j++) {
const D3DFORMAT format = target_list[j];
if (format == surface_format) {
if (format == MKTAG('N','V','1','2')) {
target_format = format;
break;
}
@@ -347,43 +538,41 @@ static int dxva2_create_decoder(AVCodecContext *s)
surface_alignment = 16;
/* 4 base work surfaces */
num_surfaces = 4;
ctx->num_surfaces = 4;
/* add surfaces based on number of possible refs */
if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
num_surfaces += 16;
else if (s->codec_id == AV_CODEC_ID_VP9)
num_surfaces += 8;
ctx->num_surfaces += 16;
else
num_surfaces += 2;
ctx->num_surfaces += 2;
/* add extra surfaces for frame threading */
if (s->active_thread_type & FF_THREAD_FRAME)
num_surfaces += s->thread_count;
ctx->num_surfaces += s->thread_count;
ctx->hw_frames_ctx = av_hwframe_ctx_alloc(ctx->hw_device_ctx);
if (!ctx->hw_frames_ctx)
ctx->surfaces = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surfaces));
ctx->surface_infos = av_mallocz(ctx->num_surfaces * sizeof(*ctx->surface_infos));
if (!ctx->surfaces || !ctx->surface_infos) {
av_log(NULL, loglevel, "Unable to allocate surface arrays\n");
goto fail;
frames_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
frames_hwctx = frames_ctx->hwctx;
}
frames_ctx->format = AV_PIX_FMT_DXVA2_VLD;
frames_ctx->sw_format = (target_format == MKTAG('P','0','1','0') ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12);
frames_ctx->width = FFALIGN(s->coded_width, surface_alignment);
frames_ctx->height = FFALIGN(s->coded_height, surface_alignment);
frames_ctx->initial_pool_size = num_surfaces;
frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;
ret = av_hwframe_ctx_init(ctx->hw_frames_ctx);
if (ret < 0) {
av_log(NULL, loglevel, "Failed to initialize the HW frames context\n");
hr = IDirectXVideoDecoderService_CreateSurface(ctx->decoder_service,
FFALIGN(s->coded_width, surface_alignment),
FFALIGN(s->coded_height, surface_alignment),
ctx->num_surfaces - 1,
target_format, D3DPOOL_DEFAULT, 0,
DXVA2_VideoDecoderRenderTarget,
ctx->surfaces, NULL);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to create %d video surfaces\n", ctx->num_surfaces);
goto fail;
}
hr = IDirectXVideoDecoderService_CreateVideoDecoder(ctx->decoder_service, &device_guid,
&desc, &config, frames_hwctx->surfaces,
frames_hwctx->nb_surfaces, &frames_hwctx->decoder_to_release);
&desc, &config, ctx->surfaces,
ctx->num_surfaces, &ctx->decoder);
if (FAILED(hr)) {
av_log(NULL, loglevel, "Failed to create DXVA2 video decoder\n");
goto fail;
@@ -393,16 +582,16 @@ static int dxva2_create_decoder(AVCodecContext *s)
ctx->decoder_config = config;
dxva_ctx->cfg = &ctx->decoder_config;
dxva_ctx->decoder = frames_hwctx->decoder_to_release;
dxva_ctx->surface = frames_hwctx->surfaces;
dxva_ctx->surface_count = frames_hwctx->nb_surfaces;
dxva_ctx->decoder = ctx->decoder;
dxva_ctx->surface = ctx->surfaces;
dxva_ctx->surface_count = ctx->num_surfaces;
if (IsEqualGUID(&ctx->decoder_guid, &DXVADDI_Intel_ModeH264_E))
dxva_ctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO;
return 0;
fail:
av_buffer_unref(&ctx->hw_frames_ctx);
dxva2_destroy_decoder(s);
return AVERROR(EINVAL);
}
@@ -426,13 +615,8 @@ int dxva2_init(AVCodecContext *s)
return AVERROR(EINVAL);
}
if (s->codec_id == AV_CODEC_ID_HEVC &&
s->profile != FF_PROFILE_HEVC_MAIN && s->profile != FF_PROFILE_HEVC_MAIN_10) {
av_log(NULL, loglevel, "Unsupported HEVC profile for DXVA2 HWAccel: %d\n", s->profile);
return AVERROR(EINVAL);
}
av_buffer_unref(&ctx->hw_frames_ctx);
if (ctx->decoder)
dxva2_destroy_decoder(s);
ret = dxva2_create_decoder(s);
if (ret < 0) {

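Illustrative sketch (not from the diff): the hwframes-based side of dxva2_create_decoder above builds its surface pool through an AVHWFramesContext rather than calling IDirectXVideoDecoderService_CreateSurface directly. A minimal standalone version of that setup, assuming device_ref already wraps an opened DXVA2 device and that surface_alignment/num_surfaces are chosen as in the hunk above, could look like this:

#include "libavcodec/avcodec.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_dxva2.h"

/* Hypothetical helper, not the function from the diff. */
static int alloc_dxva2_frame_pool(AVCodecContext *avctx, AVBufferRef *device_ref,
                                  int surface_alignment, int num_surfaces,
                                  AVBufferRef **out_frames_ref)
{
    AVBufferRef          *frames_ref = av_hwframe_ctx_alloc(device_ref);
    AVHWFramesContext    *frames_ctx;
    AVDXVA2FramesContext *frames_hwctx;
    int ret;

    if (!frames_ref)
        return AVERROR(ENOMEM);
    frames_ctx   = (AVHWFramesContext*)frames_ref->data;
    frames_hwctx = frames_ctx->hwctx;

    frames_ctx->format    = AV_PIX_FMT_DXVA2_VLD;  /* opaque hardware frames */
    frames_ctx->sw_format = AV_PIX_FMT_NV12;       /* or AV_PIX_FMT_P010 for 10-bit */
    frames_ctx->width     = FFALIGN(avctx->coded_width,  surface_alignment);
    frames_ctx->height    = FFALIGN(avctx->coded_height, surface_alignment);
    frames_ctx->initial_pool_size = num_surfaces;
    frames_hwctx->surface_type    = DXVA2_VideoDecoderRenderTarget;

    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0) {
        av_buffer_unref(&frames_ref);
        return ret;
    }
    *out_frames_ref = frames_ref;
    return 0;
}

After av_hwframe_ctx_init() the allocated surfaces are exposed through the AVDXVA2FramesContext (frames_hwctx->surfaces / frames_hwctx->nb_surfaces), which is what the CreateVideoDecoder call in the hunk above consumes.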

@@ -24,7 +24,6 @@
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavresample/avresample.h"
@@ -39,27 +38,6 @@
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
{
static const enum AVPixelFormat mjpeg_formats[] =
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_NONE };
static const enum AVPixelFormat ljpeg_formats[] =
{ AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
AV_PIX_FMT_NONE};
if (codec_id == AV_CODEC_ID_MJPEG) {
return mjpeg_formats;
} else if (codec_id == AV_CODEC_ID_LJPEG) {
return ljpeg_formats;
} else {
return default_formats;
}
}
enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCodec *codec, enum AVPixelFormat target)
{
if (codec && codec->pix_fmts) {
@@ -67,9 +45,18 @@ enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx, AVCod
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
enum AVPixelFormat best= AV_PIX_FMT_NONE;
static const enum AVPixelFormat mjpeg_formats[] =
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
static const enum AVPixelFormat ljpeg_formats[] =
{ AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
if (enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
p = mjpeg_formats;
} else if (enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
p =ljpeg_formats;
}
}
for (; *p != AV_PIX_FMT_NONE; p++) {
best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
@@ -94,19 +81,19 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec)
if (codec && codec->sample_fmts) {
const enum AVSampleFormat *p = codec->sample_fmts;
for (; *p != -1; p++) {
if (*p == st->codecpar->format)
if (*p == st->codec->sample_fmt)
break;
}
if (*p == -1) {
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codecpar->format) > av_get_sample_fmt_name(codec->sample_fmts[0]))
if((codec->capabilities & AV_CODEC_CAP_LOSSLESS) && av_get_sample_fmt_name(st->codec->sample_fmt) > av_get_sample_fmt_name(codec->sample_fmts[0]))
av_log(NULL, AV_LOG_ERROR, "Conversion will not be lossless.\n");
if(av_get_sample_fmt_name(st->codecpar->format))
if(av_get_sample_fmt_name(st->codec->sample_fmt))
av_log(NULL, AV_LOG_WARNING,
"Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
av_get_sample_fmt_name(st->codecpar->format),
av_get_sample_fmt_name(st->codec->sample_fmt),
codec->name,
av_get_sample_fmt_name(codec->sample_fmts[0]));
st->codecpar->format = codec->sample_fmts[0];
st->codec->sample_fmt = codec->sample_fmts[0];
}
}
}
@@ -139,7 +126,12 @@ static char *choose_pix_fmts(OutputStream *ost)
p = ost->enc->pix_fmts;
if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
if (ost->enc_ctx->codec_id == AV_CODEC_ID_MJPEG) {
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE };
} else if (ost->enc_ctx->codec_id == AV_CODEC_ID_LJPEG) {
p = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUV420P,
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
}
}
for (; *p != AV_PIX_FMT_NONE; p++) {
@@ -193,7 +185,7 @@ DEF_CHOOSE_FORMAT(int, sample_rate, supported_samplerates, 0,
DEF_CHOOSE_FORMAT(uint64_t, channel_layout, channel_layouts, 0,
GET_CH_LAYOUT_NAME)
int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
FilterGraph *init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
FilterGraph *fg = av_mallocz(sizeof(*fg));
@@ -221,7 +213,7 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
GROW_ARRAY(filtergraphs, nb_filtergraphs);
filtergraphs[nb_filtergraphs - 1] = fg;
return 0;
return fg;
}
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
@@ -251,7 +243,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
s = input_files[file_idx]->ctx;
for (i = 0; i < s->nb_streams; i++) {
enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
enum AVMediaType stream_type = s->streams[i]->codec->codec_type;
if (stream_type != type &&
!(stream_type == AVMEDIA_TYPE_SUBTITLE &&
type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
@@ -428,7 +420,7 @@ static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter,
if (ret < 0)
return ret;
if (!hw_device_ctx && (codec->width || codec->height)) {
if (codec->width || codec->height) {
char args[255];
AVFilterContext *filter;
AVDictionaryEntry *e = NULL;
@@ -611,7 +603,7 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
int i;
for (i=0; i<of->ctx->nb_streams; i++)
if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
if (of->ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
break;
if (i<of->ctx->nb_streams) {
@@ -656,7 +648,7 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
DESCRIBE_FILTER_LINK(ofilter, out, 0);
if (!ofilter->ost) {
av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
av_log(NULL, AV_LOG_FATAL, "Filter %s has a unconnected output\n", ofilter->name);
exit_program(1);
}
@@ -673,15 +665,15 @@ static int sub2video_prepare(InputStream *ist)
int i, w, h;
/* Compute the size of the canvas for the subtitles stream.
If the subtitles codecpar has set a size, use it. Otherwise use the
If the subtitles codec has set a size, use it. Otherwise use the
maximum dimensions of the video streams in the same file. */
w = ist->dec_ctx->width;
h = ist->dec_ctx->height;
if (!(w && h)) {
for (i = 0; i < avf->nb_streams; i++) {
if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codecpar->width);
h = FFMAX(h, avf->streams[i]->codecpar->height);
if (avf->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
w = FFMAX(w, avf->streams[i]->codec->width);
h = FFMAX(h, avf->streams[i]->codec->height);
}
}
if (!(w && h)) {
@@ -719,17 +711,10 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
char name[255];
int ret, pad_idx = 0;
int64_t tsoffset = 0;
AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
if (!par)
return AVERROR(ENOMEM);
memset(par, 0, sizeof(*par));
par->format = AV_PIX_FMT_NONE;
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
ret = AVERROR(EINVAL);
goto fail;
return AVERROR(EINVAL);
}
if (!fr.num)
@@ -738,7 +723,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
ret = sub2video_prepare(ist);
if (ret < 0)
goto fail;
return ret;
}
sar = ist->st->sample_aspect_ratio.num ?
@@ -759,15 +744,9 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
ist->file_index, ist->st->index);
if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
args.str, NULL, fg->graph)) < 0)
goto fail;
par->hw_frames_ctx = ist->hw_frames_ctx;
ret = av_buffersrc_parameters_set(ifilter->filter, par);
if (ret < 0)
goto fail;
av_freep(&par);
return ret;
last_filter = ifilter->filter;
if (ist->autorotate) {
@@ -841,10 +820,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
return ret;
return 0;
fail:
av_freep(&par);
return ret;
}
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
@@ -979,7 +954,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
int configure_filtergraph(FilterGraph *fg)
{
AVFilterInOut *inputs, *outputs, *cur;
int ret, i, simple = filtergraph_is_simple(fg);
int ret, i, simple = !fg->graph_desc;
const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
fg->graph_desc;
@@ -1027,12 +1002,6 @@ int configure_filtergraph(FilterGraph *fg)
if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
return ret;
if (hw_device_ctx) {
for (i = 0; i < fg->graph->nb_filters; i++) {
fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(hw_device_ctx);
}
}
if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
const char *num_inputs;
const char *num_outputs;
@@ -1077,14 +1046,8 @@ int configure_filtergraph(FilterGraph *fg)
for (i = 0; i < fg->nb_outputs; i++) {
OutputStream *ost = fg->outputs[i]->ost;
if (!ost->enc) {
/* identical to the same check in ffmpeg.c, needed because
complex filter graphs are initialized earlier */
av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
return AVERROR(EINVAL);
}
if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
if (ost &&
ost->enc->type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(ost->filter->filter,
ost->enc_ctx->frame_size);
@@ -1102,7 +1065,3 @@ int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
return 0;
}
int filtergraph_is_simple(FilterGraph *fg)
{
return !fg->graph_desc;
}
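Illustrative sketch (not from the diff): one side of the configure_input_video_filter hunk above hands the decoder's hardware frames context to the "buffer" source through AVBufferSrcParameters. Reduced to a standalone helper, with buffersrc_ctx and hw_frames_ctx assumed to exist already, the pattern is roughly:

#include "libavfilter/buffersrc.h"
#include "libavutil/mem.h"

/* Hypothetical helper, not the function from the diff. */
static int attach_hw_frames_to_buffersrc(AVFilterContext *buffersrc_ctx,
                                         AVBufferRef *hw_frames_ctx)
{
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
    int ret;

    if (!par)
        return AVERROR(ENOMEM);
    par->format        = AV_PIX_FMT_NONE;  /* keep the pix_fmt given in the filter args */
    par->hw_frames_ctx = hw_frames_ctx;    /* buffersrc takes its own reference */
    ret = av_buffersrc_parameters_set(buffersrc_ctx, par);
    av_freep(&par);
    return ret;
}

As in the hunk, the parameters struct itself is freed right after av_buffersrc_parameters_set(); the caller's hw_frames_ctx reference remains valid and owned by the caller.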


@@ -40,6 +40,7 @@
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/time_internal.h"
#define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
@@ -77,20 +78,9 @@ const HWAccel hwaccels[] = {
#endif
#if CONFIG_VIDEOTOOLBOX
{ "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
#endif
#if CONFIG_LIBMFX
{ "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV },
#endif
#if CONFIG_VAAPI
{ "vaapi", vaapi_decode_init, HWACCEL_VAAPI, AV_PIX_FMT_VAAPI },
#endif
#if CONFIG_CUVID
{ "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA },
#endif
{ 0 },
};
int hwaccel_lax_profile_check = 0;
AVBufferRef *hw_device_ctx;
char *vstats_filename;
char *sdp_filename;
@@ -113,7 +103,6 @@ int start_at_zero = 0;
int copy_tb = -1;
int debug_ts = 0;
int exit_on_error = 0;
int abort_on_flags = 0;
int print_stats = -1;
int qp_hist = 0;
int stdin_interaction = 1;
@@ -207,24 +196,6 @@ static AVDictionary *strip_specifiers(AVDictionary *dict)
return ret;
}
static int opt_abort_on(void *optctx, const char *opt, const char *arg)
{
static const AVOption opts[] = {
{ "abort_on" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
{ "empty_output" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = ABORT_ON_FLAG_EMPTY_OUTPUT }, .unit = "flags" },
{ NULL },
};
static const AVClass class = {
.class_name = "",
.item_name = av_default_item_name,
.option = opts,
.version = LIBAVUTIL_VERSION_INT,
};
const AVClass *pclass = &class;
return av_opt_eval_flags(&pclass, &opts[0], arg, &abort_on_flags);
}
static int opt_sameq(void *optctx, const char *opt, const char *arg)
{
av_log(NULL, AV_LOG_ERROR, "Option '%s' was removed. "
@@ -429,12 +400,12 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
exit_program(1);
}
st = input_files[m->file_idx]->ctx->streams[m->stream_idx];
if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
av_log(NULL, AV_LOG_FATAL, "mapchan: stream #%d.%d is not an audio stream.\n",
m->file_idx, m->stream_idx);
exit_program(1);
}
if (m->channel_idx < 0 || m->channel_idx >= st->codecpar->channels) {
if (m->channel_idx < 0 || m->channel_idx >= st->codec->channels) {
av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
m->file_idx, m->stream_idx, m->channel_idx);
exit_program(1);
@@ -449,17 +420,6 @@ static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
return 0;
}
#if CONFIG_VAAPI
static int opt_vaapi_device(void *optctx, const char *opt, const char *arg)
{
int err;
err = vaapi_device_init(arg);
if (err < 0)
exit_program(1);
return 0;
}
#endif
/**
* Parse a metadata specifier passed as 'arg' parameter.
* @param arg metadata string to parse
@@ -634,11 +594,11 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
if (codec_name) {
AVCodec *codec = find_codec_or_die(codec_name, st->codecpar->codec_type, 0);
st->codecpar->codec_id = codec->id;
AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
st->codec->codec_id = codec->id;
return codec;
} else
return avcodec_find_decoder(st->codecpar->codec_id);
return avcodec_find_decoder(st->codec->codec_id);
}
/* Add all the streams from the given input file to the global
@@ -649,15 +609,13 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
for (i = 0; i < ic->nb_streams; i++) {
AVStream *st = ic->streams[i];
AVCodecParameters *par = st->codecpar;
AVCodecContext *dec = st->codec;
InputStream *ist = av_mallocz(sizeof(*ist));
char *framerate = NULL, *hwaccel = NULL, *hwaccel_device = NULL;
char *hwaccel_output_format = NULL;
char *codec_tag = NULL;
char *next;
char *discard_str = NULL;
const AVClass *cc = avcodec_get_class();
const AVOption *discard_opt = av_opt_find(&cc, "skip_frame", NULL, 0, 0);
const AVOption *discard_opt = av_opt_find(dec, "skip_frame", NULL, 0, 0);
if (!ist)
exit_program(1);
@@ -669,9 +627,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
ist->file_index = nb_input_files;
ist->discard = 1;
st->discard = AVDISCARD_ALL;
ist->nb_samples = 0;
ist->min_pts = INT64_MAX;
ist->max_pts = INT64_MIN;
ist->ts_scale = 1.0;
MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);
@@ -684,18 +639,18 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
st->codecpar->codec_tag = tag;
st->codec->codec_tag = tag;
}
ist->dec = choose_decoder(o, ic, st);
ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codecpar->codec_id, ic, st, ist->dec);
ist->decoder_opts = filter_codec_opts(o->g->codec_opts, ist->st->codec->codec_id, ic, st, ist->dec);
ist->reinit_filters = -1;
MATCH_PER_STREAM_OPT(reinit_filters, i, ist->reinit_filters, ic, st);
MATCH_PER_STREAM_OPT(discard, str, discard_str, ic, st);
ist->user_set_discard = AVDISCARD_NONE;
if (discard_str && av_opt_eval_int(&cc, discard_opt, discard_str, &ist->user_set_discard) < 0) {
if (discard_str && av_opt_eval_int(dec, discard_opt, discard_str, &ist->user_set_discard) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing discard %s.\n",
discard_str);
exit_program(1);
@@ -709,30 +664,22 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
exit_program(1);
}
ret = avcodec_parameters_to_context(ist->dec_ctx, par);
ret = avcodec_copy_context(ist->dec_ctx, dec);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
exit_program(1);
}
switch (par->codec_type) {
switch (dec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
if(!ist->dec)
ist->dec = avcodec_find_decoder(par->codec_id);
ist->dec = avcodec_find_decoder(dec->codec_id);
#if FF_API_EMU_EDGE
if (av_codec_get_lowres(st->codec)) {
av_codec_set_lowres(ist->dec_ctx, av_codec_get_lowres(st->codec));
ist->dec_ctx->width = st->codec->width;
ist->dec_ctx->height = st->codec->height;
ist->dec_ctx->coded_width = st->codec->coded_width;
ist->dec_ctx->coded_height = st->codec->coded_height;
ist->dec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
if (av_codec_get_lowres(dec)) {
dec->flags |= CODEC_FLAG_EMU_EDGE;
}
#endif
// avformat_find_stream_info() doesn't set this for us anymore.
ist->dec_ctx->framerate = st->avg_frame_rate;
ist->resample_height = ist->dec_ctx->height;
ist->resample_width = ist->dec_ctx->width;
ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
@@ -781,19 +728,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
if (!ist->hwaccel_device)
exit_program(1);
}
MATCH_PER_STREAM_OPT(hwaccel_output_formats, str,
hwaccel_output_format, ic, st);
if (hwaccel_output_format) {
ist->hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
if (ist->hwaccel_output_format == AV_PIX_FMT_NONE) {
av_log(NULL, AV_LOG_FATAL, "Unrecognised hwaccel output "
"format: %s", hwaccel_output_format);
}
} else {
ist->hwaccel_output_format = AV_PIX_FMT_NONE;
}
ist->hwaccel_pix_fmt = AV_PIX_FMT_NONE;
break;
@@ -812,7 +746,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
case AVMEDIA_TYPE_SUBTITLE: {
char *canvas_size = NULL;
if(!ist->dec)
ist->dec = avcodec_find_decoder(par->codec_id);
ist->dec = avcodec_find_decoder(dec->codec_id);
MATCH_PER_STREAM_OPT(fix_sub_duration, i, ist->fix_sub_duration, ic, st);
MATCH_PER_STREAM_OPT(canvas_sizes, str, canvas_size, ic, st);
if (canvas_size &&
@@ -828,12 +762,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
default:
abort();
}
ret = avcodec_parameters_from_context(par, ist->dec_ctx);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
exit_program(1);
}
}
}
@@ -872,7 +800,7 @@ static void dump_attachment(AVStream *st, const char *filename)
AVIOContext *out = NULL;
AVDictionaryEntry *e;
if (!st->codecpar->extradata_size) {
if (!st->codec->extradata_size) {
av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
nb_input_files - 1, st->index);
return;
@@ -893,7 +821,7 @@ static void dump_attachment(AVStream *st, const char *filename)
exit_program(1);
}
avio_write(out, st->codecpar->extradata, st->codecpar->extradata_size);
avio_write(out, st->codec->extradata, st->codec->extradata_size);
avio_flush(out);
avio_close(out);
}
@@ -997,8 +925,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
if (err < 0) {
print_error(filename, err);
if (err == AVERROR_PROTOCOL_NOT_FOUND)
av_log(NULL, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
exit_program(1);
}
if (scan_all_pmts_set)
@@ -1043,8 +969,8 @@ static int open_input_file(OptionsContext *o, const char *filename)
if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
int dts_heuristic = 0;
for (i=0; i<ic->nb_streams; i++) {
const AVCodecParameters *par = ic->streams[i]->codecpar;
if (par->video_delay)
AVCodecContext *avctx = ic->streams[i]->codec;
if (avctx->has_b_frames)
dts_heuristic = 1;
}
if (dts_heuristic) {
@@ -1079,9 +1005,6 @@ static int open_input_file(OptionsContext *o, const char *filename)
f->nb_streams = ic->nb_streams;
f->rate_emu = o->rate_emu;
f->accurate_seek = o->accurate_seek;
f->loop = o->loop;
f->duration = 0;
f->time_base = (AVRational){ 1, 1 };
#if HAVE_PTHREADS
f->thread_queue_size = o->thread_queue_size > 0 ? o->thread_queue_size : 8;
#endif
@@ -1189,39 +1112,21 @@ static int get_preset_file_2(const char *preset_name, const char *codec_name, AV
return ret;
}
static int choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
{
enum AVMediaType type = ost->st->codecpar->codec_type;
char *codec_name = NULL;
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO || type == AVMEDIA_TYPE_SUBTITLE) {
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
if (!codec_name) {
ost->st->codecpar->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
NULL, ost->st->codecpar->codec_type);
ost->enc = avcodec_find_encoder(ost->st->codecpar->codec_id);
if (!ost->enc) {
av_log(NULL, AV_LOG_FATAL, "Automatic encoder selection failed for "
"output stream #%d:%d. Default encoder for format %s (codec %s) is "
"probably disabled. Please choose an encoder manually.\n",
ost->file_index, ost->index, s->oformat->name,
avcodec_get_name(ost->st->codecpar->codec_id));
return AVERROR_ENCODER_NOT_FOUND;
}
} else if (!strcmp(codec_name, "copy"))
ost->stream_copy = 1;
else {
ost->enc = find_codec_or_die(codec_name, ost->st->codecpar->codec_type, 1);
ost->st->codecpar->codec_id = ost->enc->id;
}
ost->encoding_needed = !ost->stream_copy;
} else {
/* no encoding supported for other media types */
ost->stream_copy = 1;
ost->encoding_needed = 0;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
if (!codec_name) {
ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
NULL, ost->st->codec->codec_type);
ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
} else if (!strcmp(codec_name, "copy"))
ost->stream_copy = 1;
else {
ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
ost->st->codec->codec_id = ost->enc->id;
}
return 0;
}
static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type, int source_index)
@@ -1229,8 +1134,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
OutputStream *ost;
AVStream *st = avformat_new_stream(oc, NULL);
int idx = oc->nb_streams - 1, ret = 0;
const char *bsfs = NULL;
char *next, *codec_tag = NULL;
char *bsf = NULL, *next, *codec_tag = NULL;
AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
double qscale = -1;
int i;
@@ -1250,14 +1155,8 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->file_index = nb_output_files - 1;
ost->index = idx;
ost->st = st;
st->codecpar->codec_type = type;
ret = choose_encoder(o, oc, ost);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "Error selecting an encoder for stream "
"%d:%d\n", ost->file_index, ost->index);
exit_program(1);
}
st->codec->codec_type = type;
choose_encoder(o, oc, ost);
ost->enc_ctx = avcodec_alloc_context3(ost->enc);
if (!ost->enc_ctx) {
@@ -1266,12 +1165,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
}
ost->enc_ctx->codec_type = type;
ost->ref_par = avcodec_parameters_alloc();
if (!ost->ref_par) {
av_log(NULL, AV_LOG_ERROR, "Error allocating the encoding parameters.\n");
exit_program(1);
}
if (ost->enc) {
AVIOContext *s = NULL;
char *buf = NULL, *arg = NULL, *preset = NULL;
@@ -1319,62 +1212,25 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->copy_prior_start = -1;
MATCH_PER_STREAM_OPT(copy_prior_start, i, ost->copy_prior_start, oc ,st);
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsfs, oc, st);
while (bsfs && *bsfs) {
const AVBitStreamFilter *filter;
char *bsf, *bsf_options_str, *bsf_name;
bsf = av_get_token(&bsfs, ",");
if (!bsf)
exit_program(1);
bsf_name = av_strtok(bsf, "=", &bsf_options_str);
if (!bsf_name)
exit_program(1);
filter = av_bsf_get_by_name(bsf_name);
if (!filter) {
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf_name);
MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
while (bsf) {
char *arg = NULL;
if (next = strchr(bsf, ','))
*next++ = 0;
if (arg = strchr(bsf, '='))
*arg++ = 0;
if (!(bsfc = av_bitstream_filter_init(bsf))) {
av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
exit_program(1);
}
if (bsfc_prev)
bsfc_prev->next = bsfc;
else
ost->bitstream_filters = bsfc;
av_dict_set(&ost->bsf_args, bsfc->filter->name, arg, 0);
ost->bsf_ctx = av_realloc_array(ost->bsf_ctx,
ost->nb_bitstream_filters + 1,
sizeof(*ost->bsf_ctx));
if (!ost->bsf_ctx)
exit_program(1);
ret = av_bsf_alloc(filter, &ost->bsf_ctx[ost->nb_bitstream_filters]);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error allocating a bitstream filter context\n");
exit_program(1);
}
ost->nb_bitstream_filters++;
if (bsf_options_str && filter->priv_class) {
const AVOption *opt = av_opt_next(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, NULL);
const char * shorthand[2] = {NULL};
if (opt)
shorthand[0] = opt->name;
ret = av_opt_set_from_string(ost->bsf_ctx[ost->nb_bitstream_filters-1]->priv_data, bsf_options_str, shorthand, "=", ":");
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error parsing options for bitstream filter %s\n", bsf_name);
exit_program(1);
}
}
av_freep(&bsf);
if (*bsfs)
bsfs++;
}
if (ost->nb_bitstream_filters) {
ost->bsf_extradata_updated = av_mallocz_array(ost->nb_bitstream_filters, sizeof(*ost->bsf_extradata_updated));
if (!ost->bsf_extradata_updated) {
av_log(NULL, AV_LOG_FATAL, "Bitstream filter memory allocation failed\n");
exit_program(1);
}
bsfc_prev = bsfc;
bsf = next;
}
MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
@@ -1382,7 +1238,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
uint32_t tag = strtol(codec_tag, &next, 0);
if (*next)
tag = AV_RL32(codec_tag);
ost->st->codecpar->codec_tag =
ost->st->codec->codec_tag =
ost->enc_ctx->codec_tag = tag;
}
@@ -1395,10 +1251,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
MATCH_PER_STREAM_OPT(disposition, str, ost->disposition, oc, st);
ost->disposition = av_strdup(ost->disposition);
ost->max_muxing_queue_size = 128;
MATCH_PER_STREAM_OPT(max_muxing_queue_size, i, ost->max_muxing_queue_size, oc, st);
ost->max_muxing_queue_size *= sizeof(AVPacket);
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
@@ -1418,10 +1270,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
}
ost->last_mux_dts = AV_NOPTS_VALUE;
ost->muxing_queue = av_fifo_alloc(8 * sizeof(AVPacket));
if (!ost->muxing_queue)
exit_program(1);
return ost;
}
@@ -1487,7 +1335,7 @@ static char *get_ost_filters(OptionsContext *o, AVFormatContext *oc,
else if (ost->filters)
return av_strdup(ost->filters);
return av_strdup(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ?
return av_strdup(st->codec->codec_type == AVMEDIA_TYPE_VIDEO ?
"null" : "anull");
}
@@ -1919,9 +1767,9 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
AVCodec *codec;
const char *enc_config;
codec = avcodec_find_encoder(ic->streams[i]->codecpar->codec_id);
codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
if (!codec) {
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codecpar->codec_id);
av_log(s, AV_LOG_ERROR, "no encoder found for codec id %i\n", ic->streams[i]->codec->codec_id);
return AVERROR(EINVAL);
}
if (codec->type == AVMEDIA_TYPE_AUDIO)
@@ -1940,10 +1788,10 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
av_dict_free(&opts);
}
if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
choose_sample_fmt(st, codec);
else if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
choose_pixel_fmt(st, st->codec, codec, st->codecpar->format);
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
choose_pixel_fmt(st, st->codec, codec, st->codec->pix_fmt);
avcodec_copy_context(ost->enc_ctx, st->codec);
if (enc_config)
av_dict_parse_string(&ost->encoder_opts, enc_config, "=", ",", 0);
@@ -2011,7 +1859,7 @@ static int configure_complex_filters(void)
int i, ret = 0;
for (i = 0; i < nb_filtergraphs; i++)
if (!filtergraph_is_simple(filtergraphs[i]) &&
if (!filtergraphs[i]->graph &&
(ret = configure_filtergraph(filtergraphs[i])) < 0)
return ret;
return 0;
@@ -2115,18 +1963,18 @@ static int open_output_file(OptionsContext *o, const char *filename)
ost = output_streams[j];
for (i = 0; i < nb_input_streams; i++) {
ist = input_streams[i];
if(ist->st->codecpar->codec_type == ost->st->codecpar->codec_type){
if(ist->st->codec->codec_type == ost->st->codec->codec_type){
ost->sync_ist= ist;
ost->source_index= i;
if(ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
if(ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO) ost->avfilter = av_strdup("anull");
if(ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) ost->avfilter = av_strdup("null");
ist->discard = 0;
ist->st->discard = ist->user_set_discard;
break;
}
}
if(!ost->sync_ist){
av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codecpar->codec_type));
av_log(NULL, AV_LOG_FATAL, "Missing %s stream which is required by this ffm\n", av_get_media_type_string(ost->st->codec->codec_type));
exit_program(1);
}
}
@@ -2141,10 +1989,10 @@ static int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int new_area;
ist = input_streams[i];
new_area = ist->st->codecpar->width * ist->st->codecpar->height + 100000000*!!ist->st->codec_info_nb_frames;
new_area = ist->st->codec->width * ist->st->codec->height + 100000000*!!ist->st->codec_info_nb_frames;
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
new_area = 1;
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
new_area > area) {
if((qcr==MKTAG('A', 'P', 'I', 'C')) && !(ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
continue;
@@ -2162,8 +2010,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
for (i = 0; i < nb_input_streams; i++) {
int score;
ist = input_streams[i];
score = ist->st->codecpar->channels + 100000000*!!ist->st->codec_info_nb_frames;
if (ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
score = ist->st->codec->channels + 100000000*!!ist->st->codec_info_nb_frames;
if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
score > best_score) {
best_score = score;
idx = i;
@@ -2177,9 +2025,9 @@ static int open_output_file(OptionsContext *o, const char *filename)
MATCH_PER_TYPE_OPT(codec_names, str, subtitle_codec_name, oc, "s");
if (!o->subtitle_disable && (avcodec_find_encoder(oc->oformat->subtitle_codec) || subtitle_codec_name)) {
for (i = 0; i < nb_input_streams; i++)
if (input_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) {
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
AVCodecDescriptor const *input_descriptor =
avcodec_descriptor_get(input_streams[i]->st->codecpar->codec_id);
avcodec_descriptor_get(input_streams[i]->st->codec->codec_id);
AVCodecDescriptor const *output_descriptor = NULL;
AVCodec const *output_codec =
avcodec_find_encoder(oc->oformat->subtitle_codec);
@@ -2205,8 +2053,8 @@ static int open_output_file(OptionsContext *o, const char *filename)
if (!o->data_disable ) {
enum AVCodecID codec_id = av_guess_codec(oc->oformat, NULL, filename, NULL, AVMEDIA_TYPE_DATA);
for (i = 0; codec_id != AV_CODEC_ID_NONE && i < nb_input_streams; i++) {
if (input_streams[i]->st->codecpar->codec_type == AVMEDIA_TYPE_DATA
&& input_streams[i]->st->codecpar->codec_id == codec_id )
if (input_streams[i]->st->codec->codec_type == AVMEDIA_TYPE_DATA
&& input_streams[i]->st->codec->codec_id == codec_id )
new_data_stream(o, oc, i);
}
}
@@ -2243,17 +2091,17 @@ loop_end:
int src_idx = input_files[map->file_index]->ist_index + map->stream_index;
ist = input_streams[input_files[map->file_index]->ist_index + map->stream_index];
if(o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
if(o->subtitle_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
continue;
if(o-> audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
if(o-> audio_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
continue;
if(o-> video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
if(o-> video_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
continue;
if(o-> data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
if(o-> data_disable && ist->st->codec->codec_type == AVMEDIA_TYPE_DATA)
continue;
ost = NULL;
switch (ist->st->codecpar->codec_type) {
switch (ist->st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO: ost = new_video_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream (o, oc, src_idx); break;
case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream (o, oc, src_idx); break;
@@ -2308,17 +2156,17 @@ loop_end:
avio_read(pb, attachment, len);
ost = new_attachment_stream(o, oc, -1);
ost->stream_copy = 0;
ost->stream_copy = 1;
ost->attachment_filename = o->attachments[i];
ost->st->codecpar->extradata = attachment;
ost->st->codecpar->extradata_size = len;
ost->finished = 1;
ost->st->codec->extradata = attachment;
ost->st->codec->extradata_size = len;
p = strrchr(o->attachments[i], '/');
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
avio_closep(&pb);
}
#if FF_API_LAVF_AVCTX
for (i = nb_output_streams - oc->nb_streams; i < nb_output_streams; i++) { //for all streams of this output file
AVDictionaryEntry *e;
ost = output_streams[i];
@@ -2329,7 +2177,6 @@ loop_end:
if (av_opt_set(ost->st->codec, "flags", e->value, 0) < 0)
exit_program(1);
}
#endif
if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
av_dump_format(oc, nb_output_files - 1, oc->filename, 1);
@@ -2379,25 +2226,14 @@ loop_end:
}
av_dict_free(&unused_opts);
/* set the decoding_needed flags and create simple filtergraphs */
/* set the encoding/decoding_needed flags */
for (i = of->ost_index; i < nb_output_streams; i++) {
OutputStream *ost = output_streams[i];
ost->encoding_needed = !ost->stream_copy;
if (ost->encoding_needed && ost->source_index >= 0) {
InputStream *ist = input_streams[ost->source_index];
ist->decoding_needed |= DECODING_FOR_OST;
if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
err = init_simple_filtergraph(ist, ost);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error initializing a simple filtergraph between streams "
"%d:%d->%d:%d\n", ist->file_index, ost->source_index,
nb_output_files - 1, ost->st->index);
exit_program(1);
}
}
}
}
@@ -2490,78 +2326,13 @@ loop_end:
}
}
/* process manually set programs */
for (i = 0; i < o->nb_program; i++) {
const char *p = o->program[i].u.str;
int progid = i+1;
AVProgram *program;
while(*p) {
const char *p2 = av_get_token(&p, ":");
const char *to_dealloc = p2;
char *key;
if (!p2)
break;
if(*p) p++;
key = av_get_token(&p2, "=");
if (!key || !*p2) {
av_freep(&to_dealloc);
av_freep(&key);
break;
}
p2++;
if (!strcmp(key, "program_num"))
progid = strtol(p2, NULL, 0);
av_freep(&to_dealloc);
av_freep(&key);
}
program = av_new_program(oc, progid);
p = o->program[i].u.str;
while(*p) {
const char *p2 = av_get_token(&p, ":");
const char *to_dealloc = p2;
char *key;
if (!p2)
break;
if(*p) p++;
key = av_get_token(&p2, "=");
if (!key) {
av_log(NULL, AV_LOG_FATAL,
"No '=' character in program string %s.\n",
p2);
exit_program(1);
}
if (!*p2)
exit_program(1);
p2++;
if (!strcmp(key, "title")) {
av_dict_set(&program->metadata, "title", p2, 0);
} else if (!strcmp(key, "program_num")) {
} else if (!strcmp(key, "st")) {
int st_num = strtol(p2, NULL, 0);
av_program_add_stream_index(oc, progid, st_num);
} else {
av_log(NULL, AV_LOG_FATAL, "Unknown program key %s.\n", key);
exit_program(1);
}
av_freep(&to_dealloc);
av_freep(&key);
}
}
/* process manually set metadata */
for (i = 0; i < o->nb_metadata; i++) {
AVDictionary **m;
char type, *val;
const char *stream_spec;
int index = 0, j, ret = 0;
char now_time[256];
val = strchr(o->metadata[i].u.str, '=');
if (!val) {
@@ -2571,6 +2342,17 @@ loop_end:
}
*val++ = 0;
if (!strcmp(o->metadata[i].u.str, "creation_time") &&
!strcmp(val, "now")) {
time_t now = time(0);
struct tm *ptm, tmbuf;
ptm = localtime_r(&now, &tmbuf);
if (ptm) {
if (strftime(now_time, sizeof(now_time), "%Y-%m-%d %H:%M:%S", ptm))
val = now_time;
}
}
parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
if (type == 's') {
for (j = 0; j < oc->nb_streams; j++) {
@@ -2596,13 +2378,6 @@ loop_end:
}
m = &oc->chapters[index]->metadata;
break;
case 'p':
if (index < 0 || index >= oc->nb_programs) {
av_log(NULL, AV_LOG_FATAL, "Invalid program index %d in metadata specifier.\n", index);
exit_program(1);
}
m = &oc->programs[index]->metadata;
break;
default:
av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
exit_program(1);
@@ -2635,10 +2410,11 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
int i, j, fr;
for (j = 0; j < nb_input_files; j++) {
for (i = 0; i < input_files[j]->nb_streams; i++) {
AVStream *st = input_files[j]->ctx->streams[i];
if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
AVCodecContext *c = input_files[j]->ctx->streams[i]->codec;
if (c->codec_type != AVMEDIA_TYPE_VIDEO ||
!c->time_base.num)
continue;
fr = st->time_base.den * 1000 / st->time_base.num;
fr = c->time_base.den * 1000 / c->time_base.num;
if (fr == 25000) {
norm = PAL;
break;
@@ -3047,7 +2823,6 @@ void show_help_default(const char *opt, const char *arg)
" -h -- print basic options\n"
" -h long -- print more options\n"
" -h full -- print all options (including all format and codec specific options, very long)\n"
" -h type=name -- print all options for the named decoder/encoder/demuxer/muxer/filter\n"
" See man %s for detailed description of the options.\n"
"\n", program_name);
@@ -3108,8 +2883,8 @@ enum OptGroup {
};
static const OptionGroupDef groups[] = {
[GROUP_OUTFILE] = { "output url", NULL, OPT_OUTPUT },
[GROUP_INFILE] = { "input url", "i", OPT_INPUT },
[GROUP_OUTFILE] = { "output file", NULL, OPT_OUTPUT },
[GROUP_INFILE] = { "input file", "i", OPT_INPUT },
};
static int open_files(OptionGroupList *l, const char *inout,
@@ -3168,9 +2943,6 @@ int ffmpeg_parse_options(int argc, char **argv)
goto fail;
}
/* configure terminal and setup signal handlers */
term_init();
/* open input files */
ret = open_files(&octx.groups[GROUP_INFILE], "input", open_input_file);
if (ret < 0) {
@@ -3292,8 +3064,6 @@ const OptionDef options[] = {
"set the recording timestamp ('now' to set the current time)", "time" },
{ "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
"add metadata", "string=string" },
{ "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
"add program with specified streams", "title=string:st=number..." },
{ "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
OPT_OUTPUT, { .func_arg = opt_data_frames },
"set the number of data frames to output", "number" },
@@ -3317,7 +3087,7 @@ const OptionDef options[] = {
{ "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
"specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
"with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
{ "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
{ "vsync", HAS_ARG | OPT_EXPERT, { opt_vsync },
"video sync method", "" },
{ "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
"frame drop threshold", "" },
@@ -3343,8 +3113,6 @@ const OptionDef options[] = {
"timestamp error delta threshold", "threshold" },
{ "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
"exit on error", "error" },
{ "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
"abort on the specified condition flags", "flags" },
{ "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
"copy initial non-keyframes" },
@@ -3383,8 +3151,6 @@ const OptionDef options[] = {
{ "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(dump_attachment) },
"extract an attachment into a file", "filename" },
{ "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
{ "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
"print timestamp debugging info" },
{ "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
@@ -3441,9 +3207,9 @@ const OptionDef options[] = {
"this option is deprecated, use the yadif filter instead" },
{ "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
"calculate PSNR of compressed frames" },
{ "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
{ "vstats", OPT_VIDEO | OPT_EXPERT , { &opt_vstats },
"dump video coding statistics to file" },
{ "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
{ "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { opt_vstats_file },
"dump video coding statistics to file", "file" },
{ "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
"set video filters", "filter_graph" },
@@ -3483,9 +3249,9 @@ const OptionDef options[] = {
{ "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
"select a device for HW acceleration", "devicename" },
{ "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
"select output format used with HW accelerated decoding", "format" },
#if HAVE_VDPAU_X11
{ "vdpau_api_ver", HAS_ARG | OPT_INT | OPT_EXPERT, { &vdpau_api_ver }, "" },
#endif
#if CONFIG_VDA || CONFIG_VIDEOTOOLBOX
{ "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
#endif
@@ -3494,8 +3260,6 @@ const OptionDef options[] = {
{ "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
"automatically insert correct rotate filters" },
{ "hwaccel_lax_profile_check", OPT_BOOL | OPT_EXPERT, { &hwaccel_lax_profile_check},
"attempt to decode anyway if HW accelerated decoder's supported profiles do not exactly match the stream" },
/* audio options */
{ "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
@@ -3555,7 +3319,7 @@ const OptionDef options[] = {
"set the initial demux-decode delay", "seconds" },
{ "override_ffserver", OPT_BOOL | OPT_EXPERT | OPT_OUTPUT, { &override_ffserver },
"override the options from ffserver", "" },
{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
{ "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { opt_sdp_file },
"specify a file in which to print sdp information", "file" },
{ "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
@@ -3573,20 +3337,11 @@ const OptionDef options[] = {
"set the subtitle options to the indicated preset", "preset" },
{ "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
"set options from indicated preset file", "filename" },
{ "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
"maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
/* data codec support */
{ "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
"force data codec ('copy' to copy stream)", "codec" },
{ "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
"disable data" },
#if CONFIG_VAAPI
{ "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
"set VAAPI hardware device (DRM path or X11 display name)", "device" },
#endif
{ NULL, },
};
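Illustrative sketch (not from the diff): the bitstream_filters hunk in new_output_stream above shows the two generations of the bitstream-filter setup, one built on AVBitStreamFilterContext and one on the av_bsf_* API. A minimal single-filter version of the av_bsf_* pattern, with "h264_mp4toannexb" used purely as an example name, might be:

#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

/* Hypothetical helper, not code from the diff. */
static int open_one_bsf(const AVStream *st, const char *name, AVBSFContext **out_bsf)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name(name);
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    ret = av_bsf_alloc(f, &bsf);
    if (ret < 0)
        return ret;
    ret = avcodec_parameters_copy(bsf->par_in, st->codecpar);  /* input stream parameters */
    if (ret >= 0) {
        bsf->time_base_in = st->time_base;
        ret = av_bsf_init(bsf);
    }
    if (ret < 0) {
        av_bsf_free(&bsf);
        return ret;
    }
    *out_bsf = bsf;  /* then use av_bsf_send_packet()/av_bsf_receive_packet() per packet */
    return 0;
}

Usage would be e.g. open_one_bsf(ost->st, "h264_mp4toannexb", &bsf); the per-filter option parsing via av_opt_set_from_string() seen in the hunk would be applied to bsf->priv_data between av_bsf_alloc() and av_bsf_init().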


@@ -1,267 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <mfx/mfxvideo.h>
#include <stdlib.h>
#include "libavutil/dict.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavcodec/qsv.h"
#include "ffmpeg.h"
typedef struct QSVContext {
OutputStream *ost;
mfxSession session;
mfxExtOpaqueSurfaceAlloc opaque_alloc;
AVBufferRef *opaque_surfaces_buf;
uint8_t *surface_used;
mfxFrameSurface1 **surface_ptrs;
int nb_surfaces;
mfxExtBuffer *ext_buffers[1];
} QSVContext;
static void buffer_release(void *opaque, uint8_t *data)
{
*(uint8_t*)opaque = 0;
}
static int qsv_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
InputStream *ist = s->opaque;
QSVContext *qsv = ist->hwaccel_ctx;
int i;
for (i = 0; i < qsv->nb_surfaces; i++) {
if (qsv->surface_used[i])
continue;
frame->buf[0] = av_buffer_create((uint8_t*)qsv->surface_ptrs[i], sizeof(*qsv->surface_ptrs[i]),
buffer_release, &qsv->surface_used[i], 0);
if (!frame->buf[0])
return AVERROR(ENOMEM);
frame->data[3] = (uint8_t*)qsv->surface_ptrs[i];
qsv->surface_used[i] = 1;
return 0;
}
return AVERROR(ENOMEM);
}
static int init_opaque_surf(QSVContext *qsv)
{
AVQSVContext *hwctx_enc = qsv->ost->enc_ctx->hwaccel_context;
mfxFrameSurface1 *surfaces;
int i;
qsv->nb_surfaces = hwctx_enc->nb_opaque_surfaces;
qsv->opaque_surfaces_buf = av_buffer_ref(hwctx_enc->opaque_surfaces);
qsv->surface_ptrs = av_mallocz_array(qsv->nb_surfaces, sizeof(*qsv->surface_ptrs));
qsv->surface_used = av_mallocz_array(qsv->nb_surfaces, sizeof(*qsv->surface_used));
if (!qsv->opaque_surfaces_buf || !qsv->surface_ptrs || !qsv->surface_used)
return AVERROR(ENOMEM);
surfaces = (mfxFrameSurface1*)qsv->opaque_surfaces_buf->data;
for (i = 0; i < qsv->nb_surfaces; i++)
qsv->surface_ptrs[i] = surfaces + i;
qsv->opaque_alloc.Out.Surfaces = qsv->surface_ptrs;
qsv->opaque_alloc.Out.NumSurface = qsv->nb_surfaces;
qsv->opaque_alloc.Out.Type = hwctx_enc->opaque_alloc_type;
qsv->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
qsv->opaque_alloc.Header.BufferSz = sizeof(qsv->opaque_alloc);
qsv->ext_buffers[0] = (mfxExtBuffer*)&qsv->opaque_alloc;
return 0;
}
static void qsv_uninit(AVCodecContext *s)
{
InputStream *ist = s->opaque;
QSVContext *qsv = ist->hwaccel_ctx;
av_freep(&qsv->ost->enc_ctx->hwaccel_context);
av_freep(&s->hwaccel_context);
av_buffer_unref(&qsv->opaque_surfaces_buf);
av_freep(&qsv->surface_used);
av_freep(&qsv->surface_ptrs);
av_freep(&qsv);
}
int qsv_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
QSVContext *qsv = ist->hwaccel_ctx;
AVQSVContext *hwctx_dec;
int ret;
if (!qsv) {
av_log(NULL, AV_LOG_ERROR, "QSV transcoding is not initialized. "
"-hwaccel qsv should only be used for one-to-one QSV transcoding "
"with no filters.\n");
return AVERROR_BUG;
}
ret = init_opaque_surf(qsv);
if (ret < 0)
return ret;
hwctx_dec = av_qsv_alloc_context();
if (!hwctx_dec)
return AVERROR(ENOMEM);
hwctx_dec->session = qsv->session;
hwctx_dec->iopattern = MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
hwctx_dec->ext_buffers = qsv->ext_buffers;
hwctx_dec->nb_ext_buffers = FF_ARRAY_ELEMS(qsv->ext_buffers);
av_freep(&s->hwaccel_context);
s->hwaccel_context = hwctx_dec;
ist->hwaccel_get_buffer = qsv_get_buffer;
ist->hwaccel_uninit = qsv_uninit;
return 0;
}
static mfxIMPL choose_implementation(const InputStream *ist)
{
static const struct {
const char *name;
mfxIMPL impl;
} impl_map[] = {
{ "auto", MFX_IMPL_AUTO },
{ "sw", MFX_IMPL_SOFTWARE },
{ "hw", MFX_IMPL_HARDWARE },
{ "auto_any", MFX_IMPL_AUTO_ANY },
{ "hw_any", MFX_IMPL_HARDWARE_ANY },
{ "hw2", MFX_IMPL_HARDWARE2 },
{ "hw3", MFX_IMPL_HARDWARE3 },
{ "hw4", MFX_IMPL_HARDWARE4 },
};
mfxIMPL impl = MFX_IMPL_AUTO_ANY;
int i;
if (ist->hwaccel_device) {
for (i = 0; i < FF_ARRAY_ELEMS(impl_map); i++)
if (!strcmp(ist->hwaccel_device, impl_map[i].name)) {
impl = impl_map[i].impl;
break;
}
if (i == FF_ARRAY_ELEMS(impl_map))
impl = strtol(ist->hwaccel_device, NULL, 0);
}
return impl;
}
int qsv_transcode_init(OutputStream *ost)
{
InputStream *ist;
const enum AVPixelFormat *pix_fmt;
AVDictionaryEntry *e;
const AVOption *opt;
int flags = 0;
int err, i;
QSVContext *qsv = NULL;
AVQSVContext *hwctx = NULL;
mfxIMPL impl;
mfxVersion ver = { { 3, 1 } };
/* check if the encoder supports QSV */
if (!ost->enc->pix_fmts)
return 0;
for (pix_fmt = ost->enc->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++)
if (*pix_fmt == AV_PIX_FMT_QSV)
break;
if (*pix_fmt == AV_PIX_FMT_NONE)
return 0;
if (strcmp(ost->avfilter, "null") || ost->source_index < 0)
return 0;
/* check if the decoder supports QSV and the output only goes to this stream */
ist = input_streams[ost->source_index];
if (ist->hwaccel_id != HWACCEL_QSV || !ist->dec || !ist->dec->pix_fmts)
return 0;
for (pix_fmt = ist->dec->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++)
if (*pix_fmt == AV_PIX_FMT_QSV)
break;
if (*pix_fmt == AV_PIX_FMT_NONE)
return 0;
for (i = 0; i < nb_output_streams; i++)
if (output_streams[i] != ost &&
output_streams[i]->source_index == ost->source_index)
return 0;
av_log(NULL, AV_LOG_VERBOSE, "Setting up QSV transcoding\n");
qsv = av_mallocz(sizeof(*qsv));
hwctx = av_qsv_alloc_context();
if (!qsv || !hwctx)
goto fail;
impl = choose_implementation(ist);
err = MFXInit(impl, &ver, &qsv->session);
if (err != MFX_ERR_NONE) {
av_log(NULL, AV_LOG_ERROR, "Error initializing an MFX session: %d\n", err);
goto fail;
}
e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
opt = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
if (e && opt)
av_opt_eval_flags(ost->enc_ctx, opt, e->value, &flags);
qsv->ost = ost;
hwctx->session = qsv->session;
hwctx->iopattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY;
hwctx->opaque_alloc = 1;
hwctx->nb_opaque_surfaces = 16;
ost->hwaccel_ctx = qsv;
ost->enc_ctx->hwaccel_context = hwctx;
ost->enc_ctx->pix_fmt = AV_PIX_FMT_QSV;
ist->hwaccel_ctx = qsv;
ist->dec_ctx->pix_fmt = AV_PIX_FMT_QSV;
ist->resample_pix_fmt = AV_PIX_FMT_QSV;
return 0;
fail:
av_freep(&hwctx);
av_freep(&qsv);
return AVERROR_UNKNOWN;
}


@@ -1,538 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <fcntl.h>
#include <unistd.h>
#include <va/va.h>
#if HAVE_VAAPI_X11
# include <va/va_x11.h>
#endif
#if HAVE_VAAPI_DRM
# include <va/va_drm.h>
#endif
#include "libavutil/avassert.h"
#include "libavutil/avconfig.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vaapi.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "libavcodec/vaapi.h"
#include "ffmpeg.h"
static AVClass vaapi_class = {
.class_name = "vaapi",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
};
#define DEFAULT_SURFACES 20
typedef struct VAAPIDecoderContext {
const AVClass *class;
AVBufferRef *device_ref;
AVHWDeviceContext *device;
AVBufferRef *frames_ref;
AVHWFramesContext *frames;
VAProfile va_profile;
VAEntrypoint va_entrypoint;
VAConfigID va_config;
VAContextID va_context;
enum AVPixelFormat decode_format;
int decode_width;
int decode_height;
int decode_surfaces;
// The output need not have the same format, width and height as the
// decoded frames - the copy for non-direct-mapped access is actually
// a whole vpp instance which can do arbitrary scaling and format
// conversion.
enum AVPixelFormat output_format;
struct vaapi_context decoder_vaapi_context;
} VAAPIDecoderContext;
static int vaapi_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
InputStream *ist = avctx->opaque;
VAAPIDecoderContext *ctx = ist->hwaccel_ctx;
int err;
err = av_hwframe_get_buffer(ctx->frames_ref, frame, 0);
if (err < 0) {
av_log(ctx, AV_LOG_ERROR, "Failed to allocate decoder surface.\n");
} else {
av_log(ctx, AV_LOG_DEBUG, "Decoder given surface %#x.\n",
(unsigned int)(uintptr_t)frame->data[3]);
}
return err;
}
static int vaapi_retrieve_data(AVCodecContext *avctx, AVFrame *input)
{
InputStream *ist = avctx->opaque;
VAAPIDecoderContext *ctx = ist->hwaccel_ctx;
AVFrame *output = 0;
int err;
av_assert0(input->format == AV_PIX_FMT_VAAPI);
if (ctx->output_format == AV_PIX_FMT_VAAPI) {
// Nothing to do.
return 0;
}
av_log(ctx, AV_LOG_DEBUG, "Retrieve data from surface %#x.\n",
(unsigned int)(uintptr_t)input->data[3]);
output = av_frame_alloc();
if (!output)
return AVERROR(ENOMEM);
output->format = ctx->output_format;
err = av_hwframe_transfer_data(output, input, 0);
if (err < 0) {
av_log(ctx, AV_LOG_ERROR, "Failed to transfer data to "
"output frame: %d.\n", err);
goto fail;
}
err = av_frame_copy_props(output, input);
if (err < 0) {
av_frame_unref(output);
goto fail;
}
av_frame_unref(input);
av_frame_move_ref(input, output);
av_frame_free(&output);
return 0;
fail:
if (output)
av_frame_free(&output);
return err;
}
static const struct {
enum AVCodecID codec_id;
int codec_profile;
VAProfile va_profile;
} vaapi_profile_map[] = {
#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, VAProfile ## v }
MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2Simple ),
MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2Main ),
MAP(H263, UNKNOWN, H263Baseline),
MAP(MPEG4, MPEG4_SIMPLE, MPEG4Simple ),
MAP(MPEG4, MPEG4_ADVANCED_SIMPLE,
MPEG4AdvancedSimple),
MAP(MPEG4, MPEG4_MAIN, MPEG4Main ),
MAP(H264, H264_CONSTRAINED_BASELINE,
H264ConstrainedBaseline),
MAP(H264, H264_BASELINE, H264Baseline),
MAP(H264, H264_MAIN, H264Main ),
MAP(H264, H264_HIGH, H264High ),
#if VA_CHECK_VERSION(0, 37, 0)
MAP(HEVC, HEVC_MAIN, HEVCMain ),
#endif
MAP(WMV3, VC1_SIMPLE, VC1Simple ),
MAP(WMV3, VC1_MAIN, VC1Main ),
MAP(WMV3, VC1_COMPLEX, VC1Advanced ),
MAP(WMV3, VC1_ADVANCED, VC1Advanced ),
MAP(VC1, VC1_SIMPLE, VC1Simple ),
MAP(VC1, VC1_MAIN, VC1Main ),
MAP(VC1, VC1_COMPLEX, VC1Advanced ),
MAP(VC1, VC1_ADVANCED, VC1Advanced ),
#if VA_CHECK_VERSION(0, 35, 0)
MAP(VP8, UNKNOWN, VP8Version0_3 ),
#endif
#if VA_CHECK_VERSION(0, 37, 1)
MAP(VP9, VP9_0, VP9Profile0 ),
#endif
#undef MAP
};
static int vaapi_build_decoder_config(VAAPIDecoderContext *ctx,
AVCodecContext *avctx,
int fallback_allowed)
{
AVVAAPIDeviceContext *hwctx = ctx->device->hwctx;
AVVAAPIHWConfig *hwconfig = NULL;
AVHWFramesConstraints *constraints = NULL;
VAStatus vas;
int err, i, j;
int loglevel = fallback_allowed ? AV_LOG_VERBOSE : AV_LOG_ERROR;
const AVCodecDescriptor *codec_desc;
const AVPixFmtDescriptor *pix_desc;
enum AVPixelFormat pix_fmt;
VAProfile profile, *profile_list = NULL;
int profile_count, exact_match, alt_profile;
codec_desc = avcodec_descriptor_get(avctx->codec_id);
if (!codec_desc) {
err = AVERROR(EINVAL);
goto fail;
}
profile_count = vaMaxNumProfiles(hwctx->display);
profile_list = av_malloc(profile_count * sizeof(VAProfile));
if (!profile_list) {
err = AVERROR(ENOMEM);
goto fail;
}
vas = vaQueryConfigProfiles(hwctx->display,
profile_list, &profile_count);
if (vas != VA_STATUS_SUCCESS) {
av_log(ctx, loglevel, "Failed to query profiles: %d (%s).\n",
vas, vaErrorStr(vas));
err = AVERROR(EIO);
goto fail;
}
profile = VAProfileNone;
exact_match = 0;
for (i = 0; i < FF_ARRAY_ELEMS(vaapi_profile_map); i++) {
int profile_match = 0;
if (avctx->codec_id != vaapi_profile_map[i].codec_id)
continue;
if (avctx->profile == vaapi_profile_map[i].codec_profile)
profile_match = 1;
profile = vaapi_profile_map[i].va_profile;
for (j = 0; j < profile_count; j++) {
if (profile == profile_list[j]) {
exact_match = profile_match;
break;
}
}
if (j < profile_count) {
if (exact_match)
break;
alt_profile = vaapi_profile_map[i].codec_profile;
}
}
av_freep(&profile_list);
if (profile == VAProfileNone) {
av_log(ctx, loglevel, "No VAAPI support for codec %s.\n",
codec_desc->name);
err = AVERROR(ENOSYS);
goto fail;
}
if (!exact_match) {
if (fallback_allowed || !hwaccel_lax_profile_check) {
av_log(ctx, loglevel, "No VAAPI support for codec %s "
"profile %d.\n", codec_desc->name, avctx->profile);
if (!fallback_allowed) {
av_log(ctx, AV_LOG_WARNING, "If you want attempt decoding "
"anyway with a possibly-incompatible profile, add "
"the option -hwaccel_lax_profile_check.\n");
}
err = AVERROR(EINVAL);
goto fail;
} else {
av_log(ctx, AV_LOG_WARNING, "No VAAPI support for codec %s "
"profile %d: trying instead with profile %d.\n",
codec_desc->name, avctx->profile, alt_profile);
av_log(ctx, AV_LOG_WARNING, "This may fail or give "
"incorrect results, depending on your hardware.\n");
}
}
ctx->va_profile = profile;
ctx->va_entrypoint = VAEntrypointVLD;
vas = vaCreateConfig(hwctx->display, ctx->va_profile,
ctx->va_entrypoint, 0, 0, &ctx->va_config);
if (vas != VA_STATUS_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "Failed to create decode pipeline "
"configuration: %d (%s).\n", vas, vaErrorStr(vas));
err = AVERROR(EIO);
goto fail;
}
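// With the config created, query the hwframe constraints so we know which
// software formats and surface sizes the driver supports for it.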
hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
if (!hwconfig) {
err = AVERROR(ENOMEM);
goto fail;
}
hwconfig->config_id = ctx->va_config;
constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
hwconfig);
if (!constraints)
goto fail;
// Decide on the decoder target format.
// If the user specified something with -hwaccel_output_format then
// try to use that to minimise conversions later.
ctx->decode_format = AV_PIX_FMT_NONE;
if (ctx->output_format != AV_PIX_FMT_NONE &&
ctx->output_format != AV_PIX_FMT_VAAPI) {
for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
if (constraints->valid_sw_formats[i] == ctx->output_format) {
ctx->decode_format = ctx->output_format;
av_log(ctx, AV_LOG_DEBUG, "Using decode format %s (output "
"format).\n", av_get_pix_fmt_name(ctx->decode_format));
break;
}
}
}
// Otherwise, we would like to try to choose something which matches the
// decoder output, but there isn't enough information available here to
// do so. Assume for now that we are always dealing with YUV 4:2:0, so
// pick a format which does that.
if (ctx->decode_format == AV_PIX_FMT_NONE) {
for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
pix_fmt = constraints->valid_sw_formats[i];
pix_desc = av_pix_fmt_desc_get(pix_fmt);
if (pix_desc->nb_components == 3 &&
pix_desc->log2_chroma_w == 1 &&
pix_desc->log2_chroma_h == 1) {
ctx->decode_format = pix_fmt;
av_log(ctx, AV_LOG_DEBUG, "Using decode format %s (format "
"matched).\n", av_get_pix_fmt_name(ctx->decode_format));
break;
}
}
}
// Otherwise pick the first in the list and hope for the best.
if (ctx->decode_format == AV_PIX_FMT_NONE) {
ctx->decode_format = constraints->valid_sw_formats[0];
av_log(ctx, AV_LOG_DEBUG, "Using decode format %s (first in list).\n",
av_get_pix_fmt_name(ctx->decode_format));
if (i > 1) {
// There was a choice, and we picked randomly. Warn the user
// that they might want to choose intelligently instead.
av_log(ctx, AV_LOG_WARNING, "Using randomly chosen decode "
"format %s.\n", av_get_pix_fmt_name(ctx->decode_format));
}
}
// Ensure the picture size is supported by the hardware.
ctx->decode_width = avctx->coded_width;
ctx->decode_height = avctx->coded_height;
if (ctx->decode_width < constraints->min_width ||
ctx->decode_height < constraints->min_height ||
ctx->decode_width > constraints->max_width ||
ctx->decode_height > constraints->max_height) {
av_log(ctx, AV_LOG_ERROR, "VAAPI hardware does not support image "
"size %dx%d (constraints: width %d-%d height %d-%d).\n",
ctx->decode_width, ctx->decode_height,
constraints->min_width, constraints->max_width,
constraints->min_height, constraints->max_height);
err = AVERROR(EINVAL);
goto fail;
}
av_hwframe_constraints_free(&constraints);
av_freep(&hwconfig);
// Decide how many reference frames we need. This might be doable more
// nicely based on the codec and input stream?
ctx->decode_surfaces = DEFAULT_SURFACES;
// For frame-threaded decoding, one additional surface is needed for
// each thread.
if (avctx->active_thread_type & FF_THREAD_FRAME)
ctx->decode_surfaces += avctx->thread_count;
return 0;
fail:
av_hwframe_constraints_free(&constraints);
av_freep(&hwconfig);
vaDestroyConfig(hwctx->display, ctx->va_config);
av_freep(&profile_list);
return err;
}
static void vaapi_decode_uninit(AVCodecContext *avctx)
{
InputStream *ist = avctx->opaque;
VAAPIDecoderContext *ctx = ist->hwaccel_ctx;
if (ctx) {
AVVAAPIDeviceContext *hwctx = ctx->device->hwctx;
if (ctx->va_context != VA_INVALID_ID) {
vaDestroyContext(hwctx->display, ctx->va_context);
ctx->va_context = VA_INVALID_ID;
}
if (ctx->va_config != VA_INVALID_ID) {
vaDestroyConfig(hwctx->display, ctx->va_config);
ctx->va_config = VA_INVALID_ID;
}
av_buffer_unref(&ctx->frames_ref);
av_buffer_unref(&ctx->device_ref);
av_free(ctx);
}
av_buffer_unref(&ist->hw_frames_ctx);
ist->hwaccel_ctx = 0;
ist->hwaccel_uninit = 0;
ist->hwaccel_get_buffer = 0;
ist->hwaccel_retrieve_data = 0;
}
int vaapi_decode_init(AVCodecContext *avctx)
{
InputStream *ist = avctx->opaque;
AVVAAPIDeviceContext *hwctx;
AVVAAPIFramesContext *avfc;
VAAPIDecoderContext *ctx;
VAStatus vas;
int err;
int loglevel = (ist->hwaccel_id != HWACCEL_VAAPI ? AV_LOG_VERBOSE
: AV_LOG_ERROR);
if (ist->hwaccel_ctx)
vaapi_decode_uninit(avctx);
// We have -hwaccel without -vaapi_device, so just initialise here with
// the device passed as -hwaccel_device (if -vaapi_device was passed, it
// will always have been called before now).
if (!hw_device_ctx) {
err = vaapi_device_init(ist->hwaccel_device);
if (err < 0)
return err;
}
ctx = av_mallocz(sizeof(*ctx));
if (!ctx)
return AVERROR(ENOMEM);
ctx->class = &vaapi_class;
ctx->device_ref = av_buffer_ref(hw_device_ctx);
ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
ctx->va_config = VA_INVALID_ID;
ctx->va_context = VA_INVALID_ID;
hwctx = ctx->device->hwctx;
ctx->output_format = ist->hwaccel_output_format;
err = vaapi_build_decoder_config(ctx, avctx,
ist->hwaccel_id != HWACCEL_VAAPI);
if (err < 0) {
av_log(ctx, loglevel, "No supported configuration for this codec.");
goto fail;
}
avctx->pix_fmt = ctx->output_format;
ctx->frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
if (!ctx->frames_ref) {
av_log(ctx, loglevel, "Failed to create VAAPI frame context.\n");
err = AVERROR(ENOMEM);
goto fail;
}
ctx->frames = (AVHWFramesContext*)ctx->frames_ref->data;
ctx->frames->format = AV_PIX_FMT_VAAPI;
ctx->frames->sw_format = ctx->decode_format;
ctx->frames->width = ctx->decode_width;
ctx->frames->height = ctx->decode_height;
ctx->frames->initial_pool_size = ctx->decode_surfaces;
err = av_hwframe_ctx_init(ctx->frames_ref);
if (err < 0) {
av_log(ctx, loglevel, "Failed to initialise VAAPI frame "
"context: %d\n", err);
goto fail;
}
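// The initialised frame pool exposes the allocated VA surface IDs, which
// vaCreateContext() below needs.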
avfc = ctx->frames->hwctx;
vas = vaCreateContext(hwctx->display, ctx->va_config,
ctx->decode_width, ctx->decode_height,
VA_PROGRESSIVE,
avfc->surface_ids, avfc->nb_surfaces,
&ctx->va_context);
if (vas != VA_STATUS_SUCCESS) {
av_log(ctx, AV_LOG_ERROR, "Failed to create decode pipeline "
"context: %d (%s).\n", vas, vaErrorStr(vas));
err = AVERROR(EINVAL);
goto fail;
}
av_log(ctx, AV_LOG_DEBUG, "VAAPI decoder (re)init complete.\n");
// We would like to set this on the AVCodecContext for use by whoever gets
// the frames from the decoder, but unfortunately the AVCodecContext we
// have here need not be the "real" one (H.264 makes many copies for
// threading purposes). To avoid the problem, we instead store it in the
// InputStream and propagate it from there.
ist->hw_frames_ctx = av_buffer_ref(ctx->frames_ref);
if (!ist->hw_frames_ctx) {
err = AVERROR(ENOMEM);
goto fail;
}
ist->hwaccel_ctx = ctx;
ist->hwaccel_uninit = vaapi_decode_uninit;
ist->hwaccel_get_buffer = vaapi_get_buffer;
ist->hwaccel_retrieve_data = vaapi_retrieve_data;
ctx->decoder_vaapi_context.display = hwctx->display;
ctx->decoder_vaapi_context.config_id = ctx->va_config;
ctx->decoder_vaapi_context.context_id = ctx->va_context;
avctx->hwaccel_context = &ctx->decoder_vaapi_context;
return 0;
fail:
vaapi_decode_uninit(avctx);
return err;
}
static AVClass *vaapi_log = &vaapi_class;
av_cold int vaapi_device_init(const char *device)
{
int err;
err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
device, NULL, 0);
if (err < 0) {
av_log(&vaapi_log, AV_LOG_ERROR, "Failed to create a VAAPI device\n");
return err;
}
return 0;
}
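
/* A minimal sketch, not part of the file above: it shows how the libavutil
 * hwcontext calls used by this decoder fit together, creating a VAAPI device
 * and allocating a pool of hardware surfaces on it.  vaapi_retrieve_data()
 * above then uses av_hwframe_transfer_data() to copy a decoded surface into
 * a software frame.  The device (NULL = default), sw_format, dimensions and
 * pool size here are illustrative assumptions, not values taken from the
 * code above. */
#include "libavutil/buffer.h"
#include "libavutil/error.h"
#include "libavutil/hwcontext.h"
#include "libavutil/pixfmt.h"

static int example_vaapi_setup(AVBufferRef **frames_out)
{
    AVBufferRef *device_ref = NULL, *frames_ref = NULL;
    AVHWFramesContext *frames;
    int err;

    // Open a VAAPI device; NULL lets libavutil pick a default one.
    err = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VAAPI,
                                 NULL, NULL, 0);
    if (err < 0)
        return err;

    // Allocate a pool of hardware surfaces backed by that device.
    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref) {
        av_buffer_unref(&device_ref);
        return AVERROR(ENOMEM);
    }
    frames            = (AVHWFramesContext*)frames_ref->data;
    frames->format    = AV_PIX_FMT_VAAPI;
    frames->sw_format = AV_PIX_FMT_NV12; // assumed; the real code picks this
                                         // from the queried constraints
    frames->width     = 1920;            // assumed
    frames->height    = 1080;            // assumed
    frames->initial_pool_size = 20;      // assumed

    err = av_hwframe_ctx_init(frames_ref);
    if (err < 0) {
        av_buffer_unref(&frames_ref);
        av_buffer_unref(&device_ref);
        return err;
    }

    // The frames context holds its own reference to the device, so the local
    // device reference can be dropped once initialisation has succeeded.
    av_buffer_unref(&device_ref);
    *frames_out = frames_ref;
    return 0;
}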


@@ -18,21 +18,49 @@
#include <stdint.h>
#include <vdpau/vdpau.h>
#include <vdpau/vdpau_x11.h>
#include <X11/Xlib.h>
#include "ffmpeg.h"
#include "libavcodec/vdpau.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vdpau.h"
#include "libavutil/pixfmt.h"
typedef struct VDPAUContext {
AVBufferRef *hw_frames_ctx;
Display *dpy;
VdpDevice device;
VdpDecoder decoder;
VdpGetProcAddress *get_proc_address;
VdpGetErrorString *get_error_string;
VdpGetInformationString *get_information_string;
VdpDeviceDestroy *device_destroy;
#if 1 // for ffmpeg's older vdpau API, not the oldest though
VdpDecoderCreate *decoder_create;
VdpDecoderDestroy *decoder_destroy;
VdpDecoderRender *decoder_render;
#endif
VdpVideoSurfaceCreate *video_surface_create;
VdpVideoSurfaceDestroy *video_surface_destroy;
VdpVideoSurfaceGetBitsYCbCr *video_surface_get_bits;
VdpVideoSurfaceGetParameters *video_surface_get_parameters;
VdpVideoSurfaceQueryGetPutBitsYCbCrCapabilities *video_surface_query;
AVFrame *tmp_frame;
enum AVPixelFormat pix_fmt;
VdpYCbCrFormat vdpau_format;
} VDPAUContext;
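// vdpau_api_ver == 1 selects the older per-decoder VDPAU path
// (vdpau_old_init() below); 2 selects the generic hwaccel path.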
int vdpau_api_ver = 2;
static void vdpau_uninit(AVCodecContext *s)
{
InputStream *ist = s->opaque;
@@ -42,54 +70,140 @@ static void vdpau_uninit(AVCodecContext *s)
ist->hwaccel_get_buffer = NULL;
ist->hwaccel_retrieve_data = NULL;
av_buffer_unref(&ctx->hw_frames_ctx);
if (ctx->decoder_destroy)
ctx->decoder_destroy(ctx->decoder);
if (ctx->device_destroy)
ctx->device_destroy(ctx->device);
if (ctx->dpy)
XCloseDisplay(ctx->dpy);
av_frame_free(&ctx->tmp_frame);
av_freep(&ist->hwaccel_ctx);
av_freep(&s->hwaccel_context);
}
static void vdpau_release_buffer(void *opaque, uint8_t *data)
{
VdpVideoSurface surface = *(VdpVideoSurface*)data;
VDPAUContext *ctx = opaque;
ctx->video_surface_destroy(surface);
av_freep(&data);
}
static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
InputStream *ist = s->opaque;
VDPAUContext *ctx = ist->hwaccel_ctx;
VdpVideoSurface *surface;
VdpStatus err;
VdpChromaType chroma;
uint32_t width, height;
return av_hwframe_get_buffer(ctx->hw_frames_ctx, frame, 0);
av_assert0(frame->format == AV_PIX_FMT_VDPAU);
if (av_vdpau_get_surface_parameters(s, &chroma, &width, &height))
return AVERROR(ENOSYS);
surface = av_malloc(sizeof(*surface));
if (!surface)
return AVERROR(ENOMEM);
frame->buf[0] = av_buffer_create((uint8_t*)surface, sizeof(*surface),
vdpau_release_buffer, ctx,
AV_BUFFER_FLAG_READONLY);
if (!frame->buf[0]) {
av_freep(&surface);
return AVERROR(ENOMEM);
}
// properly we should keep a pool of surfaces instead of creating
// them anew for each frame, but since we don't care about speed
// much in this code, we don't bother
err = ctx->video_surface_create(ctx->device, chroma, width, height,
surface);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error allocating a VDPAU video surface: %s\n",
ctx->get_error_string(err));
av_buffer_unref(&frame->buf[0]);
return AVERROR_UNKNOWN;
}
frame->data[3] = (uint8_t*)(uintptr_t)*surface;
return 0;
}
static int vdpau_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
VdpVideoSurface surface = (VdpVideoSurface)(uintptr_t)frame->data[3];
InputStream *ist = s->opaque;
VDPAUContext *ctx = ist->hwaccel_ctx;
int ret;
VdpStatus err;
int ret, chroma_type;
ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0);
err = ctx->video_surface_get_parameters(surface, &chroma_type,
&ctx->tmp_frame->width,
&ctx->tmp_frame->height);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error getting surface parameters: %s\n",
ctx->get_error_string(err));
return AVERROR_UNKNOWN;
}
ctx->tmp_frame->format = ctx->pix_fmt;
ret = av_frame_get_buffer(ctx->tmp_frame, 32);
if (ret < 0)
return ret;
ret = av_frame_copy_props(ctx->tmp_frame, frame);
if (ret < 0) {
av_frame_unref(ctx->tmp_frame);
return ret;
ctx->tmp_frame->width = frame->width;
ctx->tmp_frame->height = frame->height;
err = ctx->video_surface_get_bits(surface, ctx->vdpau_format,
(void * const *)ctx->tmp_frame->data,
ctx->tmp_frame->linesize);
if (err != VDP_STATUS_OK) {
av_log(NULL, AV_LOG_ERROR, "Error retrieving frame data from VDPAU: %s\n",
ctx->get_error_string(err));
ret = AVERROR_UNKNOWN;
goto fail;
}
if (ctx->vdpau_format == VDP_YCBCR_FORMAT_YV12)
FFSWAP(uint8_t*, ctx->tmp_frame->data[1], ctx->tmp_frame->data[2]);
ret = av_frame_copy_props(ctx->tmp_frame, frame);
if (ret < 0)
goto fail;
av_frame_unref(frame);
av_frame_move_ref(frame, ctx->tmp_frame);
return 0;
fail:
av_frame_unref(ctx->tmp_frame);
return ret;
}
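// Candidate surface readback formats: each entry pairs a VDPAU YCbCr format
// with the AVPixelFormat it maps to; the first one the device reports as
// supported is used.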
static const int vdpau_formats[][2] = {
{ VDP_YCBCR_FORMAT_YV12, AV_PIX_FMT_YUV420P },
{ VDP_YCBCR_FORMAT_NV12, AV_PIX_FMT_NV12 },
{ VDP_YCBCR_FORMAT_YUYV, AV_PIX_FMT_YUYV422 },
{ VDP_YCBCR_FORMAT_UYVY, AV_PIX_FMT_UYVY422 },
};
static int vdpau_alloc(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
AVVDPAUContext *vdpau_ctx;
VDPAUContext *ctx;
int ret;
AVBufferRef *device_ref = NULL;
AVHWDeviceContext *device_ctx;
AVVDPAUDeviceContext *device_hwctx;
AVHWFramesContext *frames_ctx;
const char *display, *vendor;
VdpStatus err;
int i;
ctx = av_mallocz(sizeof(*ctx));
if (!ctx)
@@ -104,48 +218,145 @@ static int vdpau_alloc(AVCodecContext *s)
if (!ctx->tmp_frame)
goto fail;
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VDPAU,
ist->hwaccel_device, NULL, 0);
if (ret < 0)
ctx->dpy = XOpenDisplay(ist->hwaccel_device);
if (!ctx->dpy) {
av_log(NULL, loglevel, "Cannot open the X11 display %s.\n",
XDisplayName(ist->hwaccel_device));
goto fail;
device_ctx = (AVHWDeviceContext*)device_ref->data;
device_hwctx = device_ctx->hwctx;
}
display = XDisplayString(ctx->dpy);
ctx->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
if (!ctx->hw_frames_ctx)
err = vdp_device_create_x11(ctx->dpy, XDefaultScreen(ctx->dpy), &ctx->device,
&ctx->get_proc_address);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel, "VDPAU device creation on X11 display %s failed.\n",
display);
goto fail;
av_buffer_unref(&device_ref);
}
frames_ctx = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
frames_ctx->format = AV_PIX_FMT_VDPAU;
frames_ctx->sw_format = s->sw_pix_fmt;
frames_ctx->width = s->coded_width;
frames_ctx->height = s->coded_height;
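// Resolve a VDPAU entry point via get_proc_address and store it in the
// context; any failure aborts initialisation.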
#define GET_CALLBACK(id, result) \
do { \
void *tmp; \
err = ctx->get_proc_address(ctx->device, id, &tmp); \
if (err != VDP_STATUS_OK) { \
av_log(NULL, loglevel, "Error getting the " #id " callback.\n"); \
goto fail; \
} \
ctx->result = tmp; \
} while (0)
ret = av_hwframe_ctx_init(ctx->hw_frames_ctx);
if (ret < 0)
GET_CALLBACK(VDP_FUNC_ID_GET_ERROR_STRING, get_error_string);
GET_CALLBACK(VDP_FUNC_ID_GET_INFORMATION_STRING, get_information_string);
GET_CALLBACK(VDP_FUNC_ID_DEVICE_DESTROY, device_destroy);
if (vdpau_api_ver == 1) {
GET_CALLBACK(VDP_FUNC_ID_DECODER_CREATE, decoder_create);
GET_CALLBACK(VDP_FUNC_ID_DECODER_DESTROY, decoder_destroy);
GET_CALLBACK(VDP_FUNC_ID_DECODER_RENDER, decoder_render);
}
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_CREATE, video_surface_create);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_DESTROY, video_surface_destroy);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_BITS_Y_CB_CR, video_surface_get_bits);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_GET_PARAMETERS, video_surface_get_parameters);
GET_CALLBACK(VDP_FUNC_ID_VIDEO_SURFACE_QUERY_GET_PUT_BITS_Y_CB_CR_CAPABILITIES,
video_surface_query);
for (i = 0; i < FF_ARRAY_ELEMS(vdpau_formats); i++) {
VdpBool supported;
err = ctx->video_surface_query(ctx->device, VDP_CHROMA_TYPE_420,
vdpau_formats[i][0], &supported);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel,
"Error querying VDPAU surface capabilities: %s\n",
ctx->get_error_string(err));
goto fail;
}
if (supported)
break;
}
if (i == FF_ARRAY_ELEMS(vdpau_formats)) {
av_log(NULL, loglevel,
"No supported VDPAU format for retrieving the data.\n");
return AVERROR(EINVAL);
}
ctx->vdpau_format = vdpau_formats[i][0];
ctx->pix_fmt = vdpau_formats[i][1];
if (vdpau_api_ver == 1) {
vdpau_ctx = av_vdpau_alloc_context();
if (!vdpau_ctx)
goto fail;
vdpau_ctx->render = ctx->decoder_render;
s->hwaccel_context = vdpau_ctx;
} else
if (av_vdpau_bind_context(s, ctx->device, ctx->get_proc_address,
AV_HWACCEL_FLAG_IGNORE_LEVEL))
goto fail;
if (av_vdpau_bind_context(s, device_hwctx->device, device_hwctx->get_proc_address, 0))
goto fail;
av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU to decode input stream #%d:%d.\n",
ist->file_index, ist->st->index);
ctx->get_information_string(&vendor);
av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU -- %s -- on X11 display %s, "
"to decode input stream #%d:%d.\n", vendor,
display, ist->file_index, ist->st->index);
return 0;
fail:
av_log(NULL, loglevel, "VDPAU init failed for stream #%d:%d.\n",
ist->file_index, ist->st->index);
av_buffer_unref(&device_ref);
vdpau_uninit(s);
return AVERROR(EINVAL);
}
static int vdpau_old_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
AVVDPAUContext *vdpau_ctx;
VDPAUContext *ctx;
VdpStatus err;
int profile, ret;
if (!ist->hwaccel_ctx) {
ret = vdpau_alloc(s);
if (ret < 0)
return ret;
}
ctx = ist->hwaccel_ctx;
vdpau_ctx = s->hwaccel_context;
ret = av_vdpau_get_profile(s, &profile);
if (ret < 0) {
av_log(NULL, loglevel, "No known VDPAU decoder profile for this stream.\n");
return AVERROR(EINVAL);
}
if (ctx->decoder)
ctx->decoder_destroy(ctx->decoder);
err = ctx->decoder_create(ctx->device, profile,
s->coded_width, s->coded_height,
16, &ctx->decoder);
if (err != VDP_STATUS_OK) {
av_log(NULL, loglevel, "Error creating the VDPAU decoder: %s\n",
ctx->get_error_string(err));
return AVERROR_UNKNOWN;
}
vdpau_ctx->decoder = ctx->decoder;
ist->hwaccel_get_buffer = vdpau_get_buffer;
ist->hwaccel_retrieve_data = vdpau_retrieve_data;
return 0;
}
int vdpau_init(AVCodecContext *s)
{
InputStream *ist = s->opaque;
if (vdpau_api_ver == 1)
return vdpau_old_init(s);
if (!ist->hwaccel_ctx) {
int ret = vdpau_alloc(s);
if (ret < 0)


@@ -16,12 +16,9 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if HAVE_UTGETOSTYPEFROMSTRING
#include <CoreServices/CoreServices.h>
#endif
#include "config.h"
#include "libavcodec/avcodec.h"
#if CONFIG_VDA
# include "libavcodec/vda.h"
@@ -157,13 +154,7 @@ int videotoolbox_init(AVCodecContext *s)
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
videotoolbox_pixfmt,
kCFStringEncodingUTF8);
#if HAVE_UTGETOSTYPEFROMSTRING
vtctx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
#else
av_log(s, loglevel, "UTGetOSTypeFromString() is not available "
"on this platform, %s pixel format can not be honored from "
"the command line\n", videotoolbox_pixfmt);
#endif
ret = av_videotoolbox_default_init2(s, vtctx);
CFRelease(pixfmt_str);
}
@@ -177,13 +168,7 @@ int videotoolbox_init(AVCodecContext *s)
CFStringRef pixfmt_str = CFStringCreateWithCString(kCFAllocatorDefault,
videotoolbox_pixfmt,
kCFStringEncodingUTF8);
#if HAVE_UTGETOSTYPEFROMSTRING
vdactx->cv_pix_fmt_type = UTGetOSTypeFromString(pixfmt_str);
#else
av_log(s, loglevel, "UTGetOSTypeFromString() is not available "
"on this platform, %s pixel format can not be honored from "
"the command line\n", videotoolbox_pixfmt);
#endif
ret = av_vda_default_init2(s, vdactx);
CFRelease(pixfmt_str);
}

996 ffplay.c

File diff suppressed because it is too large.

641 ffprobe.c

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -42,8 +42,8 @@ static void report_config_error(const char *filename, int line_num,
int log_level, int *errors, const char *fmt,
...);
#define ERROR(...) report_config_error(config->filename, config->line_num,\
AV_LOG_ERROR, &config->errors, __VA_ARGS__)
#define ERROR(...) report_config_error(config->filename, config->line_num,\
AV_LOG_ERROR, &config->errors, __VA_ARGS__)
#define WARNING(...) report_config_error(config->filename, config->line_num,\
AV_LOG_WARNING, &config->warnings, __VA_ARGS__)
@@ -116,8 +116,7 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
{
char arg[1024];
FFServerIPAddressACL acl;
FFServerIPAddressACL *nacl;
FFServerIPAddressACL **naclp;
int errors = 0;
ffserver_get_arg(arg, sizeof(arg), &p);
if (av_strcasecmp(arg, "allow") == 0)
@@ -127,7 +126,7 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
else {
fprintf(stderr, "%s:%d: ACL action '%s' should be ALLOW or DENY.\n",
filename, line_num, arg);
goto bail;
errors++;
}
ffserver_get_arg(arg, sizeof(arg), &p);
@@ -136,10 +135,9 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
fprintf(stderr,
"%s:%d: ACL refers to invalid host or IP address '%s'\n",
filename, line_num, arg);
goto bail;
}
acl.last = acl.first;
errors++;
} else
acl.last = acl.first;
ffserver_get_arg(arg, sizeof(arg), &p);
@@ -148,37 +146,37 @@ void ffserver_parse_acl_row(FFServerStream *stream, FFServerStream* feed,
fprintf(stderr,
"%s:%d: ACL refers to invalid host or IP address '%s'\n",
filename, line_num, arg);
goto bail;
errors++;
}
}
nacl = av_mallocz(sizeof(*nacl));
naclp = 0;
if (!errors) {
FFServerIPAddressACL *nacl = av_mallocz(sizeof(*nacl));
FFServerIPAddressACL **naclp = 0;
acl.next = 0;
*nacl = acl;
acl.next = 0;
*nacl = acl;
if (stream)
naclp = &stream->acl;
else if (feed)
naclp = &feed->acl;
else if (ext_acl)
naclp = &ext_acl;
else
fprintf(stderr, "%s:%d: ACL found not in <Stream> or <Feed>\n",
filename, line_num);
if (stream)
naclp = &stream->acl;
else if (feed)
naclp = &feed->acl;
else if (ext_acl)
naclp = &ext_acl;
else {
fprintf(stderr, "%s:%d: ACL found not in <Stream> or <Feed>\n",
filename, line_num);
errors++;
}
if (naclp) {
while (*naclp)
naclp = &(*naclp)->next;
*naclp = nacl;
} else
av_free(nacl);
bail:
return;
if (naclp) {
while (*naclp)
naclp = &(*naclp)->next;
*naclp = nacl;
} else
av_free(nacl);
}
}
/* add a codec and set the default parameters */
@@ -460,7 +458,7 @@ static int ffserver_set_int_param(int *dest, const char *value, int factor,
if (tmp < min || tmp > max)
goto error;
if (factor) {
if (tmp == INT_MIN || FFABS(tmp) > INT_MAX / FFABS(factor))
if (FFABS(tmp) > INT_MAX / FFABS(factor))
goto error;
tmp *= factor;
}
@@ -685,8 +683,8 @@ static int ffserver_parse_config_global(FFServerConfig *config, const char *cmd,
return 0;
}
static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd,
const char **p, FFServerStream **pfeed)
static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, const char **p,
FFServerStream **pfeed)
{
FFServerStream *feed;
char arg[1024];
@@ -793,8 +791,7 @@ static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd,
return 0;
}
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
const char **p,
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p,
FFServerStream **pstream)
{
char arg[1024], arg2[1024];
@@ -1138,8 +1135,6 @@ static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd,
av_dict_free(&config->audio_opts);
avcodec_free_context(&config->dummy_vctx);
avcodec_free_context(&config->dummy_actx);
config->no_video = 0;
config->no_audio = 0;
*pstream = NULL;
} else if (!av_strcasecmp(cmd, "File") ||
!av_strcasecmp(cmd, "ReadOnlyFile")) {


@@ -1,4 +0,0 @@
/*_tablegen
/*_tables.c
/*_tables.h
/bsf_list.c


@@ -532,7 +532,7 @@ static int decode_i_block(FourXContext *f, int16_t *block)
}
i += code >> 4;
if (i >= 64) {
av_log(f->avctx, AV_LOG_ERROR, "run %d overflow\n", i);
av_log(f->avctx, AV_LOG_ERROR, "run %d oveflow\n", i);
return 0;
}


@@ -28,6 +28,7 @@
* Supports: PAL8 (RGB 8bpp, paletted)
* : BGR24 (RGB 24bpp) (can also output it as RGB32)
* : RGB32 (RGB 32bpp, 4th plane is alpha)
*
*/
#include <stdio.h>
@@ -41,7 +42,7 @@
static const enum AVPixelFormat pixfmt_rgb24[] = {
AV_PIX_FMT_BGR24, AV_PIX_FMT_0RGB32, AV_PIX_FMT_NONE };
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE };
typedef struct EightBpsContext {
AVCodecContext *avctx;
@@ -65,7 +66,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
unsigned int dlen, p, row;
const unsigned char *lp, *dp, *ep;
unsigned char count;
unsigned int px_inc;
unsigned int planes = c->planes;
unsigned char *planemap = c->planemap;
int ret;
@@ -78,8 +78,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
/* Set data pointer after line lengths */
dp = encoded + planes * (height << 1);
px_inc = planes + (avctx->pix_fmt == AV_PIX_FMT_0RGB32);
for (p = 0; p < planes; p++) {
/* Lines length pointer for this plane */
lp = encoded + p * (height << 1);
@@ -98,21 +96,21 @@ static int decode_frame(AVCodecContext *avctx, void *data,
if ((count = *dp++) <= 127) {
count++;
dlen -= count + 1;
if (pixptr_end - pixptr < count * px_inc)
if (pixptr_end - pixptr < count * planes)
break;
if (ep - dp < count)
return AVERROR_INVALIDDATA;
while (count--) {
*pixptr = *dp++;
pixptr += px_inc;
pixptr += planes;
}
} else {
count = 257 - count;
if (pixptr_end - pixptr < count * px_inc)
if (pixptr_end - pixptr < count * planes)
break;
while (count--) {
*pixptr = *dp;
pixptr += px_inc;
pixptr += planes;
}
dp++;
dlen -= 2;
@@ -122,15 +120,12 @@ static int decode_frame(AVCodecContext *avctx, void *data,
}
if (avctx->bits_per_coded_sample <= 8) {
int size;
const uint8_t *pal = av_packet_get_side_data(avpkt,
AV_PKT_DATA_PALETTE,
&size);
if (pal && size == AVPALETTE_SIZE) {
NULL);
if (pal) {
frame->palette_has_changed = 1;
memcpy(c->pal, pal, AVPALETTE_SIZE);
} else if (pal) {
av_log(avctx, AV_LOG_ERROR, "Palette size %d is wrong\n", size);
}
memcpy (frame->data[1], c->pal, AVPALETTE_SIZE);

Some files were not shown because too many files have changed in this diff.