Mirror of https://git.ffmpeg.org/ffmpeg.git
Synced 2025-12-06 06:49:59 +01:00
Compare commits
354 Commits
.gitignore (vendored): 6 changed lines

@@ -19,12 +19,8 @@
*.swp
*.ver
*.version
*.metal.air
*.metallib
*.metallib.c
*.ptx
*.ptx.c
*.ptx.gz
*_g
\#*
.\#*
@@ -35,8 +31,8 @@
/ffprobe
/config.asm
/config.h
/config_components.h
/coverage.info
/avversion.h
/lcov/
/src
/mapfile

.mailmap: 24 changed lines

@@ -1,24 +0,0 @@
<jeebjp@gmail.com> <jan.ekstrom@aminocom.com>
<sw@jkqxz.net> <mrt@jkqxz.net>
<u@pkh.me> <cboesch@gopro.com>
<zhilizhao@tencent.com> <quinkblack@foxmail.com>
<zhilizhao@tencent.com> <wantlamy@gmail.com>
<modmaker@google.com> <modmaker-at-google.com@ffmpeg.org>
<stebbins@jetheaddev.com> <jstebbins@jetheaddev.com>
<barryjzhao@tencent.com> <mypopydev@gmail.com>
<barryjzhao@tencent.com> <jun.zhao@intel.com>
<josh@itanimul.li> <joshdk@obe.tv>
<michael@niedermayer.cc> <michaelni@gmx.at>
<linjie.justin.fu@gmail.com> <linjie.fu@intel.com>
<linjie.justin.fu@gmail.com> <fulinjie@zju.edu.cn>
<ceffmpeg@gmail.com> <cehoyos@ag.or.at>
<ceffmpeg@gmail.com> <cehoyos@rainbow.studorg.tuwien.ac.at>
<ffmpeg@gyani.pro> <gyandoshi@gmail.com>
<atomnuker@gmail.com> <rpehlivanov@obe.tv>
<lizhong1008@gmail.com> <zhong.li@intel.com>
<lizhong1008@gmail.com> <zhongli_dev@126.com>
<andreas.rheinhardt@gmail.com> <andreas.rheinhardt@googlemail.com>
rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
<thilo.borgmann@mail.de> <thilo.borgmann@googlemail.com>
<liuqi05@kuaishou.com> <lq@chinaffmpeg.org>
<ruiling.song83@gmail.com> <ruiling.song@intel.com>

CREDITS: 4 changed lines

@@ -1,6 +1,6 @@
See the Git history of the project (https://git.ffmpeg.org/ffmpeg) to
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
get the names of people who have contributed to FFmpeg.

To check the log, you can type the command "git log" in the FFmpeg
source directory, or browse the online repository at
https://git.ffmpeg.org/ffmpeg
http://source.ffmpeg.org.

Changelog: 754 changed lines

@@ -1,465 +1,303 @@
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 6.0.1:
|
||||
avcodec/4xm: Check for cfrm exhaustion
|
||||
avformat/mov: Disallow FTYP after streams
|
||||
doc/html: fix styling issue with Texinfo 7.0
|
||||
doc/html: support texinfo 7.0
|
||||
Changelog: update
|
||||
avformat/lafdec: Check for 0 parameters
|
||||
avformat/lafdec: Check for 0 parameters
|
||||
avfilter/buffersink: fix order of operation with = and <0
|
||||
avfilter/framesync: fix order of operation with = and <0
|
||||
tools/target_dec_fuzzer: Adjust threshold for CSCD
|
||||
avcodec/dovi_rpu: Use 64 bit in get_us/se_coeff()
|
||||
avformat/mov: Check that is_still_picture_avif has no trak based streams
|
||||
avformat/matroskadec: Fix declaration-after-statement warnings
|
||||
Update for FFmpeg 6.0.1
|
||||
fftools/ffmpeg_mux_init: Restrict disabling automatic copying of metadata
|
||||
avformat/rtsp: Use rtsp_st->stream_index
|
||||
avformat/rtsp: Use rtsp_st->stream_index
|
||||
avutil/tx_template: fix integer ovberflwo in fft3()
|
||||
avcodec/jpeg2000dec: Check image offset
|
||||
avformat/mxfdec: Check klv offset
|
||||
libavutil/ppc/cpu.c: check that AT_HWCAP2 is defined
|
||||
avcodec/h2645_parse: Avoid EAGAIN
|
||||
avcodec/xvididct: Make c* unsigned to avoid undefined overflows
|
||||
avcodec/bonk: Fix undefined overflow in predictor_calc_error()
|
||||
avformat/tmv: Check video chunk size
|
||||
avcodec/h264_parser: saturate dts a bit
|
||||
avformat/asfdec_f: Saturate presentation time in marker
|
||||
avformat/xwma: sanity check bits_per_coded_sample
|
||||
avformat/matroskadec: Check prebuffered_ns for overflow
|
||||
avformat/wavdec: Check left avio_tell for overflow
|
||||
avformat/tta: Better totalframes check
|
||||
avformat/rpl: Check for number_of_chunks overflow
|
||||
avformat/mov: compute absolute dts difference without overflow in mov_find_next_sample()
|
||||
avformat/jacosubdec: Check timeres
|
||||
avformat/jacosubdec: avoid signed integer overflows in get_shift()
|
||||
avformat/jacosubdec: Factorize code in get_shift() a bit
|
||||
avformat/sbgdec: Check for negative duration or un-representable end pts
|
||||
avcodec/escape124: Do not return random numbers
|
||||
avcodec/apedec: Fix an integer overflow in predictor_update_filter()
|
||||
tools/target_dec_fuzzer: Adjust wmapro threshold
|
||||
avcodec/wavarc: Allocate AV_INPUT_BUFFER_PADDING_SIZE
|
||||
avcodec/wavarc: Fix integer overflwo in do_stereo()
|
||||
avutil/tx_template: Fix some signed integer overflows in DECL_FFT5()
|
||||
avcodec/aacdec_template: Better avoidance of signed integer overflow in imdct_and_windowing_eld()
|
||||
tools/target_dec_fuzzer: Adjust threshold for MVHA
|
||||
avformat/avs: Check if return code is representable
|
||||
avcodec/flacdec: Fix integer overflow in "33bit" DECODER_SUBFRAME_FIXED_WIDE()
|
||||
avcodec/flacdec: Fix overflow in "33bit" decorrelate
|
||||
avcodec/lcldec: Make PNG filter addressing match the code afterwards
|
||||
avformat/westwood_vqa: Check chunk size
|
||||
avformat/sbgdec: Check for period overflow
|
||||
avformat/concatdec: Check in/outpoint for overflow
|
||||
avformat/mov: Check avif_info
|
||||
avformat/mxfdec: Remove this_partition
|
||||
avcodec/xvididct: Fix integer overflow in idct_row()
|
||||
avcodec/celp_math: avoid overflow in shift
|
||||
tools/target_dec_fuzzer: Adjust threshold for rtv1
|
||||
avformat/hls: reduce default max reload to 3
|
||||
avformat/format: Stop reading data at EOF during probing
|
||||
avcodec/bonk: Fix integer overflow in predictor_calc_error()
|
||||
avcodec/jpeg2000dec: jpeg2000 has its own lowres option
|
||||
avcodec/huffyuvdec: avoid undefined behavior with get_vlc2() failure
|
||||
avcodec/cscd: Fix "CamStudio Lossless Codec 1.0" gzip files
|
||||
avcodec/cscd: Check for CamStudio Lossless Codec 1.0 behavior in end check of LZO files
|
||||
avcodec/mpeg4videodec: consider lowres in dest_pcm[]
|
||||
avcodec/hevcdec: Fix undefined memcpy()
|
||||
avcodec/mpeg4videodec: more unsigned in amv computation
|
||||
avcodec/tta: fix signed overflow in decorrelate
|
||||
avcodec/apedec: remove unused variable
|
||||
avcodec/apedec: Fix 48khz 24bit below insane level
|
||||
avcodec/apedec: Fix CRC for 24bps and bigendian
|
||||
avcodec/wavarc: Check that nb_samples is not negative
|
||||
avcodec/wavarc: Check shift
|
||||
avcodec/xvididct: Fix integer overflow in idct_row()
|
||||
avformat/avr: Check sample rate
|
||||
avformat/imf_cpl: Replace NULL content_title_utf8 by ""
|
||||
avformat/imf_cpl: xmlNodeListGetString() can return NULL
|
||||
avcodec/aacdec_template: Fix undefined signed interger operations
|
||||
avcodec/wavarc: Fix k limit
|
||||
avcodec/rka: Fix integer overflow in decode_filter()
|
||||
avformat/rka: bps < 8 is invalid
|
||||
avcodec/pcm: allow Changing parameters
|
||||
avutil/tx_template: extend to 2M
|
||||
avcodec/jpeg2000dec: Check for reduction factor and image offset
|
||||
avutil/softfloat: Basic documentation for av_sincos_sf()
|
||||
avutil/softfloat: fix av_sincos_sf()
|
||||
tools/target_dec_fuzzer: Adjust threshold for speex
|
||||
avcodec/utils: fix 2 integer overflows in get_audio_frame_duration()
|
||||
avcodec/hevcdec: Avoid null pointer dereferences in MC
|
||||
avcodec/takdsp: Fix integer overflows
|
||||
avcodec/mpegvideo_dec: consider interlaced lowres 4:2:0 chroma in edge emulation check better
|
||||
avcodec/rka: use unsigned for buf0 additions
|
||||
avcodec/rka: Avoid undefined left shift
|
||||
avcodec: Ignoring errors is only possible before the input end
|
||||
avformat/jpegxl_probe: Forward error codes
|
||||
avformat/jpegxl_probe: check length instead of blindly reading
|
||||
avformat/jpegxl_probe: Remove intermediate macro obfuscation around get_bits*()
|
||||
avcodec/noise_bsf: Check for wrapped frames
|
||||
avformat/oggparsetheora: clip duration within 64bit
|
||||
avcodec/rka: avoid undefined multiply in cmode==0
|
||||
avcodec/rka: use 64bit for srate_pad computation
|
||||
avcodec/bonk: Avoid undefined integer overflow in predictor_calc_error()
|
||||
avformat/wavdec: Check that smv block fits in available space
|
||||
avcodec/adpcm: Fix integer overflow in intermediate in ADPCM_XMD
|
||||
avcodec/dpcm: fix undefined interger overflow in wady
|
||||
avcodec/tiff: add a zero DNG_LINEARIZATION_TABLE check
|
||||
avcodec/tak: Check remaining bits in ff_tak_decode_frame_header()
|
||||
avcodec/sonic: Fix two undefined integer overflows
|
||||
avcodec/utils: the IFF_ILBM implementation assumes that there are a multiple of 16 allocated
|
||||
avcodec/flacdec: Fix signed integre overflow
|
||||
avcodec/exr: Cleanup befor return
|
||||
avcodec/pngdec: Do not pass AVFrame into global header decode
|
||||
avcodec/pngdec: remove AVFrame argument from decode_iccp_chunk()
|
||||
avcodec/wavarc: Check order before using it to write the list
|
||||
avcodec/bonk: decode multiple passes in intlist_read() at once
|
||||
avcodec/vorbisdec: Check codebook float values to be finite
|
||||
avcodec/g2meet: Replace fake allocation avoidance for framebuf
|
||||
avutil/tx_priv: Use unsigned in BF() to avoid signed overflows
|
||||
avcodec/lcldec: More space for rgb24
|
||||
avcodec/lcldec: Support 4:1:1 and 4:2:2 with odd width
|
||||
libavcodec/lcldec: width and height should not be unsigned
|
||||
avformat/imf: fix invalid resource handling
|
||||
avcodec/escape124: Check that blocks are allocated before use
|
||||
avcodec/rka: Fix signed integer overflow in decode_filter()
|
||||
avcodec/huffyuvdec: Fix undefined behavior with shift
|
||||
avcodec/j2kenc: Replace RGB24 special case by generic test
|
||||
avcodec/j2kenc: Replace BGR48 / GRAY16 test by test for number of bits
|
||||
avcodec/j2kenc: simplify pixel format setup
|
||||
avcodec/j2kenc: Fix funky bpno errors on decoding
|
||||
avcodec/j2kenc: remove misleading pred value
|
||||
avcodec/j2kenc: fix 5/3 DWT identifer
|
||||
avcodec/vp3: Check width to avoid assertion failure
|
||||
avcodec/g729postfilter: Limit shift in long term filter
|
||||
avcodec/wavarc: Fix several integer overflows
|
||||
avcodec/tests/snowenc: Fix 2nd test
|
||||
avcodec/tests/snowenc: return a failure if DWT/IDWT mismatches
|
||||
avcodec/snowenc: Fix visual weight calculation
|
||||
avcodec/tests/snowenc: unbreak DWT tests
|
||||
avcodec/mpeg12dec: Check input size
|
||||
avcodec/escape124: Fix some return codes
|
||||
avcodec/escape124: fix signdness of end of input check
|
||||
Use https for repository links
|
||||
avcodec/nvdec_hevc: fail to initialize on unsupported profiles
|
||||
fftools/ffmpeg_enc: apply -top to individual encoded frames
|
||||
avcodec/on2avc: use correct fft sizes
|
||||
avcodec/on2avc: use the matching AVTX context for the 512 sized iMDCT
|
||||
examples: fix build of mux and resample_audio
|
||||
avcodec/nvenc: stop using deprecated rc modes with SDK 12.1
|
||||
configure: use non-deprecated nvenc GUID for conftest
|
||||
avcodec/x86/mathops: clip constants used with shift instructions within inline assembly
|
||||
avfilter/vsrc_ddagrab: calculate pointer position on rotated screens
|
||||
avfilter/vsrc_ddagrab: account for mouse-only frames during probing
|
||||
avcodec/aac_ac3_parser: add preprocessor checks for codec specific code
|
||||
avcodec/nvenc: handle frame durations and AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE
|
||||
Revert "lavc/nvenc: handle frame durations and AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE"
|
||||
Revert "avcodec/nvenc: fix b-frame DTS behavior with fractional framerates"
|
||||
avcodec/vdpau_mpeg4: fix order of quant matrix coefficients
|
||||
avcodec/vdpau_mpeg12: fix order of quant matrix coefficients
|
||||
avcodec/nvdec_mpeg4: fix order of quant matrix coefficients
|
||||
avcodec/nvdec_mpeg2: fix order of quant matrix coefficients
|
||||
fftools/ffmpeg_filter: fix leak of AVIOContext in read_binary()
|
||||
fftools/ffmpeg: avoid possible invalid reads with short -tag values
|
||||
avcodec/mp_cmp: reject invalid comparison function values
|
||||
avcodec/aacpsy: clip global_quality within the psy_vbr_map array boundaries
|
||||
avutil/wchar_filename: propagate MultiByteToWideChar() and WideCharToMultiByte() failures
|
||||
avformat/concatf: check if any nodes were allocated
|
||||
avcodec/nvenc: fix b-frame DTS behavior with fractional framerates
|
||||
avcodec/vorbisdec: export skip_samples instead of dropping frames
|
||||
fftools/ffmpeg_mux_init: avoid invalid reads in forced keyframe parsing
|
||||
lavfi/vf_vpp_qsv: set the right timestamp for AVERROR_EOF
|
||||
avfilter/vf_untile: swap the chroma shift values used for plane offsets
|
||||
lavc/decode: stop mangling last_pkt_props->opaque
|
||||
avcodec/nvenc: avoid failing b_ref_mode check when unset
|
||||
lavu/vulkan: fix handle type for 32-bit targets
|
||||
vulkan: Fix win/i386 calling convention
|
||||
avfilter/graphparser: fix filter instance name when an id is provided
|
||||
avcodec/aacps_tablegen: fix build error after avutil bump
|
||||
avcodec/nvenc: fix potential NULL pointer dereference
|
||||
|
||||
|
||||
version 6.0:
|
||||
- Radiance HDR image support
|
||||
- ddagrab (Desktop Duplication) video capture filter
|
||||
- ffmpeg -shortest_buf_duration option
|
||||
- ffmpeg now requires threading to be built
|
||||
- ffmpeg now runs every muxer in a separate thread
|
||||
- Add new mode to cropdetect filter to detect crop-area based on motion vectors and edges
|
||||
- VAAPI decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
|
||||
- WBMP (Wireless Application Protocol Bitmap) image format
|
||||
- a3dscope filter
|
||||
- bonk decoder and demuxer
|
||||
- Micronas SC-4 audio decoder
|
||||
- LAF demuxer
|
||||
- APAC decoder and demuxer
|
||||
- Media 100i decoders
|
||||
- DTS to PTS reorder bsf
|
||||
- ViewQuest VQC decoder
|
||||
- backgroundkey filter
|
||||
- nvenc AV1 encoding support
|
||||
- MediaCodec decoder via NDKMediaCodec
|
||||
- MediaCodec encoder
|
||||
- oneVPL support for QSV
|
||||
- QSV AV1 encoder
|
||||
- QSV decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
|
||||
- showcwt multimedia filter
|
||||
- corr video filter
|
||||
- adrc audio filter
|
||||
- afdelaysrc audio filter
|
||||
- WADY DPCM decoder and demuxer
|
||||
- CBD2 DPCM decoder
|
||||
- ssim360 video filter
|
||||
- ffmpeg CLI new options: -stats_enc_pre[_fmt], -stats_enc_post[_fmt],
|
||||
-stats_mux_pre[_fmt]
|
||||
- hstack_vaapi, vstack_vaapi and xstack_vaapi filters
|
||||
- XMD ADPCM decoder and demuxer
|
||||
- media100 to mjpegb bsf
|
||||
- ffmpeg CLI new option: -fix_sub_duration_heartbeat
|
||||
- WavArc decoder and demuxer
|
||||
- CrystalHD decoders deprecated
|
||||
- SDNS demuxer
|
||||
- RKA decoder and demuxer
|
||||
- filtergraph syntax in ffmpeg CLI now supports passing file contents
|
||||
as option values, by prefixing option name with '/'
|
||||
- hstack_qsv, vstack_qsv and xstack_qsv filters
|
||||
|
||||
|
||||
version 5.1:
|
||||
- add ipfs/ipns gateway support
|
||||
- dialogue enhance audio filter
|
||||
- dropped obsolete XvMC hwaccel
|
||||
- pcm-bluray encoder
|
||||
- DFPWM audio encoder/decoder and raw muxer/demuxer
|
||||
- SITI filter
|
||||
- Vizrt Binary Image encoder/decoder
|
||||
- avsynctest source filter
|
||||
- feedback video filter
|
||||
- pixelize video filter
|
||||
- colormap video filter
|
||||
- colorchart video source filter
|
||||
- multiply video filter
|
||||
- PGS subtitle frame merge bitstream filter
|
||||
- blurdetect filter
|
||||
- tiltshelf audio filter
|
||||
- QOI image format support
|
||||
- ffprobe -o option
|
||||
- virtualbass audio filter
|
||||
- VDPAU AV1 hwaccel
|
||||
- PHM image format support
|
||||
- remap_opencl filter
|
||||
- added chromakey_cuda filter
|
||||
- added bilateral_cuda filter
|
||||
|
||||
|
||||
version 5.0:
|
||||
- ADPCM IMA Westwood encoder
|
||||
- Westwood AUD muxer
|
||||
- ADPCM IMA Acorn Replay decoder
|
||||
- Argonaut Games CVG demuxer
|
||||
- Argonaut Games CVG muxer
|
||||
- Concatf protocol
|
||||
- afwtdn audio filter
|
||||
- audio and video segment filters
|
||||
- Apple Graphics (SMC) encoder
|
||||
- hsvkey and hsvhold video filters
|
||||
- adecorrelate audio filter
|
||||
- atilt audio filter
|
||||
- grayworld video filter
|
||||
- AV1 Low overhead bitstream format muxer
|
||||
- swscale slice threading
|
||||
- MSN Siren decoder
|
||||
- scharr video filter
|
||||
- apsyclip audio filter
|
||||
- morpho video filter
|
||||
- amr parser
|
||||
- (a)latency filters
|
||||
- GEM Raster image decoder
|
||||
- asdr audio filter
|
||||
- speex decoder
|
||||
- limitdiff video filter
|
||||
- xcorrelate video filter
|
||||
- varblur video filter
|
||||
- huesaturation video filter
|
||||
- colorspectrum source video filter
|
||||
- RTP packetizer for uncompressed video (RFC 4175)
|
||||
- bitpacked encoder
|
||||
- VideoToolbox VP9 hwaccel
|
||||
- VideoToolbox ProRes hwaccel
|
||||
- support loongarch.
|
||||
- aspectralstats audio filter
|
||||
- adynamicsmooth audio filter
|
||||
- libplacebo filter
|
||||
- vflip_vulkan, hflip_vulkan and flip_vulkan filters
|
||||
- adynamicequalizer audio filter
|
||||
- yadif_videotoolbox filter
|
||||
- VideoToolbox ProRes encoder
|
||||
- anlmf audio filter
|
||||
- IMF demuxer (experimental)
|
||||
|
||||
|
||||
version 4.4:
|
||||
- AudioToolbox output device
|
||||
- MacCaption demuxer
|
||||
- PGX decoder
|
||||
- chromanr video filter
|
||||
- VDPAU accelerated HEVC 10/12bit decoding
|
||||
- ADPCM IMA Ubisoft APM encoder
|
||||
- Rayman 2 APM muxer
|
||||
- AV1 encoding support SVT-AV1
|
||||
- Cineform HD encoder
|
||||
- ADPCM Argonaut Games encoder
|
||||
- Argonaut Games ASF muxer
|
||||
- AV1 Low overhead bitstream format demuxer
|
||||
- RPZA video encoder
|
||||
- ADPCM IMA MOFLEX decoder
|
||||
- MobiClip FastAudio decoder
|
||||
- MobiClip video decoder
|
||||
- MOFLEX demuxer
|
||||
- MODS demuxer
|
||||
- PhotoCD decoder
|
||||
- MCA demuxer
|
||||
- AV1 decoder (Hardware acceleration used only)
|
||||
- SVS demuxer
|
||||
- Argonaut Games BRP demuxer
|
||||
- DAT demuxer
|
||||
- aax demuxer
|
||||
- IPU decoder, parser and demuxer
|
||||
- Intel QSV-accelerated AV1 decoding
|
||||
- Argonaut Games Video decoder
|
||||
- libwavpack encoder removed
|
||||
- ACE demuxer
|
||||
- AVS3 demuxer
|
||||
- AVS3 video decoder via libuavs3d
|
||||
- Cintel RAW decoder
|
||||
- VDPAU accelerated VP9 10/12bit decoding
|
||||
- afreqshift and aphaseshift filters
|
||||
- High Voltage Software ADPCM encoder
|
||||
- LEGO Racers ALP (.tun & .pcm) muxer
|
||||
- AV1 VAAPI decoder
|
||||
- adenorm filter
|
||||
- ADPCM IMA AMV encoder
|
||||
- AMV muxer
|
||||
- NVDEC AV1 hwaccel
|
||||
- DXVA2/D3D11VA hardware accelerated AV1 decoding
|
||||
- speechnorm filter
|
||||
- SpeedHQ encoder
|
||||
- asupercut filter
|
||||
- asubcut filter
|
||||
- Microsoft Paint (MSP) version 2 decoder
|
||||
- Microsoft Paint (MSP) demuxer
|
||||
- AV1 monochrome encoding support via libaom >= 2.0.1
|
||||
- asuperpass and asuperstop filter
|
||||
- shufflepixels filter
|
||||
- tmidequalizer filter
|
||||
- estdif filter
|
||||
- epx filter
|
||||
- Dolby E parser
|
||||
- shear filter
|
||||
- kirsch filter
|
||||
- colortemperature filter
|
||||
- colorcontrast filter
|
||||
- PFM encoder
|
||||
- colorcorrect filter
|
||||
- binka demuxer
|
||||
- XBM parser
|
||||
- xbm_pipe demuxer
|
||||
- colorize filter
|
||||
- CRI parser
|
||||
- aexciter audio filter
|
||||
- exposure video filter
|
||||
- monochrome video filter
|
||||
- setts bitstream filter
|
||||
- vif video filter
|
||||
- OpenEXR image encoder
|
||||
- Simbiosis IMX decoder
|
||||
- Simbiosis IMX demuxer
|
||||
- Digital Pictures SGA demuxer and decoders
|
||||
- TTML subtitle encoder and muxer
|
||||
- identity video filter
|
||||
- msad video filter
|
||||
- gophers protocol
|
||||
- RIST protocol via librist
|
||||
|
||||
|
||||
version 4.3:
|
||||
- v360 filter
|
||||
- Intel QSV-accelerated MJPEG decoding
|
||||
- Intel QSV-accelerated VP9 decoding
|
||||
- Support for TrueHD in mp4
|
||||
- Support AMD AMF encoder on Linux (via Vulkan)
|
||||
- IMM5 video decoder
|
||||
- ZeroMQ protocol
|
||||
- support Sipro ACELP.KELVIN decoding
|
||||
- streamhash muxer
|
||||
- sierpinski video source
|
||||
- scroll video filter
|
||||
- photosensitivity filter
|
||||
- anlms filter
|
||||
- arnndn filter
|
||||
- bilateral filter
|
||||
- maskedmin and maskedmax filters
|
||||
- VDPAU VP9 hwaccel
|
||||
- median filter
|
||||
- QSV-accelerated VP9 encoding
|
||||
- AV1 encoding support via librav1e
|
||||
- AV1 frame merge bitstream filter
|
||||
- AV1 Annex B demuxer
|
||||
- axcorrelate filter
|
||||
- mvdv decoder
|
||||
- mvha decoder
|
||||
- MPEG-H 3D Audio support in mp4
|
||||
- thistogram filter
|
||||
- freezeframes filter
|
||||
- Argonaut Games ADPCM decoder
|
||||
- Argonaut Games ASF demuxer
|
||||
- xfade video filter
|
||||
- xfade_opencl filter
|
||||
- afirsrc audio filter source
|
||||
- pad_opencl filter
|
||||
- Simon & Schuster Interactive ADPCM decoder
|
||||
- Real War KVAG demuxer
|
||||
- CDToons video decoder
|
||||
- siren audio decoder
|
||||
- Rayman 2 ADPCM decoder
|
||||
- Rayman 2 APM demuxer
|
||||
- cas video filter
|
||||
- High Voltage Software ADPCM decoder
|
||||
- LEGO Racers ALP (.tun & .pcm) demuxer
|
||||
- AMQP 0-9-1 protocol (RabbitMQ)
|
||||
- Vulkan support
|
||||
- avgblur_vulkan, overlay_vulkan, scale_vulkan and chromaber_vulkan filters
|
||||
- ADPCM IMA MTF decoder
|
||||
- FWSE demuxer
|
||||
- DERF DPCM decoder
|
||||
- DERF demuxer
|
||||
- CRI HCA decoder
|
||||
- CRI HCA demuxer
|
||||
- overlay_cuda filter
|
||||
- switch from AvxSynth to AviSynth+ on Linux
|
||||
- mv30 decoder
|
||||
- Expanded styling support for 3GPP Timed Text Subtitles (movtext)
|
||||
- WebP parser
|
||||
- tmedian filter
|
||||
- maskedthreshold filter
|
||||
- Support for muxing pcm and pgs in m2ts
|
||||
- Cunning Developments ADPCM decoder
|
||||
- asubboost filter
|
||||
- Pro Pinball Series Soundbank demuxer
|
||||
- pcm_rechunk bitstream filter
|
||||
- scdet filter
|
||||
- NotchLC decoder
|
||||
- gradients source video filter
|
||||
- MediaFoundation encoder wrapper
|
||||
- untile filter
|
||||
- Simon & Schuster Interactive ADPCM encoder
|
||||
- PFM decoder
|
||||
- dblur video filter
|
||||
- Real War KVAG muxer
|
||||
version 4.2.2
|
||||
- cbs_mpeg2: Fix parsing the last unit
|
||||
- cbs_mpeg2: Rearrange start code search
|
||||
- cbs_mpeg2: Decompose Sequence End
|
||||
- cbs_mpeg2: Fix parsing of picture and slice headers
|
||||
- cbs: Remove useless initializations
|
||||
- mpeg2_metadata, cbs_mpeg2: Fix handling of colour_description
|
||||
- lavc/cbs_h2645_syntax_template: Fix memleak
|
||||
- avcodec/cbs: Fix potential overflow
|
||||
- avcodec/cbs: Factor out common code for writing units
|
||||
- avcodec/ffwavesynth: Fix undefined overflow in wavesynth_synth_sample()
|
||||
- avcodec/ffwavesynth: Fix undefined overflow in wavesynth_synth_sample()
|
||||
- avcodec/cook: Use 3 stage VLC decoding for channel_coupling
|
||||
- avcodec/wmalosslessdec: Fixes undefined overflow in dequantization in decode_subframe()
|
||||
- avcodec/sonic: Check e in get_symbol()
|
||||
- avcodec/twinvqdec: Correct overflow in block align check
|
||||
- avcodec/vc1dec: Fix "return -1" cases
|
||||
- avcodec/vc1dec: Free sprite_output_frame on error
|
||||
- avcodec/atrac9dec: Clamp band_ext_data to max that can be read if skipped.
|
||||
- avcodec/agm: Include block size in the MV check for flags == 3
|
||||
- avcodec/wmadec: Keep track of exponent initialization per channel
|
||||
- avcodec/iff: Check that video_size is large enough for the read parameters
|
||||
- avcodec/cbs_vp9: Check data_size
|
||||
- avcodec/cbs_vp9: Check index_size
|
||||
- avcodec/adpcm: Clip predictor for APC
|
||||
- avcodec/targa: Check colors vs. available space
|
||||
- avcodec/dstdec: Use get_ur_golomb_jpegls()
|
||||
- avcodec/wmavoice: Check remaining input in parse_packet_header()
|
||||
- avcodec/wmalosslessdec: Fix 2 overflows in mclms
|
||||
- avcodec/wmaprodec: Fixes integer overflow with 32bit samples
|
||||
- avcodec/adpcm: Fix invalid shift in xa_decode()
|
||||
- avcodec/wmalosslessdec: Fix several integer issues
|
||||
- avcodec/wmalosslessdec: Check that padding bits is not more than sample bits
|
||||
- avcodec/iff: Skip overflowing runs in decode_delta_d()
|
||||
- avcodec/pnm: Check that the header is not truncated
|
||||
- avcodec/mp3_header_decompress_bsf: Check sample_rate_index
|
||||
- avcodec/cbs_av1_syntax_template: Check num_y_points
|
||||
- avformat/rmdec: Initialize and sanity check offset in ivr_read_header()
|
||||
- avcodec/agm: Do not allow MVs out of the picture area as no edge is allocated
|
||||
- avcodec/apedec: Fix 2 integer overflows
|
||||
- avformat/id3v2: Fix double-free on error
|
||||
- avcodec/wmaprodec: Set packet_loss when we error out on a sanity check
|
||||
- avcodec/wmaprodec: Check offset
|
||||
- avcodec/truemotion2: Fix 2 integer overflows in tm2_low_res_block()
|
||||
- avcodec/wmaprodec: Check if the channel sum of all internal contexts match the external
|
||||
- avcodec/atrac9dec: Check q_unit_cnt more completely before using it to access at9_tab_band_ext_group
|
||||
- avcodec/fitsdec: Use lrint()
|
||||
- avcodec/g729dec: Avoid using buf_size
|
||||
- avcodec/g729dec: Factor block_size out
|
||||
- avcodec/g729dec: require buf_size to be non 0
|
||||
- avcodec/alac: Fix integer overflow in lpc_prediction() with sign
|
||||
- avcodec/wmaprodec: Fix buflen computation in save_bits()
|
||||
- avcodec/vc1_block: Fix integer overflow in AC rescaling in vc1_decode_i_block_adv()
|
||||
- avcodec/vmdaudio: Check chunk counts to avoid integer overflow
|
||||
- avformat/mxfdec: Clear metadata_sets_count in mxf_read_close()
|
||||
- avcodec/nuv: Use ff_set_dimensions()
|
||||
- avformat/vividas: Error out on audio packets in the absence of audio streams
|
||||
- avformat/vividas: Check and require 1 video stream
|
||||
- avcodec/ffwavesynth: Fix integer overflow with pink_ts_cur/next
|
||||
- avcodec/ralf: Fix integer overflows with the filter coefficient in decode_channel()
|
||||
- avcodec/g729dec: Use 64bit and clip in scalar product
|
||||
- avcodec/mxpegdec: Check for multiple SOF
|
||||
- avcodec/nuv: Move comptype check up
|
||||
- avcodec/wmavoice: Fix integer overflow in synth_frame()
|
||||
- avcodec/rawdec: Check bits_per_coded_sample more pedantically for 16bit cases
|
||||
- avutil/lfg: Correct index increment type to avoid undefined behavior
|
||||
- avcodec/cngdec: Remove AV_CODEC_CAP_DELAY
|
||||
- avcodec/iff: Move index use after check in decodeplane8()
|
||||
- avcodec/atrac3: Check for huge block aligns
|
||||
- avcodec/ralf: use multiply instead of shift to avoid undefined behavior in decode_block()
|
||||
- avcodec/wmadec: Require previous exponents for reuse
|
||||
- avcodec/vc1_block: Fix undefined behavior in ac prediction rescaling
|
||||
- avcodec/qdm2: The smallest header seems to have 2 bytes so treat 1 as invalid
|
||||
- avcodec/apedec: Fixes integer overflow of res+*data in do_apply_filter()
|
||||
- avcodec/sonic: Fix integer overflow in predictor_calc_error()
|
||||
- avformat/vividas: Add EOF check in val_1 loop in track_header()
|
||||
- avcodec/atrac9dec: Check precision_fine/coarse
|
||||
- avformat/mp3dec: Check that the frame fits within the probe buffer
|
||||
- vcodec/agm: Alloc based on coded dimensions
|
||||
- avcodec/wmaprodec: get frame during frame decode
|
||||
- avcodec/interplayacm: Fix overflow of last unused value
|
||||
- avcodec/adpcm: Fix undefined behavior with negative predictions in IMA OKI
|
||||
- avcodec/cook: Move up and extend block_align check
|
||||
- avcodec/sbcdec: Fix integer overflows in sbc_synthesize_four()
|
||||
- avcodec/twinvq: Check block_align
|
||||
- avcodec/cook: Enlarge gain table
|
||||
- avcodec/cook: Check samples_per_channel earlier
|
||||
- avcodec/atrac3plus: Check split point in fill mode 3
|
||||
- avcodec/wmavoice: Check sample_rate
|
||||
- avcodec/xsubdec: fix overflow in alpha handling
|
||||
- avcodec/iff: Check available space before entering loop in decode_long_vertical_delta2() / decode_long_vertical_delta()
|
||||
- avcodec/apedec: Fix integer overflow in filter_3800()
|
||||
- avutil/lfg: Document the AVLFG struct
|
||||
- avcodec/ffv1dec: Use a different error message for the slice level CRC
|
||||
- avcodec/apedec: Fix undefined integer overflow in long_filter_ehigh_3830()
|
||||
- avcodec/dstdec: Check that AC probabilities are within range
|
||||
- avcodec/dstdec: Check read_table() for failure
|
||||
- avformat/vividas: Fix n_sb_blocks Check
|
||||
- avcodec/snowenc: Set mb_num to avoid ratecontrol floating point divisions by 0.0
|
||||
- avcodec/snowenc: Fix 2 undefined shifts
|
||||
- avformat/nutenc: Do not pass NULL to memcmp() in get_needed_flags()
|
||||
- avcodec/aptx: Check the number of channels
|
||||
- avcodec/aacdec_template: Check samplerate
|
||||
- avcodec/truemotion2: Fix several integer overflows in tm2_low_res_block()
|
||||
- avcodec/utils: Check block_align
|
||||
- avcodec/wmalosslessdec: Fix some integer anomalies
|
||||
- avcodec/adpcm: Fix invalid shifts in ADPCM DTK
|
||||
- avcodec/apedec: Only clear the needed buffer space, instead of all
|
||||
- avcodec/libvorbisdec: Fix insufficient input checks leading to out of array reads
|
||||
- avcodec/g723_1dec: fix invalid shift with negative sid_gain
|
||||
- avcodec/vp5: Check render_x/y
|
||||
- avcodec/hcom: Check the root entry and the right entries
|
||||
- avcodec/qdrw: Check input for header/skiped space before get_buffer()
|
||||
- avcodec/ralf: Skip initializing unused filter variables
|
||||
- avcodec/takdec: Fix overflow with large sample rates
|
||||
- avcodec/atrac9dec: Set channels
|
||||
- avcodec/alsdec: Check that input space for header exists in read_diff_float_data()
|
||||
- avformat/pjsdec: Check duration for overflow
|
||||
- avcodec/agm: Check for reference frame earlier
|
||||
- avcodec/ptx: Check that the input contains at least one line
|
||||
- avcodec/alac: Fix integer overflow in LPC
|
||||
- avcodec/smacker: Fix integer overflows in pred[] in smka_decode_frame()
|
||||
- avcodec/aliaspixdec: Check input size against minimal picture size
|
||||
- avcodec/ffwavesynth: Fix integer overflows in pink noise addition
|
||||
- avcodec/vc1_block: Fixes integer overflow in vc1_decode_i_block_adv()
|
||||
- avcodec/wmalosslessdec: Check block_align
|
||||
- avcodec/g729dec: Avoid computing invalid temporary pointers for ff_acelp_weighted_vector_sum()
|
||||
- avcodec/g729postfilter: Fix left shift of negative value
|
||||
- avcodec/binkaudio: Check sample rate
|
||||
- avcodec/sbcdec: Fix integer overflows in sbc_synthesize_eight()
|
||||
- avcodec/adpcm: Check initial predictor for ADPCM_IMA_EA_EACS
|
||||
- avcodec/g723_1dec: Fix overflow in shift
|
||||
- avcodec/apedec: Fix integer overflow in predictor_update_3930()
|
||||
- avcodec/g729postfilter: Fix undefined intermediate pointers
|
||||
- avcodec/g729postfilter: Fix undefined shifts
|
||||
- avcodec/lsp: Fix undefined shifts in lsp2poly()
|
||||
- avcodec/adpcm: Fix left shifts in AV_CODEC_ID_ADPCM_EA
|
||||
- avformat/shortendec: Check k in probe
|
||||
- avfilter/vf_geq: Use av_clipd() instead of av_clipf()
|
||||
- avcodec/wmaprodec: Check that the streams channels do not exceed the overall channels
|
||||
- avcodec/qdmc: Check input space in qdmc_get_vlc()
|
||||
- avcodec/wmaprodec: Fix cleanup on error
|
||||
- avcodec/pcm: Check bits_per_coded_sample
|
||||
- avcodec/exr: Allow duplicate use of channel indexes
|
||||
- avcodec/fitsdec: Fail on 0 naxisn
|
||||
- avcodec/dxv: Subtract 12 earlier in dxv_decompress_cocg()
|
||||
- libavcodec/dxv: Remove redundant seek
|
||||
- avcodec/ituh263dec: Check input for minimal frame size
|
||||
- avcodec/truemotion1: Check that the input has enough space for a minimal index_stream
|
||||
- avformat/mpsubdec: Clear queue on error
|
||||
- avcodec/sunrast: Check that the input is large enough for the maximally compressed image
|
||||
- avcodec/sunrast: Check for availability of maplength before allocating image
|
||||
- avformat/subtitles: Check nb_subs in ff_subtitles_queue_finalize()
|
||||
- avcodec/vc1_block: Fix invalid left shift in vc1_decode_p_mb()
|
||||
- avcodec/wmaprodec: Check if there is a stream
|
||||
- avcodec/g2meet: Check for end of input in jpg_decode_block()
|
||||
- avcodec/g2meet: Check if adjusted pixel was on the stack
|
||||
- avformat/electronicarts: If no packet has been read at the end do not treat it as if theres a packet
|
||||
- avcodec/dxv: Check op_offset in dxv_decompress_yo()
|
||||
- avcodec/utils: Check sample_rate before opening the decoder
|
||||
- avcodec/aptx: Fix multiple shift anomalies
|
||||
- avcodec/fitsdec: fix use of uninitialised values
|
||||
- avcodec/motionpixels: Mark 2 functions as always_inline
|
||||
- avcodec/ituh263dec: Make the condition for the studio slice start code match between ff_h263_resync() and ff_mpeg4_decode_studio_slice_header()
|
||||
- avcodec/ralf: Fix integer overflow in decode_channel()
|
||||
- vcodec/vc1: compute rangex/y only for P/B frames
|
||||
- avcodec/vc1_pred: Fix invalid shifts in scaleforopp()
|
||||
- avcodec/vc1_block: Fix invalid shift with rangeredfrm
|
||||
- avcodec/vc1: Check for excessive resolution
|
||||
- avcodec/vc1: check REFDIST
|
||||
- avcodec/apedec: Fix several integer overflows in predictor_update_filter() and do_apply_filter()
|
||||
- avcodec/hevc_cabac: Tighten the limit on k in ff_hevc_cu_qp_delta_abs()
|
||||
- avcodec/4xm: Check index in decode_i_block() also in the path where its not used.
|
||||
- avcodec/loco: Check for end of input in the first line
|
||||
- avcodec/atrac3: Check block_align
|
||||
- avcodec/alsdec: Avoid dereferencing context pointer in inner interleave loop
|
||||
- avcodec/hcom: Check that there are dictionary entries
|
||||
- avcodec/fitsdec: Prevent division by 0 with huge data_max
|
||||
- avcodec/dstdec: Fix integer overflow in samples_per_frame computation
|
||||
- avcodec/g729_parser: Check block_size
|
||||
- avcodec/sbcdec: Initialize number of channels
|
||||
- avcodec/utils: Optimize ff_color_frame() using memcpy()
|
||||
- avcodec/aacdec: Check if we run out of input in read_stream_mux_config()
|
||||
- avcodec/utils: Use av_memcpy_backptr() in ff_color_frame()
|
||||
- avcodec/smacker: Fix integer overflow in signed int multiply in SMK_BLK_FILL
|
||||
- avcodec/alac: Fix invalid shifts in 20/24 bps
|
||||
- avcodec/alac: fix undefined behavior with INT_MIN in lpc_prediction()
|
||||
- avcodec/ffwavesynth: Fix integer overflow in timestamps
|
||||
- avformat/vividas: Test size and packet numbers a bit more
|
||||
- avformat/vividas: Check n_sb_blocks against input space
|
||||
- avcodec/dxv: Check op_offset in both directions
|
||||
- avcodec/adpcm: Check number of channels for MTAF
|
||||
- avcodec/sunrast: Fix indention
|
||||
- avcodec/sunrast: Fix return type for "unsupported (compression) type"
|
||||
- avcodec/utils: Check channels fully earlier
|
||||
- avformat/mov: Check for EOF in mov_read_meta()
|
||||
- avcodec/hevcdec: Fix memleak of a53_caption
|
||||
- avformat/vividas: Remove align offset which is always masked off
|
||||
- avformat/vividas: remove dead assignment
|
||||
- avformat/cdxl: Fix integer overflow in intermediate
|
||||
- avcodec/hevcdec: repeat character in skiped
|
||||
- repeat an even number of characters in occured
|
||||
- avcodec/gdv: Replace assert() checking bitstream by if()
|
||||
- libavcodec/utils: Free threads on init failure
|
||||
- avcodec/htmlsubtitles: Avoid locale dependant isdigit()
|
||||
- avcodec/alsdec: Check k from being outside what our implementation can handle
|
||||
- avcodec/takdec: Fix integer overflow in decorrelate()
|
||||
- avcodec/aacps: Fix integer overflows in hybrid_synthesis()
|
||||
- avcodec/mpeg4videodec: Fix integer overflow in mpeg4_decode_studio_block()
|
||||
- avcodec/vp56rac: delay signaling an error on truncated input
|
||||
- avcodec/pnm_parser: Use memchr() in pnm_parse()
|
||||
- tests: Fix bash errors in lavf_container tests.
|
||||
- avformat/matroskadec: Fix use-after-free when demuxing ProRes
|
||||
- avformat/matroskadec: Fix demuxing ProRes
|
||||
- avcodec/cbs_av1: fix array size for ar_coeffs_cb_plus_128 and ar_coeffs_cr_plus_128
|
||||
- avcodec/cbs_av1: avoid reading trailing bits when obu type is OBU_TILE_LIST
|
||||
- lavc/cbs_h2645: Fix incorrect max size of nalu unit
|
||||
- avcodec/extract_extradata_bsf: Don't unref uninitialized buffers
|
||||
- avformat/av1: Fix leak of dynamic buffer in case of parsing failure
|
||||
- libavformat/rtsp: return error if rtsp_hd_out is null instead of crash
|
||||
- cbs_h264: Fix missing inferred colour description fields
|
||||
- avcodec/cbs_av1: keep separate reference frame state for reading and writing
|
||||
- avcodec/cbs_av1: fix reading reference order hint in skip_mode_params()
|
||||
- avcodec/amfnec: allocate packets using av_new_packet()
|
||||
- avcodec/nvenc: make sure newly allocated packets are refcounted
|
||||
- lavc/mpeg4audio: add chan_config check to avoid indeterminate channels
|
||||
- aformat/movenc: add missing padding to output track extradata
|
||||
- avcodec/nvenc: add driver version info for SDK 9.1
|
||||
- avcodec/bsf: check that AVBSFInternal was allocated before dereferencing it
|
||||
|
||||
version 4.2.1:
|
||||
- avformat/vividas: check for tiny blocks using alignment
|
||||
- avcodec/vc1_pred: Fix refdist in scaleforopp()
|
||||
- avcodec/vorbisdec: fix FASTDIV usage for vr_type == 2
|
||||
- avcodec/iff: Check for overlap in cmap_read_palette()
|
||||
- avcodec/apedec: Fix 32bit int overflow in do_apply_filter()
|
||||
- lavf/rawenc: Only accept the appropriate stream type for raw muxers.
|
||||
- avformat/matroskadec: use av_fast_realloc to reallocate ebml list arrays
|
||||
- avformat/matroskadec: use proper types for some EbmlSyntax fields
|
||||
- avcodec/ralf: fix undefined shift in extend_code()
|
||||
- avcodec/ralf: fix undefined shift
|
||||
- avcodec/bgmc: Check input space in ff_bgmc_decode_init()
|
||||
- avcodec/vp3: Check for end of input in 2 places of vp4_unpack_macroblocks()
|
||||
- avcodec/truemotion2: Fix multiple integer overflows in tm2_null_res_block()
|
||||
- avcodec/vc1_block: Check the return code from vc1_decode_p_block()
|
||||
- avcodec/vc1dec: Require res_sprite for wmv3images
|
||||
- avcodec/vc1_block: Check for double escapes
|
||||
- avcodec/vorbisdec: Check get_vlc2() failure
|
||||
- avcodec/tta: Fix integer overflow in prediction
|
||||
- avcodec/vb: Check input packet size to be large enough to contain flags
|
||||
- avcodec/cavsdec: Limit the number of access units per packet to 2
|
||||
- avcodec/atrac9dec: Check block_align
|
||||
- avcodec/alac: Check for bps of 0
|
||||
- avcodec/alac: Fix multiple integer overflows in lpc_prediction()
|
||||
- avcodec/rl2: set dimensions
|
||||
- avcodec/aacdec: Add FF_CODEC_CAP_INIT_CLEANUP
|
||||
- avcodec/idcinvideo: Add 320x240 default maximum resolution
|
||||
- avformat/realtextdec: free queue on error
|
||||
- avcodec/vp5/6/8: use vpX_rac_is_end()
|
||||
- avformat/vividas: Check av_xiphlacing() return value before use
|
||||
- avcodec/alsdec: Fix integer overflow in decode_var_block_data()
|
||||
- avcodec/alsdec: Limit maximum channels to 512
|
||||
- avcodec/anm: Check input size for a frame with just a stop code
|
||||
- avcodec/flicvideo: Optimize and Simplify FLI_COPY in flic_decode_frame_24BPP() by using bytestream2_get_buffer()
|
||||
- avcodec/loco: Check left column value
|
||||
- avcodec/ffwavesynth: Fixes invalid shift with pink noise seeking
|
||||
- avcodec/ffwavesynth: Fix integer overflow for some corner case values
|
||||
- avcodec/indeo2: Check remaining input more often
|
||||
- avcodec/diracdec: Check that slices are fewer than pixels
|
||||
- avcodec/vp56: Consider the alpha start as end of the prior header
|
||||
- avcodec/4xm: Check for end of input in decode_p_block()
|
||||
- avcodec/hevcdec: Check delta_luma_weight_l0/1
|
||||
- avcodec/hnm4video: Optimize postprocess_current_frame()
|
||||
- avcodec/hevc_refs: Optimize 16bit generate_missing_ref()
|
||||
- avcodec/scpr: Use av_memcpy_backptr() in type 17 and 33
|
||||
- avcodec/tiff: Enforce increasing offsets
|
||||
- avcodec/dds: Use ff_set_dimensions()
|
||||
- avformat/vividas: Fix another infinite loop
|
||||
- avformat/vividas: Fix infinite loop in header parser
|
||||
- avcodec/mpc8: Fix 32bit mask/enum
|
||||
- avcodec/alsdec: Fix integer overflows of raw_samples in decode_var_block_data()
|
||||
- avcodec/alsdec: Fix integer overflow of raw_samples in decode_blocks()
|
||||
- avcodec/alsdec: fix mantisse shift
|
||||
- avcodec/pngdec: consider chunk size in minimal size check
|
||||
- avcodec/vc1_block: Fix invalid shifts in vc1_decode_i_blocks()
|
||||
- avcodec/vc1_block: fix invalid shift in vc1_decode_p_mb()
|
||||
- avcodec/aacdec_template: fix integer overflow in imdct_and_windowing()
|
||||
- avformat/mpegts: Check if ready on SCTE reception
|
||||
- avcodec/omx: fix xFramerate calculation
|
||||
- avformat/avidec: add support for recognizing HEVC fourcc when demuxing
|
||||
- avformat/mpegts: fix teletext PTS when selecting teletext streams only
|
||||
- avcodec/h2645_parse: zero initialize the rbsp buffer
|
||||
- avcodec/omx: Fix handling of fragmented buffers
|
||||
- avcodec/omx: ensure zerocopy mode can be disabled on rpi builds
|
||||
- avformat/mxfdec: do not ignore bad size errors
|
||||
- avformat/matroskadec: Fix seeking
|
||||
- ffplay: properly detect all window size changes
|
||||
|
||||
version 4.2:
|
||||
- tpad filter
|
||||
|
||||
LICENSE.md: 30 changed lines

@@ -21,11 +21,10 @@ Specifically, the GPL parts of FFmpeg are:
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/tests/swresample.c`
- `libswresample/swresample-test.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the following filters in libavfilter:
- `signature_lookup.c`
- `vf_blackframe.c`
- `vf_boxblur.c`
- `vf_colormatrix.c`
@@ -35,13 +34,13 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_eq.c`
- `vf_find_rect.c`
- `vf_fspp.c`
- `vf_geq.c`
- `vf_histeq.c`
- `vf_hqdn3d.c`
- `vf_interlace.c`
- `vf_kerndeint.c`
- `vf_lensfun.c` (GPL version 3 or later)
- `vf_mcdeint.c`
- `vf_mpdecimate.c`
- `vf_nnedi.c`
- `vf_owdenoise.c`
- `vf_perspective.c`
- `vf_phase.c`
@@ -50,14 +49,12 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_pullup.c`
- `vf_repeatfields.c`
- `vf_sab.c`
- `vf_signature.c`
- `vf_smartblur.c`
- `vf_spp.c`
- `vf_stereo3d.c`
- `vf_super2xsai.c`
- `vf_tinterlace.c`
- `vf_uspp.c`
- `vf_vaguedenoiser.c`
- `vsrc_mptestsrc.c`

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
@@ -83,39 +80,24 @@ affect the licensing of binaries resulting from the combination.

### Compatible libraries

The following libraries are under GPL version 2:
- avisynth
The following libraries are under GPL:
- frei0r
- libcdio
- libdavs2
- librubberband
- libvidstab
- libx264
- libx265
- libxavs
- libxavs2
- libxvid

When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.

The following libraries are under LGPL version 3:
- gmp
- libaribb24
- liblensfun

When combining them with FFmpeg, use the configure option `--enable-version3` to
upgrade FFmpeg to the LGPL v3.

The VMAF, mbedTLS, RK MPI, OpenCORE and VisualOn libraries are under the Apache License
2.0. That license is incompatible with the LGPL v2.1 and the GPL v2, but not with
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.

The smbclient library is under the GPL v3, to combine it with FFmpeg,
the options `--enable-gpl` and `--enable-version3` have to be passed to
configure to upgrade FFmpeg to the GPL v3.

### Incompatible libraries

There are certain libraries you can combine with FFmpeg whose licenses are not

MAINTAINERS: 77 changed lines

@@ -11,11 +11,17 @@ A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
patches and related discussions.
|
||||
|
||||
|
||||
Project Leader
|
||||
==============
|
||||
|
||||
final design decisions
|
||||
|
||||
|
||||
Applications
|
||||
============
|
||||
|
||||
ffmpeg:
|
||||
ffmpeg.c Michael Niedermayer, Anton Khirnov
|
||||
ffmpeg.c Michael Niedermayer
|
||||
|
||||
ffplay:
|
||||
ffplay.c Marton Balint
|
||||
@@ -34,8 +40,7 @@ Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
|
||||
project server day to day operations Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
project server emergencies Árpád Gereöffy, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
@@ -48,11 +53,11 @@ Communication
|
||||
website Deby Barbara Lepage
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
|
||||
Patchwork Andriy Gelman
|
||||
mailing lists Baptiste Coudurier
|
||||
Twitter Reynaldo H. Verdejo Pinochet
|
||||
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
|
||||
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
|
||||
Launchpad Timothy Gu
|
||||
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, rcombs, wm4
|
||||
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, Rodger Combs, wm4
|
||||
|
||||
|
||||
libavutil
|
||||
@@ -73,7 +78,6 @@ Other:
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
hwcontext_cuda* Timo Rothenpieler
|
||||
hwcontext_vulkan* Lynne
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
lzo Reimar Doeffinger
|
||||
@@ -84,7 +88,6 @@ Other:
|
||||
rational.c, rational.h Michael Niedermayer
|
||||
rc4 Reimar Doeffinger
|
||||
ripemd.c, ripemd.h James Almer
|
||||
tx* Lynne
|
||||
|
||||
|
||||
libavcodec
|
||||
@@ -110,6 +113,8 @@ Generic Parts:
|
||||
lzw.* Michael Niedermayer
|
||||
floating point AAN DCT:
|
||||
faandct.c, faandct.h Michael Niedermayer
|
||||
Non-power-of-two MDCT:
|
||||
mdct15.c, mdct15.h Rostislav Pehlivanov
|
||||
Golomb coding:
|
||||
golomb.c, golomb.h Michael Niedermayer
|
||||
motion estimation:
|
||||
@@ -131,15 +136,13 @@ Codecs:
|
||||
8bps.c Roberto Togni
|
||||
8svx.c Jaikrishnan Menon
|
||||
aacenc*, aaccoder.c Rostislav Pehlivanov
|
||||
adpcm.c Zane van Iperen
|
||||
alacenc.c Jaikrishnan Menon
|
||||
alsdec.c Thilo Borgmann, Umair Khan
|
||||
amfenc* Dmitrii Ovchinnikov
|
||||
aptx.c Aurelien Jacobs
|
||||
ass* Aurelien Jacobs
|
||||
asv* Michael Niedermayer
|
||||
atrac3plus* Maxim Poliakovski
|
||||
audiotoolbox* rcombs
|
||||
audiotoolbox* Rodger Combs
|
||||
avs2* Huiwen Ren
|
||||
bgmc.c, bgmc.h Thilo Borgmann
|
||||
binkaudio.c Peter Ross
|
||||
@@ -151,10 +154,10 @@ Codecs:
|
||||
ccaption_dec.c Anshul Maheshwari, Aman Gupta
|
||||
cljr Alex Beregszaszi
|
||||
cpia.c Stephan Hilb
|
||||
crystalhd.c Philip Langdale
|
||||
cscd.c Reimar Doeffinger
|
||||
cuviddec.c Timo Rothenpieler
|
||||
dca* foo86
|
||||
dfpwm* Jack Bruienne
|
||||
dirac* Rostislav Pehlivanov
|
||||
dnxhd* Baptiste Coudurier
|
||||
dolby_e* foo86
|
||||
@@ -187,17 +190,14 @@ Codecs:
|
||||
libcodec2.c Tomas Härdin
|
||||
libdirac* David Conrad
|
||||
libdavs2.c Huiwen Ren
|
||||
libjxl*.c, libjxl.h Leo Izen
|
||||
libgsm.c Michel Bardiaux
|
||||
libkvazaar.c Arttu Ylä-Outinen
|
||||
libopenh264enc.c Martin Storsjo, Linjie Fu
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libtheoraenc.c David Conrad
|
||||
libvorbis.c David Conrad
|
||||
libvpx* James Zern
|
||||
libxavs.c Stefan Gehrer
|
||||
libxavs2.c Huiwen Ren
|
||||
libzvbi-teletextdec.c Marton Balint
|
||||
lzo.h, lzo.c Reimar Doeffinger
|
||||
mdec.c Michael Niedermayer
|
||||
@@ -213,7 +213,6 @@ Codecs:
|
||||
msvideo1.c Mike Melanson
|
||||
nuv.c Reimar Doeffinger
|
||||
nvdec*, nvenc* Timo Rothenpieler
|
||||
omx.c Martin Storsjo, Aman Gupta
|
||||
opus* Rostislav Pehlivanov
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
@@ -221,7 +220,7 @@ Codecs:
|
||||
ptx.c Ivo van Poorten
|
||||
qcelp* Reynaldo H. Verdejo Pinochet
|
||||
qdm2.c, qdm2data.h Roberto Togni
|
||||
qsv* Mark Thompson, Zhong Li, Haihao Xiang
|
||||
qsv* Mark Thompson, Zhong Li
|
||||
qtrle.c Mike Melanson
|
||||
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
|
||||
resample2.c Michael Niedermayer
|
||||
@@ -231,6 +230,7 @@ Codecs:
|
||||
rv10.c Michael Niedermayer
|
||||
s3tc* Ivo van Poorten
|
||||
smc.c Mike Melanson
|
||||
smvjpegdec.c Ash Hughes
|
||||
snow* Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
speedhq.c Steinar H. Gunderson
|
||||
@@ -261,14 +261,16 @@ Codecs:
|
||||
xan.c Mike Melanson
|
||||
xbm* Paul B Mahol
|
||||
xface Stefano Sabatini
|
||||
xvmc.c Ivan Kalvachev
|
||||
xwd* Paul B Mahol
|
||||
|
||||
Hardware acceleration:
|
||||
crystalhd.c Philip Langdale
|
||||
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
|
||||
d3d11va* Steve Lhomme
|
||||
mediacodec* Matthieu Bouron, Aman Gupta
|
||||
vaapi* Haihao Xiang
|
||||
vaapi_encode* Mark Thompson, Haihao Xiang
|
||||
vaapi* Gwenole Beauchesne
|
||||
vaapi_encode* Mark Thompson
|
||||
vdpau* Philip Langdale, Carl Eugen Hoyos
|
||||
videotoolbox* Rick Kern, Aman Gupta
|
||||
|
||||
@@ -347,7 +349,6 @@ Filters:
|
||||
vf_il.c Paul B Mahol
|
||||
vf_(t)interlace Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_libplacebo.c Niklas Haas
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_mestimate.c Davinder Singh
|
||||
vf_minterpolate.c Davinder Singh
|
||||
@@ -367,8 +368,6 @@ Filters:
|
||||
Sources:
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
|
||||
dnn Yejun Guo
|
||||
|
||||
libavformat
|
||||
===========
|
||||
|
||||
@@ -387,13 +386,7 @@ Muxers/Demuxers:
|
||||
afc.c Paul B Mahol
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
alp.c Zane van Iperen
|
||||
amvenc.c Zane van Iperen
|
||||
apm.c Zane van Iperen
|
||||
apngdec.c Benoit Fouet
|
||||
argo_asf.c Zane van Iperen
|
||||
argo_brp.c Zane van Iperen
|
||||
argo_cvg.c Zane van Iperen
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
@@ -410,7 +403,6 @@ Muxers/Demuxers:
|
||||
dashdec.c Steven Liu
|
||||
dashenc.c Karthick Jeyapal
|
||||
daud.c Reimar Doeffinger
|
||||
dfpwmdec.c Jack Bruienne
|
||||
dss.c Oleksij Rempel
|
||||
dtsdec.c foo86
|
||||
dtshddec.c Paul B Mahol
|
||||
@@ -427,22 +419,19 @@ Muxers/Demuxers:
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
iff.c Jaikrishnan Menon
|
||||
imf* Pierre-Anthony Lemieux
|
||||
img2*.c Michael Niedermayer
|
||||
ipmovie.c Mike Melanson
|
||||
ircam* Paul B Mahol
|
||||
iss.c Stefan Gehrer
|
||||
jpegxl_probe.* Leo Izen
|
||||
jvdec.c Peter Ross
|
||||
kvag.c Zane van Iperen
|
||||
libmodplug.c Clément Bœsch
|
||||
libopenmpt.c Josh de Kock
|
||||
lmlm4.c Ivo van Poorten
|
||||
lvfdec.c Paul B Mahol
|
||||
lxfdec.c Tomas Härdin
|
||||
matroska.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskadec.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskaenc.c David Conrad, Andreas Rheinhardt
|
||||
matroska.c Aurelien Jacobs
|
||||
matroskadec.c Aurelien Jacobs
|
||||
matroskaenc.c David Conrad
|
||||
matroska subtitles (matroskaenc.c) John Peebles
|
||||
metadata* Aurelien Jacobs
|
||||
mgsts.c Paul B Mahol
|
||||
@@ -457,7 +446,7 @@ Muxers/Demuxers:
|
||||
mpegtsenc.c Baptiste Coudurier
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier, Tomas Härdin
|
||||
mxf* Baptiste Coudurier
|
||||
nistspheredec.c Paul B Mahol
|
||||
nsvdec.c Francois Revol
|
||||
nut* Michael Niedermayer
|
||||
@@ -465,9 +454,9 @@ Muxers/Demuxers:
|
||||
oggdec.c, oggdec.h David Conrad
|
||||
oggenc.c Baptiste Coudurier
|
||||
oggparse*.c David Conrad
|
||||
oggparsedaala* Rostislav Pehlivanov
|
||||
oma.c Maxim Poliakovski
|
||||
paf.c Paul B Mahol
|
||||
pp_bnk.c Zane van Iperen
|
||||
psxstr.c Mike Melanson
|
||||
pva.c Ivo van Poorten
|
||||
pvfdec.c Paul B Mahol
|
||||
@@ -512,9 +501,7 @@ Protocols:
|
||||
bluray.c Petri Hintukainen
|
||||
ftp.c Lukasz Marek
|
||||
http.c Ronald S. Bultje
|
||||
libsrt.c Zhao Zhili
|
||||
libssh.c Lukasz Marek
|
||||
libzmq.c Andriy Gelman
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
icecast.c Marvin Scholz
|
||||
@@ -539,7 +526,6 @@ Operating systems / CPU architectures
|
||||
|
||||
Alpha Falk Hueffner
|
||||
MIPS Manojkumar Bhosale, Shiyou Yin
|
||||
LoongArch Shiyou Yin
|
||||
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC Lauri Kasanen
|
||||
@@ -571,7 +557,6 @@ Joakim Plate
|
||||
Jun Zhao
|
||||
Kieran Kunhya
|
||||
Kirill Gavrilov
|
||||
Limin Wang
|
||||
Martin Storsjö
|
||||
Panagiotis Issaris
|
||||
Pedro Arthur
|
||||
@@ -610,23 +595,17 @@ Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
|
||||
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Leo Izen (thebombzen) B6FD 3CFC 7ACF 83FC 9137 6945 5A71 C331 FD2F A19A
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
|
||||
Lou Logan (llogan) 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Niklas Haas (haasn) 1DDB 8076 B14D 5B48 32FC 99D9 EB52 DA9C 02BA 6FB4
|
||||
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
|
||||
Pierre-Anthony Lemieux (pal) F4B3 9492 E6F2 E4AF AEC8 46CB 698F A1F0 F8D4 EED4
|
||||
Ramiro Polla 7859 C65B 751B 1179 792E DAE8 8E95 8B2F 9B6C 5700
|
||||
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
|
||||
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
|
||||
@@ -635,9 +614,7 @@ Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
|
||||
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
|
||||
Steinar H. Gunderson C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
|
||||
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
|
||||
Thilo Borgmann (thilo) CE1D B7F4 4D20 FC3A DD9F FE5A 257C 5B8F 1D20 B92F
|
||||
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
|
||||
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
|
||||
Tomas Härdin (thardin) A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
|
||||
Zane van Iperen (zane) 61AE D40F 368B 6F26 9DAE 3892 6861 6B2D 8AC4 DCC5
|
||||
|
||||
35 Makefile
@@ -13,19 +13,17 @@ vpath %.v $(SRC_PATH)
|
||||
vpath %.texi $(SRC_PATH)
|
||||
vpath %.cu $(SRC_PATH)
|
||||
vpath %.ptx $(SRC_PATH)
|
||||
vpath %.metal $(SRC_PATH)
|
||||
vpath %/fate_config.sh.template $(SRC_PATH)
|
||||
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
|
||||
|
||||
# $(FFLIBS-yes) needs to be in linking order
|
||||
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_AVFORMAT) += avformat
|
||||
FFLIBS-$(CONFIG_AVCODEC) += avcodec
|
||||
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
@@ -52,49 +50,32 @@ $(TOOLS): %$(EXESUF): %.o
|
||||
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
target_dem_%_fuzzer$(EXESUF): target_dem_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
|
||||
tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/scale_slice_test$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/scale_slice_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/target_dec_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/target_dem_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
|
||||
CONFIGURABLE_COMPONENTS = \
|
||||
$(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
|
||||
$(SRC_PATH)/libavcodec/bitstream_filters.c \
|
||||
$(SRC_PATH)/libavcodec/hwaccels.h \
|
||||
$(SRC_PATH)/libavcodec/parsers.c \
|
||||
$(SRC_PATH)/libavformat/protocols.c \
|
||||
|
||||
config_components.h: ffbuild/.config
|
||||
config.h: ffbuild/.config
|
||||
ffbuild/.config: $(CONFIGURABLE_COMPONENTS)
|
||||
@-tput bold 2>/dev/null
|
||||
@-printf '\nWARNING: $(?) newer than config_components.h, rerun configure\n\n'
|
||||
@-printf '\nWARNING: $(?) newer than config.h, rerun configure\n\n'
|
||||
@-tput sgr0 2>/dev/null
|
||||
|
||||
SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
|
||||
ALTIVEC-OBJS VSX-OBJS RVV-OBJS MMX-OBJS X86ASM-OBJS \
|
||||
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
|
||||
MMI-OBJS LSX-OBJS LASX-OBJS OBJS SLIBOBJS SHLIBOBJS \
|
||||
STLIBOBJS HOSTOBJS TESTOBJS
|
||||
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
@@ -116,13 +97,12 @@ include $(SRC_PATH)/fftools/Makefile
|
||||
include $(SRC_PATH)/doc/Makefile
|
||||
include $(SRC_PATH)/doc/examples/Makefile
|
||||
|
||||
$(ALLFFLIBS:%=lib%/version.o): libavutil/ffversion.h
|
||||
libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
ifeq ($(STRIPTYPE),direct)
|
||||
$(STRIP) -o $@ $<
|
||||
else
|
||||
$(RM) $@
|
||||
$(CP) $< $@
|
||||
$(STRIP) $@
|
||||
endif
|
||||
@@ -163,12 +143,11 @@ clean::
|
||||
$(RM) -rf coverage.info coverage.info.in lcov
|
||||
|
||||
distclean:: clean
|
||||
$(RM) .version config.asm config.h config_components.h mapfile \
|
||||
$(RM) .version avversion.h config.asm config.h mapfile \
|
||||
ffbuild/.config ffbuild/config.* libavutil/avconfig.h \
|
||||
version.h libavutil/ffversion.h libavcodec/codec_names.h \
|
||||
libavcodec/bsf_list.c libavformat/protocol_list.c \
|
||||
libavcodec/codec_list.c libavcodec/parser_list.c \
|
||||
libavfilter/filter_list.c libavdevice/indev_list.c libavdevice/outdev_list.c \
|
||||
libavformat/muxer_list.c libavformat/demuxer_list.c
|
||||
ifeq ($(SRC_LINK),src)
|
||||
$(RM) src
|
||||
|
||||
@@ -9,7 +9,7 @@ such as audio, video, subtitles and related metadata.
|
||||
* `libavcodec` provides implementation of a wider range of codecs.
|
||||
* `libavformat` implements streaming protocols, container formats and basic I/O access.
|
||||
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
|
||||
* `libavfilter` provides means to alter decoded audio and video through a directed graph of connected filters.
|
||||
* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
|
||||
* `libavdevice` provides an abstraction to access capture and playback devices.
|
||||
* `libswresample` implements audio mixing and resampling routines.
|
||||
* `libswscale` implements color conversion and scaling routines.
|
||||
|
||||
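For orientation, a minimal sketch (hypothetical example, not part of this tree) that links the libraries described above and prints their versions; the `<lib>_version()` helpers and `AV_VERSION_*` macros are public API:

#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

int main(void)
{
    /* Each library exposes a <name>_version() helper returning a packed
     * MAJOR.MINOR.MICRO integer that the AV_VERSION_* macros unpack. */
    unsigned u = avutil_version(), c = avcodec_version(), f = avformat_version();
    printf("libavutil   %u.%u.%u\n", AV_VERSION_MAJOR(u), AV_VERSION_MINOR(u), AV_VERSION_MICRO(u));
    printf("libavcodec  %u.%u.%u\n", AV_VERSION_MAJOR(c), AV_VERSION_MINOR(c), AV_VERSION_MICRO(c));
    printf("libavformat %u.%u.%u\n", AV_VERSION_MAJOR(f), AV_VERSION_MINOR(f), AV_VERSION_MICRO(f));
    return 0;
}

A build line such as `cc versions.c $(pkg-config --cflags --libs libavformat libavcodec libavutil)` is the usual way to pick up the right flags.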
@@ -1,15 +1,15 @@
|
||||
|
||||
┌────────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 6.0 "Von Neumann" │
|
||||
└────────────────────────────────────────────┘
|
||||
┌────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 4.2 "Ada" │
|
||||
└────────────────────────────────────┘
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 6.0 "Von Neumann", about 6
|
||||
months after the release of FFmpeg 5.1.
|
||||
The FFmpeg Project proudly presents FFmpeg 4.2 "Ada", about 8
|
||||
months after the release of FFmpeg 4.1.
|
||||
|
||||
A complete Changelog is available at the root of the project, and the
|
||||
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git
|
||||
|
||||
We hope you will like this release as much as we enjoyed working on it, and
|
||||
as usual, if you have any questions about it, or any FFmpeg related topic,
|
||||
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
|
||||
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
|
||||
on the mailing-lists.
|
||||
|
||||
@@ -96,7 +96,7 @@ do { \
|
||||
atomic_load(object)
|
||||
|
||||
#define atomic_exchange(object, desired) \
|
||||
InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
|
||||
InterlockedExchangePointer(object, desired);
|
||||
|
||||
#define atomic_exchange_explicit(object, desired, order) \
|
||||
atomic_exchange(object, desired)
|
||||
|
||||
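The hunk above appears to touch the win32 stdatomic compatibility shim; the macro it changes emulates C11 atomic_exchange() for pointer-sized objects. A minimal usage sketch of the semantics being emulated (hypothetical example, assuming a C11 <stdatomic.h> or this shim):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    static int a = 1, b = 2;
    _Atomic(void *) slot = (void *)&a;                /* the "object" */
    void *prev = atomic_exchange(&slot, (void *)&b);  /* store &b, return the old pointer */
    printf("prev -> %d, slot -> %d\n", *(int *)prev, *(int *)atomic_load(&slot));
    return 0;
}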
1264 compat/avisynth/avisynth_c.h (new file; diff suppressed because it is too large)
94 compat/avisynth/avs/capi.h (new file)
@@ -0,0 +1,94 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# if defined(GCC) && defined(X86_32)
|
||||
# define AVSC_CC
|
||||
# else // MSVC builds and 64-bit GCC
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# endif
|
||||
#else // needed for programs that talk to AviSynth+
|
||||
# ifndef AVSC_WIN32_GCC32 // see comment below
|
||||
# ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
# else
|
||||
# define AVSC_CC __stdcall
|
||||
# endif
|
||||
# else
|
||||
# define AVSC_CC
|
||||
# endif
|
||||
#endif
|
||||
|
||||
// On 64-bit Windows, there's only one calling convention,
|
||||
// so there is no difference between MSVC and GCC. On 32-bit,
|
||||
// this isn't true. The convention that GCC needs to use to
|
||||
// even build AviSynth+ as 32-bit makes anything that uses
|
||||
// it incompatible with 32-bit MSVC builds of AviSynth+.
|
||||
// The AVSC_WIN32_GCC32 define is meant to provide a user
|
||||
// switchable way to make builds of FFmpeg to test 32-bit
|
||||
// GCC builds of AviSynth+ without having to screw around
|
||||
// with alternate headers, while still defaulting to the usual
|
||||
// situation of using 32-bit MSVC builds of AviSynth+.
|
||||
|
||||
// Hopefully, this situation will eventually be resolved
|
||||
// and a broadly compatible solution will arise so the
|
||||
// same 32-bit FFmpeg build can handle either MSVC or GCC
|
||||
// builds of AviSynth+.
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT __declspec(dllexport)
|
||||
# define AVSC_API(ret, name) EXTERN_C AVSC_EXPORT ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
|
||||
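With AVSC_NO_DECLSPEC defined, every AVSC_API declaration above collapses into a `name##_func` function-pointer typedef, so a client can resolve the entry points from avisynth.dll at run time instead of linking an import library. A minimal sketch of that pattern (hypothetical client code; assumes Windows, an installed AviSynth+, and the header path used below):

#define AVSC_NO_DECLSPEC            /* request typedefs instead of dllimport declarations */
#include <windows.h>
#include <stdio.h>
#include "compat/avisynth/avisynth_c.h"

int main(void)
{
    HMODULE lib = LoadLibraryA("avisynth");
    if (!lib)
        return 1;
    /* AVSC_API(int, avs_get_version)(AVS_Clip *) expands to the
     * avs_get_version_func pointer type under AVSC_NO_DECLSPEC. */
    avs_get_version_func get_version =
        (avs_get_version_func)GetProcAddress(lib, "avs_get_version");
    printf("avs_get_version %s\n", get_version ? "resolved" : "missing");
    FreeLibrary(lib);
    return 0;
}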
70 compat/avisynth/avs/config.h (new file)
@@ -0,0 +1,70 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 64
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
# define MSVC
|
||||
#elif defined(__GNUC__)
|
||||
# define GCC
|
||||
#elif defined(__clang__)
|
||||
# define CLANG
|
||||
#else
|
||||
# error Unsupported compiler.
|
||||
#endif
|
||||
|
||||
#if defined(GCC)
|
||||
# undef __forceinline
|
||||
# define __forceinline inline
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
||||
57 compat/avisynth/avs/types.h (new file)
@@ -0,0 +1,57 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
#ifdef __cplusplus
|
||||
#include <cstddef>
|
||||
#else
|
||||
#include <stddef.h>
|
||||
#endif
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
||||
728 compat/avisynth/avxsynth_c.h (new file)
@@ -0,0 +1,728 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
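/* Worked example of the reduction above (hypothetical usage):
 *   avs_set_fps(&vi, 60000, 2002);
 * divides both terms by gcd(60000, 2002) = 2 and stores
 * fps_numerator = 30000, fps_denominator = 1001 (NTSC 29.97 fps). */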
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
unsigned char * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
int refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
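/* Sketch of the usual write path built on the helpers above (hypothetical
 * client code, not part of this header): ensure the frame is exclusively
 * owned, then fetch a write pointer and fill the luma plane.
 * avs_make_writable() is declared further below. */
static void example_blank_luma(AVS_ScriptEnvironment * env, AVS_VideoFrame ** pframe)
{
    unsigned char * dst;
    int pitch, row_size, height, x, y;
    avs_make_writable(env, pframe);          /* copies the frame if it is shared */
    dst      = avs_get_write_ptr_p(*pframe, AVS_PLANAR_Y);
    pitch    = avs_get_pitch_p(*pframe, AVS_PLANAR_Y);
    row_size = avs_get_row_size_p(*pframe, AVS_PLANAR_Y);
    height   = avs_get_height_p(*pframe, AVS_PLANAR_Y);
    for (y = 0; y < height; y++)
        for (x = 0; x < row_size; x++)
            dst[y * pitch + x] = 16;         /* limited-range black */
}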
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is, use avs_copy_value
// and avs_release_value appropriately, as you would if AVS_Value were
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API don't use the AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is laid out identically to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// for some function e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
INT64 integer64; // match addition of __int64 to avxplugin.h
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initialized with avs_void.
// It should also be set to avs_void after the value is released
// with avs_copy_value. Consider it the equivalent of setting
// a pointer to NULL.
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on an AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
|
||||
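/* Usage sketch for the "fat pointer" convention described above
 * (hypothetical client code, not part of this header; assumes the
 * default linked declarations rather than AVSC_NO_DECLSPEC): */
static void example_value_lifetime(void)
{
    AVS_Value s, copy;
    s = avs_new_value_string("hello");   /* plain value, no clip reference behind it */
    avs_copy_value(&copy, s);            /* take an independent reference (matters for clips) */
    avs_release_value(copy);             /* each copied or returned value is released exactly once */
    copy = avs_void;                     /* then treated like a pointer reset to NULL */
}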
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set whenever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter
|
||||
// fi is set to point to the AVS_FilterInfo so that you can
|
||||
// modify it once it is initialized.
|
||||
// store_child should generally be set to true. If it is not
// set, then ALL methods (the function pointers) must be defined.
// If it is set, then you do not need to worry about freeing the child
// clip.
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
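/* Minimal plugin sketch tying avs_add_function() and avs_new_c_filter()
 * together (hypothetical example code in a separate source file that
 * includes this header; not part of the header itself): */
static AVS_Value AVSC_CC example_create(AVS_ScriptEnvironment * env,
                                        AVS_Value args, void * user_data)
{
    AVS_FilterInfo * fi;
    AVS_Clip * clip;
    AVS_Value out;
    /* store_child = 1, so the child clip is retained and freed for us */
    clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    /* a real filter would override fi->get_frame / fi->get_audio here */
    out = avs_new_value_clip(clip);
    avs_release_clip(clip);
    return out;
}

AVSC_EXPORT
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment * env)
{
    avs_add_function(env, "ExamplePass", "c", example_create, 0);
    return "ExamplePass sample plugin";
}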
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
||||
85 compat/avisynth/windowsPorts/basicDataTypeConversions.h (new file)
@@ -0,0 +1,85 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
||||
77 compat/avisynth/windowsPorts/windows2linux.h (new file)
@@ -0,0 +1,77 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
|
||||
#define MAKEWORD(a,b) (((a) << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
||||
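A brief illustration of the argument-order note above (not part of the patch; values are made up): Windows' InterlockedCompareExchange() takes (destination, exchange, comparand), while GCC's __sync_val_compare_and_swap() takes (destination, comparand, exchange), which is why the macro swaps its last two arguments.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int32_t value = 5;
        /* What InterlockedCompareExchange(&value, 9, 5) expands to via the macro:
         * compare against 5, store 9 on a match, return the previous value. */
        int32_t prev = __sync_val_compare_and_swap(&value, 5, 9);
        printf("prev=%d value=%d\n", prev, value); /* prints prev=5 value=9 */
        return 0;
    }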
@@ -1,7 +1,7 @@
|
||||
/*
|
||||
* Minimum CUDA compatibility definitions header
|
||||
*
|
||||
* Copyright (c) 2019 rcombs
|
||||
* Copyright (c) 2019 Rodger Combs
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@@ -49,16 +49,6 @@ typedef struct __device_builtin__ __align__(4) ushort2
|
||||
unsigned short x, y;
|
||||
} ushort2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) float2
|
||||
{
|
||||
float x, y;
|
||||
} float2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) int2
|
||||
{
|
||||
int x, y;
|
||||
} int2;
|
||||
|
||||
typedef struct __device_builtin__ uint3
|
||||
{
|
||||
unsigned int x, y, z;
|
||||
@@ -66,6 +56,11 @@ typedef struct __device_builtin__ uint3
|
||||
|
||||
typedef struct uint3 dim3;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) int2
|
||||
{
|
||||
int x, y;
|
||||
} int2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) uchar4
|
||||
{
|
||||
unsigned char x, y, z, w;
|
||||
@@ -73,7 +68,7 @@ typedef struct __device_builtin__ __align__(4) uchar4
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) ushort4
|
||||
{
|
||||
unsigned short x, y, z, w;
|
||||
unsigned char x, y, z, w;
|
||||
} ushort4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) int4
|
||||
@@ -81,11 +76,6 @@ typedef struct __device_builtin__ __align__(16) int4
|
||||
int x, y, z, w;
|
||||
} int4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) float4
|
||||
{
|
||||
float x, y, z, w;
|
||||
} float4;
|
||||
|
||||
// Accessors for special registers
|
||||
#define GETCOMP(reg, comp) \
|
||||
asm("mov.u32 %0, %%" #reg "." #comp ";" : "=r"(tmp)); \
|
||||
@@ -110,31 +100,24 @@ GET(getThreadIdx, tid)
|
||||
#define threadIdx (getThreadIdx())
|
||||
|
||||
// Basic initializers (simple macros rather than inline functions)
|
||||
#define make_int2(a, b) ((int2){.x = a, .y = b})
|
||||
#define make_uchar2(a, b) ((uchar2){.x = a, .y = b})
|
||||
#define make_ushort2(a, b) ((ushort2){.x = a, .y = b})
|
||||
#define make_float2(a, b) ((float2){.x = a, .y = b})
|
||||
#define make_int4(a, b, c, d) ((int4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_uchar4(a, b, c, d) ((uchar4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_ushort4(a, b, c, d) ((ushort4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_float4(a, b, c, d) ((float4){.x = a, .y = b, .z = c, .w = d})
|
||||
|
||||
// Conversions from the tex instruction's 4-register output to various types
|
||||
#define TEX2D(type, ret) static inline __device__ void conv(type* out, unsigned a, unsigned b, unsigned c, unsigned d) {*out = (ret);}
|
||||
|
||||
TEX2D(unsigned char, a & 0xFF)
|
||||
TEX2D(unsigned short, a & 0xFFFF)
|
||||
TEX2D(float, a)
|
||||
TEX2D(uchar2, make_uchar2(a & 0xFF, b & 0xFF))
|
||||
TEX2D(ushort2, make_ushort2(a & 0xFFFF, b & 0xFFFF))
|
||||
TEX2D(float2, make_float2(a, b))
|
||||
TEX2D(uchar4, make_uchar4(a & 0xFF, b & 0xFF, c & 0xFF, d & 0xFF))
|
||||
TEX2D(ushort4, make_ushort4(a & 0xFFFF, b & 0xFFFF, c & 0xFFFF, d & 0xFFFF))
|
||||
TEX2D(float4, make_float4(a, b, c, d))
|
||||
|
||||
// Template calling tex instruction and converting the output to the selected type
|
||||
template<typename T>
|
||||
inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
template <class T>
|
||||
static inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
T ret;
|
||||
unsigned ret1, ret2, ret3, ret4;
|
||||
@@ -145,48 +128,4 @@ inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float4 tex2D<float4>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret;
|
||||
asm("tex.2d.v4.f32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float tex2D<float>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
return tex2D<float4>(texObject, x, y).x;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float2 tex2D<float2>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret = tex2D<float4>(texObject, x, y);
|
||||
return make_float2(ret.x, ret.y);
|
||||
}
|
||||
|
||||
// Math helper functions
|
||||
static inline __device__ float floorf(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ float floor(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ double floor(double a) { return __builtin_floor(a); }
|
||||
static inline __device__ float ceilf(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ float ceil(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ double ceil(double a) { return __builtin_ceil(a); }
|
||||
static inline __device__ float truncf(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ float trunc(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ double trunc(double a) { return __builtin_trunc(a); }
|
||||
static inline __device__ float fabsf(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ float fabs(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ double fabs(double a) { return __builtin_fabs(a); }
|
||||
static inline __device__ float sqrtf(float a) { return __builtin_sqrtf(a); }
|
||||
|
||||
static inline __device__ float __saturatef(float a) { return __nvvm_saturate_f(a); }
|
||||
static inline __device__ float __sinf(float a) { return __nvvm_sin_approx_f(a); }
|
||||
static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a); }
|
||||
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
|
||||
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }
|
||||
|
||||
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
|
||||
|
||||
36
compat/cuda/ptx2c.sh
Executable file
@@ -0,0 +1,36 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
# DEALINGS IN THE SOFTWARE.
|
||||
|
||||
set -e
|
||||
|
||||
OUT="$1"
|
||||
IN="$2"
|
||||
NAME="$(basename "$IN" | sed 's/\..*//')"
|
||||
|
||||
printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
|
||||
while IFS= read -r LINE
|
||||
do
|
||||
printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
|
||||
done < "$IN"
|
||||
printf ";\n" >> "$OUT"
|
||||
|
||||
exit 0
|
||||
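For orientation, a sketch of what this script emits; the input name and PTX lines are invented. Each line of the .ptx input becomes an escaped C string literal, so the compiled PTX can be embedded directly in the binary.

    /* hypothetical output of: ptx2c.sh vf_scale_cuda.ptx.c vf_scale_cuda.ptx */
    const char vf_scale_cuda_ptx[] = \
    	".version 6.0\n"
    	".target sm_30\n";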
@@ -59,7 +59,7 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
|
||||
* recommends to provide _snprintf/_vsnprintf() a buffer size that
|
||||
* is one less than the actual buffer, and zero it before calling
|
||||
* _snprintf/_vsnprintf() to workaround this problem.
|
||||
* See https://web.archive.org/web/20151214111935/http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
memset(s, 0, n);
|
||||
va_copy(ap_copy, ap);
|
||||
ret = _vsnprintf(s, n - 1, fmt, ap_copy);
|
||||
|
||||
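A minimal sketch of the calling pattern the comment above describes (MSVC-only; the buffer size and string are illustrative): pre-zeroing the buffer and passing one byte less guarantees NUL termination even when _snprintf/_vsnprintf fills the buffer exactly.

    #include <stdio.h>
    #include <string.h>

    static void snprintf_workaround_example(void)
    {
        char buf[8];
        memset(buf, 0, sizeof(buf));                          /* pre-zero the buffer */
        _snprintf(buf, sizeof(buf) - 1, "%s", "0123456789");
        /* buf now holds "0123456" with a terminating NUL despite the truncation */
    }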
@@ -27,19 +27,15 @@
|
||||
#define COMPAT_OS2THREADS_H
|
||||
|
||||
#define INCL_DOS
|
||||
#define INCL_DOSERRORS
|
||||
#include <os2.h>
|
||||
|
||||
#undef __STRICT_ANSI__ /* for _beginthread() */
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include <sys/builtin.h>
|
||||
#include <sys/fmutex.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct {
|
||||
TID tid;
|
||||
@@ -167,28 +163,6 @@ static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_timedwait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
ULONG t = av_clip64(abs_milli - av_gettime() / 1000, 0, ULONG_MAX);
|
||||
|
||||
__atomic_increment(&cond->wait_count);
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
APIRET ret = DosWaitEventSem(cond->event_sem, t);
|
||||
|
||||
__atomic_decrement(&cond->wait_count);
|
||||
|
||||
DosPostEventSem(cond->ack_sem);
|
||||
|
||||
pthread_mutex_lock(mutex);
|
||||
|
||||
return (ret == ERROR_TIMEOUT) ? ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex)
|
||||
{
|
||||
|
||||
@@ -20,40 +20,11 @@
|
||||
#define COMPAT_W32DLFCN_H
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <stdint.h>
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "libavutil/macros.h"
|
||||
#if (_WIN32_WINNT < 0x0602) || HAVE_WINRT
|
||||
#include "libavutil/wchar_filename.h"
|
||||
|
||||
static inline wchar_t *get_module_filename(HMODULE module)
|
||||
{
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD path_size = 0, path_len;
|
||||
|
||||
do {
|
||||
path_size = path_size ? FFMIN(2 * path_size, INT16_MAX + 1) : MAX_PATH;
|
||||
new_path = av_realloc_array(path, path_size, sizeof *path);
|
||||
if (!new_path) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
path = new_path;
|
||||
// Returns path_size in case of insufficient buffer.
|
||||
// Whether the error is set or not and whether the output
|
||||
// is null-terminated or not depends on the version of Windows.
|
||||
path_len = GetModuleFileNameW(module, path, path_size);
|
||||
} while (path_len && path_size <= INT16_MAX && path_size <= path_len);
|
||||
|
||||
if (!path_len) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
#endif
|
||||
/**
|
||||
* Safe function used to open dynamic libs. This attempts to improve program security
|
||||
* by removing the current directory from the dll search path. Only dll's found in the
|
||||
@@ -63,53 +34,29 @@ static inline wchar_t *get_module_filename(HMODULE module)
|
||||
*/
|
||||
static inline HMODULE win32_dlopen(const char *name)
|
||||
{
|
||||
wchar_t *name_w;
|
||||
HMODULE module = NULL;
|
||||
if (utf8towchar(name, &name_w))
|
||||
name_w = NULL;
|
||||
#if _WIN32_WINNT < 0x0602
|
||||
// On Win7 and earlier we check if KB2533623 is available
|
||||
// Need to check if KB2533623 is available
|
||||
if (!GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetDefaultDllDirectories")) {
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD pathlen, pathsize, namelen;
|
||||
if (!name_w)
|
||||
HMODULE module = NULL;
|
||||
wchar_t *path = NULL, *name_w = NULL;
|
||||
DWORD pathlen;
|
||||
if (utf8towchar(name, &name_w))
|
||||
goto exit;
|
||||
namelen = wcslen(name_w);
|
||||
path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t));
|
||||
// Try local directory first
|
||||
path = get_module_filename(NULL);
|
||||
if (!path)
|
||||
pathlen = GetModuleFileNameW(NULL, path, MAX_PATH);
|
||||
pathlen = wcsrchr(path, '\\') - path;
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
new_path = wcsrchr(path, '\\');
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
pathlen = new_path - path;
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
if (module == NULL) {
|
||||
// Next try System32 directory
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
pathlen = GetSystemDirectoryW(path, MAX_PATH);
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
// Buffer is not enough in two cases:
|
||||
// 1. system directory + \ + module name
|
||||
// 2. system directory even without the module name.
|
||||
if (pathlen + namelen + 2 > pathsize) {
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
// Query again to handle the case #2.
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
goto exit;
|
||||
}
|
||||
path[pathlen] = L'\\';
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
}
|
||||
@@ -126,19 +73,16 @@ exit:
|
||||
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
|
||||
#endif
|
||||
#if HAVE_WINRT
|
||||
if (!name_w)
|
||||
wchar_t *name_w = NULL;
|
||||
int ret;
|
||||
if (utf8towchar(name, &name_w))
|
||||
return NULL;
|
||||
module = LoadPackagedLibrary(name_w, 0);
|
||||
#else
|
||||
#define LOAD_FLAGS (LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32)
|
||||
/* filename may be in CP_ACP */
|
||||
if (!name_w)
|
||||
return LoadLibraryExA(name, NULL, LOAD_FLAGS);
|
||||
module = LoadLibraryExW(name_w, NULL, LOAD_FLAGS);
|
||||
#undef LOAD_FLAGS
|
||||
#endif
|
||||
ret = LoadPackagedLibrary(name_w, 0);
|
||||
av_free(name_w);
|
||||
return module;
|
||||
return ret;
|
||||
#else
|
||||
return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32);
|
||||
#endif
|
||||
}
|
||||
#define dlopen(name, flags) win32_dlopen(name)
|
||||
#define dlclose FreeLibrary
|
||||
|
||||
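A hedged usage sketch (not from the patch): callers inside FFmpeg use the POSIX-style names, which the macros above route to win32_dlopen()/FreeLibrary on Windows; the dlsym mapping is assumed to live elsewhere in the same header, and "somelib.dll" is illustrative.

    #include "compat/w32dlfcn.h"

    static int load_example(void)
    {
        /* only the application directory and System32 are searched,
         * thanks to win32_dlopen() above */
        HMODULE lib = dlopen("somelib.dll", 0);
        if (!lib)
            return -1;
        /* ... resolve symbols (e.g. via dlsym/GetProcAddress) and use them ... */
        dlclose(lib);
        return 0;
    }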
@@ -38,13 +38,11 @@
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
@@ -63,9 +61,6 @@ typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
|
||||
#define PTHREAD_CANCEL_ENABLE 1
|
||||
#define PTHREAD_CANCEL_DISABLE 0
|
||||
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = (pthread_t*)arg;
|
||||
@@ -161,31 +156,10 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
|
||||
|
||||
if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
|
||||
DWORD err = GetLastError();
|
||||
if (err == ERROR_TIMEOUT)
|
||||
return ETIMEDOUT;
|
||||
else
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_setcancelstate(int state, int *oldstate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* COMPAT_W32PTHREADS_H */
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ "$1" = "--version" ]; then
|
||||
rc.exe -?
|
||||
exit $?
|
||||
fi
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: mswindres [-I/include/path ...] [-DSOME_DEFINE ...] [-o output.o] input.rc [output.o]" >&2
|
||||
exit 0
|
||||
fi
|
||||
|
||||
EXTRA_OPTS="-nologo"
|
||||
|
||||
while [ $# -gt 2 ]; do
|
||||
case $1 in
|
||||
-D*) EXTRA_OPTS="$EXTRA_OPTS -d$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-I*) EXTRA_OPTS="$EXTRA_OPTS -i$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-o) OPT_OUT="$2"; shift ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
IN="$1"
|
||||
if [ -z "$OPT_OUT" ]; then
|
||||
OUT="$2"
|
||||
else
|
||||
OUT="$OPT_OUT"
|
||||
fi
|
||||
|
||||
eval set -- $EXTRA_OPTS
|
||||
rc.exe "$@" -fo "$OUT" "$IN"
|
||||
675
doc/APIchanges
@@ -1,665 +1,20 @@
|
||||
The last version increases of all libraries were on 2023-02-09
|
||||
Never assume the API of libav* to be stable unless at least 1 month has passed
|
||||
since the last major version increase or the API was added.
|
||||
|
||||
The last version increases were:
|
||||
libavcodec: 2017-10-21
|
||||
libavdevice: 2017-10-21
|
||||
libavfilter: 2017-10-21
|
||||
libavformat: 2017-10-21
|
||||
libavresample: 2017-10-21
|
||||
libpostproc: 2017-10-21
|
||||
libswresample: 2017-10-21
|
||||
libswscale: 2017-10-21
|
||||
libavutil: 2017-10-21
|
||||
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 6.0 was cut here -------- 8< ---------
|
||||
|
||||
2023-02-16 - 927042b409 - lavf 60.2.100 - avformat.h
|
||||
Deprecate AVFormatContext io_close callback.
|
||||
The superior io_close2 callback should be used instead.
|
||||
|
||||
2023-02-13 - 2296078397 - lavu 58.1.100 - frame.h
|
||||
Deprecate AVFrame.coded_picture_number and display_picture_number.
|
||||
Their usefulness is questionable and very few decoders set them.
|
||||
|
||||
2023-02-13 - 6b6f7db819 - lavc 60.2.100 - avcodec.h
|
||||
Add AVCodecContext.frame_num as a 64bit version of frame_number.
|
||||
Deprecate AVCodecContext.frame_number.
|
||||
|
||||
2023-02-12 - d1b9a3ddb4 - lavfi 9.1.100 - avfilter.h
|
||||
Add filtergraph segment parsing API.
|
||||
New structs:
|
||||
- AVFilterGraphSegment
|
||||
- AVFilterChain
|
||||
- AVFilterParams
|
||||
- AVFilterPadParams
|
||||
New functions:
|
||||
- avfilter_graph_segment_parse()
|
||||
- avfilter_graph_segment_create_filters()
|
||||
- avfilter_graph_segment_apply_opts()
|
||||
- avfilter_graph_segment_init()
|
||||
- avfilter_graph_segment_link()
|
||||
- avfilter_graph_segment_apply()
|
||||
|
||||
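A rough sketch of how the new segment API chains together; the signatures are recalled from avfilter.h and may differ slightly, and the graph string is arbitrary.

    #include "libavfilter/avfilter.h"

    static int build_graph(AVFilterGraph *graph)
    {
        AVFilterGraphSegment *seg = NULL;
        AVFilterInOut *inputs = NULL, *outputs = NULL;
        int ret;

        ret = avfilter_graph_segment_parse(graph, "scale=640:480,format=yuv420p", 0, &seg);
        if (ret >= 0)
            /* creates the filters, applies options, initializes and links them */
            ret = avfilter_graph_segment_apply(seg, 0, &inputs, &outputs);

        avfilter_graph_segment_free(&seg);
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }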
2023-02-09 - 719a93f4e4 - lavu 58.0.100 - csp.h
|
||||
Add av_csp_approximate_trc_gamma() and av_csp_trc_func_from_id().
|
||||
Add av_csp_trc_function.
|
||||
|
||||
2023-02-09 - 868a31b42d - lavc 60.0.100 - avcodec.h
|
||||
avcodec_decode_subtitle2() now accepts const AVPacket*.
|
||||
|
||||
2023-02-04 - d02340b9e3 - lavc 59.63.100
|
||||
Allow AV_CODEC_FLAG_COPY_OPAQUE to be used with decoders.
|
||||
|
||||
2023-01-29 - a1a80f2e64 - lavc 59.59.100 - avcodec.h
|
||||
Add AV_CODEC_FLAG_COPY_OPAQUE and AV_CODEC_FLAG_FRAME_DURATION.
|
||||
|
||||
2023-01-13 - 002d0ec740 - lavu 57.44.100 - ambient_viewing_environment.h frame.h
|
||||
Adds a new structure for holding H.274 Ambient Viewing Environment metadata,
|
||||
AVAmbientViewingEnvironment.
|
||||
Adds a new AVFrameSideDataType entry AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT
|
||||
for it.
|
||||
|
||||
2022-12-10 - 7a8d78f7e3 - lavc 59.55.100 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_UNSAFE_OUTPUT.
|
||||
|
||||
2022-11-24 - e97368eba5 - lavu 57.43.100 - tx.h
|
||||
Add AV_TX_FLOAT_DCT, AV_TX_DOUBLE_DCT and AV_TX_INT32_DCT.
|
||||
|
||||
2022-11-06 - 9dad237928 - lavu 57.42.100 - dict.h
|
||||
Add av_dict_iterate().
|
||||
|
||||
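For context, a small sketch of the intended iteration pattern (assumed from the dict.h documentation):

    #include <stdio.h>
    #include "libavutil/dict.h"

    static void dump_dict(const AVDictionary *m)
    {
        const AVDictionaryEntry *e = NULL;
        /* pass the previous entry back in to advance; NULL starts the walk */
        while ((e = av_dict_iterate(m, e)))
            printf("%s=%s\n", e->key, e->value);
    }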
2022-11-03 - 6228ba141d - lavu 57.41.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_7POINT1_TOP_BACK and AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK.
|
||||
|
||||
2022-10-30 - 83e918de71 - lavu 57.40.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_CUBE and AV_CHANNEL_LAYOUT_CUBE.
|
||||
|
||||
2022-10-11 - 479747645f - lavu 57.39.101 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGBF32 and AV_PIX_FMT_RGBAF32.
|
||||
|
||||
2022-10-05 - 37d5ddc317 - lavu 57.39.100 - cpu.h
|
||||
Add AV_CPU_FLAG_RVB_BASIC.
|
||||
|
||||
2022-10-03 - d09776d486 - lavf 59.34.100 - avio.h
|
||||
Make AVIODirContext an opaque type in a future major version bump.
|
||||
|
||||
2022-09-27 - 0c0a3deb18 - lavu 57.38.100 - cpu.h
|
||||
Add CPU flags for RISC-V vector extensions:
|
||||
AV_CPU_FLAG_RVV_I32, AV_CPU_FLAG_RVV_F32, AV_CPU_FLAG_RVV_I64,
|
||||
AV_CPU_FLAG_RVV_F64
|
||||
|
||||
2022-09-26 - a02a0e8db4 - lavc 59.48.100 - avcodec.h
|
||||
Deprecate avcodec_enum_to_chroma_pos() and avcodec_chroma_pos_to_enum().
|
||||
Use av_chroma_location_enum_to_pos() or av_chroma_location_pos_to_enum()
|
||||
instead.
|
||||
|
||||
2022-09-26 - xxxxxxxxxx - lavu 57.37.100 - pixdesc.h pixfmt.h
|
||||
Add av_chroma_location_enum_to_pos() and av_chroma_location_pos_to_enum().
|
||||
Add AV_PIX_FMT_RGBF32BE, AV_PIX_FMT_RGBF32LE, AV_PIX_FMT_RGBAF32BE,
|
||||
AV_PIX_FMT_RGBAF32LE.
|
||||
|
||||
2022-09-26 - cf856d8957 - lavc 59.47.100 - avcodec.h defs.h
|
||||
Move the AV_EF_* and FF_COMPLIANCE_* defines from avcodec.h to defs.h.
|
||||
|
||||
2022-09-03 - d75c4693fe - lavu 57.36.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_P012, AV_PIX_FMT_Y212, AV_PIX_FMT_XV30, AV_PIX_FMT_XV36
|
||||
|
||||
2022-09-03 - dea9744560 - lavu 57.35.100 - file.h
|
||||
Deprecate av_tempfile() without replacement.
|
||||
|
||||
2022-08-03 - cc5a5c9860 - lavu 57.34.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_VUYX.
|
||||
|
||||
2022-08-22 - 14726571dd - lavf 59 - avformat.h
|
||||
Deprecate av_stream_get_end_pts() without replacement.
|
||||
|
||||
2022-08-19 - 352799dca8 - lavc 59.42.102 - codec_id.h
|
||||
Deprecate AV_CODEC_ID_AYUV and ayuv decoder/encoder. The rawvideo codec
|
||||
and vuya pixel format combination will be used instead from now on.
|
||||
|
||||
2022-08-07 - e95b08a7dd - lavu 57.33.101 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGBAF16{BE,LE} pixel formats.
|
||||
|
||||
2022-08-12 - e0bbdbe0a6 - lavu 57.33.100 - hwcontext_qsv.h
|
||||
Add loader field to AVQSVDeviceContext
|
||||
|
||||
2022-08-03 - 6ab8a9d375 - lavu 57.32.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_VUYA.
|
||||
|
||||
2022-08-02 - e3838b856f - lavc 59.41.100 - avcodec.h codec.h
|
||||
Add AV_CODEC_FLAG_RECON_FRAME and AV_CODEC_CAP_ENCODER_RECON_FRAME.
|
||||
avcodec_receive_frame() may now be used on encoders when
|
||||
AV_CODEC_FLAG_RECON_FRAME is active.
|
||||
|
||||
2022-08-02 - eede1d2927 - lavu 57.31.100 - frame.h
|
||||
av_frame_make_writable() may now be called on non-refcounted
|
||||
frames and will make a refcounted copy out of them.
|
||||
Previously an error was returned in such cases.
|
||||
|
||||
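Illustrative sketch of the behaviour change (not authoritative): a frame whose buffers are not reference-counted no longer causes an error; the call now produces a refcounted, writable copy.

    #include "libavutil/frame.h"

    static int ensure_writable(AVFrame *frame)
    {
        /* with lavu >= 57.31.100 this also succeeds for non-refcounted frames,
         * copying the data into refcounted buffers instead of failing */
        return av_frame_make_writable(frame);
    }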
2022-07-30 - e1a0f2df3d - lavc 59.40.100 - avcodec.h
|
||||
Add the AV_CODEC_FLAG2_ICC_PROFILES flag to AVCodecContext, to enable
|
||||
automatic reading and writing of embedded ICC profiles in image files.
|
||||
The "flags2" option now supports the corresponding flag "icc_profiles".
|
||||
|
||||
2022-07-19 - 4397f9a5a0 - lavu 57.30.100 - frame.h
|
||||
Add AVFrame.duration, deprecate AVFrame.pkt_duration.
|
||||
|
||||
-------- 8< --------- FFmpeg 5.1 was cut here -------- 8< ---------
|
||||
|
||||
2022-06-12 - 7cae3d8b76 - lavf 59.25.100 - avio.h
|
||||
Add avio_vprintf(), similar to avio_printf() but allowing it to be used
|
||||
from within a function taking a variable argument list as input.
|
||||
|
||||
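A sketch of the intended use (the wrapper name is invented): forwarding a variable argument list from a printf-style helper to an AVIOContext.

    #include <stdarg.h>
    #include "libavformat/avio.h"

    static int log_to_avio(AVIOContext *ctx, const char *fmt, ...)
    {
        va_list ap;
        int ret;
        va_start(ap, fmt);
        ret = avio_vprintf(ctx, fmt, ap);   /* the new va_list variant */
        va_end(ap);
        return ret;
    }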
2022-06-12 - ff59ecc4de - lavu 57.27.100 - uuid.h
|
||||
Add UUID handling functions.
|
||||
Add av_uuid_parse(), av_uuid_urn_parse(), av_uuid_parse_range(),
|
||||
av_uuid_parse_range(), av_uuid_equal(), av_uuid_copy(), and av_uuid_nil().
|
||||
|
||||
2022-06-01 - d42b410e05 - lavu 57.26.100 - csp.h
|
||||
Add public API for colorspace structs.
|
||||
Add av_csp_luma_coeffs_from_avcsp(), av_csp_primaries_desc_from_id(),
|
||||
and av_csp_primaries_id_from_desc().
|
||||
|
||||
2022-05-23 - 4cdc14aa95 - lavu 57.25.100 - avutil.h
|
||||
Deprecate av_fopen_utf8() without replacement.
|
||||
|
||||
2022-03-16 - f3a0e2ee2b - all libraries - version_major.h
|
||||
Add lib<name>/version_major.h as new installed headers, which only
|
||||
contain the major version number (and corresponding API deprecation
|
||||
defines).
|
||||
|
||||
2022-03-15 - cdba98bb80 - swr 4.5.100 - swresample.h
|
||||
Add swr_alloc_set_opts2() and swr_build_matrix2().
|
||||
Deprecate swr_alloc_set_opts() and swr_build_matrix().
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavfi 8.28.100 - avfilter.h buffersink.h buffersrc.h
|
||||
Update AVFilterLink for the new channel layout API: add ch_layout,
|
||||
deprecate channel_layout.
|
||||
|
||||
Update the buffersink filter sink for the new channel layout API:
|
||||
add av_buffersink_get_ch_layout() and the ch_layouts option,
|
||||
deprecate av_buffersink_get_channel_layout() and the channel_layouts option.
|
||||
|
||||
Update AVBufferSrcParameters for the new channel layout API:
|
||||
add ch_layout, deprecate channel_layout.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavf 59.19.100 - avformat.h
|
||||
Add AV_DISPOSITION_NON_DIEGETIC.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavc 59.24.100 - avcodec.h codec_par.h
|
||||
Update AVCodecParameters for the new channel layout API: add ch_layout,
|
||||
deprecate channels/channel_layout.
|
||||
|
||||
Update AVCodecContext for the new channel layout API: add ch_layout,
|
||||
deprecate channels/channel_layout.
|
||||
|
||||
Update AVCodec for the new channel layout API: add ch_layouts,
|
||||
deprecate channel_layouts.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavu 57.24.100 - channel_layout.h frame.h opt.h
|
||||
Add new channel layout API based on the AVChannelLayout struct.
|
||||
Add support for Ambisonic audio.
|
||||
Deprecate previous channel layout API based on uint64 bitmasks.
|
||||
|
||||
Add AV_OPT_TYPE_CHLAYOUT option type, deprecate AV_OPT_TYPE_CHANNEL_LAYOUT.
|
||||
Update AVFrame for the new channel layout API: add ch_layout, deprecate
|
||||
channels/channel_layout.
|
||||
|
||||
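A small sketch of the struct-based replacement for the uint64 bitmasks; the function names are recalled from channel_layout.h, so treat this as an approximation.

    #include <stdio.h>
    #include "libavutil/channel_layout.h"

    static void describe_stereo(void)
    {
        AVChannelLayout layout = AV_CHANNEL_LAYOUT_STEREO;
        char buf[64];
        if (av_channel_layout_describe(&layout, buf, sizeof(buf)) >= 0)
            printf("%s (%d channels)\n", buf, layout.nb_channels);
        av_channel_layout_uninit(&layout);
    }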
2022-03-10 - f629ea2e18 - lavu 57.23.100 - cpu.h
|
||||
Add AV_CPU_FLAG_AVX512ICL.
|
||||
|
||||
2022-02-07 - a10f1aec1f - lavu 57.21.100 - fifo.h
|
||||
Deprecate AVFifoBuffer and the API around it, namely av_fifo_alloc(),
|
||||
av_fifo_alloc_array(), av_fifo_free(), av_fifo_freep(), av_fifo_reset(),
|
||||
av_fifo_size(), av_fifo_space(), av_fifo_generic_peek_at(),
|
||||
av_fifo_generic_peek(), av_fifo_generic_read(), av_fifo_generic_write(),
|
||||
av_fifo_realloc2(), av_fifo_grow(), av_fifo_drain() and av_fifo_peek2().
|
||||
Users should switch to the AVFifo-API.
|
||||
|
||||
2022-02-07 - 7329b22c05 - lavu 57.20.100 - fifo.h
|
||||
Add a new FIFO API, which allows setting a FIFO element size.
|
||||
This API operates on these elements rather than on bytes.
|
||||
Add av_fifo_alloc2(), av_fifo_elem_size(), av_fifo_can_read(),
|
||||
av_fifo_can_write(), av_fifo_grow2(), av_fifo_drain2(), av_fifo_write(),
|
||||
av_fifo_write_from_cb(), av_fifo_read(), av_fifo_read_to_cb(),
|
||||
av_fifo_peek(), av_fifo_peek_to_cb(), av_fifo_drain2(), av_fifo_reset2(),
|
||||
av_fifo_freep2(), av_fifo_auto_grow_limit().
|
||||
|
||||
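A brief sketch of the element-based FIFO described above (sizes and counts are arbitrary):

    #include "libavutil/error.h"
    #include "libavutil/fifo.h"

    static int fifo_roundtrip(void)
    {
        int in[4] = { 1, 2, 3, 4 }, out[4];
        AVFifo *f = av_fifo_alloc2(4, sizeof(int), 0); /* 4 elements of sizeof(int) */
        if (!f)
            return AVERROR(ENOMEM);
        av_fifo_write(f, in, 4);    /* counts are in elements, not bytes */
        av_fifo_read(f, out, 4);
        av_fifo_freep2(&f);
        return 0;
    }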
2022-01-26 - af94ab7c7c0 - lavu 57.19.100 - tx.h
|
||||
Add AV_TX_FLOAT_RDFT, AV_TX_DOUBLE_RDFT and AV_TX_INT32_RDFT.
|
||||
|
||||
-------- 8< --------- FFmpeg 5.0 was cut here -------- 8< ---------
|
||||
|
||||
2022-01-04 - 78dc21b123e - lavu 57.16.100 - frame.h
|
||||
Add AV_FRAME_DATA_DOVI_METADATA.
|
||||
|
||||
2022-01-03 - 70f318e6b6c - lavf 59.13.100 - avformat.h
|
||||
Add AVFMT_EXPERIMENTAL flag.
|
||||
|
||||
2021-12-22 - b7e1ec7bda9 - lavu 57.13.100 - hwcontext_videotoolbox.h
|
||||
Add av_vt_pixbuf_set_attachments
|
||||
|
||||
2021-12-22 - 69bd95dcd8d - lavu 57.13.100 - hwcontext_videotoolbox.h
|
||||
Add av_map_videotoolbox_chroma_loc_from_av
|
||||
Add av_map_videotoolbox_color_matrix_from_av
|
||||
Add av_map_videotoolbox_color_primaries_from_av
|
||||
Add av_map_videotoolbox_color_trc_from_av
|
||||
|
||||
2021-12-21 - ffbab99f2c2 - lavu 57.12.100 - cpu.h
|
||||
Add AV_CPU_FLAG_SLOW_GATHER.
|
||||
|
||||
2021-12-20 - 278068dc60d - lavu 57.11.101 - display.h
|
||||
Modified the documentation of av_display_rotation_set()
|
||||
to match its longstanding actual behaviour of treating
|
||||
the angle as directed clockwise.
|
||||
|
||||
2021-12-12 - 64834bb86a1 - lavf 59.10.100 - avformat.h
|
||||
Add AVFormatContext io_close2 which returns an int
|
||||
|
||||
2021-12-10 - f45cbb775e4 - lavu 57.11.100 - hwcontext_vulkan.h
|
||||
Add AVVkFrame.offset and AVVulkanFramesContext.flags.
|
||||
|
||||
2021-12-04 - b9c928a486f - lavfi 8.19.100 - avfilter.h
|
||||
Add AVFILTER_FLAG_METADATA_ONLY.
|
||||
|
||||
2021-12-03 - b236ef0a594 - lavu 57.10.100 - frame.h
|
||||
Add AVFrame.time_base
|
||||
|
||||
2021-11-22 - b2cd1fb2ec6 - lavu 57.9.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_P210, AV_PIX_FMT_P410, AV_PIX_FMT_P216, and AV_PIX_FMT_P416.
|
||||
|
||||
2021-11-17 - 54e65aa38ab - lavf 57.9.100 - frame.h
|
||||
Add AV_FRAME_DATA_DOVI_RPU_BUFFER.
|
||||
|
||||
2021-11-16 - ed75a08d36c - lavf 59.9.100 - avformat.h
|
||||
Add av_stream_get_class(). Schedule adding AVStream.av_class at libavformat
|
||||
major version 60.
|
||||
Add av_disposition_to_string() and av_disposition_from_string().
|
||||
Add "disposition" AVOption to AVStream's class.
|
||||
|
||||
2021-11-12 - 8478d60d5b5 - lavu 57.8.100 - hwcontext_vulkan.h
|
||||
Added AVVkFrame.sem_value, AVVulkanDeviceContext.queue_family_encode_index,
|
||||
nb_encode_queues, queue_family_decode_index, and nb_decode_queues.
|
||||
|
||||
2021-10-18 - 682bafdb125 - lavf 59.8.100 - avio.h
|
||||
Introduce public bytes_{read,written} statistic fields to AVIOContext.
|
||||
|
||||
2021-10-13 - a5622ed16f8 - lavf 59.7.100 - avio.h
|
||||
Deprecate AVIOContext.written. Originally added as a private entry in
|
||||
commit 3f75e5116b900f1428aa13041fc7d6301bf1988a, its grouping with
|
||||
the comment noting its private state was missed during merging of the field
|
||||
from Libav (most likely due to an already existing field in between).
|
||||
|
||||
2021-09-21 - 0760d9153c3 - lavu 57.7.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_X2BGR10.
|
||||
|
||||
2021-09-20 - 8d5de914d31 - lavu 57.6.100 - mem.h
|
||||
Deprecate av_mallocz_array() as it is identical to av_calloc().
|
||||
|
||||
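The replacement is mechanical; a sketch (the array length is arbitrary):

    #include <stddef.h>
    #include "libavutil/mem.h"

    /* before: int *tab = av_mallocz_array(nb, sizeof(*tab)); */
    static int *alloc_table(size_t nb)
    {
        return av_calloc(nb, sizeof(int)); /* zero-initialized, overflow-checked */
    }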
2021-09-20 - 176b8d785bf - lavc 59.9.100 - avcodec.h
|
||||
Deprecate AVCodecContext.sub_text_format and the corresponding
|
||||
AVOptions. It is unused since the last major bump.
|
||||
|
||||
2021-09-20 - dd846bc4a91 - lavc 59.8.100 - avcodec.h codec.h
|
||||
Deprecate AV_CODEC_FLAG_TRUNCATED and AV_CODEC_CAP_TRUNCATED,
|
||||
as they are redundant with parsers.
|
||||
|
||||
2021-09-17 - ccfdef79b13 - lavu 57.5.101 - buffer.h
|
||||
Constified the input parameters in av_buffer_replace(), av_buffer_ref(),
|
||||
and av_buffer_pool_buffer_get_opaque().
|
||||
|
||||
2021-09-08 - 4f78711f9c2 - lavu 57.5.100 - hwcontext_d3d11va.h
|
||||
Add AVD3D11VAFramesContext.texture_infos
|
||||
|
||||
2021-09-06 - 42cd64c1826 - lsws 6.1.100 - swscale.h
|
||||
Add AVFrame-based scaling API:
|
||||
- sws_scale_frame()
|
||||
- sws_frame_start()
|
||||
- sws_frame_end()
|
||||
- sws_send_slice()
|
||||
- sws_receive_slice()
|
||||
- sws_receive_slice_alignment()
|
||||
|
||||
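A sketch of the frame-based entry point (error handling trimmed; assume the context and frames are already configured):

    #include "libswscale/swscale.h"

    static int rescale(struct SwsContext *sws, AVFrame *dst, const AVFrame *src)
    {
        /* one call replaces the slice-oriented sws_scale() for whole frames */
        return sws_scale_frame(sws, dst, src);
    }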
2021-09-02 - cbf111059d2 - lavc 59.7.100 - avcodec.h
|
||||
Incremented the number of elements of AVCodecParser.codec_ids to seven.
|
||||
|
||||
2021-08-24 - 590a7e02f04 - lavc 59.6.100 - avcodec.h
|
||||
Add FF_CODEC_PROPERTY_FILM_GRAIN
|
||||
|
||||
2021-08-20 - 7c5f998196d - lavfi 8.3.100 - avfilter.h
|
||||
Add avfilter_filter_pad_count() as a replacement for avfilter_pad_count().
|
||||
Deprecate avfilter_pad_count().
|
||||
|
||||
2021-08-17 - 8c53b145993 - lavu 57.4.101 - opt.h
|
||||
av_opt_copy() now guarantees that allocated src and dst options
|
||||
don't alias each other even on error.
|
||||
|
||||
2021-08-14 - d5de9965ef6 - lavu 57.4.100 - imgutils.h
|
||||
Add av_image_copy_plane_uc_from()
|
||||
|
||||
2021-08-02 - a1a0fddfd05 - lavc 59.4.100 - packet.h
|
||||
Add AVPacket.opaque, AVPacket.opaque_ref, AVPacket.time_base.
|
||||
|
||||
2021-07-23 - 2dd8acbe800 - lavu 57.3.100 - common.h macros.h
|
||||
Move several macros (AV_NE, FFDIFFSIGN, FFMAX, FFMAX3, FFMIN, FFMIN3,
|
||||
FFSWAP, FF_ARRAY_ELEMS, MKTAG, MKBETAG) from common.h to macros.h.
|
||||
|
||||
2021-07-22 - e3b5ff17c2e - lavu 57.2.100 - film_grain_params.h
|
||||
Add AV_FILM_GRAIN_PARAMS_H274, AVFilmGrainH274Params
|
||||
|
||||
2021-07-19 - c1bf56a526f - lavu 57.1.100 - cpu.h
|
||||
Add av_cpu_force_count()
|
||||
|
||||
2021-06-17 - aca923b3653 - lavc 59.2.100 - packet.h
|
||||
Add AV_PKT_DATA_DYNAMIC_HDR10_PLUS
|
||||
|
||||
2021-06-09 - 2cccab96f6f - lavf 59.3.100 - avformat.h
|
||||
Add pts_wrap_bits to AVStream
|
||||
|
||||
2021-06-10 - 7c9763070d9 - lavc 59.1.100 - avcodec.h codec.h
|
||||
Move av_get_profile_name() from avcodec.h to codec.h.
|
||||
|
||||
2021-06-10 - bb3648e6766 - lavc 59.1.100 - avcodec.h codec_par.h
|
||||
Move av_get_audio_frame_duration2() from avcodec.h to codec_par.h.
|
||||
|
||||
2021-06-10 - 881db34f6a0 - lavc 59.1.100 - avcodec.h codec_id.h
|
||||
Move av_get_bits_per_sample(), av_get_exact_bits_per_sample(),
|
||||
avcodec_profile_name(), and av_get_pcm_codec() from avcodec.h
|
||||
to codec_id.h.
|
||||
|
||||
2021-06-10 - ff0a96046d8 - lavc 59.1.100 - avcodec.h defs.h
|
||||
Add new installed header defs.h. The following definitions are moved
|
||||
into it from avcodec.h:
|
||||
- AVDiscard
|
||||
- AVAudioServiceType
|
||||
- AVPanScan
|
||||
- AVCPBProperties and av_cpb_properties_alloc()
|
||||
- AVProducerReferenceTime
|
||||
- av_xiphlacing()
|
||||
|
||||
2021-04-27 - cb3ac722f4 - lavc 59.0.100 - avcodec.h
|
||||
Constified AVCodecParserContext.parser.
|
||||
|
||||
2021-04-27 - 8b3e6ce5f4 - lavd 59.0.100 - avdevice.h
|
||||
The av_*_device_next API functions now accept and return
|
||||
pointers to const AVInputFormat resp. AVOutputFormat.
|
||||
|
||||
2021-04-27 - d7e0d428fa - lavd 59.0.100 - avdevice.h
|
||||
avdevice_list_input_sources and avdevice_list_output_sinks now accept
|
||||
pointers to const AVInputFormat resp. const AVOutputFormat.
|
||||
|
||||
2021-04-27 - 46dac8cf3d - lavf 59.0.100 - avformat.h
|
||||
av_find_best_stream now uses a const AVCodec ** parameter
|
||||
for the returned decoder.
|
||||
|
||||
2021-04-27 - 626535f6a1 - lavc 59.0.100 - codec.h
|
||||
avcodec_find_encoder_by_name(), avcodec_find_encoder(),
|
||||
avcodec_find_decoder_by_name() and avcodec_find_decoder()
|
||||
now return a pointer to const AVCodec.
|
||||
|
||||
2021-04-27 - 14fa0a4efb - lavf 59.0.100 - avformat.h
|
||||
Constified AVFormatContext.*_codec.
|
||||
|
||||
2021-04-27 - 56450a0ee4 - lavf 59.0.100 - avformat.h
|
||||
Constified the pointers to AVInputFormats and AVOutputFormats
|
||||
in AVFormatContext, avformat_alloc_output_context2(),
|
||||
av_find_input_format(), av_probe_input_format(),
|
||||
av_probe_input_format2(), av_probe_input_format3(),
|
||||
av_probe_input_buffer2(), av_probe_input_buffer(),
|
||||
avformat_open_input(), av_guess_format() and av_guess_codec().
|
||||
Furthermore, constified the AVProbeData in av_probe_input_format(),
|
||||
av_probe_input_format2() and av_probe_input_format3().
|
||||
|
||||
2021-04-19 - 18af1ea8d1 - lavu 56.74.100 - tx.h
|
||||
Add AV_TX_FULL_IMDCT and AV_TX_UNALIGNED.
|
||||
|
||||
2021-04-17 - f1bf465aa0 - lavu 56.73.100 - frame.h detection_bbox.h
|
||||
Add AV_FRAME_DATA_DETECTION_BBOXES
|
||||
|
||||
2021-04-06 - 557953a397 - lavf 58.78.100 - avformat.h
|
||||
Add avformat_index_get_entries_count(), avformat_index_get_entry(),
|
||||
and avformat_index_get_entry_from_timestamp().
|
||||
|
||||
2021-03-21 - a77beea6c8 - lavu 56.72.100 - frame.h
|
||||
Deprecated av_get_colorspace_name().
|
||||
Use av_color_space_name() instead.
|
||||
|
||||
-------- 8< --------- FFmpeg 4.4 was cut here -------- 8< ---------
|
||||
|
||||
2021-03-19 - e8c0bca6bd - lavu 56.69.100 - adler32.h
|
||||
Added a typedef for the type of the Adler-32 checksums
|
||||
used by av_adler32_update(). It will be changed to uint32_t
|
||||
at the next major bump.
|
||||
The type of the parameter for the length of the input buffer
|
||||
will also be changed to size_t at the next major bump.
|
||||
|
||||
2021-03-19 - e318438f2f - lavf 58.75.100 - avformat.h
|
||||
AVChapter.id will be changed from int to int64_t
|
||||
on the next major version bump.
|
||||
|
||||
2021-03-17 - f7db77bd87 - lavc 58.133.100 - codec.h
|
||||
Deprecated av_init_packet(). Once removed, sizeof(AVPacket) will
|
||||
no longer be a part of the public ABI.
|
||||
Deprecated AVPacketList.
|
||||
|
||||
2021-03-16 - 7d09579190 - lavc 58.132.100 - codec.h
|
||||
Add AV_CODEC_CAP_OTHER_THREADS as a new name for
|
||||
AV_CODEC_CAP_AUTO_THREADS. AV_CODEC_CAP_AUTO_THREADS
|
||||
is now deprecated.
|
||||
|
||||
2021-03-12 - 6e7e3a3820 - lavc 58.131.100 - avcodec.h codec.h
|
||||
Add a get_encode_buffer callback to AVCodecContext, similar to
|
||||
get_buffer2 but for encoders.
|
||||
Add avcodec_default_get_encode_buffer().
|
||||
Add AV_GET_ENCODE_BUFFER_FLAG_REF.
|
||||
Encoders may now be flagged as AV_CODEC_CAP_DR1 capable.
|
||||
|
||||
2021-03-10 - 42e68fe015 - lavf 58.72.100 - avformat.h
|
||||
Change AVBufferRef related AVStream function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-10 - d79e0fe65c - lavc 58.130.100 - packet.h
|
||||
Change AVBufferRef related AVPacket function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-10 - 14040a1d91 - lavu 56.68.100 - buffer.h frame.h
|
||||
Change AVBufferRef and relevant AVFrame function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-04 - a0eec776b6 - lavc 58.128.101 - avcodec.h
|
||||
Enable err_recognition to be set for encoders.
|
||||
|
||||
2021-03-03 - 2ff40b98ec - lavf 58.70.100 - avformat.h
|
||||
Deprecate AVFMT_FLAG_PRIV_OPT. It will do nothing
|
||||
as soon as av_demuxer_open() is removed.
|
||||
|
||||
2021-02-27 - dd9227e48f - lavc 58.126.100 - avcodec.h
|
||||
Deprecated avcodec_get_frame_class().
|
||||
|
||||
2021-02-21 - 5ca40d6d94 - lavu 56.66.100 - tx.h
|
||||
Add enum AVTXFlags and AVTXFlags.AV_TX_INPLACE
|
||||
|
||||
2021-02-14 - 4f49ca7bbc - lavd 58.12.100 - avdevice.h
|
||||
Deprecated avdevice_capabilities_create() and
|
||||
avdevice_capabilities_free().
|
||||
|
||||
2021-02-10 - 1bda9bb68a - lavu 56.65.100 - common.h
|
||||
Add FFABS64U()
|
||||
|
||||
2021-01-26 - 5dd9567080 - lavu 56.64.100 - common.h
|
||||
Add FFABSU()
|
||||
|
||||
2021-01-25 - 56709ca8aa - lavc 58.119.100 - avcodec.h
|
||||
Deprecate AVCodecContext.debug_mv, FF_DEBUG_VIS_MV_P_FOR, FF_DEBUG_VIS_MV_B_FOR,
|
||||
FF_DEBUG_VIS_MV_B_BACK
|
||||
|
||||
2021-01-11 - ebdd33086a - lavc 58.116.100 - avcodec.h
|
||||
Add FF_PROFILE_VVC_MAIN_10 and FF_PROFILE_VVC_MAIN_10_444.
|
||||
|
||||
2020-01-01 - baecaa16c1 - lavu 56.63.100 - video_enc_params.h
|
||||
Add AV_VIDEO_ENC_PARAMS_MPEG2
|
||||
|
||||
2020-12-03 - eca12f4d5a - lavu 56.62.100 - timecode.h
|
||||
Add av_timecode_init_from_components.
|
||||
|
||||
2020-11-27 - a83098ab03 - lavc 58.114.100 - avcodec.h
|
||||
Deprecate AVCodecContext.thread_safe_callbacks. Starting with
|
||||
LIBAVCODEC_VERSION_MAJOR=60, user callbacks must always be
|
||||
thread-safe when frame threading is used.
|
||||
|
||||
2020-11-25 - d243dd540a - lavc 58.113.100 - avcodec.h
|
||||
Adds a new flag AV_CODEC_EXPORT_DATA_FILM_GRAIN for export_side_data.
|
||||
|
||||
2020-11-25 - 4f9ee87253 - lavu 56.61.100 - film_grain_params.h
|
||||
Adds a new API for extracting codec film grain parameters as side data.
|
||||
Adds a new AVFrameSideDataType entry AV_FRAME_DATA_FILM_GRAIN_PARAMS for it.
|
||||
|
||||
2020-10-28 - f95d9510ff - lavf 58.64.100 - avformat.h
|
||||
Add AVSTREAM_EVENT_FLAG_NEW_PACKETS.
|
||||
|
||||
2020-09-28 - 68918d3b7f - lavu 56.60.100 - buffer.h
|
||||
Add a av_buffer_replace() convenience function.
|
||||
|
||||
2020-09-13 - 837b6eb90e - lavu 56.59.100 - timecode.h
|
||||
Add av_timecode_make_smpte_tc_string2.
|
||||
|
||||
2020-08-21 - 06f2651204 - lavu 56.58.100 - avstring.h
|
||||
Deprecate av_d2str(). Use av_asprintf() instead.
|
||||
|
||||
2020-08-04 - 34de0abbe7 - lavu 56.58.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_22POINT2 together with its newly required pieces:
|
||||
AV_CH_TOP_SIDE_LEFT, AV_CH_TOP_SIDE_RIGHT, AV_CH_BOTTOM_FRONT_CENTER,
|
||||
AV_CH_BOTTOM_FRONT_LEFT, AV_CH_BOTTOM_FRONT_RIGHT.
|
||||
|
||||
2020-07-23 - 84655b7101 - lavu 56.57.100 - cpu.h
|
||||
Add AV_CPU_FLAG_MMI and AV_CPU_FLAG_MSA.
|
||||
|
||||
2020-07-22 - 3a8e927176 - lavu 56.56.100 - imgutils.h
|
||||
Add av_image_fill_plane_sizes().
|
||||
|
||||
2020-07-15 - 448a9aaa78 - lavc 58.96.100 - packet.h
|
||||
Add AV_PKT_DATA_S12M_TIMECODE.
|
||||
|
||||
2020-06-12 - b09fb030c1 - lavu 56.55.100 - pixdesc.h
|
||||
Add AV_PIX_FMT_X2RGB10.
|
||||
|
||||
2020-06-11 - bc8ab084fb - lavu 56.54.100 - frame.h
|
||||
Add AV_FRAME_DATA_SEI_UNREGISTERED.
|
||||
|
||||
2020-06-10 - 1b4a98b029 - lavu 56.53.100 - log.h opt.h
|
||||
Add av_opt_child_class_iterate() and AVClass.child_class_iterate().
|
||||
Deprecate av_opt_child_class_next() and AVClass.child_class_next().
|
||||
|
||||
-------- 8< --------- FFmpeg 4.3 was cut here -------- 8< ---------
|
||||
|
||||
2020-06-05 - ec39c2276a - lavu 56.50.100 - buffer.h
|
||||
Passing NULL as alloc argument to av_buffer_pool_init2() is now allowed.
|
||||
|
||||
2020-05-27 - ba6cada92e - lavc 58.88.100 - avcodec.h codec.h
|
||||
Move AVCodec-related public API to new header codec.h.
|
||||
|
||||
2020-05-23 - 064b875e89 - lavu 56.49.100 - video_enc_params.h
|
||||
Add AV_VIDEO_ENC_PARAMS_H264.
|
||||
|
||||
2020-05-23 - 2e08b39444 - lavu 56.48.100 - hwcontext.h
|
||||
Add av_hwdevice_ctx_create_derived_opts.
|
||||
|
||||
2020-05-23 - 6b65c4ec54 - lavu 56.47.100 - rational.h
|
||||
Add av_gcd_q().
|
||||
|
||||
2020-05-22 - af9e622776 - lavu 56.46.101 - opt.h
|
||||
Add AV_OPT_FLAG_CHILD_CONSTS.
|
||||
|
||||
2020-05-22 - 9d443c3e68 - lavc 58.87.100 - avcodec.h codec_par.h
|
||||
Move AVBitstreamFilter-related public API to new header bsf.h.
|
||||
Move AVCodecParameters-related public API to new header codec_par.h.
|
||||
|
||||
2020-05-21 - 13b1bbff0b - lavc 58.86.101 - avcodec.h
|
||||
Deprecated AV_CODEC_CAP_INTRA_ONLY and AV_CODEC_CAP_LOSSLESS.
|
||||
|
||||
2020-05-17 - 84af196c65 - lavu 56.46.100 - common.h
|
||||
Add av_sat_add64() and av_sat_sub64()
|
||||
|
||||
2020-05-12 - 991d417692 - lavu 56.45.100 - video_enc_params.h
|
||||
lavc 58.84.100 - avcodec.h
|
||||
Add a new API for exporting video encoding information.
|
||||
Replaces the deprecated API for exporting QP tables from decoders.
|
||||
Add AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS to request this information from
|
||||
decoders.
|
||||
|
||||
2020-05-10 - dccd07f66d - lavu 56.44.100 - hwcontext_vulkan.h
|
||||
Add enabled_inst_extensions, num_enabled_inst_extensions, enabled_dev_extensions
|
||||
and num_enabled_dev_extensions fields to AVVulkanDeviceContext
|
||||
|
||||
2020-04-22 - 0e1db79e37 - lavc 58.81.100 - packet.h
|
||||
- lavu 56.43.100 - dovi_meta.h
|
||||
Add AV_PKT_DATA_DOVI_CONF and AVDOVIDecoderConfigurationRecord.
|
||||
|
||||
2020-04-15 - 22b25b3ea5 - lavc 58.79.100 - avcodec.h
|
||||
Add formal support for calling avcodec_flush_buffers() on encoders.
|
||||
Encoders that set the cap AV_CODEC_CAP_ENCODER_FLUSH will be flushed.
|
||||
For all other encoders, the call is now a no-op rather than undefined
|
||||
behaviour.
|
||||
|
||||
2020-04-10 - 672946c7fe - lavc 58.78.100 - avcodec.h codec_desc.h codec_id.h packet.h
|
||||
Move AVCodecDesc-related public API to new header codec_desc.h.
|
||||
Move AVCodecID enum to new header codec_id.h.
|
||||
Move AVPacket-related public API to new header packet.h.
|
||||
|
||||
2020-03-29 - 4cb0dda555 - lavf 58.42.100 - avformat.h
|
||||
av_read_frame() now guarantees to handle uninitialized input packets
|
||||
and to return refcounted packets on success.
|
||||
|
||||
2020-03-27 - c52ec0367d - lavc 58.77.100 - avcodec.h
|
||||
av_packet_ref() now guarantees to return the destination packet
|
||||
in a blank state on error.
|
||||
|
||||
2020-03-10 - 05d27f342b - lavc 58.75.100 - avcodec.h
|
||||
Add AV_PKT_DATA_ICC_PROFILE.
|
||||
|
||||
2020-02-21 - d005a7cdfd - lavc 58.73.101 - avcodec.h
|
||||
Add AV_CODEC_EXPORT_DATA_PRFT.
|
||||
|
||||
2020-02-21 - c666689491 - lavc 58.73.100 - avcodec.h
|
||||
Add AVCodecContext.export_side_data and AV_CODEC_EXPORT_DATA_MVS.
|
||||
|
||||
2020-02-13 - e8f054b095 - lavu 56.41.100 - tx.h
|
||||
Add AV_TX_INT32_FFT and AV_TX_INT32_MDCT
|
||||
|
||||
2020-02-12 - 3182114f88 - lavu 56.40.100 - log.h
|
||||
Add av_log_once().
|
||||
|
||||
2020-02-04 - a88449ffb2 - lavu 56.39.100 - hwcontext.h
|
||||
Add AV_PIX_FMT_VULKAN
|
||||
Add AV_HWDEVICE_TYPE_VULKAN and implementation.
|
||||
|
||||
2020-01-30 - 27529eeb27 - lavf 58.37.100 - avio.h
|
||||
Add avio_protocol_get_class().
|
||||
|
||||
2020-01-15 - 717b2074ec - lavc 58.66.100 - avcodec.h
|
||||
Add AV_PKT_DATA_PRFT and AVProducerReferenceTime.
|
||||
|
||||
2019-12-27 - 45259a0ee4 - lavu 56.38.100 - eval.h
|
||||
Add av_expr_count_func().
|
||||
|
||||
2019-12-26 - 16685114d5 - lavu 56.37.100 - buffer.h
|
||||
Add av_buffer_pool_buffer_get_opaque().
|
||||
|
||||
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
|
||||
Add av_expr_count_vars().
|
||||
|
||||
2019-10-14 - f3746d31f9 - lavu 56.35.101 - opt.h
|
||||
Add AV_OPT_FLAG_RUNTIME_PARAM.
|
||||
|
||||
2019-09-25 - f8406ab4b9 - lavc 58.59.100 - avcodec.h
|
||||
Add max_samples
|
||||
|
||||
2019-09-04 - 2a9d461abc - lavu 56.35.100 - hwcontext_videotoolbox.h
|
||||
Add av_map_videotoolbox_format_from_pixfmt2() for full range pixfmt
|
||||
|
||||
2019-09-01 - 8821d1f56e - lavu 56.34.100 - pixfmt.h
|
||||
Add EBU Tech. 3213-E AVColorPrimaries value
|
||||
|
||||
2019-08-17 - 95fa73a2b4 - lavf 58.31.101 - avio.h
|
||||
4K limit removed from avio_printf.
|
||||
|
||||
2019-08-17 - a82f8f2f10 - lavf 58.31.100 - avio.h
|
||||
Add avio_print_string_array and avio_print.
|
||||
|
||||
2019-07-27 - 42e2319ba9 - lavu 56.33.100 - tx.h
|
||||
Add AV_TX_DOUBLE_FFT and AV_TX_DOUBLE_MDCT
|
||||
|
||||
-------- 8< --------- FFmpeg 4.2 was cut here -------- 8< ---------
|
||||
|
||||
2019-06-21 - a30e44098a - lavu 56.30.100 - frame.h
|
||||
@@ -1810,7 +1165,7 @@ API changes, most recent first:
|
||||
2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be8f - lavu 52.76.100 - log.h
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
Add av_log_get_flags()
|
||||
|
||||
2014-04-11 - 6db42a2b - lavd 55.12.100 - avdevice.h
|
||||
|
||||
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = 6.0.1
|
||||
PROJECT_NUMBER = 4.2.2
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
@@ -1980,7 +1980,6 @@ PREDEFINED = __attribute__(x)= \
|
||||
av_alloc_size(...)= \
|
||||
AV_GCC_VERSION_AT_LEAST(x,y)=1 \
|
||||
AV_GCC_VERSION_AT_MOST(x,y)=0 \
|
||||
"FF_PAD_STRUCTURE(name,size,...)=typedef struct name { __VA_ARGS__ } name;" \
|
||||
__GNUC__
|
||||
|
||||
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
|
||||
|
||||
@@ -27,9 +27,6 @@ HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMP
|
||||
doc/mailing-list-faq.html \
|
||||
doc/nut.html \
|
||||
doc/platform.html \
|
||||
$(SRC_PATH)/doc/bootstrap.min.css \
|
||||
$(SRC_PATH)/doc/style.min.css \
|
||||
$(SRC_PATH)/doc/default.css \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
@@ -105,7 +102,7 @@ DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
|
||||
|
||||
doc/doxy/html: TAG = DOXY
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
|
||||
$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(https://git.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
@command{git log} in the FFmpeg source directory, or browsing the
|
||||
online repository at @url{https://git.ffmpeg.org/ffmpeg}.
|
||||
online repository at @url{http://source.ffmpeg.org}.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
@file{MAINTAINERS} in the source code tree.
|
||||
|
||||
@@ -81,7 +81,7 @@ Top-left position.
|
||||
@end table
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (@emph{time_scale / num_units_in_display_tick}) in
|
||||
Set the tick rate (@emph{num_units_in_display_tick / time_scale}) in
|
||||
the timing info in the sequence header.
|
||||
@item num_ticks_per_picture
|
||||
Set the number of ticks in each picture, to indicate that the stream
|
||||
@@ -132,36 +132,6 @@ the header stored in extradata to the key packets:
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
@end example
|
||||
|
||||
@section dv_error_marker
|
||||
|
||||
Blocks in DV which are marked as damaged are replaced by blocks of the specified color.
|
||||
|
||||
@table @option
|
||||
@item color
|
||||
The color to replace damaged blocks by
|
||||
@item sta
|
||||
A 16 bit mask which specifies which of the 16 possible error status values are
|
||||
to be replaced by colored blocks. 0xFFFE is the default which replaces all non 0
|
||||
error status values.
|
||||
@table @samp
|
||||
@item ok
|
||||
No error, no concealment
|
||||
@item err
|
||||
Error, No concealment
|
||||
@item res
|
||||
Reserved
|
||||
@item notok
|
||||
Error or concealment
|
||||
@item notres
|
||||
Not reserved
|
||||
@item Aa, Ba, Ca, Ab, Bb, Cb, A, B, C, a, b, erri, erru
|
||||
The specific error status code
|
||||
@end table
|
||||
see page 44-46 or section 5.5 of
|
||||
@url{http://web.archive.org/web/20060927044735/http://www.smpte.org/smpte_store/standards/pdf/s314m.pdf}
|
||||
|
||||
@end table
|
||||
|
||||
@section eac3_core
|
||||
|
||||
Extract the core from a E-AC-3 stream, dropping extra channels.
|
||||
@@ -247,20 +217,12 @@ Modify metadata embedded in an H.264 stream.
|
||||
Insert or remove AUD NAL units in all access units of the stream.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
@item sample_aspect_ratio
|
||||
Set the sample aspect ratio of the stream in the VUI parameters.
|
||||
See H.264 table E-1.
|
||||
|
||||
@item overscan_appropriate_flag
|
||||
Set whether the stream is suitable for display using overscan
|
||||
or not (see H.264 section E.2.1).
|
||||
|
||||
@item video_format
|
||||
@item video_full_range_flag
|
||||
@@ -278,7 +240,7 @@ Set the chroma sample location in the stream (see H.264 section
|
||||
E.2.1 and figure E-1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (time_scale / num_units_in_tick) in the VUI
|
||||
Set the tick rate (num_units_in_tick / time_scale) in the VUI
|
||||
parameters. This is the smallest time unit representable in the
|
||||
stream, and in many cases represents the field rate of the stream
|
||||
(double the frame rate).
|
||||
@@ -287,11 +249,6 @@ Set whether the stream has fixed framerate - typically this indicates
|
||||
that the framerate is exactly half the tick rate, but the exact
|
||||
meaning is dependent on interlacing and the picture structure (see
|
||||
H.264 section E.2.1 and table E-6).
|
||||
@item zero_new_constraint_set_flags
|
||||
Zero constraint_set4_flag and constraint_set5_flag in the SPS. These
|
||||
bits were reserved in a previous version of the H.264 spec, and thus
|
||||
some hardware decoders require these to be zero. The result of zeroing
|
||||
this is still a valid bitstream.
|
||||
|
||||
@item crop_left
|
||||
@item crop_right
|
||||
@@ -315,37 +272,6 @@ insert the string ``hello'' associated with the given UUID.
|
||||
@item delete_filler
|
||||
Deletes both filler NAL units and filler SEI messages.
|
||||
|
||||
@item display_orientation
|
||||
Insert, extract or remove Display orientation SEI messages.
|
||||
See H.264 section D.1.27 and D.2.27 for syntax and semantics.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@item extract
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
Insert mode works in conjunction with @code{rotate} and @code{flip} options.
|
||||
Any pre-existing Display orientation messages will be removed in insert or remove mode.
|
||||
Extract mode attaches the display matrix to the packet as side data.
|
||||
|
||||
@item rotate
|
||||
Set rotation in display orientation SEI (anticlockwise angle in degrees).
|
||||
Range is -360 to +360. Default is NaN.
|
||||
|
||||
@item flip
|
||||
Set flip in display orientation SEI.
|
||||
|
||||
@table @samp
|
||||
@item horizontal
|
||||
@item vertical
|
||||
@end table
|
||||
|
||||
Default is unset.
|
||||
|
||||
@item level
|
||||
Set the level in the SPS. Refer to H.264 section A.3 and tables A-1
|
||||
to A-5.
|
||||
@@ -382,6 +308,9 @@ This applies a specific fixup to some Blu-ray streams which contain
|
||||
redundant PPSs modifying irrelevant parameters of the stream which
|
||||
confuse other transformations which require correct extradata.
|
||||
|
||||
A new single global PPS is created, and all of the redundant PPSs
|
||||
within the stream are removed.
|
||||
|
||||
@section hevc_metadata
|
||||
|
||||
Modify metadata embedded in an HEVC stream.
|
||||
@@ -414,8 +343,8 @@ Set the chroma sample location in the stream (see H.265 section
|
||||
E.3.1 and figure E.1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate in the VPS and VUI parameters (time_scale /
num_units_in_tick). Combined with @option{num_ticks_poc_diff_one}, this can
set a constant framerate in the stream. Note that it is likely to be
|
||||
overridden by container parameters when the stream is in a container.
|
||||
|
||||
@@ -596,110 +525,25 @@ container. Can be used for fuzzing or testing error resilience/concealment.
|
||||
Parameters:
|
||||
@table @option
|
||||
@item amount
Accepts an expression whose evaluation per-packet determines how often bytes in that
packet will be modified. A value below 0 will result in a variable frequency.
Default is 0 which results in no modification. However, if neither amount nor drop is specified,
amount will be set to @var{-1}. See below for accepted variables.
@item drop
Accepts an expression evaluated per-packet whose value determines whether that packet is dropped.
Evaluation to a positive value results in the packet being dropped. Evaluation to a negative
value results in a variable chance of it being dropped, roughly inverse in proportion to the
magnitude of the value. Default is 0 which results in no drops. See below for accepted variables.
@item dropamount
Accepts a non-negative integer, which assigns a variable chance of the packet being dropped, roughly
inverse in proportion to the value. Default is 0 which results in no drops. This option is kept for
backwards compatibility and is equivalent to setting drop to a negative value with the same magnitude,
i.e. @code{dropamount=4} is the same as @code{drop=-4}. Ignored if drop is also specified.
@end table
|
||||
|
||||
Both @code{amount} and @code{drop} accept expressions containing the following variables:
|
||||
|
||||
@table @samp
|
||||
@item n
|
||||
The index of the packet, starting from zero.
|
||||
@item tb
|
||||
The timebase for packet timestamps.
|
||||
@item pts
|
||||
Packet presentation timestamp.
|
||||
@item dts
|
||||
Packet decoding timestamp.
|
||||
@item nopts
|
||||
Constant representing AV_NOPTS_VALUE.
|
||||
@item startpts
|
||||
First non-AV_NOPTS_VALUE PTS seen in the stream.
|
||||
@item startdts
|
||||
First non-AV_NOPTS_VALUE DTS seen in the stream.
|
||||
@item duration
|
||||
@itemx d
|
||||
Packet duration, in timebase units.
|
||||
@item pos
|
||||
Packet position in input; may be -1 when unknown or not set.
|
||||
@item size
|
||||
Packet size, in bytes.
|
||||
@item key
|
||||
Whether packet is marked as a keyframe.
|
||||
@item state
|
||||
A pseudo random integer, primarily derived from the content of packet payload.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
The following example applies the modification to every byte but does not drop
any packets.
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
|
||||
@end example
|
||||
|
||||
Drop every video packet not marked as a keyframe after timestamp 30s but do not
|
||||
modify any of the remaining packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
|
||||
@end example
|
||||
|
||||
Drop one second of audio every 10 seconds and add some random noise to the rest.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
@end example
|
||||
|
||||
@section null
|
||||
This bitstream filter passes the packets through unchanged.
|
||||
|
||||
@section pcm_rechunk
|
||||
|
||||
Repacketize PCM audio to a fixed number of samples per packet or a fixed packet
|
||||
rate per second. This is similar to the @ref{asetnsamples,,asetnsamples audio
|
||||
filter,ffmpeg-filters} but works on audio packets instead of audio frames.
|
||||
|
||||
@table @option
|
||||
@item nb_out_samples, n
|
||||
Set the number of samples per each output audio packet. The number is intended
|
||||
as the number of samples @emph{per each channel}. Default value is 1024.
|
||||
|
||||
@item pad, p
|
||||
If set to 1, the filter will pad the last audio packet with silence, so that it
|
||||
will contain the same number of samples (or roughly the same number of samples,
|
||||
see @option{frame_rate}) as the previous ones. Default value is 1.
|
||||
|
||||
@item frame_rate, r
|
||||
This option makes the filter output a fixed number of packets per second instead
|
||||
of a fixed number of samples per packet. If the audio sample rate is not
|
||||
divisible by the frame rate then the number of samples will not be constant but
|
||||
will vary slightly so that each packet will start as close to the frame
|
||||
boundary as possible. Using this option has precedence over @option{nb_out_samples}.
|
||||
@end table
|
||||
|
||||
You can generate the well known 1602-1601-1602-1601-1602 pattern of 48kHz audio
|
||||
for NTSC frame rate using the @option{frame_rate} option.
|
||||
@example
|
||||
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc -
|
||||
@end example
|
||||
|
||||
@section pgs_frame_merge

Merge a sequence of PGS Subtitle segments ending with an "end of display set"
segment into a single packet.

This is required by some containers that support PGS subtitles
(muxer @code{matroska}).

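A minimal usage sketch (file names are hypothetical), stream-copying a PGS subtitle stream into Matroska:
@example
ffmpeg -i INPUT -map 0 -c copy -bsf:s pgs_frame_merge output.mkv
@end example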
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
@@ -741,10 +585,6 @@ Keep the same transfer characteristics property (default).
|
||||
@item unknown
|
||||
@item bt709
|
||||
BT 601, BT 709, BT 2020
|
||||
@item smpte2084
|
||||
SMPTE ST 2084
|
||||
@item arib-std-b67
|
||||
ARIB STD-B67
|
||||
@end table
|
||||
|
||||
|
||||
@@ -754,7 +594,7 @@ Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same colorspace property (default).
|
||||
@item unknown
|
||||
@item bt709
|
||||
@@ -770,11 +610,6 @@ Set Rec709 colorspace for each frame of the file
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov
|
||||
@end example
|
||||
|
||||
Set Hybrid Log-Gamma parameters for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt2020:color_trc=arib-std-b67:colorspace=bt2020nc output.mov
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
Remove extradata from packets.
|
||||
@@ -797,91 +632,6 @@ Remove extradata from all frames.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section setts
|
||||
Set PTS and DTS in packets.
|
||||
|
||||
It accepts the following parameters:
|
||||
@table @option
|
||||
@item ts
|
||||
@item pts
|
||||
@item dts
|
||||
Set expressions for PTS, DTS or both.
|
||||
@item duration
|
||||
Set expression for duration.
|
||||
@item time_base
|
||||
Set output time base.
|
||||
@end table
|
||||
|
||||
The expressions are evaluated through the eval API and can contain the following
|
||||
constants:
|
||||
|
||||
@table @option
|
||||
@item N
|
||||
The count of the input packet, starting from 0.
|
||||
|
||||
@item TS
|
||||
The demux timestamp in input in case of @code{ts} or @code{dts} option or presentation
|
||||
timestamp in case of @code{pts} option.
|
||||
|
||||
@item POS
|
||||
The original position of the packet in the file, or undefined if it is
not defined for the current packet
|
||||
|
||||
@item DTS
|
||||
The demux timestamp in input.
|
||||
|
||||
@item PTS
|
||||
The presentation timestamp in input.
|
||||
|
||||
@item DURATION
|
||||
The duration in input.
|
||||
|
||||
@item STARTDTS
|
||||
The DTS of the first packet.
|
||||
|
||||
@item STARTPTS
|
||||
The PTS of the first packet.
|
||||
|
||||
@item PREV_INDTS
|
||||
The previous input DTS.
|
||||
|
||||
@item PREV_INPTS
|
||||
The previous input PTS.
|
||||
|
||||
@item PREV_INDURATION
|
||||
The previous input duration.
|
||||
|
||||
@item PREV_OUTDTS
|
||||
The previous output DTS.
|
||||
|
||||
@item PREV_OUTPTS
|
||||
The previous output PTS.
|
||||
|
||||
@item PREV_OUTDURATION
|
||||
The previous output duration.
|
||||
|
||||
@item NEXT_DTS
|
||||
The next input DTS.
|
||||
|
||||
@item NEXT_PTS
|
||||
The next input PTS.
|
||||
|
||||
@item NEXT_DURATION
|
||||
The next input duration.
|
||||
|
||||
@item TB
|
||||
The timebase of the stream the packet belongs to.
|
||||
|
||||
@item TB_OUT
|
||||
The output timebase.
|
||||
|
||||
@item SR
|
||||
The sample rate of the stream the packet belongs to.
|
||||
|
||||
@item NOPTS
|
||||
The AV_NOPTS_VALUE constant.
|
||||
@end table
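As an illustrative sketch (file names are hypothetical), the following command shifts timestamps so that the output starts at zero while stream-copying:
@example
ffmpeg -i INPUT -c copy -bsf setts=pts=PTS-STARTPTS:dts=DTS-STARTDTS output.mkv
@end example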
|
||||
|
||||
@anchor{text2movsub}
|
||||
@section text2movsub
|
||||
|
||||
@@ -909,9 +659,7 @@ Modify metadata embedded in a VP9 stream.
|
||||
|
||||
@table @option
|
||||
@item color_space
|
||||
Set the color space value in the frame header. Note that any frame
set to RGB will be implicitly set to PC range and that RGB is
incompatible with profiles 0 and 2.
@table @samp
|
||||
@item unknown
|
||||
@item bt601
|
||||
@@ -923,8 +671,8 @@ incompatible with profiles 0 and 2.
|
||||
@end table
|
||||
|
||||
@item color_range
|
||||
Set the color range value in the frame header. Note that any value
imposed by the color space will take precedence over this value.
@table @samp
|
||||
@item tv
|
||||
@item pc
|
||||
|
||||
2
doc/bootstrap.min.css
vendored
File diff suppressed because one or more lines are too long
@@ -48,8 +48,6 @@ config
|
||||
tools/target_dec_<decoder>_fuzzer
|
||||
Build fuzzer to fuzz the specified decoder.
|
||||
|
||||
tools/target_bsf_<filter>_fuzzer
|
||||
Build fuzzer to fuzz the specified bitstream filter.
|
||||
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
|
||||
341
doc/codecs.texi
@@ -50,10 +50,11 @@ Use internal 2pass ratecontrol in first pass mode.
|
||||
Use internal 2pass ratecontrol in second pass mode.
|
||||
@item gray
|
||||
Only decode/encode grayscale.
|
||||
@item emu_edge
|
||||
Do not draw edges.
|
||||
@item psnr
|
||||
Set error[?] variables during encoding.
|
||||
@item truncated
|
||||
Input bitstream might be randomly truncated.
|
||||
@item drop_changed
|
||||
Don't output frames whose parameters differ from first decoded frame in stream.
|
||||
Error AVERROR_INPUT_CHANGED is returned when a frame is dropped.
|
||||
@@ -70,14 +71,50 @@ This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item aic
|
||||
Apply H263 advanced intra coding / mpeg4 ac prediction.
|
||||
@item cbp
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item qprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item ilme
|
||||
Apply interlaced motion estimation.
|
||||
@item cgop
|
||||
Use closed gop.
|
||||
@item output_corrupt
|
||||
Output even potentially corrupted frames.
|
||||
@end table
|
||||
|
||||
@item me_method @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item zero
|
||||
zero motion estimation (fastest)
|
||||
@item full
|
||||
full motion estimation (slowest)
|
||||
@item epzs
|
||||
EPZS motion estimation (default)
|
||||
@item esa
|
||||
esa motion estimation (alias for full)
|
||||
@item tesa
|
||||
tesa motion estimation
|
||||
@item dia
|
||||
dia motion estimation (alias for epzs)
|
||||
@item log
|
||||
log motion estimation
|
||||
@item phods
|
||||
phods motion estimation
|
||||
@item x1
|
||||
X1 motion estimation
|
||||
@item hex
|
||||
hex motion estimation
|
||||
@item umh
|
||||
umh motion estimation
|
||||
@item iter
|
||||
iter motion estimation
|
||||
@end table
|
||||
|
||||
@item extradata_size @var{integer}
|
||||
Set extradata size.
|
||||
|
||||
@item time_base @var{rational number}
|
||||
Set codec time base.
|
||||
|
||||
@@ -144,6 +181,24 @@ Default value is 0.
|
||||
@item b_qfactor @var{float} (@emph{encoding,video})
|
||||
Set qp factor between P and B frames.
|
||||
|
||||
@item rc_strategy @var{integer} (@emph{encoding,video})
|
||||
Set ratecontrol method.
|
||||
|
||||
@item b_strategy @var{integer} (@emph{encoding,video})
|
||||
Set strategy to choose between I/P/B-frames.
|
||||
|
||||
@item ps @var{integer} (@emph{encoding,video})
|
||||
Set RTP payload size in bytes.
|
||||
|
||||
@item mv_bits @var{integer}
|
||||
@item header_bits @var{integer}
|
||||
@item i_tex_bits @var{integer}
|
||||
@item p_tex_bits @var{integer}
|
||||
@item i_count @var{integer}
|
||||
@item p_count @var{integer}
|
||||
@item skip_count @var{integer}
|
||||
@item misc_bits @var{integer}
|
||||
@item frame_bits @var{integer}
|
||||
@item codec_tag @var{integer}
|
||||
@item bug @var{flags} (@emph{decoding,video})
|
||||
Work around encoder bugs that are not auto-detected.
|
||||
@@ -152,6 +207,8 @@ Possible values:
|
||||
@table @samp
|
||||
@item autodetect
|
||||
|
||||
@item old_msmpeg4
|
||||
some old lavc generated msmpeg4v3 files (no autodetection)
|
||||
@item xvid_ilace
|
||||
Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
@item ump4
|
||||
@@ -160,6 +217,8 @@ Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
padding bug (autodetected)
|
||||
@item amv
|
||||
|
||||
@item ac_vlc
|
||||
illegal vlc bug (autodetected per fourcc)
|
||||
@item qpel_chroma
|
||||
|
||||
@item std_qpel
|
||||
@@ -180,6 +239,14 @@ Workaround various bugs in microsoft broken decoders.
|
||||
truncated frames
|
||||
@end table
|
||||
|
||||
@item lelim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for luminance (negative
|
||||
values also consider DC coefficient).
|
||||
|
||||
@item celim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for chrominance (negative
|
||||
values also consider dc coefficient)
|
||||
|
||||
@item strict @var{integer} (@emph{decoding/encoding,audio,video})
|
||||
Specify how strictly to follow the standards.
|
||||
|
||||
@@ -233,8 +300,29 @@ consider things that a sane encoder should not do as an error
|
||||
|
||||
@item block_align @var{integer}
|
||||
|
||||
@item mpeg_quant @var{integer} (@emph{encoding,video})
|
||||
Use MPEG quantizers instead of H.263.
|
||||
|
||||
@item qsquish @var{float} (@emph{encoding,video})
|
||||
How to keep quantizer between qmin and qmax (0 = clip, 1 = use
|
||||
differentiable function).
|
||||
|
||||
@item rc_qmod_amp @var{float} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_qmod_freq @var{integer} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_override_count @var{integer}
|
||||
|
||||
@item rc_eq @var{string} (@emph{encoding,video})
|
||||
Set rate control equation. When computing the expression, besides the
|
||||
standard functions defined in the section 'Expression Evaluation', the
|
||||
following functions are available: bits2qp(bits), qp2bits(qp). Also
|
||||
the following constants are available: iTex pTex tex mv fCode iCount
|
||||
mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
|
||||
avgTex.
|
||||
|
||||
@item maxrate @var{integer} (@emph{encoding,audio,video})
|
||||
Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
|
||||
|
||||
@@ -245,12 +333,18 @@ encode. It is of little use elsewise.
|
||||
@item bufsize @var{integer} (@emph{encoding,audio,video})
|
||||
Set ratecontrol buffer size (in bits).
|
||||
|
||||
@item rc_buf_aggressivity @var{float} (@emph{encoding,video})
|
||||
Currently useless.
|
||||
|
||||
@item i_qfactor @var{float} (@emph{encoding,video})
|
||||
Set QP factor between P and I frames.
|
||||
|
||||
@item i_qoffset @var{float} (@emph{encoding,video})
|
||||
Set QP offset between P and I frames.
|
||||
|
||||
@item rc_init_cplx @var{float} (@emph{encoding,video})
|
||||
Set initial complexity for 1-pass encoding.
|
||||
|
||||
@item dct @var{integer} (@emph{encoding,video})
|
||||
Set DCT algorithm.
|
||||
|
||||
@@ -315,7 +409,11 @@ Automatically pick a IDCT compatible with the simple one
|
||||
|
||||
@item simpleneon
|
||||
|
||||
@item xvid
|
||||
@item simplealpha
|
||||
|
||||
@item ipp
|
||||
|
||||
@item xvidmmx
|
||||
|
||||
@item faani
|
||||
floating point AAN IDCT
|
||||
@@ -338,6 +436,19 @@ favor predicting from the previous frame instead of the current
|
||||
|
||||
@item bits_per_coded_sample @var{integer}
|
||||
|
||||
@item pred @var{integer} (@emph{encoding,video})
|
||||
Set prediction method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item left
|
||||
|
||||
@item plane
|
||||
|
||||
@item median
|
||||
|
||||
@end table
|
||||
|
||||
@item aspect @var{rational number} (@emph{encoding,video})
|
||||
Set sample aspect ratio.
|
||||
|
||||
@@ -532,28 +643,13 @@ noise preserving sum of squared differences
|
||||
|
||||
@item dia_size @var{integer} (@emph{encoding,video})
|
||||
Set diamond type & size for motion estimation.
|
||||
@table @samp
|
||||
@item (1024, INT_MAX)
|
||||
full motion estimation(slowest)
|
||||
@item (768, 1024]
|
||||
umh motion estimation
|
||||
@item (512, 768]
|
||||
hex motion estimation
|
||||
@item (256, 512]
|
||||
l2s diamond motion estimation
|
||||
@item [2,256]
|
||||
var diamond motion estimation
|
||||
@item (-1, 2)
|
||||
small diamond motion estimation
|
||||
@item -1
|
||||
funny diamond motion estimation
|
||||
@item (INT_MIN, -1)
|
||||
sab diamond motion estimation
|
||||
@end table
|
||||
|
||||
@item last_pred @var{integer} (@emph{encoding,video})
|
||||
Set amount of motion predictors from the previous frame.
|
||||
|
||||
@item preme @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation.
|
||||
|
||||
@item precmp @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation compare function.
|
||||
|
||||
@@ -597,11 +693,40 @@ Set diamond type & size for motion estimation pre-pass.
|
||||
@item subq @var{integer} (@emph{encoding,video})
|
||||
Set sub pel motion estimation quality.
|
||||
|
||||
@item dtg_active_format @var{integer}
|
||||
|
||||
@item me_range @var{integer} (@emph{encoding,video})
|
||||
Set limit motion vectors range (1023 for DivX player).
|
||||
|
||||
@item ibias @var{integer} (@emph{encoding,video})
|
||||
Set intra quant bias.
|
||||
|
||||
@item pbias @var{integer} (@emph{encoding,video})
|
||||
Set inter quant bias.
|
||||
|
||||
@item color_table_id @var{integer}
|
||||
|
||||
@item global_quality @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
@item coder @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item vlc
|
||||
variable length coder / huffman coder
|
||||
@item ac
|
||||
arithmetic coder
|
||||
@item raw
|
||||
raw (no encoding)
|
||||
@item rle
|
||||
run-length coder
|
||||
@item deflate
|
||||
deflate-based coder
|
||||
@end table
|
||||
|
||||
@item context @var{integer} (@emph{encoding,video})
|
||||
Set context model.
|
||||
|
||||
@item slice_flags @var{integer}
|
||||
|
||||
@item mbd @var{integer} (@emph{encoding,video})
|
||||
@@ -617,16 +742,32 @@ use fewest bits
|
||||
use best rate distortion
|
||||
@end table
|
||||
|
||||
@item stream_codec_tag @var{integer}
|
||||
|
||||
@item sc_threshold @var{integer} (@emph{encoding,video})
|
||||
Set scene change threshold.
|
||||
|
||||
@item lmin @var{integer} (@emph{encoding,video})
|
||||
Set min lagrange factor (VBR).
|
||||
|
||||
@item lmax @var{integer} (@emph{encoding,video})
|
||||
Set max lagrange factor (VBR).
|
||||
|
||||
@item nr @var{integer} (@emph{encoding,video})
|
||||
Set noise reduction.
|
||||
|
||||
@item rc_init_occupancy @var{integer} (@emph{encoding,video})
|
||||
Set number of bits which should be loaded into the rc buffer before
|
||||
decoding starts.
|
||||
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item fast
|
||||
Allow non spec compliant speedup tricks.
|
||||
@item sgop
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item noout
|
||||
Skip bitstream encoding.
|
||||
@item ignorecrop
|
||||
@@ -640,31 +781,12 @@ Show all frames before the first keyframe.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item skip_manual
|
||||
Do not skip samples and export skip information as frame side data.
|
||||
@item ass_ro_flush_noop
|
||||
Do not reset ASS ReadOrder field on flush.
|
||||
@item icc_profiles
|
||||
Generate/parse embedded ICC profiles from/to colorimetry tags.
|
||||
@end table
|
||||
|
||||
@item export_side_data @var{flags} (@emph{decoding/encoding,audio,video,subtitles})

Possible values:
|
||||
@table @samp
|
||||
@item mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item prft
|
||||
Export encoder Producer Reference Time into packet side-data (see @code{AV_PKT_DATA_PRFT})
|
||||
for codecs that support it.
|
||||
@item venc_params
|
||||
Export video encoding parameters through frame side data (see @code{AV_FRAME_DATA_VIDEO_ENC_PARAMS})
|
||||
for codecs that support it. At present, those are H.264 and VP9.
|
||||
@item film_grain
|
||||
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
|
||||
Supported at present by AV1 decoders.
|
||||
@end table

@item error @var{integer} (@emph{encoding,video})

@item qns @var{integer} (@emph{encoding,video})
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
|
||||
@item threads @var{integer} (@emph{decoding/encoding,video})
|
||||
Set the number of threads to be used, in case the selected codec
|
||||
@@ -678,6 +800,12 @@ automatically select the number of threads to set
|
||||
|
||||
Default value is @samp{auto}.
|
||||
|
||||
@item me_threshold @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation threshold.
|
||||
|
||||
@item mb_threshold @var{integer} (@emph{encoding,video})
|
||||
Set macroblock threshold.
|
||||
|
||||
@item dc @var{integer} (@emph{encoding,video})
|
||||
Set intra_dc_precision.
|
||||
|
||||
@@ -692,8 +820,49 @@ Set number of macroblock rows at the bottom which are skipped.
|
||||
|
||||
@item profile @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set encoder codec profile. Default value is @samp{unknown}. Encoder specific
|
||||
profiles are documented in the relevant encoder documentation.
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@item aac_main
|
||||
|
||||
@item aac_low
|
||||
|
||||
@item aac_ssr
|
||||
|
||||
@item aac_ltp
|
||||
|
||||
@item aac_he
|
||||
|
||||
@item aac_he_v2
|
||||
|
||||
@item aac_ld
|
||||
|
||||
@item aac_eld
|
||||
|
||||
@item mpeg2_aac_low
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
||||
@item dts_96_24
|
||||
|
||||
@item dts_hd_hra
|
||||
|
||||
@item dts_hd_ma
|
||||
|
||||
@end table
|
||||
|
||||
@item level @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
@@ -706,12 +875,67 @@ Possible values:
|
||||
@item lowres @var{integer} (@emph{decoding,audio,video})
|
||||
Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
|
||||
|
||||
@item skip_threshold @var{integer} (@emph{encoding,video})
|
||||
Set frame skip threshold.
|
||||
|
||||
@item skip_factor @var{integer} (@emph{encoding,video})
|
||||
Set frame skip factor.
|
||||
|
||||
@item skip_exp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip exponent.
|
||||
Negative values behave identical to the corresponding positive ones, except
|
||||
that the score is normalized.
|
||||
Positive values exist primarily for compatibility reasons and are not so useful.
|
||||
|
||||
@item skipcmp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip compare function.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item w53
|
||||
5/3 wavelet, only used in snow
|
||||
@item w97
|
||||
9/7 wavelet, only used in snow
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
|
||||
@item border_mask @var{float} (@emph{encoding,video})
|
||||
Increase the quantizer for macroblocks close to borders.
|
||||
|
||||
@item mblmin @var{integer} (@emph{encoding,video})
|
||||
Set min macroblock lagrange factor (VBR).
|
||||
|
||||
@item mblmax @var{integer} (@emph{encoding,video})
|
||||
Set max macroblock lagrange factor (VBR).
|
||||
|
||||
@item mepc @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation bitrate penalty compensation (1.0 = 256).
|
||||
|
||||
@item skip_loop_filter @var{integer} (@emph{decoding,video})
|
||||
@item skip_idct @var{integer} (@emph{decoding,video})
|
||||
@item skip_frame @var{integer} (@emph{decoding,video})
|
||||
@@ -751,17 +975,34 @@ Default value is @samp{default}.
|
||||
@item bidir_refine @var{integer} (@emph{encoding,video})
|
||||
Refine the two motion vectors used in bidirectional macroblocks.
|
||||
|
||||
@item brd_scale @var{integer} (@emph{encoding,video})
|
||||
Downscale frames for dynamic B-frame decision.
|
||||
|
||||
@item keyint_min @var{integer} (@emph{encoding,video})
|
||||
Set minimum interval between IDR-frames.
|
||||
|
||||
@item refs @var{integer} (@emph{encoding,video})
|
||||
Set reference frames to consider for motion compensation.
|
||||
|
||||
@item chromaoffset @var{integer} (@emph{encoding,video})
|
||||
Set chroma qp offset from luma.
|
||||
|
||||
@item trellis @var{integer} (@emph{encoding,audio,video})
|
||||
Set rate-distortion optimal quantization.
|
||||
|
||||
@item mv0_threshold @var{integer} (@emph{encoding,video})
|
||||
@item b_sensitivity @var{integer} (@emph{encoding,video})
|
||||
Adjust sensitivity of b_frame_strategy 1.
|
||||
|
||||
@item compression_level @var{integer} (@emph{encoding,audio,video})
|
||||
@item min_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item max_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item timecode_frame_start @var{integer} (@emph{encoding,video})
|
||||
Set GOP timecode frame start number, in non drop frame format.
|
||||
|
||||
@item request_channels @var{integer} (@emph{decoding,audio})
|
||||
Set desired number of audio channels.
|
||||
|
||||
@item bits_per_raw_sample @var{integer}
|
||||
@item channel_layout @var{integer} (@emph{decoding/encoding,audio})
|
||||
|
||||
@@ -875,12 +1116,6 @@ BT.2020 NCL
|
||||
BT.2020 CL
|
||||
@item smpte2085
|
||||
SMPTE 2085
|
||||
@item chroma-derived-nc
|
||||
Chroma-derived NCL
|
||||
@item chroma-derived-c
|
||||
Chroma-derived CL
|
||||
@item ictcp
|
||||
ICtCp
|
||||
@end table
|
||||
|
||||
@item color_range @var{integer} (@emph{decoding/encoding,video})
|
||||
|
||||
@@ -25,19 +25,6 @@ enabled decoders.
|
||||
A description of some of the currently available video decoders
|
||||
follows.
|
||||
|
||||
@section av1
|
||||
|
||||
AOMedia Video 1 (AV1) decoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item operating_point
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Default is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video decoder.
|
||||
@@ -70,36 +57,19 @@ You need to explicitly configure the build with @code{--enable-libdav1d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libdav1d wrapper.
|
||||
@table @option
|
||||
|
||||
@item framethreads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
option @code{max_frame_delay} and the global option @code{threads} instead.
|
||||
|
||||
@item tilethreads
|
||||
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
global option @code{threads} instead.
|
||||
|
||||
@item max_frame_delay
|
||||
Set max amount of frames the decoder may buffer internally. The default value is 0
|
||||
(autodetect).
|
||||
|
||||
@item filmgrain
|
||||
Apply film grain to the decoded video if present in the bitstream. Defaults to the
|
||||
internal default of the library.
|
||||
This option is deprecated and will be removed in the future. See the global option
|
||||
@code{export_side_data} to export Film Grain parameters instead of applying it.
|
||||
|
||||
@item oppoint
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
|
||||
internal default of the library.
|
||||
|
||||
@item alllayers
|
||||
Output all spatial layers of a scalable AV1 bitstream. The default value is false.
|
||||
@end table
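An illustrative decoding sketch (the input file name is hypothetical), forcing the libdav1d decoder and selecting one operating point of a scalable bitstream:
@example
ffmpeg -c:v libdav1d -oppoint 1 -i input.ivf output.mkv
@end example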
|
||||
|
||||
@@ -111,84 +81,6 @@ This decoder allows libavcodec to decode AVS2 streams with davs2 library.
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@section libuavs3d
|
||||
|
||||
AVS3-P2/IEEE1857.10 video decoder.
|
||||
|
||||
libuavs3d allows libavcodec to decode AVS3 streams.
|
||||
Requires the presence of the libuavs3d headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libuavs3d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libuavs3d wrapper.
|
||||
|
||||
@table @option
|
||||
|
||||
@item frame_threads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
|
||||
@end table
|
||||
|
||||
@section QSV Decoders
|
||||
|
||||
The family of Intel QuickSync Video decoders (VC1, MPEG-2, H.264, HEVC,
|
||||
JPEG/MJPEG, VP8, VP9, AV1).
|
||||
|
||||
@subsection Common Options
|
||||
|
||||
The following options are supported by all qsv decoders.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{async_depth}
|
||||
Internal parallelization depth, the higher the value the higher the latency.
|
||||
|
||||
@item @var{gpu_copy}
|
||||
A GPU-accelerated copy between video and system memory
|
||||
@table @samp
|
||||
@item default
|
||||
@item on
|
||||
@item off
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection HEVC Options
|
||||
Extra options for hevc_qsv.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{load_plugin}
|
||||
A user plugin to load in an internal session
|
||||
@table @samp
|
||||
@item none
|
||||
@item hevc_sw
|
||||
@item hevc_hw
|
||||
@end table
|
||||
|
||||
@item @var{load_plugins}
|
||||
A :-separated list of hexadecimal plugin UIDs to load in an internal session
|
||||
|
||||
@end table
|
||||
|
||||
@section v210
|
||||
|
||||
Uncompressed 4:2:2 10-bit decoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item custom_stride
|
||||
Set the line size of the v210 data in bytes. The default value is 0
|
||||
(autodetect). You can use the special -1 value for a strideless v210 as seen in
|
||||
BOXX files.
|
||||
|
||||
@end table
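An illustrative sketch (the input name is hypothetical and must contain v210 video), passing the option to the decoder for a strideless BOXX-style file:
@example
ffmpeg -custom_stride -1 -i INPUT.mov -c:v libx264 output.mkv
@end example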
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@c man begin AUDIO DECODERS
|
||||
|
||||
@@ -208,7 +100,7 @@ the undocumented RealAudio 3 (a.k.a. dnet).
|
||||
|
||||
@item -drc_scale @var{value}
|
||||
Dynamic Range Scale Factor. The factor to apply to dynamic range values
|
||||
from the AC-3 stream. This factor is applied exponentially. The default value is 1.
There are 3 notable scale factor ranges:
|
||||
@table @option
|
||||
@item drc_scale == 0
|
||||
@@ -360,8 +252,6 @@ Enabled by default.
|
||||
@table @option
|
||||
@item compute_clut
|
||||
@table @option
|
||||
@item -2
|
||||
Compute clut once if no matching CLUT is in the stream.
|
||||
@item -1
|
||||
Compute clut if no matching CLUT is in the stream.
|
||||
@item 0
|
||||
@@ -390,7 +280,7 @@ palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@@ -419,11 +309,6 @@ List of teletext page numbers to decode. Pages that do not match the specified
|
||||
list are dropped. You may use the special @code{*} string to match all pages,
|
||||
or @code{subtitle} to match all subtitle pages.
|
||||
Default value is *.
|
||||
@item txt_default_region
|
||||
Set default character set used for decoding, a value between 0 and 87 (see
|
||||
ETS 300 706, Section 15, Table 32). Default value is -1, which does not
|
||||
override the libzvbi default. This option is needed for some legacy level 1.0
|
||||
transmissions which cannot signal the proper charset.
|
||||
@item txt_chop_top
|
||||
Discards the top teletext line. Default value is 1.
|
||||
@item txt_format
|
||||
|
||||
@@ -25,13 +25,6 @@ Audible Format 2, 3, and 4 demuxer.
|
||||
|
||||
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
|
||||
|
||||
@section aac
|
||||
|
||||
Raw Audio Data Transport Stream AAC demuxer.
|
||||
|
||||
This demuxer is used to demux an ADTS input containing a single AAC stream
|
||||
along with any ID3v1/2 or APE tags in it.
|
||||
|
||||
@section apng
|
||||
|
||||
Animated Portable Network Graphics demuxer.
|
||||
@@ -44,15 +37,12 @@ between the last fcTL and IEND chunks.
|
||||
|
||||
@table @option
|
||||
@item -ignore_loop @var{bool}
|
||||
Ignore the loop variable in the file if set. Default is enabled.

@item -max_fps @var{int}
Maximum framerate in frames per second. Default of 0 imposes no limit.

@item -default_fps @var{int}
Default framerate in frames per second when none is specified in the file
(0 meaning as fast as possible). Default is 15.
@end table
|
||||
|
||||
@section asf
|
||||
@@ -103,7 +93,8 @@ backslash or single quotes.
|
||||
All subsequent file-related directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version. It also sets the @option{safe} option
to 1 if it was -1.
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appear exactly as is (no extra space or byte-order-mark) on the very first
|
||||
@@ -157,16 +148,6 @@ directive) will be reduced based on their specified Out point.
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
This directive is deprecated, use @code{file_packet_meta} instead.
|
||||
|
||||
@item @code{file_packet_meta @var{key} @var{value}}
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
|
||||
@item @code{option @var{key} @var{value}}
|
||||
Option to access, open and probe the file.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
@@ -184,20 +165,6 @@ subfiles will be used.
|
||||
This is especially useful for MPEG-PS (VOB) files, where the order of the
|
||||
streams is not reliable.
|
||||
|
||||
@item @code{stream_meta @var{key} @var{value}}
|
||||
Metadata for the stream.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream_codec @var{value}}
|
||||
Codec for the stream.
|
||||
|
||||
@item @code{stream_extradata @var{hex_string}}
|
||||
Extradata for the string, encoded in hexadecimal.
|
||||
|
||||
@item @code{chapter @var{id} @var{start} @var{end}}
|
||||
Add a chapter. @var{id} is an unique identifier, possibly small and
|
||||
consecutive.
|
||||
|
||||
@end table
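As an illustrative sketch (file names are hypothetical), a script using some of the directives above might look like:
@example
ffconcat version 1.0
file intro.ts
file main.ts
file_packet_meta source main.ts
@end example
It could then be concatenated with, for instance, @code{ffmpeg -f concat -i playlist.ffconcat -c copy output.ts}.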
|
||||
|
||||
@subsection Options
|
||||
@@ -207,8 +174,7 @@ This demuxer accepts the following option:
|
||||
@table @option
|
||||
|
||||
@item safe
|
||||
If set to 1, reject unsafe file paths and directives.
A file path is considered safe if it
does not contain a protocol specification and is relative and all components
|
||||
only contain characters from the portable character set (letters, digits,
|
||||
period, underscore and hyphen) and have no period at the beginning of a
|
||||
@@ -218,6 +184,9 @@ If set to 0, any file name is accepted.
|
||||
|
||||
The default is 1.
|
||||
|
||||
-1 is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
@@ -274,47 +243,11 @@ which streams to actually receive.
|
||||
Each stream mirrors the @code{id} and @code{bandwidth} properties from the
|
||||
@code{<Representation>} as metadata keys named "id" and "variant_bitrate" respectively.
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following option:
|
||||
|
||||
@table @option
|
||||
|
||||
@item cenc_decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@end table
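A usage sketch (the manifest URL and the 32-hex-digit key are placeholders):
@example
ffmpeg -cenc_decryption_key 00112233445566778899aabbccddeeff -i https://example.com/manifest.mpd -c copy output.mp4
@end example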
|
||||
|
||||
@section ea
|
||||
|
||||
Electronic Arts Multimedia format demuxer.
|
||||
|
||||
This format is used by various Electronic Arts games.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item merge_alpha @var{bool}
|
||||
|
||||
Normally the VP6 alpha channel (if exists) is returned as a secondary video
|
||||
stream, by setting this option you can make the demuxer return a single video
|
||||
stream which contains the alpha channel in addition to the ordinary video.
|
||||
|
||||
@end table
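A usage sketch (the input name is hypothetical), merging the VP6 alpha plane into a single stream and encoding to a codec that preserves alpha:
@example
ffmpeg -merge_alpha 1 -i INPUT.vp6 -c:v ffv1 output.mkv
@end example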
|
||||
|
||||
@section imf
|
||||
|
||||
Interoperable Master Format demuxer.
|
||||
|
||||
This demuxer presents audio and video streams found in an IMF Composition.
|
||||
|
||||
@section flv, live_flv, kux
|
||||
Adobe Flash Video Format demuxer.
|
||||
|
||||
This demuxer is used to demux FLV files and RTMP network streams. In case of live network streams, if you force the input format, you may use the live_flv option instead of flv to survive timestamp discontinuities.
|
||||
KUX is a flv variant used on the Youku platform.
|
||||
|
||||
@example
|
||||
ffmpeg -f flv -i myfile.flv ...
|
||||
@@ -391,9 +324,6 @@ It accepts the following options:
|
||||
@item live_start_index
|
||||
segment index to start live streams at (negative values are from the end).
|
||||
|
||||
@item prefer_x_start
|
||||
prefer to use #EXT-X-START if it's in playlist instead of live_start_index.
|
||||
|
||||
@item allowed_extensions
|
||||
',' separated list of file extensions that hls is allowed to access.
|
||||
|
||||
@@ -401,10 +331,6 @@ prefer to use #EXT-X-START if it's in playlist instead of live_start_index.
|
||||
Maximum number of times an insufficient list is attempted to be reloaded.
|
||||
Default value is 1000.
|
||||
|
||||
@item m3u8_hold_counters
|
||||
The maximum number of times to load m3u8 when it refreshes without new segments.
|
||||
Default value is 1000.
|
||||
|
||||
@item http_persistent
|
||||
Use persistent HTTP connections. Applicable only for HTTP streams.
|
||||
Enabled by default.
|
||||
@@ -412,17 +338,6 @@ Enabled by default.
|
||||
@item http_multiple
|
||||
Use multiple HTTP connections for downloading HTTP segments.
|
||||
Enabled by default for HTTP/1.1 servers.
|
||||
|
||||
@item http_seekable
|
||||
Use HTTP partial requests for downloading HTTP segments.
|
||||
0 = disable, 1 = enable, -1 = auto, Default is auto.
|
||||
|
||||
@item seg_format_options
|
||||
Set options for the demuxer of media segments using a list of key=value pairs separated by @code{:}.
|
||||
|
||||
@item seg_max_retry
|
||||
Maximum number of times to reload a segment on error, useful when segment skip on network error is not desired.
|
||||
Default value is 0.
|
||||
@end table
|
||||
|
||||
@section image2
|
||||
@@ -533,17 +448,6 @@ nanosecond precision.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@item export_path_metadata
|
||||
If set to 1, will add two extra fields to the metadata found in input, making them
|
||||
also available for other filters (see @var{drawtext} filter for examples). Default
|
||||
value is 0. The extra fields are described below:
|
||||
@table @option
|
||||
@item lavf.image2dec.source_path
|
||||
Corresponds to the full path to the input file being read.
|
||||
@item lavf.image2dec.source_basename
|
||||
Corresponds to the name of the file being read.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -681,13 +585,9 @@ Set the sample rate for libopenmpt to output.
|
||||
Range is from 1000 to INT_MAX. The value default is 48000.
|
||||
@end table
|
||||
|
||||
@section mov/mp4/3gp
|
||||
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
|
||||
|
||||
Registered extensions: mov, mp4, m4a, 3gp, 3g2, mj2, psp, m4b, ism, ismv, isma, f4v
|
||||
|
||||
@subsection Options
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@@ -698,88 +598,10 @@ Enabling this can theoretically leak information in some use cases.
|
||||
@item use_absolute_path
|
||||
Allows loading of external tracks via absolute paths, disabled by default.
|
||||
Enabling this poses a security risk. It should only be enabled if the source
|
||||
is known to be non-malicious.
|
||||
@item seek_streams_individually
|
||||
When seeking, identify the closest point in each stream individually and demux packets in
|
||||
that stream from identified point. This can lead to a different sequence of packets compared
|
||||
to demuxing linearly from the beginning. Default is true.
|
||||
|
||||
@item ignore_editlist
|
||||
Ignore any edit list atoms. The demuxer, by default, modifies the stream index to reflect the
|
||||
timeline described by the edit list. Default is false.
|
||||
|
||||
@item advanced_editlist
|
||||
Modify the stream index to reflect the timeline described by the edit list. @code{ignore_editlist}
|
||||
must be set to false for this option to be effective.
|
||||
If both @code{ignore_editlist} and this option are set to false, then only the
|
||||
start of the stream index is modified to reflect initial dwell time or starting timestamp
|
||||
described by the edit list. Default is true.
|
||||
|
||||
@item ignore_chapters
|
||||
Don't parse chapters. This includes GoPro 'HiLight' tags/moments. Note that chapters are
|
||||
only parsed when input is seekable. Default is false.
|
||||
|
||||
@item use_mfra_for
|
||||
For seekable fragmented input, set fragment's starting timestamp from media fragment random access box, if present.
|
||||
|
||||
Following options are available:
|
||||
@table @samp
|
||||
@item auto
|
||||
Auto-detect whether to set mfra timestamps as PTS or DTS @emph{(default)}
|
||||
|
||||
@item dts
|
||||
Set mfra timestamps as DTS
|
||||
|
||||
@item pts
|
||||
Set mfra timestamps as PTS
|
||||
|
||||
@item 0
|
||||
Don't use mfra box to set timestamps
|
||||
@end table
|
||||
|
||||
@item use_tfdt
|
||||
For fragmented input, set fragment's starting timestamp to @code{baseMediaDecodeTime} from the @code{tfdt} box.
|
||||
Default is enabled, which will prefer to use the @code{tfdt} box to set DTS. Disable to use the @code{earliest_presentation_time} from the @code{sidx} box.
|
||||
In either case, the timestamp from the @code{mfra} box will be used if it's available and @code{use_mfra_for} is
|
||||
set to pts or dts.
|
||||
|
||||
@item export_all
|
||||
Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
|
||||
characters of the box type are set as the key. Default is false.
|
||||
|
||||
@item export_xmp
|
||||
Export entire contents of @var{XMP_} box and @var{uuid} box as a string with key @code{xmp}. Note that
|
||||
if @code{export_all} is set and this option isn't, the contents of @var{XMP_} box are still exported
|
||||
but with key @code{XMP_}. Default is false.
|
||||
|
||||
@item activation_bytes
|
||||
4-byte key required to decrypt Audible AAX and AAX+ files. See Audible AAX subsection below.
|
||||
|
||||
@item audible_fixed_key
|
||||
Fixed key used for handling Audible AAX/AAX+ files. It has been pre-set so should not be necessary to
|
||||
specify.
|
||||
|
||||
@item decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@item max_stts_delta
|
||||
Very high sample deltas written in a trak's stts box may occasionally be intended but usually they are written in
|
||||
error or used to store a negative value for dts correction when treated as signed 32-bit integers. This option lets
|
||||
the user set an upper limit, beyond which the delta is clamped to 1. Values greater than the limit if negative when
|
||||
cast to int32 are used to adjust onward dts.
|
||||
|
||||
Unit is the track time scale. Range is 0 to UINT_MAX. Default is @code{UINT_MAX - 48000*10} which allows up to
|
||||
a 10 second dts correction for 48 kHz audio streams while accommodating 99.9% of @code{uint32} range.
|
||||
@end table
|
||||
|
||||
@subsection Audible AAX
|
||||
|
||||
Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
|
||||
@example
|
||||
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
|
||||
@end example
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG-2 transport stream demuxer.
|
||||
@@ -811,10 +633,6 @@ disabled). Default value is -1.
|
||||
@item merge_pmt_versions
|
||||
Re-use existing streams when a PMT's version is updated and elementary
|
||||
streams move to different PIDs. Default value is 0.
|
||||
|
||||
@item max_packet_size
|
||||
Set maximum size, in bytes, of packet emitted by the demuxer. Payloads above this size
|
||||
are split across multiple packets. Range is 1 to INT_MAX/2. Default is 204800 bytes.
|
||||
@end table
|
||||
|
||||
@section mpjpeg
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
# FFmpeg project
|
||||
|
||||
## Organisation
|
||||
|
||||
The FFmpeg project is organized through a community working on global consensus.
|
||||
|
||||
Decisions are taken by the ensemble of active members, through voting and
|
||||
are aided by two committees.
|
||||
|
||||
## General Assembly
|
||||
|
||||
The ensemble of active members is called the General Assembly (GA).
|
||||
|
||||
The General Assembly is sovereign and legitimate for all its decisions
|
||||
regarding the FFmpeg project.
|
||||
|
||||
The General Assembly is made up of active contributors.
|
||||
|
||||
Contributors are considered "active contributors" if they have pushed more
|
||||
than 20 patches in the last 36 months in the main FFmpeg repository, or
|
||||
if they have been voted in by the GA.
|
||||
|
||||
Additional members are added to the General Assembly through a vote after
|
||||
proposal by a member of the General Assembly.
|
||||
They are part of the GA for two years, after which they need a confirmation by
|
||||
the GA.
|
||||
|
||||
A script to generate the current members of the general assembly (minus members
|
||||
voted in) can be found in `tools/general_assembly.pl`.
|
||||
|
||||
## Voting
|
||||
|
||||
Voting is done using a ranked voting system, currently running on https://vote.ffmpeg.org/ .
|
||||
|
||||
Majority vote means more than 50% of the expressed ballots.
|
||||
|
||||
## Technical Committee
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when
|
||||
technical conflicts occur in the project.
|
||||
They will consider the merits of all the positions, judge them and make a
|
||||
decision.
|
||||
|
||||
The TC resolves technical conflicts but is not a technical steering committee.
|
||||
|
||||
Decisions by the TC are binding for all the contributors.
|
||||
|
||||
Decisions made by the TC can be re-opened after 1 year or by a majority vote
|
||||
of the General Assembly, requested by one of the members of the GA.
|
||||
|
||||
The TC is elected by the General Assembly for a duration of 1 year, and
|
||||
is composed of 5 members.
|
||||
Members can be re-elected if they wish. A majority vote in the General Assembly
|
||||
can trigger a new election of the TC.
|
||||
|
||||
The members of the TC can be elected from outside of the GA.
|
||||
Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The conflict resolution process is detailed in the [resolution process](resolution_process.md) document.
|
||||
|
||||
## Community committee
|
||||
|
||||
The Community Committee (CC) is here to arbitrate and make decisions when
|
||||
inter-personal conflicts occur in the project. It will decide quickly and
|
||||
take actions, for the sake of the project.
|
||||
|
||||
The CC can remove privileges of offending members, including removal of
|
||||
commit access and temporary ban from the community.
|
||||
|
||||
Decisions made by the CC can be re-opened after 1 year or by a majority vote
|
||||
of the General Assembly. Indefinite bans from the community must be confirmed
|
||||
by the General Assembly, in a majority vote.
|
||||
|
||||
The CC is elected by the General Assembly for a duration of 1 year, and is
|
||||
composed of 5 members.
|
||||
Members can be re-elected if they wish. A majority vote in the General Assembly
|
||||
can trigger a new election of the CC.
|
||||
|
||||
The members of the CC can be elected from outside of the GA.
|
||||
Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The CC is governed by and responsible for enforcing the Code of Conduct.
|
||||
@@ -1,91 +0,0 @@
|
||||
# Technical Committee
|
||||
|
||||
_This document only makes sense with the rules from [the community document](community)_.
|
||||
|
||||
The Technical Committee (**TC**) is here to arbitrate and make decisions when
|
||||
technical conflicts occur in the project.
|
||||
|
||||
The TC's main role is to resolve technical conflicts.
|
||||
It is therefore not a technical steering committee, but it is understood that
|
||||
some decisions might impact the future of the project.
|
||||
|
||||
# Process
|
||||
|
||||
## Seizing
|
||||
|
||||
The TC can take possession of any technical matter that it sees fit.
|
||||
|
||||
To involve the TC in a matter, email tc@ or CC them on an ongoing discussion.
|
||||
|
||||
As members of TC are developers, they also can email tc@ to raise an issue.
|
||||
|
||||
## Announcement
|
||||
|
||||
The TC, once seized, must announce itself on the main mailing list, with a _[TC]_ tag.
|
||||
|
||||
The TC has 2 modes of operation: a RFC one and an internal one.
|
||||
|
||||
If the TC thinks it needs the input from the larger community, the TC can call
|
||||
for a RFC. Else, it can decide by itself.
|
||||
|
||||
If the disagreement involves a member of the TC, that member should recuse
|
||||
themselves from the decision.
|
||||
|
||||
The decision to use a RFC process or an internal discussion is a discretionary
|
||||
decision of the TC.
|
||||
|
||||
The TC can also reject a seizure for a few reasons such as:
|
||||
the matter was not discussed enough previously; it lacks expertise to reach a
|
||||
beneficial decision on the matter; or the matter is too trivial.
|
||||
|
||||
### RFC call
|
||||
|
||||
In the RFC mode, one person from the TC posts on the mailing list the
|
||||
technical question and will request input from the community.
|
||||
|
||||
The mail will have the following specification:
|
||||
* a precise title
|
||||
* a specific tag [TC RFC]
|
||||
* a top-level email
|
||||
* contain a precise question that does not exceed 100 words and that is answerable by developers
|
||||
* may have an extra description, or a link to a previous discussion, if deemed necessary,
|
||||
* contain a precise end date for the answers.
|
||||
|
||||
The answers from the community must be on the main mailing list and must have
|
||||
the following specification:
|
||||
* keep the tag and the title unchanged
|
||||
* limited to 400 words
|
||||
* a first-level, answering directly to the main email
|
||||
* answering to the question.
|
||||
|
||||
Further replies to answers are permitted, as long as they conform to the
|
||||
community standards of politeness, they are limited to 100 words, and are not
|
||||
nested more than once. (max-depth=2)
|
||||
|
||||
After the end-date, mails on the thread will be ignored.
|
||||
|
||||
Violations of those rules will be escalated through the Community Committee.
|
||||
|
||||
After all the emails are in, the TC has 96 hours to give its final decision.
|
||||
Exceptionally, the TC can request an extra delay, that will be notified on the
|
||||
mailing list.
|
||||
|
||||
### Within TC
|
||||
|
||||
In the internal case, the TC has 96 hours to give its final decision.
|
||||
Exceptionally, the TC can request an extra delay.
|
||||
|
||||
|
||||
## Decisions
|
||||
|
||||
The decisions from the TC will be sent on the mailing list, with the _[TC]_ tag.
|
||||
|
||||
Internally, the TC should take decisions with a majority, or using
|
||||
ranked-choice voting.
|
||||
|
||||
The decision from the TC should be published with a summary of the reasons that
|
||||
lead to this decision.
|
||||
|
||||
The decisions from the TC are final, until the matters are reopened after
|
||||
no less than one year.
|
||||
|
||||
@@ -10,79 +10,41 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
@chapter Notes for external developers
|
||||
|
||||
This text is concerned with the development @emph{of} FFmpeg itself. Information
|
||||
on using the FFmpeg libraries in other programs can be found elsewhere, e.g. in:
|
||||
@itemize @bullet
|
||||
@item
|
||||
the installed header files
|
||||
@item
|
||||
@url{http://ffmpeg.org/doxygen/trunk/index.html, the Doxygen documentation}
|
||||
generated from the headers
|
||||
@item
|
||||
the examples under @file{doc/examples}
|
||||
@end itemize
|
||||
This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in @file{doc/examples} and in the source code to
|
||||
see how the public API is employed.
|
||||
|
||||
If you modify FFmpeg code for your own use case, you are highly encouraged to
|
||||
@emph{submit your changes back to us}, using this document as a guide. There are
|
||||
both pragmatic and ideological reasons to do so:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Maintaining external changes to keep up with upstream development is
|
||||
time-consuming and error-prone. With your code in the main tree, it will be
|
||||
maintained by FFmpeg developers.
|
||||
@item
|
||||
FFmpeg developers include leading experts in the field who can find bugs or
|
||||
design flaws in your code.
|
||||
@item
|
||||
By supporting the project you find useful you ensure it continues to be
|
||||
maintained and developed.
|
||||
@end itemize
|
||||
You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to @emph{publish any patch you make}. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
|
||||
For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the @file{LICENSE} file in the source tree and
|
||||
consult @url{https://ffmpeg.org/legal.html}.
|
||||
|
||||
@section Contributing code
|
||||
@chapter Contributing
|
||||
|
||||
All proposed code changes should be submitted for review to
|
||||
@url{mailto:ffmpeg-devel@@ffmpeg.org, the development mailing list}, as
|
||||
described in more detail in the @ref{Submitting patches} chapter. The code
|
||||
should comply with the @ref{Development Policy} and follow the @ref{Coding Rules}.
|
||||
There are 2 ways by which code gets into FFmpeg:
|
||||
@itemize @bullet
|
||||
@item Submitting patches to the ffmpeg-devel mailing list.
|
||||
See @ref{Submitting patches} for details.
|
||||
@item Directly committing changes to the main tree.
|
||||
@end itemize
|
||||
|
||||
Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed. And they should follow the @ref{Coding Rules}.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
|
||||
@anchor{Coding Rules}
|
||||
@chapter Coding Rules
|
||||
|
||||
@section C language features
|
||||
|
||||
FFmpeg is programmed in the ISO C99 language, extended with:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Atomic operations from C11 @file{stdatomic.h}. They are emulated on
|
||||
architectures/compilers that do not support them, so all FFmpeg-internal code
|
||||
may use atomics without any extra checks. However, @file{stdatomic.h} must not
|
||||
be included in public headers, so they stay C99-compatible.
|
||||
@end itemize
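As a minimal sketch of the atomics rule above (the names below are
hypothetical, not symbols from the FFmpeg tree), an internal translation unit
might look like:
@example
/* Internal code may use <stdatomic.h> directly; the header must never be
 * pulled into an installed public header. */
#include <stdatomic.h>

atomic_int ff_example_instances;

void ff_example_register(void)
@{
    atomic_fetch_add_explicit(&ff_example_instances, 1, memory_order_relaxed);
@}
@end example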
|
||||
|
||||
Compiler-specific extensions may be used with good reason, but must not be
|
||||
depended on, i.e. the code must still compile and work with compilers lacking
|
||||
the extension.
|
||||
|
||||
The following C99 features must not be used anywhere in the codebase:
|
||||
@itemize @bullet
|
||||
@item
|
||||
variable-length arrays;
|
||||
|
||||
@item
|
||||
complex numbers;
|
||||
|
||||
@item
|
||||
mixed statements and declarations.
|
||||
@end itemize
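For instance, under the last rule above a declaration may not follow a
statement inside a block; a hypothetical helper would be written with all
declarations first:
@example
/* declarations first, statements afterwards (hypothetical helper) */
static int sum_first_two(const int *values, int count)
@{
    int total;

    if (count < 2)
        return -1;

    total = values[0] + values[1];
    return total;
@}
@end example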
|
||||
|
||||
@section Code formatting conventions
|
||||
|
||||
The following guidelines apply to indentation in the source files:
|
||||
@@ -105,39 +67,8 @@ K&R coding style is used.
|
||||
@end itemize
|
||||
The presentation is inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
@subsection Vim configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
@subsection Emacs configuration
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
|
||||
@section Comments
|
||||
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
@@ -179,51 +110,89 @@ int myfunc(int my_parameter)
|
||||
...
|
||||
@end example
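For reference, a complete comment in that format could look like the following
sketch; @code{myfunc} is only the placeholder name used above:
@example
/**
 * Brief description of what myfunc does.
 *
 * @@param my_parameter  meaning of the input value
 * @@return 0 on success, a negative AVERROR code on failure
 */
int myfunc(int my_parameter);
@end example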
|
||||
|
||||
@section Naming conventions
|
||||
@section C language features
|
||||
|
||||
Names of functions, variables, and struct members must be lowercase, using
|
||||
underscores (_) to separate words. For example, @samp{avfilter_get_video_buffer}
|
||||
is an acceptable function name and @samp{AVFilterGetVideo} is not.
|
||||
FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
|
||||
Struct, union, enum, and typedeffed type names must use CamelCase. All structs
|
||||
and unions should be typedeffed to the same name as the struct/union tag, e.g.
|
||||
@code{typedef struct AVFoo @{ ... @} AVFoo;}. Enums are typically not
|
||||
typedeffed.
|
||||
|
||||
Enumeration constants and macros must be UPPERCASE, except for macros
|
||||
masquerading as functions, which should use the function naming convention.
|
||||
|
||||
All identifiers in the libraries should be namespaced as follows:
|
||||
@itemize @bullet
|
||||
@item
|
||||
No namespacing for identifiers with file and lower scope (e.g. local variables,
|
||||
static functions), and struct and union members,
|
||||
the @samp{inline} keyword;
|
||||
|
||||
@item
|
||||
The @code{ff_} prefix must be used for variables and functions visible outside
|
||||
of file scope, but only used internally within a single library, e.g.
|
||||
@samp{ff_w64_demuxer}. This prevents name collisions when FFmpeg is statically
|
||||
linked.
|
||||
@samp{//} comments;
|
||||
|
||||
@item
|
||||
designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
|
||||
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
|
||||
@item
|
||||
for loops with variable definition (@samp{for (int i = 0; i < 8; i++)});
|
||||
|
||||
@item
|
||||
Implementation defined behavior for signed integers is assumed to match the
|
||||
expected behavior for two's complement. Non representable values in integer
|
||||
casts are binary truncated. Shift right of signed values uses sign extension.
|
||||
@end itemize
|
||||
|
||||
These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
|
||||
All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
mixing statements and declarations;
|
||||
|
||||
@item
|
||||
@samp{long long} (use @samp{int64_t} instead);
|
||||
|
||||
@item
|
||||
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
|
||||
|
||||
@item
|
||||
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@section Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exceptions to this are type names, such
as structs and enums; they should always be in CamelCase.
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_report_missing_feature}.
|
||||
|
||||
@item
|
||||
All other internal identifiers, like private type or macro names, should be
|
||||
namespaced only to avoid possible internal conflicts. E.g. @code{H264_NAL_SPS}
|
||||
vs. @code{HEVC_NAL_SPS}.
|
||||
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
|
||||
@item
|
||||
Other public identifiers (struct, union, enum, macro, type names) must use their
|
||||
library's public prefix (@code{AV}, @code{Sws}, or @code{Swr}).
|
||||
Note that some symbols without these prefixes are also exported for
backward-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
@end itemize
|
||||
|
||||
Furthermore, name space reserved for the system should not be invaded.
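A compact illustration of these conventions, using purely hypothetical
declarations (none of these symbols exist in the FFmpeg tree):
@example
/* no prefix: static, only visible inside this file */
static int parse_header(const uint8_t *buf, int size);

/* ff_ prefix: visible outside the file, internal to a single library */
int ff_foo_parse_extradata(void *logctx, const uint8_t *buf, int size);

/* avpriv_ prefix: internal, but shared between the libraries */
int avpriv_foo_check_version(unsigned version);

/* public function: the library prefix (av_, avcodec_, avformat_, ...) */
int avcodec_foo_configure(AVCodecContext *avctx, int flags);

/* public type: CamelCase with the library's public prefix */
typedef struct AVFooParams @{
    int level;
@} AVFooParams;
@end example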
|
||||
@@ -246,7 +215,39 @@ Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don't make the code easier to understand.
|
||||
@end itemize
|
||||
|
||||
@anchor{Development Policy}
|
||||
@section Editor configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
|
||||
@chapter Development Policy
|
||||
|
||||
@section Patches/Committing
|
||||
@@ -490,22 +491,6 @@ patch is inline or attached per mail.
|
||||
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show
up, its MIME type was likely wrong.
|
||||
|
||||
@subheading Sending patches from email clients
|
||||
Using @code{git send-email} might not be desirable for everyone. The
following trick allows sending patches via email clients in a safe
way. It has been tested with Outlook and Thunderbird (with the X-Unsent
extension) and might work with other applications.
|
||||
|
||||
Create your patch like this:
|
||||
|
||||
@verbatim
|
||||
git format-patch -s -o "outputfolder" --add-header "X-Unsent: 1" --suffix .eml --to ffmpeg-devel@ffmpeg.org -1 1a2b3c4d
|
||||
@end verbatim
|
||||
|
||||
Now you'll just need to open the eml file with the email application
|
||||
and execute 'Send'.
|
||||
|
||||
@subheading Reviews
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
@@ -637,7 +622,7 @@ If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note: please do not attach samples >100k to mails but rather provide a
URL; you can upload to @url{https://streams.videolan.org/upload/}.
URL; you can upload to ftp://upload.ffmpeg.org.
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary about what the patch does change?
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
OUT_DIR="${1}"
|
||||
SRC_DIR="${2}"
|
||||
DOXYFILE="${3}"
|
||||
DOXYGEN="${4}"
|
||||
DOXYFILE="${2}"
|
||||
DOXYGEN="${3}"
|
||||
|
||||
shift 4
|
||||
|
||||
cd ${SRC_DIR}
|
||||
shift 3
|
||||
|
||||
if [ -e "VERSION" ]; then
|
||||
VERSION=`cat "VERSION"`
|
||||
|
||||
1277	doc/encoders.texi
File diff suppressed because it is too large
3	doc/examples/.gitignore (vendored)
@@ -1,4 +1,4 @@
|
||||
/avio_list_dir
|
||||
/avio_dir_cmd
|
||||
/avio_reading
|
||||
/decode_audio
|
||||
/decode_video
|
||||
@@ -22,4 +22,3 @@
|
||||
/transcoding
|
||||
/vaapi_encode
|
||||
/vaapi_transcode
|
||||
/qsv_transcode
|
||||
|
||||
@@ -1,27 +1,26 @@
|
||||
EXAMPLES-$(CONFIG_AVIO_HTTP_SERVE_FILES) += avio_http_serve_files
|
||||
EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
|
||||
EXAMPLES-$(CONFIG_AVIO_READ_CALLBACK_EXAMPLE) += avio_read_callback
|
||||
EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
|
||||
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_AUDIO_EXAMPLE) += decode_filter_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_VIDEO_EXAMPLE) += decode_filter_video
|
||||
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
|
||||
EXAMPLES-$(CONFIG_DEMUX_DECODE_EXAMPLE) += demux_decode
|
||||
EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
|
||||
EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
|
||||
EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
|
||||
EXAMPLES-$(CONFIG_HW_DECODE_EXAMPLE) += hw_decode
|
||||
EXAMPLES-$(CONFIG_MUX_EXAMPLE) += mux
|
||||
EXAMPLES-$(CONFIG_QSV_DECODE_EXAMPLE) += qsv_decode
|
||||
EXAMPLES-$(CONFIG_REMUX_EXAMPLE) += remux
|
||||
EXAMPLES-$(CONFIG_RESAMPLE_AUDIO_EXAMPLE) += resample_audio
|
||||
EXAMPLES-$(CONFIG_SCALE_VIDEO_EXAMPLE) += scale_video
|
||||
EXAMPLES-$(CONFIG_SHOW_METADATA_EXAMPLE) += show_metadata
|
||||
EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_EXAMPLE) += transcode
|
||||
EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
|
||||
EXAMPLES-$(CONFIG_VAAPI_ENCODE_EXAMPLE) += vaapi_encode
|
||||
EXAMPLES-$(CONFIG_VAAPI_TRANSCODE_EXAMPLE) += vaapi_transcode
|
||||
EXAMPLES-$(CONFIG_QSV_TRANSCODE_EXAMPLE) += qsv_transcode
|
||||
|
||||
EXAMPLES := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
EXAMPLES_G := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
|
||||
|
||||
@@ -11,40 +11,33 @@ CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
# missing the following targets, since they need special options in the FFmpeg build:
|
||||
# qsv_decode
|
||||
# qsv_transcode
|
||||
# vaapi_encode
|
||||
# vaapi_transcode
|
||||
|
||||
EXAMPLES=\
|
||||
avio_http_serve_files \
|
||||
avio_list_dir \
|
||||
avio_read_callback \
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
avio_reading \
|
||||
decode_audio \
|
||||
decode_filter_audio \
|
||||
decode_filter_video \
|
||||
decode_video \
|
||||
demux_decode \
|
||||
demuxing_decoding \
|
||||
encode_audio \
|
||||
encode_video \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
http_multiclient \
|
||||
hw_decode \
|
||||
mux \
|
||||
remux \
|
||||
resample_audio \
|
||||
scale_video \
|
||||
show_metadata \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcode
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
encode_audio: LDLIBS += -lm
|
||||
mux: LDLIBS += -lm
|
||||
resample_audio: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
|
||||
@@ -7,10 +7,8 @@ that you have them installed and working on your system.
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and run:
|
||||
make -f Makefile.example
|
||||
|
||||
It will link to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
@@ -22,4 +20,4 @@ examples using "make examplesclean"
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make -f Makefile.example
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
||||
|
||||
@@ -20,13 +20,6 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext list directory API usage example
|
||||
* @example avio_list_dir.c
|
||||
*
|
||||
* Show how to list directories through the libavformat AVIOContext API.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
@@ -109,15 +102,38 @@ static int list_op(const char *input_dir)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s input_dir\n"
|
||||
"API example program to show how to list files in directory "
|
||||
"accessed through AVIOContext.\n", program_name);
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
@@ -129,7 +145,32 @@ int main(int argc, char *argv[])
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
ret = list_op(argv[1]);
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avformat_network_deinit();
|
||||
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext read callback API usage example
|
||||
* @example avio_read_callback.c
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec audio decoding API usage example
|
||||
* @example decode_audio.c
|
||||
* @file
|
||||
* audio decoding with libavcodec API example
|
||||
*
|
||||
* Decode data from an MP2 input file and generate a raw audio file to
|
||||
* be played with ffplay.
|
||||
* @example decode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -40,35 +39,6 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
FILE *outfile)
|
||||
{
|
||||
@@ -98,7 +68,7 @@ static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
exit(1);
|
||||
}
|
||||
for (i = 0; i < frame->nb_samples; i++)
|
||||
for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++)
|
||||
for (ch = 0; ch < dec_ctx->channels; ch++)
|
||||
fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
}
|
||||
@@ -116,9 +86,6 @@ int main(int argc, char **argv)
|
||||
size_t data_size;
|
||||
AVPacket *pkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
enum AVSampleFormat sfmt;
|
||||
int n_channels = 0;
|
||||
const char *fmt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@@ -205,26 +172,6 @@ int main(int argc, char **argv)
|
||||
pkt->size = 0;
|
||||
decode(c, pkt, decoded_frame, outfile);
|
||||
|
||||
/* print output pcm information, because pcm has no metadata */
|
||||
sfmt = c->sample_fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
}
|
||||
|
||||
n_channels = c->ch_layout.nb_channels;
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, c->sample_rate,
|
||||
outfilename);
|
||||
end:
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec video decoding API usage example
|
||||
* @example decode_video.c *
|
||||
* @file
|
||||
* video decoding with libavcodec API example
|
||||
*
|
||||
* Read from an MPEG1 video file, decode frames, and generate PGM images as
|
||||
* output.
|
||||
* @example decode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -42,7 +41,7 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"wb");
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
@@ -70,12 +69,12 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
printf("saving frame %3"PRId64"\n", dec_ctx->frame_num);
|
||||
printf("saving frame %3d\n", dec_ctx->frame_number);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), "%s-%"PRId64, filename, dec_ctx->frame_num);
|
||||
snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
}
|
||||
@@ -93,12 +92,10 @@ int main(int argc, char **argv)
|
||||
uint8_t *data;
|
||||
size_t data_size;
|
||||
int ret;
|
||||
int eof;
|
||||
AVPacket *pkt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n"
|
||||
"And check your input file is encoded by mpeg1video please.\n", argv[0]);
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
exit(0);
|
||||
}
|
||||
filename = argv[1];
|
||||
@@ -152,16 +149,15 @@ int main(int argc, char **argv)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
do {
|
||||
while (!feof(f)) {
|
||||
/* read raw data from the input file */
|
||||
data_size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (ferror(f))
|
||||
if (!data_size)
|
||||
break;
|
||||
eof = !data_size;
|
||||
|
||||
/* use the parser to split the data into frames */
|
||||
data = inbuf;
|
||||
while (data_size > 0 || eof) {
|
||||
while (data_size > 0) {
|
||||
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
|
||||
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
|
||||
if (ret < 0) {
|
||||
@@ -173,10 +169,8 @@ int main(int argc, char **argv)
|
||||
|
||||
if (pkt->size)
|
||||
decode(c, frame, pkt, outfilename);
|
||||
else if (eof)
|
||||
break;
|
||||
}
|
||||
} while (!eof);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
decode(c, frame, NULL, outfilename);
|
||||
|
||||
@@ -21,18 +21,17 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat and libavcodec demuxing and decoding API usage example
|
||||
* @example demux_decode.c
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and decode audio
|
||||
* and video data. Write the output as raw audio and input files to be played by
|
||||
* ffplay.
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -52,97 +51,99 @@ static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket *pkt = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
/* Enable or disable frame reference counting. You are not supposed to support
|
||||
* both paths in your application but pick the one most appropriate to your
|
||||
* needs. Look for the use of refcount in this example to see what are the
|
||||
* differences of API usage between them. */
|
||||
static int refcount = 0;
|
||||
|
||||
printf("video_frame n:%d\n",
|
||||
video_frame_count++);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
*got_frame = 0;
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
if (*got_frame) {
|
||||
|
||||
av_frame_unref(frame);
|
||||
if (ret < 0)
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
/* If we use frame reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && refcount)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
@@ -150,7 +151,8 @@ static int open_codec_context(int *stream_idx,
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
@@ -184,8 +186,9 @@ static int open_codec_context(int *stream_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
|
||||
/* Init the decoders, with or without reference counting */
|
||||
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -227,17 +230,24 @@ static int get_format_from_sample_fmt(const char **fmt,
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
|
||||
refcount = 1;
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
@@ -303,12 +313,10 @@ int main (int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
@@ -316,23 +324,24 @@ int main (int argc, char **argv)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, pkt);
|
||||
else if (pkt->stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, pkt);
|
||||
av_packet_unref(pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_packet_unref(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
@@ -345,7 +354,7 @@ int main (int argc, char **argv)
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->ch_layout.nb_channels;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
@@ -374,7 +383,6 @@ end:
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_packet_free(&pkt);
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding audio API usage examples
|
||||
* @example encode_audio.c
|
||||
* @file
|
||||
* audio encoding with libavcodec API example.
|
||||
*
|
||||
* Generate a synthetic audio signal and encode it to an output MP2 file.
|
||||
* @example encode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
@@ -70,25 +70,26 @@ static int select_sample_rate(const AVCodec *codec)
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst)
|
||||
static int select_channel_layout(const AVCodec *codec)
|
||||
{
|
||||
const AVChannelLayout *p, *best_ch_layout;
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->ch_layouts)
|
||||
return av_channel_layout_copy(dst, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->ch_layouts;
|
||||
while (p->nb_channels) {
|
||||
int nb_channels = p->nb_channels;
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = p;
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return av_channel_layout_copy(dst, best_ch_layout);
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
|
||||
@@ -163,9 +164,8 @@ int main(int argc, char **argv)
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
ret = select_channel_layout(codec, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
@@ -195,9 +195,7 @@ int main(int argc, char **argv)
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
ret = av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* allocate the data buffers */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
@@ -220,7 +218,7 @@ int main(int argc, char **argv)
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->ch_layout.nb_channels; k++)
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding video API usage example
|
||||
* @example encode_video.c
|
||||
* @file
|
||||
* video encoding with libavcodec API example
|
||||
*
|
||||
* Generate synthetic video data and encode it to an output file.
|
||||
* @example encode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -145,7 +145,7 @@ int main(int argc, char **argv)
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
ret = av_frame_get_buffer(frame, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate the video frame data\n");
|
||||
exit(1);
|
||||
@@ -155,25 +155,12 @@ int main(int argc, char **argv)
|
||||
for (i = 0; i < 25; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
/* Make sure the frame data is writable.
|
||||
On the first round, the frame is fresh from av_frame_get_buffer()
|
||||
and therefore we know it is writable.
|
||||
But on the next rounds, encode() will have called
|
||||
avcodec_send_frame(), and the codec may have kept a reference to
|
||||
the frame in its internal structures, that makes the frame
|
||||
unwritable.
|
||||
av_frame_make_writable() checks that and allocates a new buffer
|
||||
for the frame only if necessary.
|
||||
*/
|
||||
/* make sure the frame data is writable */
|
||||
ret = av_frame_make_writable(frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* Prepare a dummy image.
|
||||
In real code, this is where you would have your own logic for
|
||||
filling the frame. FFmpeg does not care what you put in the
|
||||
frame.
|
||||
*/
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
@@ -198,14 +185,8 @@ int main(int argc, char **argv)
|
||||
/* flush the encoder */
|
||||
encode(c, NULL, pkt, f);
|
||||
|
||||
/* Add sequence end code to have a real MPEG file.
|
||||
It makes only sense because this tiny examples writes packets
|
||||
directly. This is called "elementary stream" and only works for some
|
||||
codecs. To create a valid file, you usually need to write packets
|
||||
into a proper file format or protocol; see mux.c.
|
||||
*/
|
||||
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
/* add sequence end code to have a real MPEG file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_free_context(&c);
|
||||
|
||||
@@ -21,16 +21,7 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec motion vectors extraction API usage example
|
||||
* @example extract_mvs.c
|
||||
*
|
||||
* Read from input file, decode video stream and print a motion vectors
|
||||
* representation to stdout.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -69,11 +60,10 @@ static int decode_packet(const AVPacket *pkt)
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64",%4d,%4d,%4d\n",
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags,
|
||||
mv->motion_x, mv->motion_y, mv->motion_scale);
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
@@ -88,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
|
||||
@@ -114,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
ret = avcodec_open2(dec_ctx, dec, &opts);
|
||||
av_dict_free(&opts);
|
||||
if (ret < 0) {
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -133,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
@@ -168,20 +156,13 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags,motion_x,motion_y,motion_scale\n");
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(pkt);
|
||||
av_packet_unref(pkt);
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(&pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -193,6 +174,5 @@ end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_packet_free(&pkt);
|
||||
return ret < 0;
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavfilter audio filtering API usage example
|
||||
* @example filter_audio.c
|
||||
* @file
|
||||
* libavfilter API usage example.
|
||||
*
|
||||
* This example will generate a sine wave audio, pass it through a simple filter
|
||||
* chain, and then compute the MD5 checksum of the output data.
|
||||
* @example filter_audio.c
|
||||
* This example will generate a sine wave audio,
|
||||
* pass it through a simple filter chain, and then compute the MD5 checksum of
|
||||
* the output data.
|
||||
*
|
||||
* The filter chain it uses is:
|
||||
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
|
||||
@@ -53,7 +55,7 @@
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
@@ -98,7 +100,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
@@ -152,8 +154,9 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
@@ -212,7 +215,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = frame->ch_layout.nb_channels;
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
@@ -245,7 +248,7 @@ static int get_input(AVFrame *frame, int frame_num)
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
|
||||
@@ -23,11 +23,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio decoding and filtering usage example
|
||||
* @example decode_filter_audio.c
|
||||
*
|
||||
* Demux, decode and filter audio input file, generate a raw audio
|
||||
* file to be played with ffplay.
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
@@ -36,7 +34,6 @@
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
@@ -51,8 +48,8 @@ static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -96,6 +93,7 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
@@ -107,13 +105,12 @@ static int init_filters(const char *filters_descr)
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
ret = snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=",
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt));
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, args + ret, sizeof(args) - ret);
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
@@ -136,7 +133,7 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
@@ -187,7 +184,7 @@ static int init_filters(const char *filters_descr)
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_channel_layout_describe(&outlink->ch_layout, args, sizeof(args));
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
@@ -202,7 +199,7 @@ end:
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * frame->ch_layout.nb_channels;
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
@@ -217,12 +214,12 @@ static void print_frame(const AVFrame *frame)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = av_packet_alloc();
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (!packet || !frame || !filt_frame) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
@@ -237,11 +234,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -277,13 +274,12 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_packet_free(&packet);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
@@ -24,7 +24,7 @@
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example decode_filter_video.c
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
@@ -53,8 +53,8 @@ static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -210,7 +210,7 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet;
|
||||
AVPacket packet;
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
@@ -221,9 +221,8 @@ int main(int argc, char **argv)
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
packet = av_packet_alloc();
|
||||
if (!frame || !filt_frame || !packet) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -234,11 +233,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -274,7 +273,7 @@ int main(int argc, char **argv)
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
@@ -282,7 +281,6 @@ end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
av_packet_free(&packet);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat multi-client network API usage example
|
||||
* @example avio_http_serve_files.c
|
||||
* @file
|
||||
* libavformat multi-client network API usage example.
|
||||
*
|
||||
* Serve a file without decoding or demuxing it over the HTTP protocol. Multiple
|
||||
* clients can connect and will receive the same file.
|
||||
* @example http_multiclient.c
|
||||
* This example will serve a file without decoding or demuxing it over http.
|
||||
* Multiple clients can connect and will receive the same file.
|
||||
*/
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
@@ -24,11 +24,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file HW-accelerated decoding API usage.example
|
||||
* @example hw_decode.c
|
||||
* @file
|
||||
* HW-Accelerated decoding example.
|
||||
*
|
||||
* Perform HW-accelerated decoding with output frames from HW video
|
||||
* surfaces.
|
||||
* @example hw_decode.c
|
||||
* This example shows how to do HW-accelerated decoding with output
|
||||
* frames from the HW video surfaces.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -151,8 +152,8 @@ int main(int argc, char *argv[])
|
||||
int video_stream, ret;
|
||||
AVStream *video = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVPacket *packet = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVPacket packet;
|
||||
enum AVHWDeviceType type;
|
||||
int i;
|
||||
|
||||
@@ -171,12 +172,6 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
}
|
||||
|
||||
packet = av_packet_alloc();
|
||||
if (!packet) {
|
||||
fprintf(stderr, "Failed to allocate AVPacket\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
|
||||
@@ -228,25 +223,27 @@ int main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
/* open the file to dump raw data */
|
||||
output_file = fopen(argv[3], "w+b");
|
||||
output_file = fopen(argv[3], "w+");
|
||||
|
||||
/* actual decoding and dump the raw data */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(input_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(input_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == packet->stream_index)
|
||||
ret = decode_write(decoder_ctx, packet);
|
||||
if (video_stream == packet.stream_index)
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_write(decoder_ctx, NULL);
|
||||
packet.data = NULL;
|
||||
packet.size = 0;
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
av_packet_unref(&packet);
|
||||
|
||||
if (output_file)
|
||||
fclose(output_file);
|
||||
av_packet_free(&packet);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avformat_close_input(&input_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
|
||||
@@ -21,10 +21,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat metadata extraction API usage example
|
||||
* @example show_metadata.c
|
||||
*
|
||||
* Show metadata from an input file.
|
||||
* @file
|
||||
* Shows how the metadata API can be used in application programs.
|
||||
* @example metadata.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -35,7 +34,7 @@
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
const AVDictionaryEntry *tag = NULL;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
@@ -53,7 +52,7 @@ int main (int argc, char **argv)
|
||||
return ret;
|
||||
}
|
||||
|
||||
while ((tag = av_dict_iterate(fmt_ctx->metadata, tag)))
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat muxing API usage example
|
||||
* @example mux.c
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Generate a synthetic audio and video signal and mux them to a media file in
|
||||
* any supported libavformat format. The default codecs are used.
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
@@ -38,7 +39,6 @@
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
@@ -61,8 +61,6 @@ typedef struct OutputStream {
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
AVPacket *tmp_pkt;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
@@ -80,50 +78,20 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame, AVPacket *pkt)
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(c, pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, c->time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
const AVCodec **codec,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
@@ -137,12 +105,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->tmp_pkt = av_packet_alloc();
|
||||
if (!ost->tmp_pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, NULL);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
@@ -169,7 +131,16 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
@@ -199,7 +170,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
@@ -214,22 +185,25 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
const AVChannelLayout *channel_layout,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
av_channel_layout_copy(&frame->ch_layout, channel_layout);
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
if (av_frame_get_buffer(frame, 0) < 0) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -238,8 +212,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
@@ -268,9 +241,9 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
@@ -281,25 +254,25 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout (ost->swr_ctx, "in_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_chlayout (ost->swr_ctx, "out_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
@@ -312,12 +285,12 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
|
||||
for (i = 0; i < ost->enc->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
@@ -336,20 +309,23 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
@@ -373,7 +349,22 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
@@ -393,7 +384,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 0);
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
@@ -402,8 +393,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->enc;
|
||||
@@ -474,7 +464,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
@@ -516,7 +506,37 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
@@ -524,7 +544,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
avcodec_free_context(&ost->enc);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
av_packet_free(&ost->tmp_pkt);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
@@ -535,10 +554,10 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const AVOutputFormat *fmt;
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
const AVCodec *audio_codec, *video_codec;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
@@ -625,6 +644,10 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
@@ -1,438 +0,0 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated video transcoding API usage example
|
||||
* @example qsv_transcode.c
|
||||
*
|
||||
* Perform QSV-accelerated transcoding and show to dynamically change
|
||||
* encoder's options.
|
||||
*
|
||||
* Usage: qsv_transcode input_stream codec output_stream initial option
|
||||
* { frame_number new_option }
|
||||
* e.g: - qsv_transcode input.mp4 h264_qsv output_h264.mp4 "g 60"
|
||||
* - qsv_transcode input.mp4 hevc_qsv output_hevc.mp4 "g 60 async_depth 1"
|
||||
* 100 "g 120"
|
||||
* (initialize codec with gop_size 60 and change it to 120 after 100
|
||||
* frames)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
static AVBufferRef *hw_device_ctx = NULL;
|
||||
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
|
||||
static int video_stream = -1;
|
||||
|
||||
typedef struct DynamicSetting {
|
||||
int frame_number;
|
||||
char* optstr;
|
||||
} DynamicSetting;
|
||||
static DynamicSetting *dynamic_setting;
|
||||
static int setting_number;
|
||||
static int current_setting_number;
|
||||
|
||||
static int str_to_dict(char* optstr, AVDictionary **opt)
|
||||
{
|
||||
char *key, *value;
|
||||
if (strlen(optstr) == 0)
|
||||
return 0;
|
||||
key = strtok(optstr, " ");
|
||||
if (key == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
do {
|
||||
key = strtok(NULL, " ");
|
||||
if (key == NULL)
|
||||
return 0;
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
} while(key != NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dynamic_set_parameter(AVCodecContext *avctx)
|
||||
{
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
static int frame_number = 0;
|
||||
frame_number++;
|
||||
if (current_setting_number < setting_number &&
|
||||
frame_number == dynamic_setting[current_setting_number].frame_number) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
ret = str_to_dict(dynamic_setting[current_setting_number].optstr, &opts);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "The dynamic parameter is wrong\n");
|
||||
goto fail;
|
||||
}
|
||||
/* Set common option. The dictionary will be freed and replaced
|
||||
* by a new one containing all options not found in common option list.
|
||||
* Then this new dictionary is used to set private option. */
|
||||
if ((ret = av_opt_set_dict(avctx, &opts)) < 0)
|
||||
goto fail;
|
||||
/* Set codec specific option */
|
||||
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
|
||||
goto fail;
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to set
|
||||
* framerate, which is compatible with ffmpeg commandline. The video is
|
||||
* assumed to be average frame rate, so set time_base to 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
avctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
}
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int open_input_file(char *filename)
|
||||
{
|
||||
int ret;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVStream *video = NULL;
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
|
||||
filename, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot find a video stream in the input file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
video_stream = ret;
|
||||
video = ifmt_ctx->streams[video_stream];
|
||||
|
||||
switch(video->codecpar->codec_id) {
|
||||
case AV_CODEC_ID_H264:
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_HEVC:
|
||||
decoder = avcodec_find_decoder_by_name("hevc_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP9:
|
||||
decoder = avcodec_find_decoder_by_name("vp9_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP8:
|
||||
decoder = avcodec_find_decoder_by_name("vp8_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_AV1:
|
||||
decoder = avcodec_find_decoder_by_name("av1_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MPEG2VIDEO:
|
||||
decoder = avcodec_find_decoder_by_name("mpeg2_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MJPEG:
|
||||
decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Codec is not supportted by qsv\n");
|
||||
return AVERROR(ENAVAIL);
|
||||
}
|
||||
|
||||
if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
|
||||
fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
decoder_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video, NULL);
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
|
||||
if (!decoder_ctx->hw_device_ctx) {
|
||||
fprintf(stderr, "A hardware device reference create failed.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
decoder_ctx->get_format = get_format;
|
||||
decoder_ctx->pkt_timebase = video->time_base;
|
||||
if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
|
||||
fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
av_packet_unref(enc_pkt);
|
||||
|
||||
if((ret = dynamic_set_parameter(encoder_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to set dynamic parameter. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
if (ret = avcodec_receive_packet(encoder_ctx, enc_pkt))
|
||||
break;
|
||||
enc_pkt->stream_index = 0;
|
||||
av_packet_rescale_ts(enc_pkt, encoder_ctx->time_base,
|
||||
ofmt_ctx->streams[0]->time_base);
|
||||
if ((ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt)) < 0) {
|
||||
fprintf(stderr, "Error during writing data to output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
if (ret == AVERROR_EOF)
|
||||
return 0;
|
||||
ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret = 0;
|
||||
|
||||
ret = avcodec_send_packet(decoder_ctx, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
if (!(frame = av_frame_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = avcodec_receive_frame(decoder_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
av_frame_free(&frame);
|
||||
return 0;
|
||||
} else if (ret < 0) {
|
||||
fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
AVStream *ost;
|
||||
/* we need to ref hw_frames_ctx of decoder to initialize encoder's codec.
|
||||
Only after we get a decoded frame, can we obtain its hw_frames_ctx */
|
||||
encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
/* set AVCodecContext Parameters for encoder, here we keep them stay
|
||||
* the same as decoder.
|
||||
*/
|
||||
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
|
||||
encoder_ctx->pix_fmt = AV_PIX_FMT_QSV;
|
||||
encoder_ctx->width = decoder_ctx->width;
|
||||
encoder_ctx->height = decoder_ctx->height;
|
||||
if ((ret = str_to_dict(optstr, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to set encoding parameter.\n");
|
||||
goto fail;
|
||||
}
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to
|
||||
* set framerate, which is compatible with ffmpeg commandline. The
|
||||
* video is assumed to be average frame rate, so set time_base to
|
||||
* 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
encoder_ctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
if ((ret = avcodec_open2(encoder_ctx, enc_codec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
av_dict_free(&opts);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
|
||||
if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
|
||||
fprintf(stderr, "Failed to allocate stream for output format.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ost->time_base = encoder_ctx->time_base;
|
||||
ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy the stream parameters. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* write the stream header */
|
||||
if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Error while writing stream header. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
frame->pts = av_rescale_q(frame->pts, decoder_ctx->pkt_timebase,
|
||||
encoder_ctx->time_base);
|
||||
if ((ret = encode_write(pkt, frame)) < 0)
|
||||
fprintf(stderr, "Error during encoding and writing.\n");
|
||||
|
||||
fail:
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVCodec *enc_codec;
|
||||
int ret = 0;
|
||||
AVPacket *dec_pkt;
|
||||
|
||||
if (argc < 5 || (argc - 5) % 2) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <encoder> <output file>"
|
||||
" <\"encoding option set 0\"> [<frame_number> <\"encoding options set 1\">]...\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
setting_number = (argc - 5) / 2;
|
||||
dynamic_setting = av_malloc(setting_number * sizeof(*dynamic_setting));
|
||||
current_setting_number = 0;
|
||||
for (int i = 0; i < setting_number; i++) {
|
||||
dynamic_setting[i].frame_number = atoi(argv[i*2 + 5]);
|
||||
dynamic_setting[i].optstr = argv[i*2 + 6];
|
||||
}
|
||||
|
||||
ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to create a QSV device. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
dec_pkt = av_packet_alloc();
|
||||
if (!dec_pkt) {
|
||||
fprintf(stderr, "Failed to allocate decode packet\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
|
||||
if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
|
||||
fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
|
||||
fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
|
||||
"%s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* read all packets and only transcoding video */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == dec_pkt->stream_index)
|
||||
ret = dec_enc(dec_pkt, enc_codec, argv[4]);
|
||||
|
||||
av_packet_unref(dec_pkt);
|
||||
}
|
||||
|
||||
/* flush decoder */
|
||||
av_packet_unref(dec_pkt);
|
||||
if ((ret = dec_enc(dec_pkt, enc_codec, argv[4])) < 0) {
|
||||
fprintf(stderr, "Failed to flush decoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
if ((ret = encode_write(dec_pkt, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to flush encoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* write the trailer for output stream */
|
||||
if ((ret = av_write_trailer(ofmt_ctx)) < 0)
|
||||
fprintf(stderr, "Failed to write trailer %s\n", av_err2str(ret));
|
||||
|
||||
end:
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
avformat_close_input(&ofmt_ctx);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avcodec_free_context(&encoder_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
av_packet_free(&dec_pkt);
|
||||
av_freep(&dynamic_setting);
|
||||
return ret;
|
||||
}
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated H.264 decoding API usage example
|
||||
* @example qsv_decode.c
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* Perform QSV-accelerated H.264 decoding with output frames in the
|
||||
* GPU video surfaces, write the decoded frames to an output file.
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the GPU video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
@@ -43,10 +44,38 @@
|
||||
#include "libavutil/hwcontext_qsv.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
AVBufferRef *hw_device_ref;
|
||||
} DecodeContext;
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVHWFramesContext *frames_ctx;
|
||||
AVQSVFramesContext *frames_hwctx;
|
||||
int ret;
|
||||
|
||||
/* create a pool of surfaces to be used by the decoder */
|
||||
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
|
||||
if (!avctx->hw_frames_ctx)
|
||||
return AV_PIX_FMT_NONE;
|
||||
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
|
||||
frames_hwctx = frames_ctx->hwctx;
|
||||
|
||||
frames_ctx->format = AV_PIX_FMT_QSV;
|
||||
frames_ctx->sw_format = avctx->sw_pix_fmt;
|
||||
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
|
||||
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
|
||||
frames_ctx->initial_pool_size = 32;
|
||||
|
||||
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
|
||||
|
||||
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
|
||||
if (ret < 0)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
@@ -58,7 +87,7 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *decoder_ctx,
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVFrame *sw_frame,
|
||||
AVPacket *pkt, AVIOContext *output_ctx)
|
||||
{
|
||||
@@ -112,15 +141,15 @@ int main(int argc, char **argv)
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
AVFrame *frame = NULL, *sw_frame = NULL;
|
||||
|
||||
DecodeContext decode = { NULL };
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i;
|
||||
|
||||
AVBufferRef *device_ref = NULL;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
@@ -148,7 +177,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* open the hardware device */
|
||||
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
"auto", NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open the hardware device\n");
|
||||
@@ -180,8 +209,7 @@ int main(int argc, char **argv)
|
||||
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
|
||||
}
|
||||
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
|
||||
decoder_ctx->opaque = &decode;
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
@@ -199,26 +227,27 @@ int main(int argc, char **argv)
|
||||
|
||||
frame = av_frame_alloc();
|
||||
sw_frame = av_frame_alloc();
|
||||
pkt = av_packet_alloc();
|
||||
if (!frame || !sw_frame || !pkt) {
|
||||
if (!frame || !sw_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, pkt);
|
||||
ret = av_read_frame(input_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt->stream_index == video_st->index)
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
|
||||
if (pkt.stream_index == video_st->index)
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
av_packet_unref(pkt);
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0) {
|
||||
@@ -231,11 +260,10 @@ finish:
|
||||
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&sw_frame);
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
av_buffer_unref(&device_ref);
|
||||
av_buffer_unref(&decode.hw_device_ref);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
@@ -21,11 +21,11 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat/libavcodec demuxing and muxing API usage example
|
||||
* @example remux.c
|
||||
* @file
|
||||
* libavformat/libavcodec demuxing and muxing API example.
|
||||
*
|
||||
* Remux streams from one container format to another. Data is copied from the
|
||||
* input to the output without transcoding.
|
||||
* Remux streams from one container format to another.
|
||||
* @example remuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/timestamp.h>
|
||||
@@ -45,9 +45,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, cons
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVOutputFormat *ofmt = NULL;
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
int stream_index = 0;
|
||||
@@ -65,12 +65,6 @@ int main(int argc, char **argv)
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
@@ -91,7 +85,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
stream_mapping_size = ifmt_ctx->nb_streams;
|
||||
stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping));
|
||||
stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
|
||||
if (!stream_mapping) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
@@ -146,39 +140,38 @@ int main(int argc, char **argv)
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, pkt);
|
||||
ret = av_read_frame(ifmt_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt->stream_index];
|
||||
if (pkt->stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt->stream_index] < 0) {
|
||||
av_packet_unref(pkt);
|
||||
in_stream = ifmt_ctx->streams[pkt.stream_index];
|
||||
if (pkt.stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt.stream_index] < 0) {
|
||||
av_packet_unref(&pkt);
|
||||
continue;
|
||||
}
|
||||
|
||||
pkt->stream_index = stream_mapping[pkt->stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt->stream_index];
|
||||
log_packet(ifmt_ctx, pkt, "in");
|
||||
pkt.stream_index = stream_mapping[pkt.stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt.stream_index];
|
||||
log_packet(ifmt_ctx, &pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
|
||||
pkt->pos = -1;
|
||||
log_packet(ofmt_ctx, pkt, "out");
|
||||
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
|
||||
pkt.pos = -1;
|
||||
log_packet(ofmt_ctx, &pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
@@ -21,12 +21,8 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio resampling API usage example
|
||||
* @example resample_audio.c
|
||||
*
|
||||
* Generate a synthetic audio signal, and Use libswresample API to perform audio
|
||||
* resampling. The output is written to a raw audio file to be played with
|
||||
* ffplay.
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
@@ -84,7 +80,7 @@ static void fill_samples(double *dst, int nb_samples, int nb_channels, int sampl
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_STEREO, dst_ch_layout = AV_CHANNEL_LAYOUT_SURROUND;
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
@@ -96,7 +92,6 @@ int main(int argc, char **argv)
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
char buf[64];
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
@@ -125,11 +120,11 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout(swr_ctx, "in_chlayout", &src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
@@ -141,7 +136,7 @@ int main(int argc, char **argv)
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = src_ch_layout.nb_channels;
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
@@ -156,7 +151,7 @@ int main(int argc, char **argv)
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = dst_ch_layout.nb_channels;
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
@@ -199,10 +194,9 @@ int main(int argc, char **argv)
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
av_channel_layout_describe(&dst_ch_layout, buf, sizeof(buf));
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %s -channels %d -ar %d %s\n",
|
||||
fmt, buf, dst_nb_channels, dst_rate, dst_filename);
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
@@ -21,10 +21,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libswscale API usage example
|
||||
* @example scale_video.c
|
||||
*
|
||||
* Generate a synthetic video signal and use libswscale to perform rescaling.
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2022 Andreas Unterweger
* Copyright (c) 2013-2018 Andreas Unterweger
*
* This file is part of FFmpeg.
*
@@ -19,11 +19,12 @@
*/

/**
* @file audio transcoding to MPEG/AAC API usage example
* @example transcode_aac.c
* @file
* Simple audio converter
*
* Convert an input audio file to AAC in an MP4 container. Formats other than
* MP4 are supported based on the output file extension.
* @example transcode_aac.c
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
* Formats other than MP4 are supported based on the output file extension.
* @author Andreas Unterweger (dustsigns@gmail.com)
*/

@@ -37,7 +38,6 @@
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"

@@ -60,8 +60,7 @@ static int open_input_file(const char *filename,
AVCodecContext **input_codec_context)
{
AVCodecContext *avctx;
const AVCodec *input_codec;
const AVStream *stream;
AVCodec *input_codec;
int error;

/* Open the input file to read from it. */
@@ -89,10 +88,8 @@ static int open_input_file(const char *filename,
return AVERROR_EXIT;
}

stream = (*input_format_context)->streams[0];

/* Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder(stream->codecpar->codec_id))) {
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context);
return AVERROR_EXIT;
@@ -107,7 +104,7 @@ static int open_input_file(const char *filename,
}

/* Initialize the stream parameters with demuxer information. */
error = avcodec_parameters_to_context(avctx, stream->codecpar);
error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
if (error < 0) {
avformat_close_input(input_format_context);
avcodec_free_context(&avctx);
@@ -123,9 +120,6 @@ static int open_input_file(const char *filename,
return error;
}

/* Set the packet timebase for the decoder. */
avctx->pkt_timebase = stream->time_base;

/* Save the decoder context for easier access later. */
*input_codec_context = avctx;

@@ -150,7 +144,7 @@ static int open_output_file(const char *filename,
AVCodecContext *avctx = NULL;
AVIOContext *output_io_context = NULL;
AVStream *stream = NULL;
const AVCodec *output_codec = NULL;
AVCodec *output_codec = NULL;
int error;

/* Open the output file to write to it. */
@@ -205,11 +199,15 @@ static int open_output_file(const char *filename,

/* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion. */
av_channel_layout_default(&avctx->ch_layout, OUTPUT_CHANNELS);
avctx->channels = OUTPUT_CHANNELS;
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
avctx->sample_rate = input_codec_context->sample_rate;
avctx->sample_fmt = output_codec->sample_fmts[0];
avctx->bit_rate = OUTPUT_BIT_RATE;

/* Allow the use of the experimental AAC encoder. */
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

/* Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate;
stream->time_base.num = 1;
@@ -247,16 +245,14 @@ cleanup:

/**
* Initialize one data packet for reading or writing.
* @param[out] packet Packet to be initialized
* @return Error code (0 if successful)
* @param packet Packet to be initialized
*/
static int init_packet(AVPacket **packet)
static void init_packet(AVPacket *packet)
{
if (!(*packet = av_packet_alloc())) {
fprintf(stderr, "Could not allocate packet\n");
return AVERROR(ENOMEM);
}
return 0;
av_init_packet(packet);
/* Set the packet data and size so that it is recognized as being empty. */
packet->data = NULL;
packet->size = 0;
}

/**
@@ -291,18 +287,21 @@ static int init_resampler(AVCodecContext *input_codec_context,
/*
* Create a resampler context for the conversion.
* Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/
error = swr_alloc_set_opts2(resample_context,
&output_codec_context->ch_layout,
*resample_context = swr_alloc_set_opts(NULL,
av_get_default_channel_layout(output_codec_context->channels),
output_codec_context->sample_fmt,
output_codec_context->sample_rate,
&input_codec_context->ch_layout,
av_get_default_channel_layout(input_codec_context->channels),
input_codec_context->sample_fmt,
input_codec_context->sample_rate,
0, NULL);
if (error < 0) {
if (!*resample_context) {
fprintf(stderr, "Could not allocate resample context\n");
return error;
return AVERROR(ENOMEM);
}
/*
* Perform a sanity check so that the number of converted samples is
@@ -330,7 +329,7 @@ static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{
/* Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
output_codec_context->ch_layout.nb_channels, 1))) {
output_codec_context->channels, 1))) {
fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM);
}
@@ -372,33 +371,28 @@ static int decode_audio_frame(AVFrame *frame,
int *data_present, int *finished)
{
/* Packet used for temporary storage. */
AVPacket *input_packet;
AVPacket input_packet;
int error;
init_packet(&input_packet);

error = init_packet(&input_packet);
if (error < 0)
return error;

*data_present = 0;
*finished = 0;
/* Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, input_packet)) < 0) {
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
/* If we are at the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {
fprintf(stderr, "Could not read frame (error '%s')\n",
av_err2str(error));
goto cleanup;
return error;
}
}

/* Send the audio frame stored in the temporary packet to the decoder.
* The input audio stream decoder is used to do this. */
if ((error = avcodec_send_packet(input_codec_context, input_packet)) < 0) {
if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
fprintf(stderr, "Could not send packet for decoding (error '%s')\n",
av_err2str(error));
goto cleanup;
return error;
}

/* Receive one frame from the decoder. */
@@ -424,7 +418,7 @@ static int decode_audio_frame(AVFrame *frame,
}

cleanup:
av_packet_free(&input_packet);
av_packet_unref(&input_packet);
return error;
}

@@ -450,7 +444,7 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
* Each pointer will later point to the audio samples of the corresponding
* channels (although it may be NULL for interleaved formats).
*/
if (!(*converted_input_samples = calloc(output_codec_context->ch_layout.nb_channels,
if (!(*converted_input_samples = calloc(output_codec_context->channels,
sizeof(**converted_input_samples)))) {
fprintf(stderr, "Could not allocate converted input sample pointers\n");
return AVERROR(ENOMEM);
@@ -459,7 +453,7 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
/* Allocate memory for the samples of all channels in one consecutive
* block for convenience. */
if ((error = av_samples_alloc(*converted_input_samples, NULL,
output_codec_context->ch_layout.nb_channels,
output_codec_context->channels,
frame_size,
output_codec_context->sample_fmt, 0)) < 0) {
fprintf(stderr,
@@ -559,7 +553,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFrame *input_frame = NULL;
/* Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL;
int data_present;
int data_present = 0;
int ret = AVERROR_EXIT;

/* Initialize temporary storage for one input frame. */
@@ -633,7 +627,7 @@ static int init_output_frame(AVFrame **frame,
* Default channel layouts based on the number of channels
* are assumed for simplicity. */
(*frame)->nb_samples = frame_size;
av_channel_layout_copy(&(*frame)->ch_layout, &output_codec_context->ch_layout);
(*frame)->channel_layout = output_codec_context->channel_layout;
(*frame)->format = output_codec_context->sample_fmt;
(*frame)->sample_rate = output_codec_context->sample_rate;

@@ -667,12 +661,9 @@ static int encode_audio_frame(AVFrame *frame,
int *data_present)
{
/* Packet used for temporary storage. */
AVPacket *output_packet;
AVPacket output_packet;
int error;

error = init_packet(&output_packet);
if (error < 0)
return error;
init_packet(&output_packet);

/* Set a timestamp based on the sample rate for the container. */
if (frame) {
@@ -680,20 +671,21 @@ static int encode_audio_frame(AVFrame *frame,
pts += frame->nb_samples;
}

*data_present = 0;
/* Send the audio frame stored in the temporary packet to the encoder.
* The output audio stream encoder is used to do this. */
error = avcodec_send_frame(output_codec_context, frame);
/* Check for errors, but proceed with fetching encoded samples if the
* encoder signals that it has nothing more to encode. */
if (error < 0 && error != AVERROR_EOF) {
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
goto cleanup;
/* The encoder signals that it has nothing more to encode. */
if (error == AVERROR_EOF) {
error = 0;
goto cleanup;
} else if (error < 0) {
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
return error;
}

/* Receive one encoded frame from the encoder. */
error = avcodec_receive_packet(output_codec_context, output_packet);
error = avcodec_receive_packet(output_codec_context, &output_packet);
/* If the encoder asks for more data to be able to provide an
* encoded frame, return indicating that no data is present. */
if (error == AVERROR(EAGAIN)) {
@@ -714,14 +706,14 @@ static int encode_audio_frame(AVFrame *frame,

/* Write one audio frame from the temporary packet to the output file. */
if (*data_present &&
(error = av_write_frame(output_format_context, output_packet)) < 0) {
(error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
av_err2str(error));
goto cleanup;
}

cleanup:
av_packet_free(&output_packet);
av_packet_unref(&output_packet);
return error;
}

@@ -860,6 +852,7 @@ int main(int argc, char **argv)
int data_written;
/* Flush the encoder as it may have delayed frames. */
do {
data_written = 0;
if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written))
goto cleanup;

@@ -23,18 +23,15 @@
*/

/**
* @file demuxing, decoding, filtering, encoding and muxing API usage example
* @example transcode.c
*
* Convert input to output file, applying some hard-coded filter-graph on both
* audio and video streams.
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

@@ -44,17 +41,12 @@ typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;

AVPacket *enc_pkt;
AVFrame *filtered_frame;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
AVCodecContext *dec_ctx;
AVCodecContext *enc_ctx;

AVFrame *dec_frame;
} StreamContext;
static StreamContext *stream_ctx;

@@ -74,13 +66,13 @@ static int open_input_file(const char *filename)
return ret;
}

stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx)
return AVERROR(ENOMEM);

for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i];
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx;
if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
@@ -110,10 +102,6 @@ static int open_input_file(const char *filename)
}
}
stream_ctx[i].dec_ctx = codec_ctx;

stream_ctx[i].dec_frame = av_frame_alloc();
if (!stream_ctx[i].dec_frame)
return AVERROR(ENOMEM);
}

av_dump_format(ifmt_ctx, 0, filename, 0);
@@ -125,7 +113,7 @@ static int open_output_file(const char *filename)
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
const AVCodec *encoder;
AVCodec *encoder;
int ret;
unsigned int i;

@@ -177,9 +165,8 @@ static int open_output_file(const char *filename)
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
if (ret < 0)
return ret;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
@@ -292,7 +279,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
char buf[64];
buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) {
@@ -301,14 +287,14 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}

if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf));
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout =
av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s",
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
buf);
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
@@ -331,9 +317,9 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}

av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf));
ret = av_opt_set(buffersink_ctx, "ch_layouts",
buf, AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
(uint8_t*)&enc_ctx->channel_layout,
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
@@ -412,63 +398,54 @@ static int init_filters(void)
stream_ctx[i].enc_ctx, filter_spec);
if (ret)
return ret;

filter_ctx[i].enc_pkt = av_packet_alloc();
if (!filter_ctx[i].enc_pkt)
return AVERROR(ENOMEM);

filter_ctx[i].filtered_frame = av_frame_alloc();
if (!filter_ctx[i].filtered_frame)
return AVERROR(ENOMEM);
}
return 0;
}

static int encode_write_frame(unsigned int stream_index, int flush)
{
StreamContext *stream = &stream_ctx[stream_index];
FilteringContext *filter = &filter_ctx[stream_index];
AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
AVPacket *enc_pkt = filter->enc_pkt;
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

if (!got_frame)
got_frame = &got_frame_local;

av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
av_packet_unref(enc_pkt);

ret = avcodec_send_frame(stream->enc_ctx, filt_frame);

enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;

while (ret >= 0) {
ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);

if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return 0;

/* prepare packet for muxing */
enc_pkt->stream_index = stream_index;
av_packet_rescale_ts(enc_pkt,
stream->enc_ctx->time_base,
ofmt_ctx->streams[stream_index]->time_base);

av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
}
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
stream_ctx[stream_index].enc_ctx->time_base,
ofmt_ctx->streams[stream_index]->time_base);

av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
FilteringContext *filter = &filter_ctx[stream_index];
int ret;
AVFrame *filt_frame;

av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
frame, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
@@ -477,9 +454,14 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)

/* pull filtered frames from the filtergraph */
while (1) {
filt_frame = av_frame_alloc();
if (!filt_frame) {
ret = AVERROR(ENOMEM);
break;
}
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
ret = av_buffersink_get_frame(filter->buffersink_ctx,
filter->filtered_frame);
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
filt_frame);
if (ret < 0) {
/* if no more frames for output - returns AVERROR(EAGAIN)
* if flushed and no more frames for output - returns AVERROR_EOF
@@ -487,12 +469,12 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
*/
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
av_frame_free(&filt_frame);
break;
}

filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(stream_index, 0);
av_frame_unref(filter->filtered_frame);
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(filt_frame, stream_index, NULL);
if (ret < 0)
break;
}
@@ -502,20 +484,34 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)

static int flush_encoder(unsigned int stream_index)
{
int ret;
int got_frame;

if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
AV_CODEC_CAP_DELAY))
return 0;

av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
return encode_write_frame(stream_index, 1);
while (1) {
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
ret = encode_write_frame(NULL, stream_index, &got_frame);
if (ret < 0)
break;
if (!got_frame)
return 0;
}
return ret;
}

int main(int argc, char **argv)
{
int ret;
AVPacket *packet = NULL;
AVPacket packet = { .data = NULL, .size = 0 };
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
@@ -528,54 +524,56 @@ int main(int argc, char **argv)
goto end;
if ((ret = init_filters()) < 0)
goto end;
if (!(packet = av_packet_alloc()))
goto end;

/* read all packets */
while (1) {
if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet->stream_index;
stream_index = packet.stream_index;
type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);

if (filter_ctx[stream_index].filter_graph) {
StreamContext *stream = &stream_ctx[stream_index];

av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");

av_packet_rescale_ts(packet,
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
stream->dec_ctx->time_base);
ret = avcodec_send_packet(stream->dec_ctx, packet);
stream_ctx[stream_index].dec_ctx->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}

while (ret >= 0) {
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
break;
else if (ret < 0)
goto end;

stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
ret = filter_encode_write_frame(stream->dec_frame, stream_index);
if (got_frame) {
frame->pts = frame->best_effort_timestamp;
ret = filter_encode_write_frame(frame, stream_index);
av_frame_free(&frame);
if (ret < 0)
goto end;
} else {
av_frame_free(&frame);
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(packet,
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);

ret = av_interleaved_write_frame(ofmt_ctx, packet);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(packet);
av_packet_unref(&packet);
}

/* flush filters and encoders */
@@ -599,18 +597,14 @@ int main(int argc, char **argv)

av_write_trailer(ofmt_ctx);
end:
av_packet_free(&packet);
av_packet_unref(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
avcodec_free_context(&stream_ctx[i].dec_ctx);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
avcodec_free_context(&stream_ctx[i].enc_ctx);
if (filter_ctx && filter_ctx[i].filter_graph) {
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
av_packet_free(&filter_ctx[i].enc_pkt);
av_frame_free(&filter_ctx[i].filtered_frame);
}

av_frame_free(&stream_ctx[i].dec_frame);
}
av_free(filter_ctx);
av_free(stream_ctx);
@@ -1,4 +1,6 @@
/*
* Video Acceleration API (video encoding) encode sample
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
@@ -19,12 +21,13 @@
*/

/**
* @file Intel VAAPI-accelerated encoding API usage example
* @example vaapi_encode.c
* @file
* Intel VAAPI-accelerated encoding example.
*
* @example vaapi_encode.c
* This example shows how to do VAAPI-accelerated encoding. now only support NV12
* raw file, usage like: vaapi_encode 1920 1080 input.yuv output.h264
*
* Perform VAAPI-accelerated encoding. Read input from an NV12 raw
* file, and write the H.264 encoded data to an output raw file.
* Usage: vaapi_encode 1920 1080 input.yuv output.h264
*/

#include <stdio.h>
@@ -71,27 +74,27 @@ static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
{
int ret = 0;
AVPacket *enc_pkt;
AVPacket enc_pkt;

if (!(enc_pkt = av_packet_alloc()))
return AVERROR(ENOMEM);
av_init_packet(&enc_pkt);
enc_pkt.data = NULL;
enc_pkt.size = 0;

if ((ret = avcodec_send_frame(avctx, frame)) < 0) {
fprintf(stderr, "Error code: %s\n", av_err2str(ret));
goto end;
}
while (1) {
ret = avcodec_receive_packet(avctx, enc_pkt);
ret = avcodec_receive_packet(avctx, &enc_pkt);
if (ret)
break;

enc_pkt->stream_index = 0;
ret = fwrite(enc_pkt->data, enc_pkt->size, 1, fout);
av_packet_unref(enc_pkt);
enc_pkt.stream_index = 0;
ret = fwrite(enc_pkt.data, enc_pkt.size, 1, fout);
av_packet_unref(&enc_pkt);
}

end:
av_packet_free(&enc_pkt);
ret = ((ret == AVERROR(EAGAIN)) ? 0 : -1);
return ret;
}
@@ -102,7 +105,7 @@ int main(int argc, char *argv[])
FILE *fin = NULL, *fout = NULL;
AVFrame *sw_frame = NULL, *hw_frame = NULL;
AVCodecContext *avctx = NULL;
const AVCodec *codec = NULL;
AVCodec *codec = NULL;
const char *enc_name = "h264_vaapi";

if (argc < 5) {
@@ -169,7 +172,7 @@ int main(int argc, char *argv[])
sw_frame->width = width;
sw_frame->height = height;
sw_frame->format = AV_PIX_FMT_NV12;
if ((err = av_frame_get_buffer(sw_frame, 0)) < 0)
if ((err = av_frame_get_buffer(sw_frame, 32)) < 0)
goto close;
if ((err = fread((uint8_t*)(sw_frame->data[0]), size, 1, fin)) <= 0)
break;

@@ -1,4 +1,6 @@
/*
* Video Acceleration API (video transcoding) transcode sample
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
@@ -19,10 +21,11 @@
*/

/**
* @file Intel VAAPI-accelerated transcoding API usage example
* @example vaapi_transcode.c
* @file
* Intel VAAPI-accelerated transcoding example.
*
* Perform VAAPI-accelerated transcoding.
* @example vaapi_transcode.c
* This example shows how to do VAAPI-accelerated transcoding.
* Usage: vaapi_transcode input_stream codec output_stream
* e.g: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
* - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
@@ -59,7 +62,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
static int open_input_file(const char *filename)
{
int ret;
const AVCodec *decoder = NULL;
AVCodec *decoder = NULL;
AVStream *video = NULL;

if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
@@ -106,25 +109,28 @@ static int open_input_file(const char *filename)
return ret;
}

static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
static int encode_write(AVFrame *frame)
{
int ret = 0;
AVPacket enc_pkt;

av_packet_unref(enc_pkt);
av_init_packet(&enc_pkt);
enc_pkt.data = NULL;
enc_pkt.size = 0;

if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
goto end;
}
while (1) {
ret = avcodec_receive_packet(encoder_ctx, enc_pkt);
ret = avcodec_receive_packet(encoder_ctx, &enc_pkt);
if (ret)
break;

enc_pkt->stream_index = 0;
av_packet_rescale_ts(enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
enc_pkt.stream_index = 0;
av_packet_rescale_ts(&enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
ofmt_ctx->streams[0]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
if (ret < 0) {
fprintf(stderr, "Error during writing data to output file. "
"Error code: %s\n", av_err2str(ret));
@@ -139,7 +145,7 @@ end:
return ret;
}

static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
{
AVFrame *frame;
int ret = 0;
@@ -210,7 +216,7 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
initialized = 1;
}

if ((ret = encode_write(pkt, frame)) < 0)
if ((ret = encode_write(frame)) < 0)
fprintf(stderr, "Error during encoding and writing.\n");

fail:
@@ -223,9 +229,9 @@ fail:

int main(int argc, char **argv)
{
const AVCodec *enc_codec;
int ret = 0;
AVPacket *dec_pkt;
AVPacket dec_pkt;
AVCodec *enc_codec;

if (argc != 4) {
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
@@ -240,12 +246,6 @@ int main(int argc, char **argv)
return -1;
}

dec_pkt = av_packet_alloc();
if (!dec_pkt) {
fprintf(stderr, "Failed to allocate decode packet\n");
goto end;
}

if ((ret = open_input_file(argv[1])) < 0)
goto end;

@@ -275,21 +275,23 @@ int main(int argc, char **argv)

/* read all packets and only transcoding video */
while (ret >= 0) {
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0)
break;

if (video_stream == dec_pkt->stream_index)
ret = dec_enc(dec_pkt, enc_codec);
if (video_stream == dec_pkt.stream_index)
ret = dec_enc(&dec_pkt, enc_codec);

av_packet_unref(dec_pkt);
av_packet_unref(&dec_pkt);
}

/* flush decoder */
av_packet_unref(dec_pkt);
ret = dec_enc(dec_pkt, enc_codec);
dec_pkt.data = NULL;
dec_pkt.size = 0;
ret = dec_enc(&dec_pkt, enc_codec);
av_packet_unref(&dec_pkt);

/* flush encoder */
ret = encode_write(dec_pkt, NULL);
ret = encode_write(NULL);

/* write the trailer for output stream */
av_write_trailer(ofmt_ctx);
@@ -300,6 +302,5 @@ end:
avcodec_free_context(&decoder_ctx);
avcodec_free_context(&encoder_ctx);
av_buffer_unref(&hw_device_ctx);
av_packet_free(&dec_pkt);
return ret;
}

@@ -79,21 +79,6 @@ Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
@end float

To get the complete list of tests, run the command:
@example
make fate-list
@end example

You can specify a subset of tests to run by specifying the
corresponding elements from the list with the @code{fate-} prefix,
e.g. as in:
@example
make fate-ffprobe_compact fate-ffprobe_xml
@end example

This makes it easier to run a few tests in case of failure without
running the complete test suite.

To use a custom wrapper to run the test, pass @option{--target-exec} to
@command{configure} or set the @var{TARGET_EXEC} Make variable.

@@ -164,8 +149,6 @@ the synchronisation of the samples directory.

@chapter Uploading new samples to the fate suite

If you need a sample uploaded send a mail to samples-request.

This is for developers who have an account on the fate suite server.
If you upload new samples, please make sure they are as small as possible,
space on each client, network bandwidth and so on benefit from smaller test cases.
@@ -174,8 +157,6 @@ practice generally do not replace, remove or overwrite files as it likely would
break older checkouts or releases.
Also all needed samples for a commit should be uploaded, ideally 24
hours, before the push.
If you need an account for frequently uploading samples or you wish to help
others by doing that send a mail to ffmpeg-devel.

@example
#First update your local samples copy:

694 doc/ffmpeg.texi
@@ -449,11 +449,6 @@ output file already exists.
Set number of times input stream shall be looped. Loop 0 means no loop,
loop -1 means infinite loop.

@item -recast_media (@emph{global})
Allow forcing a decoder of a different media type than the one
detected or designated by the demuxer. Useful for decoding media
data muxed as data streams.

@item -c[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
@itemx -codec[:@var{stream_specifier}] @var{codec} (@emph{input/output,per-stream})
Select an encoder (when used before an output file) or a decoder (when used
@@ -518,21 +513,6 @@ see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1)
Like the @code{-ss} option but relative to the "end of file". That is negative
values are earlier in the file, 0 is at EOF.

@item -isync @var{input_index} (@emph{input})
Assign an input as a sync source.

This will take the difference between the start times of the target and reference inputs and
offset the timestamps of the target file by that difference. The source timestamps of the two
inputs should derive from the same clock source for expected results. If @code{copyts} is set
then @code{start_at_zero} must also be set. If either of the inputs has no starting timestamp
then no sync adjustment is made.

Acceptable values are those that refer to a valid ffmpeg input index. If the sync reference is
the target index itself or @var{-1}, then no adjustment is made to target timestamps. A sync
reference may not itself be synced to any other input.

Default value is @var{-1}.

@item -itsoffset @var{offset} (@emph{input})
Set the input time offset.

@@ -575,22 +555,27 @@ ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
@item -disposition[:stream_specifier] @var{value} (@emph{output,per-stream})
Sets the disposition for a stream.

By default, the disposition is copied from the input stream, unless the output
stream this option applies to is fed by a complex filtergraph - in that case the
disposition is unset by default.
This option overrides the disposition copied from the input stream. It is also
possible to delete the disposition by setting it to 0.

@var{value} is a sequence of items separated by '+' or '-'. The first item may
also be prefixed with '+' or '-', in which case this option modifies the default
value. Otherwise (the first item is not prefixed) this options overrides the
default value. A '+' prefix adds the given disposition, '-' removes it. It is
also possible to clear the disposition by setting it to 0.

If no @code{-disposition} options were specified for an output file, ffmpeg will
automatically set the 'default' disposition on the first stream of each type,
when there are multiple streams of this type in the output file and no stream of
that type is already marked as default.

The @code{-dispositions} option lists the known dispositions.
The following dispositions are recognized:
@table @option
@item default
@item dub
@item original
@item comment
@item lyrics
@item karaoke
@item forced
@item hearing_impaired
@item visual_impaired
@item clean_effects
@item attached_pic
@item captions
@item descriptions
@item dependent
@item metadata
@end table

For example, to make the second audio stream the default stream:
@example
@@ -632,102 +617,6 @@ they do not conflict with the standard, as in:
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
@end example

The parameters set for each target are as follows.

@strong{VCD}
@example
@var{pal}:
-f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324
-s 352x288 -r 25
-codec:v mpeg1video -g 15 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680
-ar 44100 -ac 2
-codec:a mp2 -b:a 224k

@var{ntsc}:
-f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324
-s 352x240 -r 30000/1001
-codec:v mpeg1video -g 18 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680
-ar 44100 -ac 2
-codec:a mp2 -b:a 224k

@var{film}:
-f vcd -muxrate 1411200 -muxpreload 0.44 -packetsize 2324
-s 352x240 -r 24000/1001
-codec:v mpeg1video -g 18 -b:v 1150k -maxrate:v 1150k -minrate:v 1150k -bufsize:v 327680
-ar 44100 -ac 2
-codec:a mp2 -b:a 224k
@end example

@strong{SVCD}
@example
@var{pal}:
-f svcd -packetsize 2324
-s 480x576 -pix_fmt yuv420p -r 25
-codec:v mpeg2video -g 15 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1
-ar 44100
-codec:a mp2 -b:a 224k

@var{ntsc}:
-f svcd -packetsize 2324
-s 480x480 -pix_fmt yuv420p -r 30000/1001
-codec:v mpeg2video -g 18 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1
-ar 44100
-codec:a mp2 -b:a 224k

@var{film}:
-f svcd -packetsize 2324
-s 480x480 -pix_fmt yuv420p -r 24000/1001
-codec:v mpeg2video -g 18 -b:v 2040k -maxrate:v 2516k -minrate:v 0 -bufsize:v 1835008 -scan_offset 1
-ar 44100
-codec:a mp2 -b:a 224k
@end example

@strong{DVD}
@example
@var{pal}:
-f dvd -muxrate 10080k -packetsize 2048
-s 720x576 -pix_fmt yuv420p -r 25
-codec:v mpeg2video -g 15 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008
-ar 48000
-codec:a ac3 -b:a 448k

@var{ntsc}:
-f dvd -muxrate 10080k -packetsize 2048
-s 720x480 -pix_fmt yuv420p -r 30000/1001
-codec:v mpeg2video -g 18 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008
-ar 48000
-codec:a ac3 -b:a 448k

@var{film}:
-f dvd -muxrate 10080k -packetsize 2048
-s 720x480 -pix_fmt yuv420p -r 24000/1001
-codec:v mpeg2video -g 18 -b:v 6000k -maxrate:v 9000k -minrate:v 0 -bufsize:v 1835008
-ar 48000
-codec:a ac3 -b:a 448k
@end example

@strong{DV}
@example
@var{pal}:
-f dv
-s 720x576 -pix_fmt yuv420p -r 25
-ar 48000 -ac 2

@var{ntsc}:
-f dv
-s 720x480 -pix_fmt yuv411p -r 30000/1001
-ar 48000 -ac 2

@var{film}:
-f dv
-s 720x480 -pix_fmt yuv411p -r 24000/1001
-ar 48000 -ac 2
@end example
The @code{dv50} target is identical to the @code{dv} target except that the pixel format set is @code{yuv422p} for all three standards.

Any user-set value for a parameter above will override the target preset value. In that case, the output may
not comply with the target standard.

@item -dn (@emph{input/output})
As an input option, blocks all data streams of a file from being filtered or
being automatically selected or mapped for any output. See @code{-discard}
@@ -774,16 +663,6 @@ This option is similar to @option{-filter}, the only difference is that its
argument is the name of the file from which a filtergraph description is to be
read.

@item -reinit_filter[:@var{stream_specifier}] @var{integer} (@emph{input,per-stream})
This boolean option determines if the filtergraph(s) to which this stream is fed gets
reinitialized when input frame parameters change mid-stream. This option is enabled by
default as most video and all audio filters cannot handle deviation in input frame properties.
Upon reinitialization, existing filter state is lost, like e.g. the frame count @code{n}
reference available in some filters. Any frames buffered at time of reinitialization are lost.
The properties where a change triggers reinitialization are,
for video, frame resolution or pixel format;
for audio, sample format, sample rate, channel count or channel layout.

@item -filter_threads @var{nb_threads} (@emph{global})
Defines how many threads are used to process a filter pipeline. Each pipeline
will produce a thread pool with this many threads available for parallel processing.
@@ -796,19 +675,14 @@ Specify the preset for matching stream(s).
Print encoding progress/statistics. It is on by default, to explicitly
disable it you need to specify @code{-nostats}.

@item -stats_period @var{time} (@emph{global})
Set period at which encoding progress/statistics are updated. Default is 0.5 seconds.

@item -progress @var{url} (@emph{global})
Send program-friendly progress information to @var{url}.

Progress information is written periodically and at the end of
Progress information is written approximately every second and at the end of
the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key}
consists of only alphanumeric characters. The last key of a sequence of
progress information is always "progress".

The update period is set using @code{-stats_period}.

@anchor{stdin option}
@item -stdin
Enable interaction on standard input. On by default unless standard input is
@@ -860,6 +734,10 @@ ffmpeg -dump_attachment:t "" -i INPUT
Technical note -- attachments are implemented as codec extradata, so this
option can actually be used to extract extradata from any stream, not just
attachments.

@item -noautorotate
Disable automatically rotating video based on file metadata.

@end table

@section Video Options
@@ -877,27 +755,9 @@ This is not the same as the @option{-framerate} option used for some input forma
like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
If in doubt use @option{-framerate} instead of the input option @option{-r}.

As an output option:
@table @option
@item video encoding
Duplicate or drop frames right before encoding them to achieve constant output
As an output option, duplicate or drop input frames to achieve constant output
frame rate @var{fps}.

@item video streamcopy
Indicate to the muxer that @var{fps} is the stream frame rate. No data is
dropped or duplicated in this case. This may produce invalid files if @var{fps}
does not match the actual stream frame rate as determined by packet timestamps.
See also the @code{setts} bitstream filter.

@end table

@item -fpsmax[:@var{stream_specifier}] @var{fps} (@emph{output,per-stream})
Set maximum frame rate (Hz value, fraction or abbreviation).

Clamps output frame rate when output framerate is auto-set and is higher than this value.
Useful in batch processing or when input framerate is wrongly detected as very high.
It cannot be set together with @code{-r}. It is ignored during streamcopy.

@item -s[:@var{stream_specifier}] @var{size} (@emph{input/output,per-stream})
Set frame size.

@@ -923,32 +783,6 @@ If used together with @option{-vcodec copy}, it will affect the aspect ratio
stored at container level, but not the aspect ratio stored in encoded
frames, if it exists.

@item -display_rotation[:@var{stream_specifier}] @var{rotation} (@emph{input,per-stream})
Set video rotation metadata.

@var{rotation} is a decimal number specifying the amount in degree by
which the video should be rotated counter-clockwise before being
displayed.

This option overrides the rotation/display transform metadata stored in
the file, if any. When the video is being transcoded (rather than
copied) and @code{-autorotate} is enabled, the video will be rotated at
the filtering stage. Otherwise, the metadata will be written into the
output file if the muxer supports it.

If the @code{-display_hflip} and/or @code{-display_vflip} options are
given, they are applied after the rotation specified by this option.

@item -display_hflip[:@var{stream_specifier}] (@emph{input,per-stream})
Set whether on display the image should be horizontally flipped.

See the @code{-display_rotation} option for more details.

@item -display_vflip[:@var{stream_specifier}] (@emph{input,per-stream})
Set whether on display the image should be vertically flipped.

See the @code{-display_rotation} option for more details.

@item -vn (@emph{input/output})
As an input option, blocks all video streams of a file from being filtered or
being automatically selected or mapped for any output. See @code{-discard}
@@ -985,18 +819,6 @@ Create the filtergraph specified by @var{filtergraph} and use it to
filter the stream.

This is an alias for @code{-filter:v}, see the @ref{filter_option,,-filter option}.

@item -autorotate
Automatically rotate the video according to file metadata. Enabled by
default, use @option{-noautorotate} to disable it.

@item -autoscale
Automatically scale the video according to the resolution of first frame.
Enabled by default, use @option{-noautoscale} to disable it. When autoscale is
disabled, all output frames of filter graph might not be in the same resolution
and may be inadequate for some encoder/muxer. Therefore, it is not recommended
to disable it unless you really know what you are doing.
Disable autoscale at your own risk.
@end table

@section Advanced Video options
@@ -1022,9 +844,14 @@ list separated with slashes. Two first values are the beginning and
end frame numbers, last one is quantizer to use if positive, or quality
factor if negative.

@item -ilme
Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
Use this option if your input file is interlaced and you want
to keep the interlaced format for minimum losses.
The alternative is to deinterlace the input stream with
@option{-deinterlace}, but deinterlacing introduces losses.
@item -psnr
Calculate PSNR of compressed frames. This option is deprecated, pass the
PSNR flag to the encoder instead, using @code{-flags +psnr}.
Calculate PSNR of compressed frames.
@item -vstats
Dump video coding statistics to @file{vstats_HHMMSS.log}.
@item -vstats_file @var{file}
@@ -1041,6 +868,8 @@ version > 1:
@code{out= %2d st= %2d frame= %5d q= %2.1f PSNR= %6.2f f_size= %6d s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s}
@item -top[:@var{stream_specifier}] @var{n} (@emph{output,per-stream})
top=1/bottom=0/auto=-1 field first
@item -dc @var{precision}
Intra_dc_precision.
@item -vtag @var{fourcc/tag} (@emph{output})
Force video tag/fourcc. This is an alias for @code{-tag:v}.
@item -qphist (@emph{global})
@@ -1050,20 +879,12 @@ Deprecated see -bsf

@item -force_key_frames[:@var{stream_specifier}] @var{time}[,@var{time}...] (@emph{output,per-stream})
@item -force_key_frames[:@var{stream_specifier}] expr:@var{expr} (@emph{output,per-stream})
@item -force_key_frames[:@var{stream_specifier}] source (@emph{output,per-stream})
@item -force_key_frames[:@var{stream_specifier}] source_no_drop (@emph{output,per-stream})
Force key frames at the specified timestamps, more precisely at the first
frames after each specified time.

@var{force_key_frames} can take arguments of the following form:

@table @option

@item @var{time}[,@var{time}...]
If the argument consists of timestamps, ffmpeg will round the specified times to the nearest
output timestamp as per the encoder time base and force a keyframe at the first frame having
timestamp equal or greater than the computed timestamp. Note that if the encoder time base is too
coarse, then the keyframes may be forced on frames with timestamps lower than the specified time.
The default encoder time base is the inverse of the output framerate but may be set otherwise
via @code{-enc_time_base}.
If the argument is prefixed with @code{expr:}, the string @var{expr}
is interpreted like an expression and is evaluated for each frame. A
key frame is forced in case the evaluation is non-zero.

If one of the times is "@code{chapters}[@var{delta}]", it is expanded into
the time of the beginning of all chapters in the file, shifted by
@@ -1077,11 +898,6 @@ before the beginning of every chapter:
-force_key_frames 0:05:00,chapters-0.1
@end example

@item expr:@var{expr}
If the argument is prefixed with @code{expr:}, the string @var{expr}
is interpreted like an expression and is evaluated for each frame. A
key frame is forced in case the evaluation is non-zero.

The expression in @var{expr} can contain the following constants:
@table @option
@item n
@@ -1109,18 +925,6 @@ starting from second 13:
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
@end example

@item source
If the argument is @code{source}, ffmpeg will force a key frame if
the current frame being encoded is marked as a key frame in its source.

@item source_no_drop
If the argument is @code{source_no_drop}, ffmpeg will force a key frame if
the current frame being encoded is marked as a key frame in its source.
In cases where this particular source frame has to be dropped,
enforce the next available frame to become a key frame instead.

@end table

Note that forcing too many keyframes is very harmful for the lookahead
algorithms of certain encoders: using fixed-GOP options or similar
would be more efficient.
@@ -1141,27 +945,9 @@ device type:
|
||||
@item cuda
|
||||
@var{device} is the number of the CUDA device.
|
||||
|
||||
The following options are recognized:
|
||||
@table @option
|
||||
@item primary_ctx
|
||||
If set to 1, uses the primary device context instead of creating a new one.
|
||||
@end table
|
||||
|
||||
Examples:
|
||||
@table @emph
|
||||
@item -init_hw_device cuda:1
|
||||
Choose the second device on the system.
|
||||
|
||||
@item -init_hw_device cuda:0,primary_ctx=1
|
||||
Choose the first device and use the primary device context.
|
||||
@end table
|
||||
|
||||
@item dxva2
|
||||
@var{device} is the number of the Direct3D 9 display adapter.
|
||||
|
||||
@item d3d11va
|
||||
@var{device} is the number of the Direct3D 11 display adapter.
|
||||
|
||||
@item vaapi
|
||||
@var{device} is either an X11 display name or a DRM render node.
|
||||
If not specified, it will attempt to open the default X11 display (@emph{$DISPLAY})
|
||||
@@ -1185,21 +971,9 @@ If not specified, it will attempt to open the default X11 display (@emph{$DISPLA
|
||||
@end table
|
||||
If not specified, @samp{auto_any} is used.
|
||||
(Note that it may be easier to achieve the desired result for QSV by creating the
|
||||
platform-appropriate subdevice (@samp{dxva2} or @samp{d3d11va} or @samp{vaapi}) and then deriving a
|
||||
platform-appropriate subdevice (@samp{dxva2} or @samp{vaapi}) and then deriving a
|
||||
QSV device from that.)
|
||||
|
||||
Alternatively, @samp{child_device_type} helps to choose platform-appropriate subdevice type.
|
||||
On Windows @samp{d3d11va} is used as default subdevice type.
|
||||
|
||||
Examples:
|
||||
@table @emph
|
||||
@item -init_hw_device qsv:hw,child_device_type=d3d11va
|
||||
Choose the GPU subdevice with type @samp{d3d11va} and create QSV device with @samp{MFX_IMPL_HARDWARE}.
|
||||
|
||||
@item -init_hw_device qsv:hw,child_device_type=dxva2
|
||||
Choose the GPU subdevice with type @samp{dxva2} and create QSV device with @samp{MFX_IMPL_HARDWARE}.
|
||||
@end table
|
||||
|
||||
@item opencl
|
||||
@var{device} selects the platform and device as @emph{platform_index.device_index}.
|
||||
|
||||
@@ -1237,35 +1011,6 @@ Choose the GPU device on the second platform supporting the @emph{cl_khr_fp16}
|
||||
extension.
|
||||
@end table
|
||||
|
||||
@item vulkan
|
||||
If @var{device} is an integer, it selects the device by its index in a
|
||||
system-dependent list of devices. If @var{device} is any other string, it
|
||||
selects the first device with a name containing that string as a substring.
|
||||
|
||||
The following options are recognized:
|
||||
@table @option
|
||||
@item debug
|
||||
If set to 1, enables the validation layer, if installed.
|
||||
@item linear_images
|
||||
If set to 1, images allocated by the hwcontext will be linear and locally mappable.
|
||||
@item instance_extensions
|
||||
A plus separated list of additional instance extensions to enable.
|
||||
@item device_extensions
|
||||
A plus separated list of additional device extensions to enable.
|
||||
@end table
|
||||
|
||||
Examples:
|
||||
@table @emph
|
||||
@item -init_hw_device vulkan:1
|
||||
Choose the second device on the system.
|
||||
|
||||
@item -init_hw_device vulkan:RADV
|
||||
Choose the first device with a name containing the string @emph{RADV}.
|
||||
|
||||
@item -init_hw_device vulkan:0,instance_extensions=VK_KHR_wayland_surface+VK_KHR_xcb_surface
|
||||
Choose the first device and enable the Wayland and XCB instance extensions.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@item -init_hw_device @var{type}[=@var{name}]@@@var{source}
|
||||
@@ -1302,9 +1047,6 @@ Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
|
||||
@item dxva2
|
||||
Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
|
||||
|
||||
@item d3d11va
|
||||
Use D3D11VA (DirectX Video Acceleration) hardware acceleration.
|
||||
|
||||
@item vaapi
|
||||
Use VAAPI (Video Acceleration API) hardware acceleration.
|
||||
|
||||
@@ -1338,25 +1080,7 @@ by name, or it can create a new device as if
|
||||
were called immediately before.
|
||||
|
||||
@item -hwaccels
|
||||
List all hardware acceleration components enabled in this build of ffmpeg.
|
||||
Actual runtime availability depends on the hardware and its suitable driver
|
||||
being installed.
|
||||
|
||||
@item -fix_sub_duration_heartbeat[:@var{stream_specifier}]
|
||||
Set a specific output video stream as the heartbeat stream according to which
|
||||
to split and push through currently in-progress subtitle upon receipt of a
|
||||
random access packet.
|
||||
|
||||
This lowers the latency of subtitles for which the end packet or the following
|
||||
subtitle has not yet been received. As a drawback, this will most likely lead
|
||||
to duplication of subtitle events in order to cover the full duration, so this
option should not be used when the latency of passing subtitle events on to the
output is not a concern.
|
||||
|
||||
Requires @option{-fix_sub_duration} to be set for the relevant input subtitle
|
||||
stream for this to have any effect; the input subtitle stream must also be
directly mapped to the same output in which the heartbeat stream resides.
|
||||
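A minimal sketch of how these options could be combined (filenames, codecs and the stream specifier are illustrative, not taken from this documentation):
@example
ffmpeg -fix_sub_duration -i input.ts -map 0 -c:v libx264 -c:s copy \
    -fix_sub_duration_heartbeat:v:0 output.ts
@end example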
List all hardware acceleration methods supported in this build of ffmpeg.
|
||||
|
||||
@end table
|
||||
|
||||
@@ -1456,18 +1180,18 @@ Set the size of the canvas used to render subtitles.
|
||||
@section Advanced options
|
||||
|
||||
@table @option
|
||||
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][?] | @var{[linklabel]} (@emph{output})
|
||||
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][?][,@var{sync_file_id}[:@var{stream_specifier}]] | @var{[linklabel]} (@emph{output})
|
||||
|
||||
Create one or more streams in the output file. This option has two forms for
|
||||
specifying the data source(s): the first selects one or more streams from some
|
||||
input file (specified with @code{-i}), the second takes an output from some
|
||||
complex filtergraph (specified with @code{-filter_complex} or
|
||||
@code{-filter_complex_script}).
|
||||
Designate one or more input streams as a source for the output file. Each input
|
||||
stream is identified by the input file index @var{input_file_id} and
|
||||
the input stream index @var{input_stream_id} within the input
|
||||
file. Both indices start at 0. If specified,
|
||||
@var{sync_file_id}:@var{stream_specifier} sets which input stream
|
||||
is used as a presentation sync reference.
|
||||
|
||||
In the first form, an output stream is created for every stream from the input
|
||||
file with the index @var{input_file_id}. If @var{stream_specifier} is given,
|
||||
only those streams that match the specifier are used (see the
|
||||
@ref{Stream specifiers} section for the @var{stream_specifier} syntax).
|
||||
The first @code{-map} option on the command line specifies the
|
||||
source for output stream 0, the second @code{-map} option specifies
|
||||
the source for output stream 1, etc.
|
||||
|
||||
A @code{-} character before the stream identifier creates a "negative" mapping.
|
||||
It disables matching streams from already created mappings.
|
||||
@@ -1481,56 +1205,39 @@ An alternative @var{[linklabel]} form will map outputs from complex filter
|
||||
graphs (see the @option{-filter_complex} option) to the output file.
|
||||
@var{linklabel} must correspond to a defined output link label in the graph.
|
||||
|
||||
This option may be specified multiple times, each adding more streams to the
|
||||
output file. Any given input stream may also be mapped any number of times as a
|
||||
source for different output streams, e.g. in order to use different encoding
|
||||
options and/or filters. The streams are created in the output in the same order
|
||||
in which the @code{-map} options are given on the commandline.
|
||||
|
||||
Using this option disables the default mappings for this output file.
|
||||
|
||||
Examples:
|
||||
|
||||
@table @emph
|
||||
|
||||
@item map everything
|
||||
To map ALL streams from the first input file to output
|
||||
For example, to map ALL streams from the first input file to output
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0 output
|
||||
@end example
|
||||
|
||||
@item select specific stream
|
||||
If you have two audio streams in the first input file, these streams are
|
||||
identified by @var{0:0} and @var{0:1}. You can use @code{-map} to select which
|
||||
streams to place in an output file. For example:
|
||||
For example, if you have two audio streams in the first input file,
|
||||
these streams are identified by "0:0" and "0:1". You can use
|
||||
@code{-map} to select which streams to place in an output file. For
|
||||
example:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0:1 out.wav
|
||||
@end example
|
||||
will map the second input stream in @file{INPUT} to the (single) output stream
|
||||
in @file{out.wav}.
|
||||
will map the input stream in @file{INPUT} identified by "0:1" to
|
||||
the (single) output stream in @file{out.wav}.
|
||||
|
||||
@item create multiple streams
|
||||
To select the stream with index 2 from input file @file{a.mov} (specified by the
|
||||
identifier @var{0:2}), and stream with index 6 from input @file{b.mov}
|
||||
(specified by the identifier @var{1:6}), and copy them to the output file
|
||||
@file{out.mov}:
|
||||
For example, to select the stream with index 2 from input file
|
||||
@file{a.mov} (specified by the identifier "0:2"), and stream with
|
||||
index 6 from input @file{b.mov} (specified by the identifier "1:6"),
|
||||
and copy them to the output file @file{out.mov}:
|
||||
@example
|
||||
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
|
||||
@end example
|
||||
|
||||
@item create multiple streams 2
|
||||
To select all video and the third audio stream from an input file:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
|
||||
@end example
|
||||
|
||||
@item negative map
|
||||
To map all the streams except the second audio, use negative mappings
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
|
||||
@end example
|
||||
|
||||
@item optional map
|
||||
To map the video and audio streams from the first input, and using the
|
||||
trailing @code{?}, ignore the audio mapping if no audio streams exist in
|
||||
the first input:
|
||||
@@ -1538,13 +1245,12 @@ the first input:
|
||||
ffmpeg -i INPUT -map 0:v -map 0:a? OUTPUT
|
||||
@end example
|
||||
|
||||
@item map by language
|
||||
To pick the English audio stream:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
|
||||
@end example
|
||||
|
||||
@end table
|
||||
Note that using this option disables the default mappings for this output file.
|
||||
|
||||
@item -ignore_unknown
|
||||
Ignore input streams with unknown type instead of failing if copying
|
||||
@@ -1555,10 +1261,6 @@ Allow input streams with unknown type to be copied instead of failing if copying
|
||||
such streams is attempted.
|
||||
|
||||
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][?][:@var{output_file_id}.@var{stream_specifier}]
|
||||
This option is deprecated and will be removed. It can be replaced by the
|
||||
@var{pan} filter. In some cases it may be easier to use some combination of the
|
||||
@var{channelsplit}, @var{channelmap}, or @var{amerge} filters.
|
||||
|
||||
Map an audio channel from a given input to an output. If
|
||||
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
|
||||
be mapped on all the audio streams.
|
||||
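For instance, keeping only the second channel of the first audio stream could be done with the @code{pan} filter instead (a sketch with placeholder filenames):
@example
ffmpeg -i INPUT -map 0:a:0 -af "pan=mono|c0=c1" OUTPUT
@end example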
@@ -1681,49 +1383,38 @@ it will usually display as 0 if not supported.
|
||||
Show benchmarking information during the encode.
|
||||
Shows real, system and user time used in various steps (audio/video encode/decode).
|
||||
@item -timelimit @var{duration} (@emph{global})
|
||||
Exit after ffmpeg has been running for @var{duration} seconds in CPU user time.
|
||||
Exit after ffmpeg has been running for @var{duration} seconds.
|
||||
@item -dump (@emph{global})
|
||||
Dump each input packet to stderr.
|
||||
@item -hex (@emph{global})
|
||||
When dumping packets, also dump the payload.
|
||||
@item -readrate @var{speed} (@emph{input})
|
||||
Limit input read speed.
|
||||
|
||||
Its value is a floating-point positive number which represents the maximum duration of
|
||||
media, in seconds, that should be ingested in one second of wallclock time.
|
||||
Default value is zero and represents no imposed limitation on speed of ingestion.
|
||||
Value @code{1} represents real-time speed and is equivalent to @code{-re}.
|
||||
|
||||
Mainly used to simulate a capture device or live input stream (e.g. when reading from a file).
|
||||
Should not be used with a low value when input is an actual capture device or live stream as
|
||||
it may cause packet loss.
|
||||
|
||||
It is useful when the flow speed of output packets is important, such as for live streaming.
|
||||
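For example, to throttle ingestion of a file to twice real-time while remuxing to a live-style output (URL and filenames are placeholders):
@example
ffmpeg -readrate 2.0 -i input.mp4 -c copy -f mpegts udp://127.0.0.1:1234
@end example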
@item -re (@emph{input})
|
||||
Read input at native frame rate. This is equivalent to setting @code{-readrate 1}.
|
||||
@item -vsync @var{parameter} (@emph{global})
|
||||
@itemx -fps_mode[:@var{stream_specifier}] @var{parameter} (@emph{output,per-stream})
|
||||
Set video sync method / framerate mode. vsync is applied to all output video streams
|
||||
but can be overridden for a stream by setting fps_mode. vsync is deprecated and will be
|
||||
removed in the future.
|
||||
|
||||
For compatibility reasons some of the values for vsync can be specified as numbers (shown
|
||||
in parentheses in the following table).
|
||||
Read input at native frame rate. Mainly used to simulate a grab device,
|
||||
or live input stream (e.g. when reading from a file). Should not be used
|
||||
with actual grab devices or live input streams (where it can cause packet
|
||||
loss).
|
||||
By default @command{ffmpeg} attempts to read the input(s) as fast as possible.
|
||||
This option will slow down the reading of the input(s) to the native frame rate
|
||||
of the input(s). It is useful for real-time output (e.g. live streaming).
|
||||
@item -vsync @var{parameter}
|
||||
Video sync method.
|
||||
For compatibility reasons old values can be specified as numbers.
|
||||
Newly added values will have to be specified as strings always.
|
||||
|
||||
@table @option
|
||||
@item passthrough (0)
|
||||
@item 0, passthrough
|
||||
Each frame is passed with its timestamp from the demuxer to the muxer.
|
||||
@item cfr (1)
|
||||
@item 1, cfr
|
||||
Frames will be duplicated and dropped to achieve exactly the requested
|
||||
constant frame rate.
|
||||
@item vfr (2)
|
||||
@item 2, vfr
|
||||
Frames are passed through with their timestamp or dropped so as to
|
||||
prevent 2 frames from having the same timestamp.
|
||||
@item drop
|
||||
As passthrough but destroys all timestamps, making the muxer generate
|
||||
fresh timestamps based on frame-rate.
|
||||
@item auto (-1)
|
||||
Chooses between cfr and vfr depending on muxer capabilities. This is the
|
||||
@item -1, auto
|
||||
Chooses between 1 and 2 depending on muxer capabilities. This is the
|
||||
default method.
|
||||
@end table
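As a hedged example of the per-stream form (rate and stream index chosen arbitrarily), forcing constant frame rate on the first output video stream only:
@example
ffmpeg -i input.mkv -r:v:0 30 -fps_mode:v:0 cfr output.mp4
@end example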
|
||||
|
||||
@@ -1742,16 +1433,17 @@ The default is -1.1. One possible usecase is to avoid framedrops in case
|
||||
of noisy timestamps or to increase frame drop precision in case of exact
|
||||
timestamps.
|
||||
|
||||
@item -adrift_threshold @var{time}
|
||||
Set the minimum difference between timestamps and audio data (in seconds) to trigger
|
||||
adding/dropping samples to make it match the timestamps. This option is effectively
|
||||
a threshold to select between hard (add/drop) and soft (squeeze/stretch) compensation.
|
||||
@code{-async} must be set to a positive value.
|
||||
@item -async @var{samples_per_second}
|
||||
Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
|
||||
the parameter is the maximum samples per second by which the audio is changed.
|
||||
-async 1 is a special case where only the start of the audio stream is corrected
|
||||
without any later correction.
|
||||
|
||||
@item -apad @var{parameters} (@emph{output,per-stream})
|
||||
Pad the output audio stream(s). This is the same as applying @code{-af apad}.
|
||||
Argument is a string of filter parameters composed the same as with the @code{apad} filter.
|
||||
@code{-shortest} must be set for this output for the option to take effect.
|
||||
Note that the timestamps may be further modified by the muxer, after this.
|
||||
For example, in the case that the format option @option{avoid_negative_ts}
|
||||
is enabled.
|
||||
|
||||
This option has been deprecated. Use the @code{aresample} audio filter instead.
|
||||
|
||||
@item -copyts
|
||||
Do not process input timestamps, but keep their values without trying
|
||||
@@ -1820,29 +1512,9 @@ Default value is 0.
|
||||
@item -bitexact (@emph{input/output})
|
||||
Enable bitexact mode for (de)muxer and (de/en)coder
|
||||
@item -shortest (@emph{output})
|
||||
Finish encoding when the shortest output stream ends.
|
||||
|
||||
Note that this option may require buffering frames, which introduces extra
|
||||
latency. The maximum amount of this latency may be controlled with the
|
||||
@code{-shortest_buf_duration} option.
|
||||
|
||||
@item -shortest_buf_duration @var{duration} (@emph{output})
|
||||
The @code{-shortest} option may require buffering potentially large amounts
|
||||
of data when at least one of the streams is "sparse" (i.e. has large gaps
|
||||
between frames – this is typically the case for subtitles).
|
||||
|
||||
This option controls the maximum duration of buffered frames in seconds.
|
||||
Larger values may allow the @code{-shortest} option to produce more accurate
|
||||
results, but increase memory use and latency.
|
||||
|
||||
The default value is 10 seconds.
|
||||
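For instance, when muxing a sparse subtitle file next to audio/video (filenames are placeholders), the buffer could be enlarged to make @code{-shortest} more accurate at the cost of memory and latency:
@example
ffmpeg -i av.mkv -i subs.srt -map 0 -map 1 -c copy -shortest -shortest_buf_duration 20 out.mkv
@end example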
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
Timestamp discontinuity delta threshold.
|
||||
@item -dts_error_threshold @var{seconds}
|
||||
Timestamp error delta threshold. This threshold is used to discard crazy/damaged
timestamps; the default is 30 hours, which is arbitrarily picked and quite
conservative.
|
||||
@item -muxdelay @var{seconds} (@emph{output})
|
||||
Set the maximum demux-decode delay.
|
||||
@item -muxpreload @var{seconds} (@emph{output})
|
||||
@@ -1923,22 +1595,6 @@ graph will be added to the output file automatically, so we can simply write
|
||||
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
|
||||
@end example
|
||||
|
||||
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
will be converted into a video with the same size as the largest video in
|
||||
the file, or 720x576 if no video is present. Note that this is an
|
||||
experimental and temporary solution. It will be removed once libavfilter has
|
||||
proper support for subtitles.
|
||||
|
||||
For example, to hardcode subtitles on top of a DVB-T recording stored in
|
||||
MPEG-TS format, delaying the subtitles by 1 second:
|
||||
@example
|
||||
ffmpeg -i input.ts -filter_complex \
|
||||
'[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
|
||||
-sn -map '#0x2dc' output.mkv
|
||||
@end example
|
||||
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
|
||||
audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
|
||||
|
||||
To generate 5 seconds of pure red video using lavfi @code{color} source:
|
||||
@example
|
||||
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
|
||||
@@ -1971,15 +1627,11 @@ to the @option{-ss} option is considered an actual timestamp, and is not
|
||||
offset by the start time of the file. This matters only for files which do
|
||||
not start from timestamp 0, such as transport streams.
|
||||
|
||||
@item -thread_queue_size @var{size} (@emph{input/output})
|
||||
For input, this option sets the maximum number of queued packets when reading
|
||||
from the file or device. With low latency / high rate live streams, packets may
|
||||
be discarded if they are not read in a timely manner; setting this value can
|
||||
force ffmpeg to use a separate input thread and read packets as soon as they
|
||||
arrive. By default ffmpeg only does this if multiple inputs are specified.
|
||||
|
||||
For output, this option specifies the maximum number of packets that may be
|
||||
queued to each muxing thread.
|
||||
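A sketch for a high-rate live input (the URL is a placeholder), raising the input packet queue to reduce the chance of drops:
@example
ffmpeg -thread_queue_size 4096 -i udp://127.0.0.1:5000 -c copy out.ts
@end example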
@item -thread_queue_size @var{size} (@emph{input})
|
||||
This option sets the maximum number of queued packets when reading from the
|
||||
file or device. With low latency / high rate live streams, packets may be
|
||||
discarded if they are not read in a timely manner; raising this value can
|
||||
avoid it.
|
||||
|
||||
@item -sdp_file @var{file} (@emph{global})
|
||||
Print sdp information for an output stream to @var{file}.
|
||||
@@ -2018,15 +1670,8 @@ Stop and abort on various conditions. The following flags are available:
|
||||
@table @option
|
||||
@item empty_output
|
||||
No packets were passed to the muxer, the output is empty.
|
||||
@item empty_output_stream
|
||||
No packets were passed to the muxer in some of the output streams.
|
||||
@end table
|
||||
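For example, to make a stream-copy job fail instead of silently producing an empty file (flags joined with @samp{+}, filenames are placeholders):
@example
ffmpeg -abort_on empty_output+empty_output_stream -i INPUT -c copy OUTPUT
@end example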
|
||||
@item -max_error_rate (@emph{global})
|
||||
Set the fraction of decoding frame failures across all inputs which, when crossed,
makes ffmpeg return exit code 69. Crossing this threshold does not terminate
processing. Range is a floating-point number between 0 and 1. Default is 2/3.
|
||||
|
||||
@item -xerror (@emph{global})
|
||||
Stop and exit on error
|
||||
|
||||
@@ -2039,139 +1684,23 @@ this buffer, in packets, for the matching output stream.
|
||||
The default value of this option should be high enough for most uses, so only
|
||||
touch this option if you are sure that you need it.
|
||||
|
||||
@item -muxing_queue_data_threshold @var{bytes} (@emph{output,per-stream})
|
||||
This is a minimum threshold until which the muxing queue size is not taken into
|
||||
account. Defaults to 50 megabytes per stream, and is based on the overall size
|
||||
of packets passed to the muxer.
|
||||
|
||||
@item -auto_conversion_filters (@emph{global})
|
||||
Enable automatically inserting format conversion filters in all filter
|
||||
graphs, including those defined by @option{-vf}, @option{-af},
|
||||
@option{-filter_complex} and @option{-lavfi}. If filter format negotiation
|
||||
requires a conversion, the initialization of the filters will fail.
|
||||
Conversions can still be performed by inserting the relevant conversion
|
||||
filter (scale, aresample) in the graph.
|
||||
On by default, to explicitly disable it you need to specify
|
||||
@code{-noauto_conversion_filters}.
|
||||
|
||||
@item -bits_per_raw_sample[:@var{stream_specifier}] @var{value} (@emph{output,per-stream})
|
||||
Declare the number of bits per raw sample in the given output stream to be
|
||||
@var{value}. Note that this option sets the information provided to the
|
||||
encoder/muxer, it does not change the stream to conform to this value. Setting
|
||||
values that do not match the stream properties may result in encoding failures
|
||||
or invalid output files.
|
||||
|
||||
@item -stats_enc_pre[:@var{stream_specifier}] @var{path} (@emph{output,per-stream})
|
||||
@item -stats_enc_post[:@var{stream_specifier}] @var{path} (@emph{output,per-stream})
|
||||
@item -stats_mux_pre[:@var{stream_specifier}] @var{path} (@emph{output,per-stream})
|
||||
Write per-frame encoding information about the matching streams into the file
|
||||
given by @var{path}.
|
||||
|
||||
@option{-stats_enc_pre} writes information about raw video or audio frames right
|
||||
before they are sent for encoding, while @option{-stats_enc_post} writes
|
||||
information about encoded packets as they are received from the encoder.
|
||||
@option{-stats_mux_pre} writes information about packets just as they are about to
|
||||
be sent to the muxer. Every frame or packet produces one line in the specified
|
||||
file. The format of this line is controlled by @option{-stats_enc_pre_fmt} /
|
||||
@option{-stats_enc_post_fmt} / @option{-stats_mux_pre_fmt}.
|
||||
|
||||
When stats for multiple streams are written into a single file, the lines
|
||||
corresponding to different streams will be interleaved. The precise order of
|
||||
this interleaving is not specified and not guaranteed to remain stable between
|
||||
different invocations of the program, even with the same options.
|
||||
|
||||
@item -stats_enc_pre_fmt[:@var{stream_specifier}] @var{format_spec} (@emph{output,per-stream})
|
||||
@item -stats_enc_post_fmt[:@var{stream_specifier}] @var{format_spec} (@emph{output,per-stream})
|
||||
@item -stats_mux_pre_fmt[:@var{stream_specifier}] @var{format_spec} (@emph{output,per-stream})
|
||||
Specify the format for the lines written with @option{-stats_enc_pre} /
|
||||
@option{-stats_enc_post} / @option{-stats_mux_pre}.
|
||||
|
||||
@var{format_spec} is a string that may contain directives of the form
|
||||
@var{@{fmt@}}. @var{format_spec} is backslash-escaped --- use \@{, \@}, and \\
|
||||
to write a literal @{, @}, or \, respectively, into the output.
|
||||
|
||||
The directives given with @var{fmt} may be one of the following:
|
||||
@table @option
|
||||
@item fidx
|
||||
Index of the output file.
|
||||
|
||||
@item sidx
|
||||
Index of the output stream in the file.
|
||||
|
||||
@item n
|
||||
Frame number. Pre-encoding: number of frames sent to the encoder so far.
|
||||
Post-encoding: number of packets received from the encoder so far.
|
||||
Muxing: number of packets submitted to the muxer for this stream so far.
|
||||
|
||||
@item ni
|
||||
Input frame number. Index of the input frame (i.e. output by a decoder) that
|
||||
corresponds to this output frame or packet. -1 if unavailable.
|
||||
|
||||
@item tb
|
||||
Encoder timebase, as a rational number @var{num/den}. Note that this may be
|
||||
different from the timebase used by the muxer.
|
||||
|
||||
@item tbi
|
||||
Timebase for @var{ptsi}, as a rational number @var{num/den}. Available when
|
||||
@var{ptsi} is available, @var{0/1} otherwise.
|
||||
|
||||
@item pts
|
||||
Presentation timestamp of the frame or packet, as an integer. Should be
|
||||
multiplied by the timebase to compute presentation time.
|
||||
|
||||
@item ptsi
|
||||
Presentation timestamp of the input frame (see @var{ni}), as an integer. Should
|
||||
be multiplied by @var{tbi} to compute presentation time. Printed as
|
||||
(2^63 - 1 = 9223372036854775807) when not available.
|
||||
|
||||
@item t
|
||||
Presentation time of the frame or packet, as a decimal number. Equal to
|
||||
@var{pts} multiplied by @var{tb}.
|
||||
|
||||
@item ti
|
||||
Presentation time of the input frame (see @var{ni}), as a decimal number. Equal
|
||||
to @var{ptsi} multiplied by @var{tbi}. Printed as inf when not available.
|
||||
|
||||
@item dts
|
||||
Decoding timestamp of the packet, as an integer. Should be multiplied by the
|
||||
timebase to compute presentation time. Post-encoding only.
|
||||
|
||||
@item dt
|
||||
Decoding time of the frame or packet, as a decimal number. Equal to
|
||||
@var{dts} multiplied by @var{tb}.
|
||||
|
||||
@item sn
|
||||
Number of audio samples sent to the encoder so far. Audio and pre-encoding only.
|
||||
|
||||
@item samp
|
||||
Number of audio samples in the frame. Audio and pre-encoding only.
|
||||
|
||||
@item size
|
||||
Size of the encoded packet in bytes. Post-encoding only.
|
||||
|
||||
@item br
|
||||
Current bitrate in bits per second. Post-encoding only.
|
||||
|
||||
@item abr
|
||||
Average bitrate for the whole stream so far, in bits per second, -1 if it cannot
|
||||
be determined at this point. Post-encoding only.
|
||||
@end table
|
||||
|
||||
The default format strings are:
|
||||
@table @option
|
||||
@item pre-encoding
|
||||
@{fidx@} @{sidx@} @{n@} @{t@}
|
||||
@item post-encoding
|
||||
@{fidx@} @{sidx@} @{n@} @{t@}
|
||||
@end table
|
||||
In the future, new items may be added to the end of the default formatting
|
||||
strings. Users who depend on the format staying exactly the same should
specify it manually.
|
||||
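As a rough illustration (file name and directive selection are arbitrary), post-encoding stats including packet size and bitrate could be logged with:
@example
ffmpeg -i INPUT -c:v libx264 -stats_enc_post enc_stats.txt \
    -stats_enc_post_fmt "@{fidx@} @{sidx@} @{n@} @{t@} @{size@} @{br@}" OUTPUT
@end example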
As a special exception, you can use a bitmap subtitle stream as input: it
|
||||
will be converted into a video with the same size as the largest video in
|
||||
the file, or 720x576 if no video is present. Note that this is an
|
||||
experimental and temporary solution. It will be removed once libavfilter has
|
||||
proper support for subtitles.
|
||||
|
||||
Note that stats for different streams written into the same file may have
|
||||
different formats.
|
||||
|
||||
@end table
|
||||
For example, to hardcode subtitles on top of a DVB-T recording stored in
|
||||
MPEG-TS format, delaying the subtitles by 1 second:
|
||||
@example
|
||||
ffmpeg -i input.ts -filter_complex \
|
||||
'[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
|
||||
-sn -map '#0x2dc' output.mkv
|
||||
@end example
|
||||
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
|
||||
audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
|
||||
|
||||
@section Preset files
|
||||
A preset file contains a sequence of @var{option}=@var{value} pairs,
|
||||
@@ -2449,7 +1978,6 @@ ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@@ -34,6 +34,10 @@ various FFmpeg APIs.
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -fs
|
||||
Start in fullscreen mode.
|
||||
@item -an
|
||||
@@ -122,12 +126,15 @@ Read @var{input_url}.
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is shown by default, unless the
|
||||
log level is lower than @code{info}. Its display can be forced by manually
|
||||
specifying this option. To disable it, you need to specify @code{-nostats}.
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@@ -214,6 +221,8 @@ Pause.
|
||||
Toggle mute.
|
||||
|
||||
@item 9, 0
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@item /, *
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@@ -285,7 +294,6 @@ Toggle full screen.
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] @file{input_url}
|
||||
ffprobe [@var{options}] [@file{input_url}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -28,9 +28,6 @@ If a url is specified in input, ffprobe will try to open and
|
||||
probe the url content. If the url cannot be opened or recognized as
|
||||
a multimedia file, a positive exit code is returned.
|
||||
|
||||
If no output is specified as output with @option{o} ffprobe will write
|
||||
to stdout.
|
||||
|
||||
ffprobe may be employed both as a standalone application or in
|
||||
combination with a textual filter, which may perform more
|
||||
sophisticated processing, e.g. statistical processing or plotting.
|
||||
@@ -338,12 +335,6 @@ Show information about all pixel formats supported by FFmpeg.
|
||||
Pixel format information for each format is printed within a section
|
||||
with name "PIXEL_FORMAT".
|
||||
|
||||
@item -show_optional_fields @var{value}
|
||||
Some writers, such as JSON and XML, omit printing fields with invalid or non-applicable values,
|
||||
while other writers always print them. This option enables one to control this behaviour.
|
||||
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
|
||||
Default is @var{auto}.
|
||||
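For instance, to force the JSON writer to also print non-applicable fields (the input file is a placeholder):
@example
ffprobe -show_optional_fields always -of json -show_streams input.mp4
@end example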
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
@@ -351,10 +342,6 @@ on the specific build.
|
||||
@item -i @var{input_url}
|
||||
Read @var{input_url}.
|
||||
|
||||
@item -o @var{output_url}
|
||||
Write output to @var{output_url}. If not specified, the output is sent
|
||||
to stdout.
|
||||
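A brief sketch combining this with a writer selection (filenames are placeholders):
@example
ffprobe -of json -show_format -show_streams -o probe.json input.mp4
@end example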
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@@ -655,7 +642,6 @@ DV, GXF and AVI timecodes are available in format metadata
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@@ -29,18 +29,22 @@
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
@@ -57,6 +61,8 @@
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
@@ -86,14 +92,14 @@
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
@@ -195,11 +201,6 @@
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="captions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="descriptions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="metadata" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dependent" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="still_image" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
@@ -214,10 +215,10 @@
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_size" type="xsd:int" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
@@ -225,8 +226,6 @@
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="closed_captions" type="xsd:boolean"/>
|
||||
<xsd:attribute name="film_grain" type="xsd:boolean"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
@@ -238,6 +237,7 @@
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
@@ -246,7 +246,6 @@
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="initial_padding" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
@@ -273,6 +272,10 @@
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
@@ -354,6 +357,7 @@
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
|
||||
@@ -107,24 +107,17 @@ Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@option{-formats} option to get a list of all muxers and demuxers.
|
||||
|
||||
@item filter=@var{filter_name}
|
||||
Print detailed information about the filter named @var{filter_name}. Use the
|
||||
Print detailed information about the filter name @var{filter_name}. Use the
|
||||
@option{-filters} option to get a list of all filters.
|
||||
|
||||
@item bsf=@var{bitstream_filter_name}
|
||||
Print detailed information about the bitstream filter named @var{bitstream_filter_name}.
|
||||
Print detailed information about the bitstream filter name @var{bitstream_filter_name}.
|
||||
Use the @option{-bsfs} option to get a list of all bitstream filters.
|
||||
|
||||
@item protocol=@var{protocol_name}
|
||||
Print detailed information about the protocol named @var{protocol_name}.
|
||||
Use the @option{-protocols} option to get a list of all protocols.
|
||||
@end table
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
|
||||
@item -buildconf
|
||||
Show the build configuration, one option per line.
|
||||
|
||||
@item -formats
|
||||
Show available formats (including devices).
|
||||
|
||||
@@ -167,9 +160,6 @@ Show available sample formats.
|
||||
@item -layouts
|
||||
Show channel names and standard channel layouts.
|
||||
|
||||
@item -dispositions
|
||||
Show stream dispositions.
|
||||
|
||||
@item -colors
|
||||
Show recognized color names.
|
||||
|
||||
@@ -246,11 +236,13 @@ ffmpeg [...] -loglevel +repeat
|
||||
By default the program logs to stderr. If coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled setting the environment variable
|
||||
@env{AV_LOG_FORCE_NOCOLOR}, or can be forced setting
|
||||
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a future FFmpeg version.
|
||||
|
||||
@item -report
|
||||
Dump full command line and log output to a file named
|
||||
Dump full command line and console output to a file named
|
||||
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
@@ -355,19 +347,6 @@ Possible flags for this option are:
|
||||
@item k8
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@item -cpucount @var{count} (@emph{global})
|
||||
Override detection of CPU count. This option is intended
|
||||
for testing. Do not use it unless you know what you're doing.
|
||||
@example
|
||||
ffmpeg -cpucount 2
|
||||
@end example
|
||||
|
||||
@item -max_alloc @var{bytes}
|
||||
Set the maximum size limit for allocating a block on the heap by ffmpeg's
|
||||
family of malloc functions. Exercise @strong{extreme caution} when using
|
||||
this option. Don't use if you do not understand the full consequence of doing so.
|
||||
Default is INT_MAX.
|
||||
@end table
|
||||
|
||||
@section AVOptions
|
||||
|
||||
doc/filters.texi (9698): file diff suppressed because it is too large.
@@ -27,10 +27,6 @@ stream information. A higher value will enable detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
|
||||
@item max_probe_packets @var{integer} (@emph{input})
|
||||
Set the maximum number of buffered packets when probing a codec.
|
||||
Default is 2500 packets.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@@ -49,6 +45,7 @@ Generate missing PTS if DTS is present.
|
||||
Ignore DTS if PTS is set. Inert when nofillin is set.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item keepside (@emph{deprecated},@emph{inert})
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by buffering during initial input streams analysis.
|
||||
@item nofillin
|
||||
@@ -69,6 +66,7 @@ This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item flush_packets
|
||||
Write out packets immediately.
|
||||
@item latm (@emph{deprecated},@emph{inert})
|
||||
@item shortest
|
||||
Stop muxing at the end of the shortest stream.
|
||||
It may be needed to increase max_interleave_delta to avoid flushing the longer
|
||||
@@ -141,7 +139,7 @@ Consider things that a sane encoder should not do as an error.
|
||||
|
||||
@item max_interleave_delta @var{integer} (@emph{output})
|
||||
Set maximum buffering duration for interleaving. The duration is
|
||||
expressed in microseconds, and defaults to 10000000 (10 seconds).
|
||||
expressed in microseconds, and defaults to 1000000 (1 second).
|
||||
|
||||
To ensure all the streams are interleaved correctly, libavformat will
|
||||
wait until it has at least one packet for each stream before actually
|
||||
|
||||
doc/general.texi (1364): file diff suppressed because it is too large.
File diff suppressed because it is too large.
@@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it.
|
||||
@section Cloning the source tree
|
||||
|
||||
@example
|
||||
git clone https://git.ffmpeg.org/ffmpeg.git <target>
|
||||
git clone git://source.ffmpeg.org/ffmpeg <target>
|
||||
@end example
|
||||
|
||||
This will put the FFmpeg sources into the directory @var{<target>}.
|
||||
@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions.
|
||||
git add [-i|-p|-A] <filenames/dirnames>
|
||||
@end example
|
||||
|
||||
Make sure you have told Git your name, email address and GPG key
|
||||
Make sure you have told Git your name and email address
|
||||
|
||||
@example
|
||||
git config --global user.name "My Name"
|
||||
git config --global user.email my@@email.invalid
|
||||
git config --global user.signingkey ABCDEF0123245
|
||||
@end example
|
||||
|
||||
Enable signing all commits or use -S
|
||||
|
||||
@example
|
||||
git config --global commit.gpgsign true
|
||||
@end example
|
||||
|
||||
Use @option{--global} to set the global configuration for all your Git checkouts.
|
||||
@@ -224,46 +217,16 @@ git config --global core.editor
|
||||
or set by one of the following environment variables:
|
||||
@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.
|
||||
|
||||
@section Writing a commit message
|
||||
Log messages should be concise but descriptive. Explain why you made a change,
|
||||
what you did will be obvious from the changes themselves most of the time.
|
||||
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
|
||||
levels look at and educate themselves while reading through your code. Don't
|
||||
include filenames in log messages, Git provides that information.
|
||||
|
||||
Log messages should be concise but descriptive.
|
||||
|
||||
The first line must contain the context, a colon and a very short
|
||||
summary of what the commit does. Details can be added, if necessary,
|
||||
separated by an empty line. These details should not exceed 60-72 characters
|
||||
per line, except when containing code.
|
||||
|
||||
Example of a good commit message:
|
||||
|
||||
@example
|
||||
avcodec/cbs: add a helper to read extradata within packet side data
|
||||
|
||||
Using ff_cbs_read() on the raw buffer will not parse it as extradata,
|
||||
resulting in parsing errors for example when handling ISOBMFF avcC.
|
||||
This helper works around that.
|
||||
@end example
|
||||
|
||||
@example
|
||||
ptr might be NULL
|
||||
@end example
|
||||
|
||||
If the summary on the first line is not enough, in the body of the message,
|
||||
explain why you made a change, what you did will be obvious from the changes
|
||||
themselves most of the time. Saying just "bug fix" or "10l" is bad. Remember
|
||||
that people of varying skill levels look at and educate themselves while
|
||||
reading through your code. Don't include filenames in log messages except in
|
||||
the context, Git provides that information.
|
||||
|
||||
If the commit fixes a registered issue, state it in a separate line of the
|
||||
body: @code{Fix Trac ticket #42.}
|
||||
|
||||
The first line will be used to name
|
||||
Possibly make the commit message have a terse, descriptive first line, an
|
||||
empty line and then a full description. The first line will be used to name
|
||||
the patch by @command{git format-patch}.
|
||||
|
||||
Common mistakes for the first line, as seen in @command{git log --oneline}
|
||||
include: missing context at the beginning; description of what the code did
|
||||
before the patch; line too long or wrapped to the second line.
|
||||
|
||||
@section Preparing a patchset
|
||||
|
||||
@example
|
||||
@@ -430,19 +393,6 @@ git checkout -b svn_23456 $SHA1
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter gpg key generation
|
||||
|
||||
If you have no gpg key yet, we recommend that you create a ed25519 based key as it
|
||||
is small, fast and secure. Especially it results in small signatures in git.
|
||||
|
||||
@example
|
||||
gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com"
|
||||
@end example
|
||||
|
||||
When generating a key, make sure the email specified matches the email used in git as some sites like
|
||||
github consider mismatches a reason to declare such commits unverified. After generating a key you
|
||||
can add it to the MAINTAINER file and upload it to a keyserver.
|
||||
|
||||
@chapter Pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
|
||||
doc/indevs.texi (110)
@@ -277,8 +277,8 @@ audio track.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sources} option of ffmpeg to list the available input devices.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sources}
|
||||
option of ffmpeg to list the available input devices.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@@ -292,35 +292,25 @@ as @option{pal} (3 letters).
|
||||
Default behavior is autodetection of the input video format, if the hardware
|
||||
supports it.
|
||||
|
||||
@item bm_v210
|
||||
This is a deprecated option, you can use @option{raw_format} instead.
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item raw_format
|
||||
Set the pixel format of the captured video.
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item auto
|
||||
|
||||
This is the default which means 8-bit YUV 422 or 8-bit ARGB if format
|
||||
autodetection is used, 8-bit YUV 422 otherwise.
|
||||
|
||||
@item uyvy422
|
||||
|
||||
8-bit YUV 422.
|
||||
|
||||
@item yuv422p10
|
||||
|
||||
10-bit YUV 422.
|
||||
|
||||
@item argb
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item bgra
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item rgb10
|
||||
|
||||
10-bit RGB.
|
||||
|
||||
@end table
|
||||
|
||||
@item teletext_lines
|
||||
@@ -344,33 +334,14 @@ Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp
|
||||
Defaults to @samp{2}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
Note: DeckLink SDK 11.0 has replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices, so the DeckLink 8K Pro supports four profiles.

Valid profile modes for DeckLink 8K Pro (with DeckLink SDK >= 11.0):
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
|
||||
Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
|
||||
@samp{half}, @samp{full}
|
||||
|
||||
@item timecode_format
|
||||
Timecode type to include in the frame and video stream metadata. Must be
|
||||
@samp{none}, @samp{rp188vitc}, @samp{rp188vitc2}, @samp{rp188ltc},
|
||||
@samp{rp188hfr}, @samp{rp188any}, @samp{vitc}, @samp{vitc2}, or @samp{serial}.
|
||||
Defaults to @samp{none} (not included).
|
||||
|
||||
In order to properly support 50/60 fps timecodes, the ordering of the queried
|
||||
timecode types for @samp{rp188any} is HFR, VITC1, VITC2 and LTC for >30 fps
|
||||
content. Note that this is slightly different to the ordering used by the
|
||||
DeckLink API, which is HFR, VITC1, LTC, VITC2.
|
||||
@samp{rp188any}, @samp{vitc}, @samp{vitc2}, or @samp{serial}. Defaults to
|
||||
@samp{none} (not included).
|
||||
|
||||
@item video_input
|
||||
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
|
||||
@@ -424,20 +395,6 @@ Either sync could go wrong by 1 frame or in a rarer case
|
||||
@option{timestamp_align} seconds.
|
||||
Defaults to @samp{0}.
|
||||
|
||||
@item wait_for_tc (@emph{bool})
|
||||
Drop frames till a frame with timecode is received. Sometimes serial timecode
|
||||
isn't received with the first input frame. If that happens, the stored stream
|
||||
timecode will be inaccurate. If this option is set to @option{true}, input frames
|
||||
are dropped till a frame with timecode is received.
|
||||
Option @var{timecode_format} must be specified.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item enable_klv(@emph{bool})
|
||||
If set to @option{true}, extracts KLV data from VANC and outputs KLV packets.
|
||||
KLV VANC packets are joined based on MID and PSC fields and aggregated into
|
||||
one KLV packet.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -447,7 +404,7 @@ Defaults to @option{false}.
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -sources decklink
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -465,7 +422,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -625,12 +582,6 @@ Save the currently used video capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@item use_video_device_timestamps
|
||||
If set to @option{false}, the timestamp for video frames will be
|
||||
derived from the wallclock instead of the timestamp provided by
|
||||
the capture device. This allows working around devices that
|
||||
provide unreliable timestamps.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -929,15 +880,11 @@ If you don't understand what all of that means, you probably don't want this. L
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. This can be autodetected if you are running Linux 5.7
|
||||
or later, but needs to be provided for earlier versions. Defaults to @option{bgr0},
|
||||
which is the most common format used by the Linux console and Xorg X server.
|
||||
Pixel format of the framebuffer. Defaults to @option{bgr0}.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs. It can be autodetected if you are running Linux 5.7 or later, but will need
|
||||
to be provided explicitly when needed in earlier versions. See the libdrm documentation
|
||||
for possible values.
|
||||
some APIs, but can't be autodetected. See the libdrm documentation for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
@@ -1289,11 +1236,11 @@ Specify the samplerate in Hz, by default 48kHz is used.
|
||||
Specify the channels in use, by default 2 (stereo) is set.
|
||||
|
||||
@item frame_size
|
||||
This option does nothing and is deprecated.
|
||||
Specify the number of bytes per frame, by default it is set to 1024.
|
||||
|
||||
@item fragment_size
|
||||
Specify the size in bytes of the minimal buffering fragment in PulseAudio, it
|
||||
will affect the audio latency. By default it is set to 50 ms amount of data.
|
||||
Specify the minimal buffering fragment in PulseAudio, it will affect the
|
||||
audio latency. By default it is unset.
|
||||
|
||||
@item wallclock
|
||||
Set the initial PTS using the current time. Default is 1.
|
||||
@@ -1528,14 +1475,6 @@ ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item select_region
|
||||
Specify whether to select the grabbing area graphically using the pointer.
|
||||
A value of @code{1} prompts the user to select the grabbing area graphically
|
||||
by clicking and dragging. A single click with no dragging will select the
|
||||
whole screen. A region with zero width or height will also select the whole
|
||||
screen. This option overwrites the @var{video_size}, @var{grab_x}, and
|
||||
@var{grab_y} options. Default value is @code{0}.
|
||||
|
||||
@item draw_mouse
|
||||
Specify whether to draw the mouse pointer. A value of @code{0} specifies
|
||||
not to draw the pointer. Default value is @code{1}.
|
||||
@@ -1584,21 +1523,8 @@ With @var{follow_mouse}:
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item window_id
|
||||
Grab this window, instead of the whole screen. Default value is 0, which maps to
|
||||
the whole screen (root window).
|
||||
|
||||
The id of a window can be found using the @command{xwininfo} program, possibly with options -tree and
|
||||
-root.
|
||||
|
||||
If the window is later enlarged, the new area is not recorded. Video ends when
|
||||
the window is closed, unmapped (i.e., iconified) or shrunk beyond the video
|
||||
size (which defaults to the initial window size).
|
||||
|
||||
This option disables options @option{follow_mouse} and @option{select_region}.
|
||||
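A hedged example (the window id below is hypothetical and should be taken from the @command{xwininfo} output):
@example
# list windows and pick the id of the one to capture
xwininfo -tree -root
ffmpeg -f x11grab -window_id 0x3c00041 -framerate 25 -i :0.0 out.mpg
@end example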
|
||||
@item video_size
|
||||
Set the video frame size. Default is the full desktop or window.
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item grab_x
|
||||
@item grab_y
|
||||
|
||||
@@ -116,7 +116,7 @@ or is abusive towards others).
|
||||
@section How long does it take for my message in the moderation queue to be approved?
|
||||
|
||||
The queue is not checked on a regular basis. You can ask on the
|
||||
@t{#ffmpeg-devel} IRC channel on Libera Chat for someone to approve your message.
|
||||
@t{#ffmpeg-devel} IRC channel on Freenode for someone to approve your message.
|
||||
|
||||
@anchor{How do I delete my message in the moderation queue?}
|
||||
@section How do I delete my message in the moderation queue?
|
||||
@@ -155,7 +155,7 @@ Perform a site search using your favorite search engine. Example:
|
||||
|
||||
@section Is there an alternative to the mailing list?
|
||||
|
||||
You can ask for help in the official @t{#ffmpeg} IRC channel on Libera Chat.
|
||||
You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.
|
||||
|
||||
Some users prefer the third-party @url{http://www.ffmpeg-archive.org/, Nabble}
|
||||
interface which presents the mailing lists in a typical forum layout.
|
||||
|
||||
@@ -20,7 +20,8 @@ Slice threading -
|
||||
|
||||
Frame threading -
|
||||
* Restrictions with slice threading also apply.
|
||||
* Custom get_buffer2() and get_format() callbacks must be thread-safe.
|
||||
* For best performance, the client should set thread_safe_callbacks if it
|
||||
provides a thread-safe get_buffer() callback.
|
||||
* There is one frame of delay added for every thread beyond the first one.
|
||||
Clients must be able to handle this; the pkt_dts and pkt_pts fields in
|
||||
AVFrame will work as usual.
|
||||
@@ -50,14 +51,16 @@ the decode process starts. Call ff_thread_finish_setup() afterwards. If
|
||||
some code can't be moved, have update_thread_context() run it in the next
|
||||
thread.
|
||||
|
||||
If the codec allocates writable tables in its init(), add an init_thread_copy()
|
||||
which re-allocates them for other threads.
|
||||
|
||||
Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
|
||||
speed gain at this point but it should work.
|
||||
|
||||
If there are inter-frame dependencies, so the codec calls
|
||||
ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in
|
||||
AVCodec.caps_internal and use ff_thread_get_buffer() to allocate frames. The
|
||||
ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
|
||||
frames must then be freed with ff_thread_release_buffer().
|
||||
Otherwise decode directly into the user-supplied frames.
|
||||
Otherwise leave it at zero and decode directly into the user-supplied frames.
|
||||
|
||||
Call ff_thread_report_progress() after some part of the current picture has decoded.
|
||||
A good place to put this is where draw_horiz_band() is called - add this if it isn't
|
||||
|
||||
doc/muxers.texi | 924 lines changed (diff too large to display)
@@ -267,11 +267,6 @@ CELL/SPU:
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf
|
||||
|
||||
RISC-V-specific:
|
||||
----------------
|
||||
The RISC-V Instruction Set Manual, Volume 1, Unprivileged ISA:
|
||||
https://riscv.org/technical/specifications/
|
||||
|
||||
GCC asm links:
|
||||
--------------
|
||||
official doc but quite ugly
|
||||
|
||||
@@ -38,52 +38,6 @@ ffmpeg -i INPUT -f alsa hw:1,7
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section AudioToolbox
|
||||
|
||||
AudioToolbox output device.
|
||||
|
||||
Allows native output to CoreAudio devices on OSX.
|
||||
|
||||
The output filename can be empty (or @code{-}) to refer to the default system output device or a number that refers to the device index as shown using: @code{-list_devices true}.
|
||||
|
||||
Alternatively, the audio output device can be chosen by index using the
@option{-audio_device_index <INDEX>} option, overriding any device name or
index given in the output filename.
|
||||
|
||||
All available devices can be enumerated by using @option{-list_devices true}, listing
|
||||
all device names, UIDs and corresponding indices.
|
||||
|
||||
@subsection Options
|
||||
|
||||
AudioToolbox supports the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item -audio_device_index <INDEX>
|
||||
Specify the audio device by its index. Overrides anything given in the output filename.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Print the list of supported devices and output a sine wave to the default device:
|
||||
@example
|
||||
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -list_devices true -
|
||||
@end example
|
||||
|
||||
@item
|
||||
Output a sine wave to the device with the index 2, overriding any output filename:
|
||||
@example
|
||||
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -audio_device_index 2 -
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section caca
|
||||
|
||||
CACA output device.
|
||||
@@ -186,8 +140,8 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sinks} option of ffmpeg to list the available output devices.
|
||||
Defaults to @option{false}. Alternatively you can use the @code{-sinks}
|
||||
option of ffmpeg to list the available output devices.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@@ -198,43 +152,13 @@ Amount of time to preroll video in seconds.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
Note: DeckLink SDK 11.0 replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices, so the DeckLink 8K Pro supports four profiles.

Valid profile modes for the DeckLink 8K Pro (with DeckLink SDK >= 11.0):
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
|
||||
Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
|
||||
@samp{half}, @samp{full}
|
||||
|
||||
@item timing_offset
|
||||
Sets the genlock timing pixel offset on the used output.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item link
|
||||
Sets the SDI video link configuration on the used output. Must be
|
||||
@samp{unset}, @samp{single} link SDI, @samp{dual} link SDI or @samp{quad} link
|
||||
SDI.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item sqd
|
||||
Enable Square Division Quad Split mode for Quad-link SDI output.
|
||||
Must be @samp{unset}, @samp{true} or @samp{false}.
|
||||
Defaults to @option{unset}.
|
||||
|
||||
@item level_a
|
||||
Enable SMPTE Level A mode on the used output.
|
||||
Must be @samp{unset}, @samp{true} or @samp{false}.
|
||||
Defaults to @option{unset}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -244,7 +168,7 @@ Defaults to @option{unset}.
|
||||
@item
|
||||
List output devices:
|
||||
@example
|
||||
ffmpeg -sinks decklink
|
||||
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -405,8 +329,6 @@ ffmpeg -i INPUT -f pulse "stream name"
|
||||
|
||||
SDL (Simple DirectMedia Layer) output device.
|
||||
|
||||
"sdl2" can be used as alias for "sdl".
|
||||
|
||||
This output device allows one to show a video stream in an SDL
|
||||
window. Only one SDL window is allowed per application, so you can
|
||||
have only one instance of this output device in an application.
|
||||
|
||||
@@ -51,82 +51,6 @@ in microseconds.
|
||||
|
||||
A description of the currently available protocols follows.
|
||||
|
||||
@section amqp
|
||||
|
||||
Advanced Message Queueing Protocol (AMQP) version 0-9-1 is a broker based
|
||||
publish-subscribe communication protocol.
|
||||
|
||||
FFmpeg must be compiled with --enable-librabbitmq to support AMQP. A separate
|
||||
AMQP broker must also be run. An example open-source AMQP broker is RabbitMQ.
|
||||
|
||||
After starting the broker, an FFmpeg client may stream data to the broker using
|
||||
the command:
|
||||
|
||||
@example
|
||||
ffmpeg -re -i input -f mpegts amqp://[[user]:[password]@@]hostname[:port][/vhost]
|
||||
@end example
|
||||
|
||||
Where hostname and port (default is 5672) are the address of the broker. The
client may also set a user/password for authentication. The default for both
fields is "guest". The name of the virtual host on the broker can be set with
vhost. The default value is "/".

Multiple subscribers may stream from the broker using the command:
|
||||
@example
|
||||
ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost]
|
||||
@end example
|
||||
|
||||
In RabbitMQ all data published to the broker flows through a specific exchange,
|
||||
and each subscribing client has an assigned queue/buffer. When a packet arrives
|
||||
at an exchange, it may be copied to a client's queue depending on the exchange
|
||||
and routing_key fields.
|
||||
|
||||
The following options are supported:
|
||||
|
||||
@table @option
|
||||
|
||||
@item exchange
|
||||
Sets the exchange to use on the broker. RabbitMQ has several predefined
|
||||
exchanges: "amq.direct" is the default exchange, where the publisher and
|
||||
subscriber must have a matching routing_key; "amq.fanout" is the same as a
|
||||
broadcast operation (i.e. the data is forwarded to all queues on the fanout
|
||||
exchange independent of the routing_key); and "amq.topic" is similar to
|
||||
"amq.direct", but allows for more complex pattern matching (refer to the RabbitMQ
|
||||
documentation).
|
||||
|
||||
@item routing_key
|
||||
Sets the routing key. The default value is "amqp". The routing key is used on
|
||||
the "amq.direct" and "amq.topic" exchanges to decide whether packets are written
|
||||
to the queue of a subscriber.
|
||||
|
||||
@item pkt_size
|
||||
Maximum size of each packet sent to or received from the broker. Default is 131072.
|
||||
Minimum is 4096 and max is any large value (representable by an int). When
|
||||
receiving packets, this sets an internal buffer size in FFmpeg. It should be
|
||||
equal to or greater than the size of the published packets to the broker. Otherwise
|
||||
the received message may be truncated causing decoding errors.
|
||||
|
||||
@item connection_timeout
|
||||
The timeout in seconds during the initial connection to the broker. The
|
||||
default value is rw_timeout, or 5 seconds if rw_timeout is not set.
|
||||
|
||||
@item delivery_mode @var{mode}
|
||||
Sets the delivery mode of each message sent to broker.
|
||||
The following values are accepted:
|
||||
@table @samp
|
||||
@item persistent
|
||||
Delivery mode set to "persistent" (2). This is the default value.
|
||||
Messages may be written to the broker's disk depending on its setup.
|
||||
|
||||
@item non-persistent
|
||||
Delivery mode set to "non-persistent" (1).
|
||||
Messages will stay in broker's memory unless the broker is under memory
|
||||
pressure.
|
||||
|
||||
@end table
|
||||
|
||||
@end table
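
As an illustrative sketch (broker address and credentials are placeholders),
the exchange and routing key can be chosen on the command line like other
protocol options:
@example
ffmpeg -re -i input -f mpegts -exchange amq.direct -routing_key live amqp://guest:guest@@localhost:5672/
@end example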
|
||||
|
||||
@section async
|
||||
|
||||
Asynchronous data filling wrapper for input stream.
|
||||
@@ -175,16 +99,6 @@ Caching wrapper for input stream.
|
||||
|
||||
Cache the input stream to a temporary file. It brings seeking capability to live streams.
|
||||
|
||||
The accepted options are:
|
||||
@table @option
|
||||
|
||||
@item read_ahead_limit
|
||||
Amount in bytes that may be read ahead when seeking isn't supported. Range is -1 to INT_MAX.
|
||||
-1 for unlimited. Default is 65536.
|
||||
|
||||
@end table
|
||||
|
||||
The URL syntax is
|
||||
@example
|
||||
cache:@var{URL}
|
||||
@end example
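
For instance (a sketch; the URL is a placeholder), a remote file can be played
with local seeking enabled:
@example
ffplay cache:http://example.com/video.mp4
@end example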
|
||||
@@ -215,38 +129,6 @@ ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
|
||||
Note that you may need to escape the character "|" which is special for
|
||||
many shells.
|
||||
|
||||
@section concatf
|
||||
|
||||
Physical concatenation protocol using a line break delimited list of
|
||||
resources.
|
||||
|
||||
Read and seek from many resources in sequence as if they were
|
||||
a unique resource.
|
||||
|
||||
A URL accepted by this protocol has the syntax:
|
||||
@example
|
||||
concatf:@var{URL}
|
||||
@end example
|
||||
|
||||
where @var{URL} is the url containing a line break delimited list of
|
||||
resources to be concatenated, each one possibly specifying a distinct
|
||||
protocol. Special characters must be escaped with backslash or single
|
||||
quotes. See @ref{quoting_and_escaping,,the "Quoting and escaping"
|
||||
section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
For example to read a sequence of files @file{split1.mpeg},
|
||||
@file{split2.mpeg}, @file{split3.mpeg} listed in separate lines within
|
||||
a file @file{split.txt} with @command{ffplay} use the command:
|
||||
@example
|
||||
ffplay concatf:split.txt
|
||||
@end example
|
||||
Where @file{split.txt} contains the lines:
|
||||
@example
|
||||
split1.mpeg
|
||||
split2.mpeg
|
||||
split3.mpeg
|
||||
@end example
|
||||
|
||||
@section crypto
|
||||
|
||||
AES-encrypted stream reading protocol.
|
||||
@@ -275,33 +157,6 @@ For example, to convert a GIF file given inline with @command{ffmpeg}:
|
||||
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
|
||||
@end example
|
||||
|
||||
@section fd
|
||||
|
||||
File descriptor access protocol.
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
fd: -fd @var{file_descriptor}
|
||||
@end example
|
||||
|
||||
If @option{fd} is not specified, by default the stdout file descriptor will be
used for writing, stdin for reading. Unlike the pipe protocol, the fd protocol
supports seeking if the file descriptor refers to a regular file. For security
reasons, the fd protocol does not accept a file descriptor passed via the URL;
it must be set with the @option{fd} option.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item blocksize
|
||||
Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable if data transmission is slow.
|
||||
|
||||
@item fd
|
||||
Set file descriptor.
|
||||
@end table
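
As a sketch (assuming the shell opens file descriptor 3 for the output file),
Matroska output could be written to an already-open descriptor like this:
@example
ffmpeg -i input.mp4 -f matroska -fd 3 fd: 3>output.mkv
@end example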
|
||||
|
||||
@section file
|
||||
|
||||
File access protocol.
|
||||
@@ -373,14 +228,6 @@ Set timeout in microseconds of socket I/O operations used by the underlying low
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@item ftp-user
|
||||
Set a user to be used for authenticating to the FTP server. This is overridden by the
|
||||
user in the FTP URL.
|
||||
|
||||
@item ftp-password
|
||||
Set a password to be used for authenticating to the FTP server. This is overridden by
|
||||
the password in the FTP URL, or by @option{ftp-anonymous-password} if no user is set.
|
||||
|
||||
@item ftp-anonymous-password
|
||||
Password used when logging in as an anonymous user. Typically an e-mail
address should be used.
|
||||
@@ -400,12 +247,6 @@ operation. ff* tools may produce incomplete content due to server limitations.
|
||||
|
||||
Gopher protocol.
|
||||
|
||||
@section gophers
|
||||
|
||||
Gophers protocol.
|
||||
|
||||
The Gopher protocol with TLS encapsulation.
|
||||
|
||||
@section hls
|
||||
|
||||
Read Apple HTTP Live Streaming compliant segmented stream as
|
||||
@@ -465,6 +306,14 @@ Set the Referer header. Include 'Referer: URL' header in HTTP request.
|
||||
Override the User-Agent header. If not specified the protocol will use a
|
||||
string describing the libavformat build. ("Lavf/<version>")
|
||||
|
||||
@item user-agent
|
||||
This is a deprecated option; use @option{user_agent} instead.
|
||||
|
||||
@item timeout
|
||||
Set timeout in microseconds of socket I/O operations used by the underlying low level
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@item reconnect_at_eof
|
||||
If set, EOF is treated like an error and causes reconnection; this is useful
for live / endless streams.
|
||||
@@ -472,13 +321,6 @@ for live / endless streams.
|
||||
@item reconnect_streamed
|
||||
If set then even streamed/non seekable streams will be reconnected on errors.
|
||||
|
||||
@item reconnect_on_network_error
|
||||
Reconnect automatically in case of TCP/TLS errors during connect.
|
||||
|
||||
@item reconnect_on_http_error
|
||||
A comma separated list of HTTP status codes to reconnect on. The list can
|
||||
include specific status codes (e.g. '503') or the strings '4xx' / '5xx'.
|
||||
|
||||
@item reconnect_delay_max
|
||||
Sets the maximum delay in seconds after which to give up reconnecting
|
||||
|
||||
@@ -556,28 +398,6 @@ Send an Expect: 100-continue header for POST. If set to 1 it will send, if set
|
||||
to 0 it won't, if set to -1 it will try to send if it is applicable. Default
|
||||
value is -1.
|
||||
|
||||
@item auth_type
|
||||
|
||||
Set HTTP authentication type. No option for Digest, since this method requires
|
||||
getting nonce parameters from the server first and can't be used straight away like
|
||||
Basic.
|
||||
|
||||
@table @option
|
||||
@item none
|
||||
Choose the HTTP authentication type automatically. This is the default.
|
||||
@item basic
|
||||
|
||||
Choose the HTTP basic authentication.
|
||||
|
||||
Basic authentication sends a Base64-encoded string that contains a user name and password
|
||||
for the client. Base64 is not a form of encryption and should be considered the same as
|
||||
sending the user name and password in clear text (Base64 is a reversible encoding).
|
||||
If a resource needs to be protected, strongly consider using an authentication scheme
|
||||
other than basic authentication. HTTPS/TLS should be used with basic authentication.
|
||||
Without these additional security enhancements, basic authentication should not be used
|
||||
to protect sensitive or valuable information.
|
||||
@end table
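
As an illustrative sketch (server and credentials are placeholders), basic
authentication can be forced when reading from a protected server:
@example
ffmpeg -auth_type basic -i http://user:password@@example.com/video.mp4 -c copy out.mkv
@end example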
|
||||
|
||||
@end table
|
||||
|
||||
@subsection HTTP Cookies
|
||||
@@ -632,44 +452,12 @@ audio/mpeg.
|
||||
This enables support for Icecast versions < 2.4.0, that do not support the
|
||||
HTTP PUT method but the SOURCE method.
|
||||
|
||||
@item tls
|
||||
Establish a TLS (HTTPS) connection to Icecast.
|
||||
|
||||
@end table
|
||||
|
||||
@example
|
||||
icecast://[@var{username}[:@var{password}]@@]@var{server}:@var{port}/@var{mountpoint}
|
||||
@end example
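
As a sketch (host, mount point and credentials are placeholders), an Ogg/Vorbis
stream could be sent to a mount point like this:
@example
ffmpeg -re -i input.wav -c:a libvorbis -f ogg -content_type audio/ogg icecast://source:hackme@@example.com:8000/live.ogg
@end example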
|
||||
|
||||
@section ipfs
|
||||
|
||||
InterPlanetary File System (IPFS) protocol support. One can access files stored
|
||||
on the IPFS network through so-called gateways. These are http(s) endpoints.
|
||||
This protocol wraps the IPFS native protocols (ipfs:// and ipns://) to be sent
|
||||
to such a gateway. Users can (and should) host their own node which means this
|
||||
protocol will use one's local gateway to access files on the IPFS network.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item gateway
|
||||
Defines the gateway to use. When not set, the protocol will first try
|
||||
locating the local gateway by looking at @code{$IPFS_GATEWAY}, @code{$IPFS_PATH}
|
||||
and @code{$HOME/.ipfs/}, in that order.
|
||||
|
||||
@end table
|
||||
|
||||
One can use this protocol in 2 ways. Using IPFS:
|
||||
@example
|
||||
ffplay ipfs://<hash>
|
||||
@end example
|
||||
|
||||
Or the IPNS protocol (IPNS is mutable IPFS):
|
||||
@example
|
||||
ffplay ipns://<hash>
|
||||
@end example
|
||||
|
||||
@section mmst
|
||||
|
||||
MMS (Microsoft Media Server) protocol over TCP.
|
||||
@@ -714,7 +502,7 @@ The accepted syntax is:
|
||||
pipe:[@var{number}]
|
||||
@end example
|
||||
|
||||
If @option{fd} isn't specified, @var{number} is the number corresponding to the file descriptor of the
|
||||
@var{number} is the number corresponding to the file descriptor of the
|
||||
pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If @var{number}
|
||||
is not specified, by default the stdout file descriptor will be used
|
||||
for writing, stdin for reading.
|
||||
@@ -741,8 +529,6 @@ Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable if data transmission is slow.
|
||||
@item fd
|
||||
Set file descriptor.
|
||||
@end table
|
||||
|
||||
Note that some formats (typically MOV) require the output protocol to
|
||||
@@ -783,50 +569,6 @@ Example usage:
|
||||
-f rtp_mpegts -fec prompeg=l=8:d=4 rtp://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
@section rist
|
||||
|
||||
Reliable Internet Streaming Transport protocol
|
||||
|
||||
The accepted options are:
|
||||
@table @option
|
||||
@item rist_profile
|
||||
Supported values:
|
||||
@table @samp
|
||||
@item simple
|
||||
@item main
|
||||
This is the default.
|
||||
@item advanced
|
||||
@end table
|
||||
|
||||
@item buffer_size
|
||||
Set internal RIST buffer size in milliseconds for retransmission of data.
|
||||
Default value is 0 which means the librist default (1 sec). Maximum value is 30
|
||||
seconds.
|
||||
|
||||
@item fifo_size
|
||||
Size of the librist receiver output fifo in number of packets. This must be a
|
||||
power of 2.
|
||||
Defaults to 8192 (vs the librist default of 1024).
|
||||
|
||||
@item overrun_nonfatal=@var{1|0}
|
||||
Survive in case of librist fifo buffer overrun. Default value is 0.
|
||||
|
||||
@item pkt_size
|
||||
Set maximum packet size for sending data. 1316 by default.
|
||||
|
||||
@item log_level
|
||||
Set loglevel for RIST logging messages. You only need to set this if you
|
||||
explicitly want to enable debug level messages or packet loss simulation,
|
||||
otherwise the regular loglevel is respected.
|
||||
|
||||
@item secret
Override the encryption secret. By default it is unset.

@item encryption
Set the encryption type. By default encryption is disabled.
Acceptable values are 128 and 256.
|
||||
@end table
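
As an illustrative sketch (address and port are placeholders), an MPEG-TS
stream could be sent over RIST like this:
@example
ffmpeg -re -i input.mp4 -c copy -f mpegts rist://192.168.1.10:8193
@end example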
|
||||
|
||||
@section rtmp
|
||||
|
||||
Real-Time Messaging Protocol.
|
||||
@@ -941,11 +683,6 @@ URL to player swf file, compute hash/size automatically.
|
||||
@item rtmp_tcurl
|
||||
URL of the target stream. Defaults to proto://host[:port]/app.
|
||||
|
||||
@item tcp_nodelay=@var{1|0}
|
||||
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.
|
||||
|
||||
@emph{Remark: Writing to the socket is currently not optimized to minimize system calls and reduces the efficiency / effect of TCP_NODELAY.}
|
||||
|
||||
@end table
|
||||
|
||||
For example to read with @command{ffplay} a multimedia resource named
|
||||
@@ -1133,9 +870,6 @@ Set the local RTCP port to @var{n}.
|
||||
@item pkt_size=@var{n}
|
||||
Set max packet size (in bytes) to @var{n}.
|
||||
|
||||
@item buffer_size=@var{size}
|
||||
Set the maximum UDP socket buffer size in bytes.
|
||||
|
||||
@item connect=0|1
|
||||
Do a @code{connect()} on the UDP socket (if set to 1) or not (if set
|
||||
to 0).
|
||||
@@ -1153,13 +887,6 @@ set to 1) or to a default remote address (if set to 0).
|
||||
@item localport=@var{n}
|
||||
Set the local RTP port to @var{n}.
|
||||
|
||||
@item localaddr=@var{addr}
|
||||
Local IP address of a network interface used for sending packets or joining
|
||||
multicast groups.
|
||||
|
||||
@item timeout=@var{n}
|
||||
Set timeout (in microseconds) of socket I/O operations to @var{n}.
|
||||
|
||||
This is a deprecated option. Instead, @option{localrtpport} should be
|
||||
used.
|
||||
|
||||
@@ -1204,59 +931,6 @@ Options can be set on the @command{ffmpeg}/@command{ffplay} command
|
||||
line, or set in code via @code{AVOption}s or in
|
||||
@code{avformat_open_input}.
|
||||
|
||||
@subsection Muxer
|
||||
The following options are supported.
|
||||
|
||||
@table @option
|
||||
@item rtsp_transport
|
||||
Set RTSP transport protocols.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item udp
|
||||
Use UDP as lower transport protocol.
|
||||
|
||||
@item tcp
|
||||
Use TCP (interleaving within the RTSP control channel) as lower
|
||||
transport protocol.
|
||||
@end table
|
||||
|
||||
Default value is @samp{0}.
|
||||
|
||||
@item rtsp_flags
|
||||
Set RTSP flags.
|
||||
|
||||
The following values are accepted:
|
||||
@table @samp
|
||||
@item latm
|
||||
Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC.
|
||||
@item rfc2190
|
||||
Use RFC 2190 packetization instead of RFC 4629 for H.263.
|
||||
@item skip_rtcp
|
||||
Don't send RTCP sender reports.
|
||||
@item h264_mode0
|
||||
Use mode 0 for H.264 in RTP.
|
||||
@item send_bye
|
||||
Send RTCP BYE packets when finishing.
|
||||
@end table
|
||||
|
||||
Default value is @samp{0}.
|
||||
|
||||
|
||||
@item min_port
|
||||
Set minimum local UDP port. Default value is 5000.
|
||||
|
||||
@item max_port
|
||||
Set maximum local UDP port. Default value is 65000.
|
||||
|
||||
@item buffer_size
|
||||
Set the maximum socket buffer size in bytes.
|
||||
|
||||
@item pkt_size
|
||||
Set max send packet size (in bytes). Default value is 1472.
|
||||
@end table
|
||||
|
||||
@subsection Demuxer
|
||||
The following options are supported.
|
||||
|
||||
@table @option
|
||||
@@ -1282,10 +956,6 @@ Use UDP multicast as lower transport protocol.
|
||||
@item http
|
||||
Use HTTP tunneling as lower transport protocol, which is useful for
|
||||
passing proxies.
|
||||
|
||||
@item https
|
||||
Use HTTPS tunneling as the lower transport protocol, which is useful for
passing proxies and is widely used for security reasons.
|
||||
@end table
|
||||
|
||||
Multiple lower transport protocols may be specified, in that case they are
|
||||
@@ -1303,9 +973,6 @@ Accept packets only from negotiated peer address and port.
|
||||
Act as a server, listening for an incoming connection.
|
||||
@item prefer_tcp
|
||||
Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
|
||||
@item satip_raw
|
||||
Export raw MPEG-TS stream instead of demuxing. The flag will simply write out
|
||||
the raw stream, with the original PAT/PMT/PIDs intact.
|
||||
@end table
|
||||
|
||||
Default value is @samp{none}.
|
||||
@@ -1318,7 +985,6 @@ The following flags are accepted:
|
||||
@item video
|
||||
@item audio
|
||||
@item data
|
||||
@item subtitle
|
||||
@end table
|
||||
|
||||
By default it accepts all media types.
|
||||
@@ -1329,23 +995,21 @@ Set minimum local UDP port. Default value is 5000.
|
||||
@item max_port
|
||||
Set maximum local UDP port. Default value is 65000.
|
||||
|
||||
@item listen_timeout
|
||||
Set maximum timeout (in seconds) to establish an initial connection. Setting
|
||||
@option{listen_timeout} > 0 sets @option{rtsp_flags} to @samp{listen}. Default is -1
|
||||
which means an infinite timeout when @samp{listen} mode is set.
|
||||
@item timeout
|
||||
Set maximum timeout (in seconds) to wait for incoming connections.
|
||||
|
||||
A value of -1 means infinite (default). This option implies setting
@option{rtsp_flags} to @samp{listen}.
|
||||
|
||||
@item reorder_queue_size
|
||||
Set number of packets to buffer for handling of reordered packets.
|
||||
|
||||
@item timeout
|
||||
@item stimeout
|
||||
Set socket TCP I/O timeout in microseconds.
|
||||
|
||||
@item user_agent
|
||||
@item user-agent
|
||||
Override User-Agent header. If not specified, it defaults to the
|
||||
libavformat identifier string.
|
||||
|
||||
@item buffer_size
|
||||
Set the maximum socket buffer size in bytes.
|
||||
@end table
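
For instance (a sketch; the URL is a placeholder), an RTSP stream can be
recorded over TCP as follows:
@example
ffmpeg -rtsp_transport tcp -i rtsp://example.com/stream -c copy out.mp4
@end example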
|
||||
|
||||
When receiving data over UDP, the demuxer tries to reorder received packets
|
||||
@@ -1523,7 +1187,7 @@ options.
|
||||
This protocol accepts the following options.
|
||||
|
||||
@table @option
|
||||
@item connect_timeout=@var{milliseconds}
|
||||
@item connect_timeout
|
||||
Connection timeout; SRT cannot connect for RTT > 1500 msec
|
||||
(2 handshake exchanges) with the default connect timeout of
|
||||
3 seconds. This option applies to the caller and rendezvous
|
||||
@@ -1554,7 +1218,7 @@ IP Type of Service. Applies to sender only. Default value is 0xB8.
|
||||
@item ipttl=@var{ttl}
|
||||
IP Time To Live. Applies to sender only. Default value is 64.
|
||||
|
||||
@item latency=@var{microseconds}
|
||||
@item latency
|
||||
Timestamp-based Packet Delivery Delay.
|
||||
Used to absorb bursts of missed packet retransmissions.
|
||||
This flag sets both @option{rcvlatency} and @option{peerlatency}
|
||||
@@ -1565,7 +1229,7 @@ when side is sender and @option{rcvlatency}
|
||||
when side is receiver, and the bidirectional stream
|
||||
sending is not supported.
|
||||
|
||||
@item listen_timeout=@var{microseconds}
|
||||
@item listen_timeout
|
||||
Set socket listen timeout.
|
||||
|
||||
@item maxbw=@var{bytes/seconds}
|
||||
@@ -1610,32 +1274,6 @@ only if @option{pbkeylen} is non-zero. It is used on
|
||||
the receiver only if the received data is encrypted.
|
||||
The configured passphrase cannot be recovered (write-only).
|
||||
|
||||
@item enforced_encryption=@var{1|0}
|
||||
If true, both connection parties must have the same password
|
||||
set (including empty, that is, with no encryption). If the
|
||||
password doesn't match or only one side is unencrypted,
|
||||
the connection is rejected. Default is true.
|
||||
|
||||
@item kmrefreshrate=@var{packets}
|
||||
The number of packets to be transmitted after which the
|
||||
encryption key is switched to a new key. Default is -1.
|
||||
-1 means auto (0x1000000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item kmpreannounce=@var{packets}
|
||||
The interval between when a new encryption key is sent and
|
||||
when switchover occurs. This value also applies to the
|
||||
subsequent interval between when switchover occurs and
|
||||
when the old encryption key is decommissioned. Default is -1.
|
||||
-1 means auto (0x1000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item snddropdelay=@var{microseconds}
|
||||
The sender's extra delay before dropping packets. This delay is
|
||||
added to the default drop delay time interval value.
|
||||
|
||||
Special value -1: Do not drop packets on the sender at all.
|
||||
|
||||
@item payload_size=@var{bytes}
|
||||
Sets the maximum declared size of a packet transferred
|
||||
during the single call to the sending function in Live
|
||||
@@ -1651,7 +1289,7 @@ use a bigger maximum frame size, though not greater than
|
||||
@item pkt_size=@var{bytes}
|
||||
Alias for @samp{payload_size}.
|
||||
|
||||
@item peerlatency=@var{microseconds}
|
||||
@item peerlatency
|
||||
The latency value (as described in @option{rcvlatency}) that is
|
||||
set by the sender side as a minimum value for the receiver.
|
||||
|
||||
@@ -1663,7 +1301,7 @@ Not required on receiver (set to 0),
|
||||
key size obtained from sender in HaiCrypt handshake.
|
||||
Default value is 0.
|
||||
|
||||
@item rcvlatency=@var{microseconds}
|
||||
@item rcvlatency
|
||||
The time that should elapse since the moment when the
|
||||
packet was sent and the moment when it's delivered to
|
||||
the receiver application in the receiving function.
|
||||
@@ -1681,10 +1319,12 @@ Set UDP receive buffer size, expressed in bytes.
|
||||
@item send_buffer_size=@var{bytes}
|
||||
Set UDP send buffer size, expressed in bytes.
|
||||
|
||||
@item timeout=@var{microseconds}
|
||||
Set raise error timeouts for read, write and connect operations. Note that the
SRT library has internal timeouts which can be controlled separately; the
value set here is only a cap on those.
|
||||
@item rw_timeout
|
||||
Set raise error timeout for read/write operations.
|
||||
|
||||
This option is only relevant in read mode:
|
||||
if no data arrived in more than this time
|
||||
interval, raise error.
|
||||
|
||||
@item tlpktdrop=@var{1|0}
|
||||
Too-late Packet Drop. When enabled on receiver, it skips
|
||||
@@ -1735,9 +1375,6 @@ This option doesn’t make sense in Rendezvous connection; the result
|
||||
might be that simply one side will override the value from the other
side, and it is a matter of luck which one wins.
|
||||
|
||||
@item srt_streamid=@var{string}
|
||||
Alias for @samp{streamid} to avoid conflict with ffmpeg command line option.
|
||||
|
||||
@item smoother=@var{live|file}
|
||||
The type of Smoother used for the transmission for that socket, which
|
||||
is responsible for the transmission and congestion control. The Smoother
|
||||
@@ -1781,17 +1418,6 @@ the overhead transmission (retransmitted and control packets).
|
||||
file: Set options as for non-live transmission. See @option{messageapi}
|
||||
for further explanations
|
||||
|
||||
@item linger=@var{seconds}
|
||||
The number of seconds that the socket waits for unsent data when closing.
|
||||
Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
|
||||
seconds in file mode). The range for this option is integers in the
|
||||
0 - @code{INT_MAX}.
|
||||
|
||||
@item tsbpd=@var{1|0}
|
||||
When true, use Timestamp-based Packet Delivery mode. The default behavior
|
||||
depends on the transmission type: enabled in live mode, disabled in file
|
||||
mode.
|
||||
|
||||
@end table
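
As an illustrative sketch (address, port and options are placeholders), an
MPEG-TS stream could be sent over SRT in caller mode like this:
@example
ffmpeg -re -i input.mp4 -c copy -f mpegts "srt://192.168.1.10:9000?pkt_size=1316"
@end example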
|
||||
|
||||
For more information see: @url{https://github.com/Haivision/srt}.
|
||||
@@ -1878,9 +1504,8 @@ tcp://@var{hostname}:@var{port}[?@var{options}]
|
||||
The list of supported options follows.
|
||||
|
||||
@table @option
|
||||
@item listen=@var{2|1|0}
|
||||
Listen for an incoming connection. 0 disables listen, 1 enables listen in
|
||||
single client mode, 2 enables listen in multi-client mode. Default value is 0.
|
||||
@item listen=@var{1|0}
|
||||
Listen for an incoming connection. Default value is 0.
|
||||
|
||||
@item timeout=@var{microseconds}
|
||||
Set raise error timeout, expressed in microseconds.
|
||||
@@ -1900,8 +1525,6 @@ Set send buffer size, expressed bytes.
|
||||
@item tcp_nodelay=@var{1|0}
|
||||
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.
|
||||
|
||||
@emph{Remark: Writing to the socket is currently not optimized to minimize system calls and reduces the efficiency / effect of TCP_NODELAY.}
|
||||
|
||||
@item tcp_mss=@var{bytes}
|
||||
Set maximum segment size for outgoing TCP packets, expressed in bytes.
|
||||
@end table
|
||||
@@ -1958,10 +1581,6 @@ A file containing the private key for the certificate.
|
||||
If enabled, listen for connections on the provided port, and assume
|
||||
the server role in the handshake instead of the client role.
|
||||
|
||||
@item http_proxy
|
||||
The HTTP proxy to tunnel through, e.g. @code{http://example.com:1234}.
|
||||
The proxy must support the CONNECT method.
|
||||
|
||||
@end table
|
||||
|
||||
Example command lines:
|
||||
@@ -2000,7 +1619,7 @@ The list of supported options follows.
|
||||
@item buffer_size=@var{size}
|
||||
Set the UDP maximum socket buffer size in bytes. This is used to set either
|
||||
the receive or send buffer size, depending on what the socket is used for.
|
||||
Default is 32 KB for output, 384 KB for input. See also @var{fifo_size}.
|
||||
Default is 64KB. See also @var{fifo_size}.
|
||||
|
||||
@item bitrate=@var{bitrate}
|
||||
If set to nonzero, the output will have the specified constant bitrate if the
|
||||
@@ -2109,50 +1728,4 @@ Timeout in ms.
|
||||
Create the Unix socket in listening mode.
|
||||
@end table
|
||||
|
||||
@section zmq
|
||||
|
||||
ZeroMQ asynchronous messaging using the libzmq library.
|
||||
|
||||
This library supports unicast streaming to multiple clients without relying on
|
||||
an external server.
|
||||
|
||||
The required syntax for streaming or connecting to a stream is:
|
||||
@example
|
||||
zmq:tcp://ip-address:port
|
||||
@end example
|
||||
|
||||
Example:
|
||||
Create a localhost stream on port 5555:
|
||||
@example
|
||||
ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
|
||||
@end example
|
||||
|
||||
Multiple clients may connect to the stream using:
|
||||
@example
|
||||
ffplay zmq:tcp://127.0.0.1:5555
|
||||
@end example
|
||||
|
||||
Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
|
||||
The server side binds to a port and publishes data. Clients connect to the
|
||||
server (via IP address/port) and subscribe to the stream. The order in which
|
||||
the server and client start generally does not matter.
|
||||
|
||||
ffmpeg must be compiled with the --enable-libzmq option to support
|
||||
this protocol.
|
||||
|
||||
Options can be set on the @command{ffmpeg}/@command{ffplay} command
|
||||
line. The following options are supported:
|
||||
|
||||
@table @option
|
||||
|
||||
@item pkt_size
|
||||
Forces the maximum packet size for sending/receiving data. The default value is
|
||||
131,072 bytes. On the server side, this sets the maximum size of sent packets
|
||||
via ZeroMQ. On the clients, it sets an internal buffer size for receiving
|
||||
packets. Note that pkt_size on the clients should be equal to or greater than
|
||||
pkt_size on the server. Otherwise the received message may be truncated causing
|
||||
decoding errors.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end PROTOCOLS
|
||||
|
||||
@@ -11,8 +11,18 @@ programmatic use.
|
||||
|
||||
@table @option
|
||||
|
||||
@item uchl, used_chlayout
|
||||
Set used input channel layout. Default is unset. This option is
|
||||
@item ich, in_channel_count
|
||||
Set the number of input channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
@option{in_channel_layout} is set.
|
||||
|
||||
@item och, out_channel_count
|
||||
Set the number of output channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
@option{out_channel_layout} is set.
|
||||
|
||||
@item uch, used_channel_count
|
||||
Set the number of used input channels. Default value is 0. This option is
|
||||
only used for special remapping.
|
||||
|
||||
@item isr, in_sample_rate
|
||||
@@ -31,8 +41,8 @@ Specify the output sample format. It is set by default to @code{none}.
|
||||
Set the internal sample format. Default value is @code{none}.
|
||||
This will automatically be chosen when it is not explicitly set.
|
||||
|
||||
@item ichl, in_chlayout
|
||||
@item ochl, out_chlayout
|
||||
@item icl, in_channel_layout
|
||||
@item ocl, out_channel_layout
|
||||
Set the input/output channel layout.
|
||||
|
||||
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
|
||||
doc/t2h.pm | 122 lines changed
@@ -20,45 +20,8 @@
|
||||
# License along with FFmpeg; if not, write to the Free Software
|
||||
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
|
||||
# Texinfo 7.0 changed the syntax of various functions.
|
||||
# Provide a shim for older versions.
|
||||
sub ff_set_from_init_file($$) {
|
||||
my $key = shift;
|
||||
my $value = shift;
|
||||
if (exists &{'texinfo_set_from_init_file'}) {
|
||||
texinfo_set_from_init_file($key, $value);
|
||||
} else {
|
||||
set_from_init_file($key, $value);
|
||||
}
|
||||
}
|
||||
|
||||
sub ff_get_conf($) {
|
||||
my $key = shift;
|
||||
if (exists &{'texinfo_get_conf'}) {
|
||||
texinfo_get_conf($key);
|
||||
} else {
|
||||
get_conf($key);
|
||||
}
|
||||
}
|
||||
|
||||
sub get_formatting_function($$) {
|
||||
my $obj = shift;
|
||||
my $func = shift;
|
||||
|
||||
my $sub = $obj->can('formatting_function');
|
||||
if ($sub) {
|
||||
return $obj->formatting_function($func);
|
||||
} else {
|
||||
return $obj->{$func};
|
||||
}
|
||||
}
|
||||
|
||||
# determine texinfo version
|
||||
my $program_version_num = version->declare(ff_get_conf('PACKAGE_VERSION'))->numify;
|
||||
my $program_version_6_8 = $program_version_num >= 6.008000;
|
||||
|
||||
# no navigation elements
|
||||
ff_set_from_init_file('HEADERS', 0);
|
||||
set_from_init_file('HEADERS', 0);
|
||||
|
||||
sub ffmpeg_heading_command($$$$$)
|
||||
{
|
||||
@@ -92,7 +55,7 @@ sub ffmpeg_heading_command($$$$$)
|
||||
$element = $command->{'parent'};
|
||||
}
|
||||
if ($element) {
|
||||
$result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname,
|
||||
$result .= &{$self->{'format_element_header'}}($self, $cmdname,
|
||||
$command, $element);
|
||||
}
|
||||
|
||||
@@ -149,11 +112,7 @@ sub ffmpeg_heading_command($$$$$)
|
||||
$cmdname
|
||||
= $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
|
||||
}
|
||||
# format_heading_text expects an array of headings for texinfo >= 7.0
|
||||
if ($program_version_num >= 7.000000) {
|
||||
$heading = [$heading];
|
||||
}
|
||||
$result .= &{get_formatting_function($self,'format_heading_text')}(
|
||||
$result .= &{$self->{'format_heading_text'}}(
|
||||
$self, $cmdname, $heading,
|
||||
$heading_level +
|
||||
$self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
|
||||
@@ -168,18 +127,14 @@ foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
|
||||
}
|
||||
|
||||
# print the TOC where @contents is used
|
||||
if ($program_version_6_8) {
|
||||
ff_set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline');
|
||||
} else {
|
||||
ff_set_from_init_file('INLINE_CONTENTS', 1);
|
||||
}
|
||||
set_from_init_file('INLINE_CONTENTS', 1);
|
||||
|
||||
# make chapters <h2>
|
||||
ff_set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
|
||||
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
|
||||
|
||||
# Do not add <hr>
|
||||
ff_set_from_init_file('DEFAULT_RULE', '');
|
||||
ff_set_from_init_file('BIG_RULE', '');
|
||||
set_from_init_file('DEFAULT_RULE', '');
|
||||
set_from_init_file('BIG_RULE', '');
|
||||
|
||||
# Customized file beginning
|
||||
sub ffmpeg_begin_file($$$)
|
||||
@@ -196,18 +151,7 @@ sub ffmpeg_begin_file($$$)
|
||||
my ($title, $description, $encoding, $date, $css_lines,
|
||||
$doctype, $bodytext, $copying_comment, $after_body_open,
|
||||
$extra_head, $program_and_version, $program_homepage,
|
||||
$program, $generator);
|
||||
if ($program_version_num >= 7.000000) {
|
||||
($title, $description, $encoding, $date, $css_lines,
|
||||
$doctype, $bodytext, $copying_comment, $after_body_open,
|
||||
$extra_head, $program_and_version, $program_homepage,
|
||||
$program, $generator) = $self->_file_header_information($command);
|
||||
} else {
|
||||
($title, $description, $encoding, $date, $css_lines,
|
||||
$doctype, $bodytext, $copying_comment, $after_body_open,
|
||||
$extra_head, $program_and_version, $program_homepage,
|
||||
$program, $generator) = $self->_file_header_informations($command);
|
||||
}
|
||||
$program, $generator) = $self->_file_header_informations($command);
|
||||
|
||||
my $links = $self->_get_links ($filename, $element);
|
||||
|
||||
@@ -240,11 +184,7 @@ EOT
|
||||
|
||||
return $head1 . $head_title . $head2 . $head_title . $head3;
|
||||
}
|
||||
if ($program_version_6_8) {
|
||||
texinfo_register_formatting_function('format_begin_file', \&ffmpeg_begin_file);
|
||||
} else {
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
}
|
||||
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
|
||||
|
||||
sub ffmpeg_program_string($)
|
||||
{
|
||||
@@ -261,17 +201,13 @@ sub ffmpeg_program_string($)
|
||||
$self->gdt('This document was generated automatically.'));
|
||||
}
|
||||
}
|
||||
if ($program_version_6_8) {
|
||||
texinfo_register_formatting_function('format_program_string', \&ffmpeg_program_string);
|
||||
} else {
|
||||
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
|
||||
}
|
||||
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
|
||||
|
||||
# Customized file ending
|
||||
sub ffmpeg_end_file($)
|
||||
{
|
||||
my $self = shift;
|
||||
my $program_string = &{get_formatting_function($self,'format_program_string')}($self);
|
||||
my $program_string = &{$self->{'format_program_string'}}($self);
|
||||
my $program_text = <<EOT;
|
||||
<p style="font-size: small;">
|
||||
$program_string
|
||||
@@ -284,15 +220,11 @@ EOT
|
||||
EOT
|
||||
return $program_text . $footer;
|
||||
}
|
||||
if ($program_version_6_8) {
|
||||
texinfo_register_formatting_function('format_end_file', \&ffmpeg_end_file);
|
||||
} else {
|
||||
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
|
||||
}
|
||||
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
|
||||
|
||||
# Dummy title command
|
||||
# Ignore title. Title is handled through ffmpeg_begin_file().
|
||||
ff_set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
|
||||
set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
|
||||
sub ffmpeg_title($$$$)
|
||||
{
|
||||
return '';
|
||||
@@ -310,14 +242,8 @@ sub ffmpeg_float($$$$$)
|
||||
my $args = shift;
|
||||
my $content = shift;
|
||||
|
||||
my ($caption, $prepended);
|
||||
if ($program_version_num >= 7.000000) {
|
||||
($caption, $prepended) = Texinfo::Convert::Converter::float_name_caption($self,
|
||||
$command);
|
||||
} else {
|
||||
($caption, $prepended) = Texinfo::Common::float_name_caption($self,
|
||||
$command);
|
||||
}
|
||||
my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
|
||||
$command);
|
||||
my $caption_text = '';
|
||||
my $prepended_text;
|
||||
my $prepended_save = '';
|
||||
@@ -389,13 +315,8 @@ sub ffmpeg_float($$$$$)
|
||||
$caption->{'args'}->[0], 'float caption');
|
||||
}
|
||||
if ($prepended_text.$caption_text ne '') {
|
||||
if ($program_version_num >= 7.000000) {
|
||||
$prepended_text = $self->html_attribute_class('div',['float-caption']). '>'
|
||||
. $prepended_text;
|
||||
} else {
|
||||
$prepended_text = $self->_attribute_class('div','float-caption'). '>'
|
||||
. $prepended_text;
|
||||
}
|
||||
$prepended_text = $self->_attribute_class('div','float-caption'). '>'
|
||||
. $prepended_text;
|
||||
$caption_text .= '</div>';
|
||||
}
|
||||
my $html_class = '';
|
||||
@@ -408,13 +329,8 @@ sub ffmpeg_float($$$$$)
|
||||
$prepended_text = '';
|
||||
$caption_text = '';
|
||||
}
|
||||
if ($program_version_num >= 7.000000) {
|
||||
return $self->html_attribute_class('div', [$html_class]). '>' . "\n" .
|
||||
$prepended_text . $caption_text . $content . '</div>';
|
||||
} else {
|
||||
return $self->_attribute_class('div', $html_class). '>' . "\n" .
|
||||
$prepended_text . $caption_text . $content . '</div>';
|
||||
}
|
||||
return $self->_attribute_class('div', $html_class). '>' . "\n" .
|
||||
$prepended_text . $caption_text . $content . '</div>';
|
||||
}
|
||||
|
||||
texinfo_register_command_formatting('float',
|
||||
|
||||
@@ -172,9 +172,6 @@ INF: while(<$inf>) {
|
||||
} elsif ($ended =~ /^(?:itemize|enumerate|(?:multi|[fv])?table)$/) {
|
||||
$_ = "\n=back\n";
|
||||
$ic = pop @icstack;
|
||||
} elsif ($ended =~ /^float$/) {
|
||||
$_ = "\n=back\n";
|
||||
$ic = pop @icstack;
|
||||
} else {
|
||||
die "unknown command \@end $ended at line $.\n";
|
||||
}
|
||||
@@ -300,12 +297,6 @@ INF: while(<$inf>) {
|
||||
$_ = ""; # need a paragraph break
|
||||
};
|
||||
|
||||
/^\@(float)\s+\w+/ and do {
|
||||
push @endwstack, $endw;
|
||||
$endw = $1;
|
||||
$_ = "\n=over 4\n";
|
||||
};
|
||||
|
||||
/^\@item\s+(.*\S)\s*$/ and $endw eq "multitable" and do {
|
||||
my $columns = $1;
|
||||
$columns =~ s/\@tab/ : /;
|
||||
|
||||
doc/transforms.md | 1029 lines changed (diff too large to display)
@@ -110,13 +110,11 @@ maximum of 2 digits. The @var{m} at the end expresses decimal value for
|
||||
@emph{or}
|
||||
|
||||
@example
|
||||
[-]@var{S}+[.@var{m}...][s|ms|us]
|
||||
[-]@var{S}+[.@var{m}...]
|
||||
@end example
|
||||
|
||||
@var{S} expresses the number of seconds, with the optional decimal part
|
||||
@var{m}. The optional literal suffixes @samp{s}, @samp{ms} or @samp{us}
|
||||
indicate to interpret the value as seconds, milliseconds or microseconds,
|
||||
respectively.
|
||||
@var{m}.
|
||||
|
||||
In both expressions, the optional @samp{-} indicates negative duration.
|
||||
|
||||
@@ -128,15 +126,6 @@ The following examples are all valid time duration:
|
||||
@item 55
|
||||
55 seconds
|
||||
|
||||
@item 0.2
|
||||
0.2 seconds
|
||||
|
||||
@item 200ms
|
||||
200 milliseconds, that's 0.2s
|
||||
|
||||
@item 200000us
|
||||
200000 microseconds, that's 0.2s
|
||||
|
||||
@item 12:03:45
|
||||
12 hours, 03 minutes and 45 seconds
|
||||
|
||||
@@ -713,42 +702,26 @@ FL+FR+FC+LFE+BL+BR+SL+SR
|
||||
FL+FR+FC+LFE+BL+BR+FLC+FRC
|
||||
@item 7.1(wide-side)
|
||||
FL+FR+FC+LFE+FLC+FRC+SL+SR
|
||||
@item 7.1(top)
|
||||
FL+FR+FC+LFE+BL+BR+TFL+TFR
|
||||
@item octagonal
|
||||
FL+FR+FC+BL+BR+BC+SL+SR
|
||||
@item cube
|
||||
FL+FR+BL+BR+TFL+TFR+TBL+TBR
|
||||
@item hexadecagonal
|
||||
FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
|
||||
@item downmix
|
||||
DL+DR
|
||||
@item 22.2
|
||||
FL+FR+FC+LFE+BL+BR+FLC+FRC+BC+SL+SR+TC+TFL+TFC+TFR+TBL+TBC+TBR+LFE2+TSL+TSR+BFC+BFL+BFR
|
||||
@end table
|
||||
|
||||
A custom channel layout can be specified as a sequence of terms, separated by '+'.
|
||||
Each term can be:
|
||||
A custom channel layout can be specified as a sequence of terms, separated by
|
||||
'+' or '|'. Each term can be:
|
||||
@itemize
|
||||
@item
|
||||
the name of a single channel (e.g. @samp{FL}, @samp{FR}, @samp{FC}, @samp{LFE}, etc.),
|
||||
each optionally containing a custom name after a '@@', (e.g. @samp{FL@@Left},
|
||||
@samp{FR@@Right}, @samp{FC@@Center}, @samp{LFE@@Low_Frequency}, etc.)
|
||||
@end itemize
|
||||
|
||||
A standard channel layout can be specified by the following:
|
||||
@itemize
|
||||
@item
|
||||
the name of a single channel (e.g. @samp{FL}, @samp{FR}, @samp{FC}, @samp{LFE}, etc.)
|
||||
|
||||
@item
|
||||
the name of a standard channel layout (e.g. @samp{mono},
|
||||
@samp{stereo}, @samp{4.0}, @samp{quad}, @samp{5.0}, etc.)
|
||||
|
||||
@item
|
||||
the name of a single channel (e.g. @samp{FL}, @samp{FR}, @samp{FC}, @samp{LFE}, etc.)
|
||||
|
||||
@item
|
||||
a number of channels, in decimal, followed by 'c', yielding the default channel
|
||||
layout for that number of channels (see the function
|
||||
@code{av_channel_layout_default}). Note that not all channel counts have a
|
||||
@code{av_get_default_channel_layout}). Note that not all channel counts have a
|
||||
default layout.
|
||||
|
||||
@item
|
||||
@@ -765,7 +738,7 @@ Before libavutil version 53 the trailing character "c" to specify a number of
|
||||
channels was optional, but now it is required, while a channel layout mask can
|
||||
also be specified as a decimal number (if and only if not followed by "c" or "C").
|
||||
|
||||
See also the function @code{av_channel_layout_from_string} defined in
|
||||
See also the function @code{av_get_channel_layout} defined in
|
||||
@file{libavutil/channel_layout.h}.
|
||||
@c man end SYNTAX
|
||||
|
||||
@@ -947,9 +920,6 @@ corresponding input value will be returned.
|
||||
@item round(expr)
|
||||
Round the value of expression @var{expr} to the nearest integer. For example, "round(1.5)" is "2.0".
|
||||
|
||||
@item sgn(x)
|
||||
Compute sign of @var{x}.
|
||||
|
||||
@item sin(x)
|
||||
Compute sine of @var{x}.
|
||||
|
||||
@@ -1077,13 +1047,13 @@ indication of the corresponding powers of 10 and of 2.
|
||||
@item T
|
||||
10^12 / 2^40
|
||||
@item P
|
||||
10^15 / 2^50
|
||||
10^15 / 2^40
|
||||
@item E
|
||||
10^18 / 2^60
|
||||
10^18 / 2^50
|
||||
@item Z
|
||||
10^21 / 2^70
|
||||
10^21 / 2^60
|
||||
@item Y
|
||||
10^24 / 2^80
|
||||
10^24 / 2^70
|
||||
@end table
|
||||
|
||||
@c man end EXPRESSION EVALUATION
|
||||
|
||||
@@ -418,4 +418,4 @@ done:
|
||||
|
||||
When all of this is done, you can submit your patch to the ffmpeg-devel
|
||||
mailing-list for review. If you need any help, feel free to come on our IRC
|
||||
channel, #ffmpeg-devel on irc.libera.chat.
|
||||
channel, #ffmpeg-devel on irc.freenode.net.
|
||||
|
||||
ffbuild/.gitignore (vendored) | 2 lines changed
@@ -1,6 +1,4 @@
|
||||
/.config
|
||||
/bin2c
|
||||
/bin2c.exe
|
||||
/config.fate
|
||||
/config.log
|
||||
/config.mak
|
||||
|
||||
@@ -8,14 +8,10 @@ OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
|
||||
OBJS-$(HAVE_MSA) += $(MSA-OBJS) $(MSA-OBJS-yes)
|
||||
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
|
||||
OBJS-$(HAVE_LSX) += $(LSX-OBJS) $(LSX-OBJS-yes)
|
||||
OBJS-$(HAVE_LASX) += $(LASX-OBJS) $(LASX-OBJS-yes)
|
||||
OBJS-$(HAVE_MMI) += $(MMI-OBJS) $(MMI-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
|
||||
OBJS-$(HAVE_VSX) += $(VSX-OBJS) $(VSX-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_RVV) += $(RVV-OBJS) $(RVV-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
|
||||
OBJS-$(HAVE_X86ASM) += $(X86ASM-OBJS) $(X86ASM-OBJS-yes)
|
||||
|
||||
@@ -1,76 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *name;
|
||||
FILE *input, *output;
|
||||
unsigned int length = 0;
|
||||
unsigned char data;
|
||||
|
||||
if (argc < 3 || argc > 4)
|
||||
return 1;
|
||||
|
||||
input = fopen(argv[1], "rb");
|
||||
if (!input)
|
||||
return -1;
|
||||
|
||||
output = fopen(argv[2], "wb");
|
||||
if (!output)
|
||||
return -1;
|
||||
|
||||
if (argc == 4) {
|
||||
name = argv[3];
|
||||
} else {
|
||||
size_t arglen = strlen(argv[1]);
|
||||
name = argv[1];
|
||||
|
||||
for (int i = 0; i < arglen; i++) {
|
||||
if (argv[1][i] == '.')
|
||||
argv[1][i] = '_';
|
||||
else if (argv[1][i] == '/')
|
||||
name = &argv[1][i+1];
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(output, "const unsigned char ff_%s_data[] = { ", name);
|
||||
|
||||
while (fread(&data, 1, 1, input) > 0) {
|
||||
fprintf(output, "0x%02x, ", data);
|
||||
length++;
|
||||
}
|
||||
|
||||
fprintf(output, "0x00 };\n");
|
||||
fprintf(output, "const unsigned int ff_%s_len = %u;\n", name, length);
|
||||
|
||||
fclose(output);
|
||||
|
||||
if (ferror(input) || !feof(input))
|
||||
return -1;
|
||||
|
||||
fclose(input);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -12,13 +12,10 @@ endif

ifndef SUBDIR

BIN2CEXE = ffbuild/bin2c$(HOSTEXESUF)
BIN2C = $(BIN2CEXE)

ifndef V
Q = @
ECHO = printf "$(1)\t%s\n" $(2)
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC
SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM

MSG = $@
@@ -29,8 +26,7 @@ $(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
endif

# Prepend to a recursively expanded variable without making it simply expanded.
PREPEND = $(eval $(1) = $(patsubst %,$$(%), $(2)) $(value $(1)))
ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample

# NASM requires -I path terminated with /
IFLAGS := -I. -I$(SRC_LINK)/
@@ -40,9 +36,7 @@ CCFLAGS = $(CPPFLAGS) $(CFLAGS)
OBJCFLAGS += $(EOBJCFLAGS)
OBJCCFLAGS = $(CPPFLAGS) $(CFLAGS) $(OBJCFLAGS)
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
# Use PREPEND here so that later (target-dependent) additions to CPPFLAGS
# end up in CXXFLAGS.
$(call PREPEND,CXXFLAGS, CPPFLAGS CFLAGS)
CXXFLAGS := $(CPPFLAGS) $(CFLAGS) $(CXXFLAGS)
X86ASMFLAGS += $(IFLAGS:%=%/) -I$(<D)/ -Pconfig.asm

HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
@@ -50,7 +44,7 @@ LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)

define COMPILE
	$(call $(1)DEP,$(1))
	$($(1)) $($(1)FLAGS) $($(2)) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)
	$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)
endef

COMPILE_C = $(call COMPILE,CC)
@@ -60,22 +54,6 @@ COMPILE_M = $(call COMPILE,OBJCC)
COMPILE_X86ASM = $(call COMPILE,X86ASM)
COMPILE_HOSTC = $(call COMPILE,HOSTCC)
COMPILE_NVCC = $(call COMPILE,NVCC)
COMPILE_MMI = $(call COMPILE,CC,MMIFLAGS)
COMPILE_MSA = $(call COMPILE,CC,MSAFLAGS)
COMPILE_LSX = $(call COMPILE,CC,LSXFLAGS)
COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)

%_mmi.o: %_mmi.c
	$(COMPILE_MMI)

%_msa.o: %_msa.c
	$(COMPILE_MSA)

%_lsx.o: %_lsx.c
	$(COMPILE_LSX)

%_lasx.o: %_lasx.c
	$(COMPILE_LASX)

%.o: %.c
	$(COMPILE_C)
@@ -104,7 +82,7 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
	-$(if $(ASMSTRIPFLAGS), $(STRIP) $(ASMSTRIPFLAGS) $@)

%.o: %.rc
	$(WINDRES) $(IFLAGS) $(foreach ARG,$(CC_DEPFLAGS),--preprocessor-arg "$(ARG)") -o $@ $<
	$(WINDRES) $(IFLAGS) --preprocessor "$(DEPWINDRES) -E -xc-header -DRC_INVOKED $(CC_DEPFLAGS)" -o $@ $<

%.i: %.c
	$(CC) $(CCFLAGS) $(CC_E) $<
@@ -112,40 +90,16 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
%.h.c:
	$(Q)echo '#include "$*.h"' >$@

$(BIN2CEXE): ffbuild/bin2c_host.o
	$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS)

%.metal.air: %.metal
	$(METALCC) $< -o $@

%.metallib: %.metal.air
	$(METALLIB) --split-module-without-linking $< -o $@

%.metallib.c: %.metallib $(BIN2CEXE)
	$(BIN2C) $< $@ $(subst .,_,$(basename $(notdir $@)))

%.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h
	$(COMPILE_NVCC)

ifdef CONFIG_PTX_COMPRESSION
%.ptx.gz: TAG = GZIP
%.ptx.gz: %.ptx
	$(M)gzip -c9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@

%.ptx.c: %.ptx.gz $(BIN2CEXE)
	$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
else
%.ptx.c: %.ptx $(BIN2CEXE)
	$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
endif

clean::
	$(RM) $(BIN2CEXE)
%.ptx.c: %.ptx
	$(Q)sh $(SRC_PATH)/compat/cuda/ptx2c.sh $@ $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)

%.c %.h %.pc %.ver %.version: TAG = GEN

# Dummy rule to stop make trying to rebuild removed or renamed headers
%.h %_template.c:
%.h:
	@:

# Disable suffix rules. Most of the builtin rules are suffix rules,
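The %.metallib.c and %.ptx.c rules above feed the compiled Metal library, or the (optionally gzip-compressed) PTX, through bin2c; the third argument, $(subst .,_,$(basename $(notdir $@))), turns a target such as vf_example.ptx.c into the symbol stem vf_example_ptx, so the generated file defines ff_vf_example_ptx_data and ff_vf_example_ptx_len. A minimal C sketch of how a consumer picks such a blob up; the names are illustrative, not symbols from this diff:

/* The generated .ptx.c / .metallib.c file defines these two symbols (see the
 * bin2c source above); a consumer only needs matching extern declarations and
 * must link against the generated object. "ff_vf_example_ptx" is a
 * hypothetical stem used purely for illustration. */
extern const unsigned char ff_vf_example_ptx_data[];
extern const unsigned int  ff_vf_example_ptx_len;

static const void *get_embedded_ptx(unsigned int *size)
{
    *size = ff_vf_example_ptx_len;  /* byte count without the padding 0x00 */
    return ff_vf_example_ptx_data;
}

Note that with CONFIG_PTX_COMPRESSION the embedded blob is the gzip stream produced by the %.ptx.gz rule, so it has to be inflated at runtime before use.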
@@ -160,8 +114,6 @@ include $(SRC_PATH)/ffbuild/arch.mak

OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
SHLIBOBJS += $(SHLIBOBJS-yes)
STLIBOBJS += $(STLIBOBJS-yes)
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)

@@ -170,8 +122,6 @@ FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=

OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%))
STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)tests/%$(EXESUF))
HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)
@@ -193,7 +143,7 @@ HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
PTXOBJS = $(filter %.ptx.o,$(OBJS))
$(HOBJS): CCFLAGS += $(CFLAGS_HEADERS)
checkheaders: $(HOBJS)
.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=.gz) $(PTXOBJS:.o=)
.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=)

alltools: $(TOOLS)

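checkheaders builds on the %.h.c rule shown earlier: for each header covered by HOBJS (ALLHEADERS minus SKIPHEADERS), make generates a one-line stub and compiles it with CFLAGS_HEADERS added, which fails if the header does not pull in everything it needs on its own. A sketch of such a generated stub, using libavutil/frame.h purely as an example name:

/* Contents of a generated frame.h.c stub (from the "%.h.c:" rule above);
 * compiling this translation unit in isolation verifies that the header is
 * self-contained. */
#include "libavutil/frame.h"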
@@ -207,14 +157,12 @@ $(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
$(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS)))
$(STLIBOBJS): | $(sort $(dir $(STLIBOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(TOOLOBJS): | tools

OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))

CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a

define RULES
@@ -224,4 +172,4 @@ endef

$(eval $(RULES))

-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)

@@ -14,26 +14,10 @@ INSTHEADERS := $(INSTHEADERS) $(HEADERS:%=$(SUBDIR)%)
all-$(CONFIG_STATIC): $(SUBDIR)$(LIBNAME) $(SUBDIR)lib$(FULLNAME).pc
all-$(CONFIG_SHARED): $(SUBDIR)$(SLIBNAME) $(SUBDIR)lib$(FULLNAME).pc

LIBOBJS := $(OBJS) $(SHLIBOBJS) $(STLIBOBJS) $(SUBDIR)%.h.o $(TESTOBJS)
LIBOBJS := $(OBJS) $(SUBDIR)%.h.o $(TESTOBJS)
$(LIBOBJS) $(LIBOBJS:.o=.s) $(LIBOBJS:.o=.i): CPPFLAGS += -DHAVE_AV_CONFIG_H

ifdef CONFIG_SHARED
# In case both shared libs and static libs are enabled, it can happen
# that a user might want to link e.g. libavformat statically, but
# libavcodec and the other libs dynamically. In this case
# libavformat won't be able to access libavcodec's internal symbols,
# so that they have to be duplicated into the archive just like
# for purely shared builds.
# Test programs are always statically linked against their library
# to be able to access their library's internals, even with shared builds.
# Yet linking against dependent libraries still uses dynamic linking.
# This means that we are in the scenario described above.
# In case only static libs are used, the linker will only use
# one of these copies; this depends on the duplicated object files
# containing exactly the same symbols.
OBJS += $(SHLIBOBJS)
endif
$(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS)
$(SUBDIR)$(LIBNAME): $(OBJS)
	$(RM) $@
	$(AR) $(ARFLAGS) $(AR_O) $^
	$(RANLIB) $@
@@ -52,8 +36,8 @@ $(LIBOBJS): CPPFLAGS += -DBUILDING_$(NAME)
$(TESTPROGS) $(TOOLS): %$(EXESUF): %.o
	$$(LD) $(LDFLAGS) $(LDEXEFLAGS) $$(LD_O) $$(filter %.o,$$^) $$(THISLIB) $(FFEXTRALIBS) $$(EXTRALIBS-$$(*F)) $$(ELIBS)

$(SUBDIR)lib$(NAME).version: $(SUBDIR)version.h $(SUBDIR)version_major.h | $(SUBDIR)
	$$(M) $$(SRC_PATH)/ffbuild/libversion.sh $(NAME) $$^ > $$@
$(SUBDIR)lib$(NAME).version: $(SUBDIR)version.h | $(SUBDIR)
	$$(M) $$(SRC_PATH)/ffbuild/libversion.sh $(NAME) $$< > $$@

$(SUBDIR)lib$(FULLNAME).pc: $(SUBDIR)version.h ffbuild/config.sh | $(SUBDIR)
	$$(M) $$(SRC_PATH)/ffbuild/pkgconfig_generate.sh $(NAME) "$(DESC)"
@@ -64,7 +48,7 @@ $(SUBDIR)lib$(NAME).ver: $(SUBDIR)lib$(NAME).v $(OBJS)
$(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR)
	$(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME)

$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
	$(SLIB_CREATE_DEF_CMD)
	$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS)
	$(SLIB_EXTRA_CMD)

@@ -5,12 +5,8 @@ toupper(){
name=lib$1
ucname=$(toupper ${name})
file=$2
file2=$3

eval $(awk "/#define ${ucname}_VERSION_M/ { print \$2 \"=\" \$3 }" "$file")
if [ -f "$file2" ]; then
    eval $(awk "/#define ${ucname}_VERSION_M/ { print \$2 \"=\" \$3 }" "$file2")
fi
eval ${ucname}_VERSION=\$${ucname}_VERSION_MAJOR.\$${ucname}_VERSION_MINOR.\$${ucname}_VERSION_MICRO
eval echo "${name}_VERSION=\$${ucname}_VERSION"
eval echo "${name}_VERSION_MAJOR=\$${ucname}_VERSION_MAJOR"

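libversion.sh scrapes every "#define <LIBNAME>_VERSION_M..." line out of the header(s) it is given and re-emits the macros as shell assignments, which the lib$(NAME).version rule above redirects into the .version file; the newer variant of that rule passes version_major.h as well, since the MAJOR define moved into its own header in the newer tree layout. A sketch of the kind of macros the awk pattern matches, with illustrative numbers only:

/* Illustrative version macros as they appear in a library's version.h /
 * version_major.h; the pattern "/#define LIBAVUTIL_VERSION_M/" matches all
 * three lines. The values here are examples, not taken from this diff. */
#define LIBAVUTIL_VERSION_MAJOR  58
#define LIBAVUTIL_VERSION_MINOR   2
#define LIBAVUTIL_VERSION_MICRO 100

From these the script would print, for example, libavutil_VERSION=58.2.100 and libavutil_VERSION_MAJOR=58, one assignment per line.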
@@ -9,22 +9,16 @@ AVBASENAMES = ffmpeg ffplay ffprobe
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))

OBJS-ffmpeg += \
    fftools/ffmpeg_demux.o \
    fftools/ffmpeg_filter.o \
    fftools/ffmpeg_hw.o \
    fftools/ffmpeg_mux.o \
    fftools/ffmpeg_mux_init.o \
    fftools/ffmpeg_opt.o \
    fftools/objpool.o \
    fftools/sync_queue.o \
    fftools/thread_queue.o \
OBJS-ffmpeg += fftools/ffmpeg_opt.o fftools/ffmpeg_filter.o fftools/ffmpeg_hw.o
OBJS-ffmpeg-$(CONFIG_CUVID) += fftools/ffmpeg_cuvid.o
OBJS-ffmpeg-$(CONFIG_LIBMFX) += fftools/ffmpeg_qsv.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += fftools/ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += fftools/ffmpeg_videotoolbox.o

define DOFFTOOL
OBJS-$(1) += fftools/cmdutils.o fftools/opt_common.o fftools/$(1).o $(OBJS-$(1)-yes)
ifdef HAVE_GNU_WINDRES
OBJS-$(1) += fftools/fftoolsres.o
endif
OBJS-$(1) += fftools/cmdutils.o fftools/$(1).o $(OBJS-$(1)-yes)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): | fftools
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))

Some files were not shown because too many files have changed in this diff.