Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2025-12-06 06:49:59 +01:00)

Compare commits: release/6. ... n4.0.3 (251 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | fcbd117df3 |  |
|  | b8aa7b9a6d |  |
|  | d2ce6472a7 |  |
|  | ebc01c8f6d |  |
|  | 00e7e4b188 |  |
|  | 938bc919ea |  |
|  | afebdc3ed7 |  |
|  | 6e14fc4aa1 |  |
|  | 1061a2e2ef |  |
|  | 65b0caf47d |  |
|  | bccddd7fcc |  |
|  | b2e9d3da81 |  |
|  | 380bc8585c |  |
|  | 8791a1e7de |  |
|  | 70d0d83d4d |  |
|  | aec3daa8b4 |  |
|  | 7b23dd0f41 |  |
|  | 967604fecf |  |
|  | 3fb09dba40 |  |
|  | a634da282b |  |
|  | df6e929e89 |  |
|  | 42355d12db |  |
|  | eb495b20e5 |  |
|  | 7f521fae2b |  |
|  | 531ebb7506 |  |
|  | d7973cf03d |  |
|  | 45fb50b4bc |  |
|  | f645fd64c3 |  |
|  | 1a05e6ced3 |  |
|  | 5a4234de5e |  |
|  | 47f608a7e1 |  |
|  | 5af88171e7 |  |
|  | 6d2b2ee3a5 |  |
|  | 9afcf994f6 |  |
|  | 7da37aa980 |  |
|  | 318b13a5ad |  |
|  | 17a5df4ab8 |  |
|  | 968ffb93af |  |
|  | 84ab680624 |  |
|  | 8327559fe8 |  |
|  | 229025799f |  |
|  | a04ff0c054 |  |
|  | ce828a247d |  |
|  | 6e60a38322 |  |
|  | f2b51fd54c |  |
|  | 0f36c5f5c7 |  |
|  | 3f095c5b56 |  |
|  | b95c209a61 |  |
|  | cd42c19f53 |  |
|  | 32353f8bcb |  |
|  | 2c3ea34082 |  |
|  | c0a6febf32 |  |
|  | cc0817af0d |  |
|  | 386975d7a4 |  |
|  | 8591d16ce5 |  |
|  | 07255282d0 |  |
|  | f9235773d6 |  |
|  | 4f51a21c30 |  |
|  | 6cf72a56e7 |  |
|  | 88093d2c1f |  |
|  | 8147da2bad |  |
|  | f291acafbb |  |
|  | cec6df48ba |  |
|  | 1a4a6d94cc |  |
|  | 2be51cbeea |  |
|  | 49a90d5d31 |  |
|  | fab3418cb9 |  |
|  | 9cc5337247 |  |
|  | d6d7853b4b |  |
|  | db923b3fbd |  |
|  | 0a155c57bd |  |
|  | 3ef38c414e |  |
|  | 40ed40902a |  |
|  | 0561cde128 |  |
|  | 670b565ba2 |  |
|  | 6b65f46673 |  |
|  | 052edeec55 |  |
|  | 48479937c3 |  |
|  | fd53179f4a |  |
|  | 5db47b3983 |  |
|  | 0981dfee7d |  |
|  | d8c4b2ae57 |  |
|  | fc92ca5b8e |  |
|  | 6d992a51c7 |  |
|  | 6f4b82cc3a |  |
|  | 37f505cc85 |  |
|  | a21703ca5d |  |
|  | a28ab09e2a |  |
|  | 4439d6aa69 |  |
|  | 3bf80c7b22 |  |
|  | 1361e4abb8 |  |
|  | 5fd1dce39a |  |
|  | de0a1d01ba |  |
|  | c4b23793d4 |  |
|  | e21e5c95c1 |  |
|  | 2b13c136c4 |  |
|  | d3536ce839 |  |
|  | 679d749eab |  |
|  | 7610538224 |  |
|  | 0003ace83b |  |
|  | 20c5fb9721 |  |
|  | 841e1399e6 |  |
|  | 9f76f0fab8 |  |
|  | bf3e331b76 |  |
|  | 21732c1adc |  |
|  | d00548f2c1 |  |
|  | 10e5302db4 |  |
|  | 84280dc7cf |  |
|  | 2c404cc11a |  |
|  | acb7907319 |  |
|  | 2c138c2d8c |  |
|  | b5106c5aa2 |  |
|  | 789bac72ed |  |
|  | 33fcbb4372 |  |
|  | a56eb4d56c |  |
|  | 70799fae35 |  |
|  | e049f7c24f |  |
|  | 83a737aa70 |  |
|  | 2deeb2eaef |  |
|  | 9eaf908897 |  |
|  | 1ca157b026 |  |
|  | f36128518b |  |
|  | 897524954b |  |
|  | f0e4bc61e3 |  |
|  | 36628bd215 |  |
|  | e42ab0115e |  |
|  | 10f68641ae |  |
|  | f0a10f6376 |  |
|  | 0b4d76d891 |  |
|  | 4fa2078217 |  |
|  | 081874a050 |  |
|  | 028a0c9148 |  |
|  | 9a53e8572a |  |
|  | 3aa3b05d64 |  |
|  | 7a5ddf731b |  |
|  | 4b12afccb2 |  |
|  | b55c824ee7 |  |
|  | a90497c183 |  |
|  | 65b2b0d98a |  |
|  | 0097cc0ea3 |  |
|  | 07767c704b |  |
|  | 2e7830e5ff |  |
|  | 4f644b2632 |  |
|  | 1d01a3b34c |  |
|  | 2742cb10c7 |  |
|  | 8229afc3a9 |  |
|  | 76f8c8cd05 |  |
|  | 786834a693 |  |
|  | f2c253f083 |  |
|  | e26be20a27 |  |
|  | 7b7c582c15 |  |
|  | 06b84f7271 |  |
|  | f974cc9830 |  |
|  | af0ba288e7 |  |
|  | c98d84e229 |  |
|  | b3d740263c |  |
|  | 6edf0ecab0 |  |
|  | b2aaf5de42 |  |
|  | 217367b5eb |  |
|  | c97f9ed53f |  |
|  | 05ac7fdeeb |  |
|  | c071618ba6 |  |
|  | b367c23da1 |  |
|  | 58a03420be |  |
|  | cbe442048f |  |
|  | 87e9f5e118 |  |
|  | 2cffce26a7 |  |
|  | c26e101654 |  |
|  | 60e408f252 |  |
|  | e61dcd2c86 |  |
|  | dd3914c5b5 |  |
|  | 90c4c076c7 |  |
|  | d976855c00 |  |
|  | 7bc5d49c60 |  |
|  | 36c4995428 |  |
|  | 57bb78d980 |  |
|  | b2cb42f1c3 |  |
|  | fe7f2a77c7 |  |
|  | 9dfe36616f |  |
|  | 4ace1597a2 |  |
|  | 6d2c5bb5d2 |  |
|  | a6d85a97d0 |  |
|  | 72a34d2332 |  |
|  | c8b57d4333 |  |
|  | da399903c7 |  |
|  | 8336a66270 |  |
|  | d1845e7f1a |  |
|  | 852f78443a |  |
|  | c343eabfb7 |  |
|  | 7ad163c258 |  |
|  | ef28571efe |  |
|  | 97aea63340 |  |
|  | bb6a34f237 |  |
|  | db5631e408 |  |
|  | df2c811b7c |  |
|  | 6f55a36be9 |  |
|  | 33042d632d |  |
|  | 3054e53ddc |  |
|  | 84bf631018 |  |
|  | 2884575d97 |  |
|  | 76716518a8 |  |
|  | e8caf67f56 |  |
|  | e40922c16c |  |
|  | ef9478d264 |  |
|  | 13d83899df |  |
|  | 61fed89ad4 |  |
|  | 7931e01540 |  |
|  | 93cee87b13 |  |
|  | 2a44f706aa |  |
|  | 61b673b1f1 |  |
|  | 8fde71acd9 |  |
|  | b32f865969 |  |
|  | d89eea3455 |  |
|  | 0a22e31fbb |  |
|  | 70a01aa490 |  |
|  | da6c519f6e |  |
|  | 29328d96b9 |  |
|  | 44cb647477 |  |
|  | a768c0a3e1 |  |
|  | 58569162c2 |  |
|  | 6b2fee19a7 |  |
|  | 8cd79c2e73 |  |
|  | 0502602d37 |  |
|  | ace829cb45 |  |
|  | b9b3ef4f5a |  |
|  | b2b7cb0f60 |  |
|  | 5cc6370a15 |  |
|  | 8b019be79b |  |
|  | e36830c695 |  |
|  | bc2ceeb3ac |  |
|  | 66bdf8f145 |  |
|  | bfe61bbd00 |  |
|  | 5888679ae3 |  |
|  | ecb375684d |  |
|  | df56bc18ef |  |
|  | ef99025603 |  |
|  | 860293a9a2 |  |
|  | 9b71114247 |  |
|  | 0b6de235b9 |  |
|  | a73b464118 |  |
|  | d9e9e97e5f |  |
|  | d52676da38 |  |
|  | ca85c3cd7d |  |
|  | de253343c1 |  |
|  | 9c787a21ce |  |
|  | 7e11a86175 |  |
|  | 9ef90ff0a2 |  |
|  | 6c95a26c1a |  |
|  | b6ec181240 |  |
|  | b42e135614 |  |
|  | 0564e8ee49 |  |
.gitignore (vendored): 7 lines changed
@@ -19,12 +19,8 @@
*.swp
*.ver
*.version
*.metal.air
*.metallib
*.metallib.c
*.ptx
*.ptx.c
*.ptx.gz
*_g
\#*
.\#*
@@ -35,9 +31,8 @@
/ffprobe
/config.asm
/config.h
/config_components.h
/coverage.info
/avversion.h
/lcov/
/src
/mapfile
/tools/python/__pycache__/
.mailmap: 24 lines changed
@@ -1,24 +0,0 @@
<jeebjp@gmail.com> <jan.ekstrom@aminocom.com>
<sw@jkqxz.net> <mrt@jkqxz.net>
<u@pkh.me> <cboesch@gopro.com>
<zhilizhao@tencent.com> <quinkblack@foxmail.com>
<zhilizhao@tencent.com> <wantlamy@gmail.com>
<modmaker@google.com> <modmaker-at-google.com@ffmpeg.org>
<stebbins@jetheaddev.com> <jstebbins@jetheaddev.com>
<barryjzhao@tencent.com> <mypopydev@gmail.com>
<barryjzhao@tencent.com> <jun.zhao@intel.com>
<josh@itanimul.li> <joshdk@obe.tv>
<michael@niedermayer.cc> <michaelni@gmx.at>
<linjie.justin.fu@gmail.com> <linjie.fu@intel.com>
<linjie.justin.fu@gmail.com> <fulinjie@zju.edu.cn>
<ceffmpeg@gmail.com> <cehoyos@ag.or.at>
<ceffmpeg@gmail.com> <cehoyos@rainbow.studorg.tuwien.ac.at>
<ffmpeg@gyani.pro> <gyandoshi@gmail.com>
<atomnuker@gmail.com> <rpehlivanov@obe.tv>
<lizhong1008@gmail.com> <zhong.li@intel.com>
<lizhong1008@gmail.com> <zhongli_dev@126.com>
<andreas.rheinhardt@gmail.com> <andreas.rheinhardt@googlemail.com>
rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
<thilo.borgmann@mail.de> <thilo.borgmann@googlemail.com>
<liuqi05@kuaishou.com> <lq@chinaffmpeg.org>
<ruiling.song83@gmail.com> <ruiling.song@intel.com>
.travis.yml
@@ -19,7 +19,7 @@ cache:
directories:
- ffmpeg-samples
before_install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update --all; fi
install:
- if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install nasm; fi
script:
CREDITS: 4 lines changed
@@ -1,6 +1,6 @@
See the Git history of the project (https://git.ffmpeg.org/ffmpeg) to
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
get the names of people who have contributed to FFmpeg.

To check the log, you can type the command "git log" in the FFmpeg
source directory, or browse the online repository at
https://git.ffmpeg.org/ffmpeg
http://source.ffmpeg.org.
INSTALL.md: 10 lines changed
@@ -1,4 +1,4 @@
## Installing FFmpeg
#Installing FFmpeg:

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.
@@ -15,11 +15,3 @@ NOTICE
------

- Non system dependencies (e.g. libx264, libvpx) are disabled by default.

NOTICE for Package Maintainers
------------------------------

- It is recommended to build FFmpeg twice, first with minimal external dependencies so
that 3rd party packages, which depend on FFmpegs libavutil/libavfilter/libavcodec/libavformat
can then be built. And last build FFmpeg with full dependancies (which may in turn depend on
some of these 3rd party packages). This avoids circular dependencies during build.
LICENSE.md: 41 lines changed
@@ -21,11 +21,10 @@ Specifically, the GPL parts of FFmpeg are:
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/tests/swresample.c`
- `libswresample/swresample-test.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the following filters in libavfilter:
- `signature_lookup.c`
- `vf_blackframe.c`
- `vf_boxblur.c`
- `vf_colormatrix.c`
@@ -35,13 +34,13 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_eq.c`
- `vf_find_rect.c`
- `vf_fspp.c`
- `vf_geq.c`
- `vf_histeq.c`
- `vf_hqdn3d.c`
- `vf_interlace.c`
- `vf_kerndeint.c`
- `vf_lensfun.c` (GPL version 3 or later)
- `vf_mcdeint.c`
- `vf_mpdecimate.c`
- `vf_nnedi.c`
- `vf_owdenoise.c`
- `vf_perspective.c`
- `vf_phase.c`
@@ -50,14 +49,12 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_pullup.c`
- `vf_repeatfields.c`
- `vf_sab.c`
- `vf_signature.c`
- `vf_smartblur.c`
- `vf_spp.c`
- `vf_stereo3d.c`
- `vf_super2xsai.c`
- `vf_tinterlace.c`
- `vf_uspp.c`
- `vf_vaguedenoiser.c`
- `vsrc_mptestsrc.c`

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
@@ -83,47 +80,41 @@ affect the licensing of binaries resulting from the combination.

### Compatible libraries

The following libraries are under GPL version 2:
- avisynth
The following libraries are under GPL:
- frei0r
- libcdio
- libdavs2
- librubberband
- libvidstab
- libx264
- libx265
- libxavs
- libxavs2
- libxvid

When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.

The following libraries are under LGPL version 3:
- gmp
- libaribb24
- liblensfun

When combining them with FFmpeg, use the configure option `--enable-version3` to
upgrade FFmpeg to the LGPL v3.

The VMAF, mbedTLS, RK MPI, OpenCORE and VisualOn libraries are under the Apache License
2.0. That license is incompatible with the LGPL v2.1 and the GPL v2, but not with
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.

The smbclient library is under the GPL v3, to combine it with FFmpeg,
the options `--enable-gpl` and `--enable-version3` have to be passed to
configure to upgrade FFmpeg to the GPL v3.

### Incompatible libraries

There are certain libraries you can combine with FFmpeg whose licenses are not
compatible with the GPL and/or the LGPL. If you wish to enable these
libraries, even in circumstances that their license may be incompatible, pass
`--enable-nonfree` to configure. This will cause the resulting binary to be
`--enable-nonfree` to configure. But note that if you enable any of these
libraries the resulting binary will be under a complex license mix that is
more restrictive than the LGPL and that may result in additional obligations.
It is possible that these restrictions cause the resulting binary to be
unredistributable.

The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.

The NVENC library, while its header file is licensed under the compatible MIT
license, requires a proprietary binary blob at run time, and is deemed to be
incompatible with the GPL. We are not certain if it is compatible with the
LGPL, but we require `--enable-nonfree` even with LGPL configurations in case
it is not.
MAINTAINERS: 114 lines changed
@@ -11,11 +11,17 @@ A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
|
||||
patches and related discussions.
|
||||
|
||||
|
||||
Project Leader
|
||||
==============
|
||||
|
||||
final design decisions
|
||||
|
||||
|
||||
Applications
|
||||
============
|
||||
|
||||
ffmpeg:
|
||||
ffmpeg.c Michael Niedermayer, Anton Khirnov
|
||||
ffmpeg.c Michael Niedermayer
|
||||
|
||||
ffplay:
|
||||
ffplay.c Marton Balint
|
||||
@@ -33,9 +39,8 @@ QuickTime faststart:
|
||||
Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
|
||||
project server day to day operations Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
project server emergencies Árpád Gereöffy, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan, Gyan Doshi
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
@@ -47,12 +52,12 @@ Communication
|
||||
|
||||
website Deby Barbara Lepage
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
|
||||
Patchwork Andriy Gelman
|
||||
mailing lists Baptiste Coudurier
|
||||
Twitter Reynaldo H. Verdejo Pinochet
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Baptiste Coudurier, Lou Logan
|
||||
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
|
||||
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
|
||||
Launchpad Timothy Gu
|
||||
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, rcombs, wm4
|
||||
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, Rodger Combs, wm4
|
||||
|
||||
|
||||
libavutil
|
||||
@@ -73,7 +78,6 @@ Other:
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
hwcontext_cuda* Timo Rothenpieler
|
||||
hwcontext_vulkan* Lynne
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
lzo Reimar Doeffinger
|
||||
@@ -84,7 +88,6 @@ Other:
|
||||
rational.c, rational.h Michael Niedermayer
|
||||
rc4 Reimar Doeffinger
|
||||
ripemd.c, ripemd.h James Almer
|
||||
tx* Lynne
|
||||
|
||||
|
||||
libavcodec
|
||||
@@ -110,12 +113,15 @@ Generic Parts:
|
||||
lzw.* Michael Niedermayer
|
||||
floating point AAN DCT:
|
||||
faandct.c, faandct.h Michael Niedermayer
|
||||
Non-power-of-two MDCT:
|
||||
mdct15.c, mdct15.h Rostislav Pehlivanov
|
||||
Golomb coding:
|
||||
golomb.c, golomb.h Michael Niedermayer
|
||||
motion estimation:
|
||||
motion* Michael Niedermayer
|
||||
rate control:
|
||||
ratecontrol.c Michael Niedermayer
|
||||
libxvid_rc.c Michael Niedermayer
|
||||
simple IDCT:
|
||||
simple_idct.c, simple_idct.h Michael Niedermayer
|
||||
postprocessing:
|
||||
@@ -131,16 +137,13 @@ Codecs:
|
||||
8bps.c Roberto Togni
|
||||
8svx.c Jaikrishnan Menon
|
||||
aacenc*, aaccoder.c Rostislav Pehlivanov
|
||||
adpcm.c Zane van Iperen
|
||||
alacenc.c Jaikrishnan Menon
|
||||
alsdec.c Thilo Borgmann, Umair Khan
|
||||
amfenc* Dmitrii Ovchinnikov
|
||||
aptx.c Aurelien Jacobs
|
||||
ass* Aurelien Jacobs
|
||||
asv* Michael Niedermayer
|
||||
atrac3plus* Maxim Poliakovski
|
||||
audiotoolbox* rcombs
|
||||
avs2* Huiwen Ren
|
||||
audiotoolbox* Rodger Combs
|
||||
bgmc.c, bgmc.h Thilo Borgmann
|
||||
binkaudio.c Peter Ross
|
||||
cavs* Stefan Gehrer
|
||||
@@ -151,10 +154,10 @@ Codecs:
|
||||
ccaption_dec.c Anshul Maheshwari, Aman Gupta
|
||||
cljr Alex Beregszaszi
|
||||
cpia.c Stephan Hilb
|
||||
crystalhd.c Philip Langdale
|
||||
cscd.c Reimar Doeffinger
|
||||
cuviddec.c Timo Rothenpieler
|
||||
dca* foo86
|
||||
dfpwm* Jack Bruienne
|
||||
dirac* Rostislav Pehlivanov
|
||||
dnxhd* Baptiste Coudurier
|
||||
dolby_e* foo86
|
||||
@@ -165,6 +168,7 @@ Codecs:
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
evrc* Paul B Mahol
|
||||
exif.c, exif.h Thilo Borgmann
|
||||
exr.c Martin Vignali
|
||||
ffv1* Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
fifo.c Jan Sebechlebsky
|
||||
@@ -181,24 +185,19 @@ Codecs:
|
||||
interplayvideo.c Mike Melanson
|
||||
jni*, ffjni* Matthieu Bouron
|
||||
jpeg2000* Nicolas Bertrand
|
||||
jpegxl* Leo Izen
|
||||
jvdec.c Peter Ross
|
||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||
libcelt_dec.c Nicolas George
|
||||
libcodec2.c Tomas Härdin
|
||||
libdirac* David Conrad
|
||||
libdavs2.c Huiwen Ren
|
||||
libjxl*.c, libjxl.h Leo Izen
|
||||
libgsm.c Michel Bardiaux
|
||||
libkvazaar.c Arttu Ylä-Outinen
|
||||
libopenh264enc.c Martin Storsjo, Linjie Fu
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libtheoraenc.c David Conrad
|
||||
libvorbis.c David Conrad
|
||||
libvpx* James Zern
|
||||
libxavs.c Stefan Gehrer
|
||||
libxavs2.c Huiwen Ren
|
||||
libzvbi-teletextdec.c Marton Balint
|
||||
lzo.h, lzo.c Reimar Doeffinger
|
||||
mdec.c Michael Niedermayer
|
||||
@@ -211,11 +210,9 @@ Codecs:
|
||||
mqc* Nicolas Bertrand
|
||||
msmpeg4.c, msmpeg4data.h Michael Niedermayer
|
||||
msrle.c Mike Melanson
|
||||
msrleenc.c Tomas Härdin
|
||||
msvideo1.c Mike Melanson
|
||||
nuv.c Reimar Doeffinger
|
||||
nvdec*, nvenc* Timo Rothenpieler
|
||||
omx.c Martin Storsjo, Aman Gupta
|
||||
opus* Rostislav Pehlivanov
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
@@ -223,7 +220,7 @@ Codecs:
|
||||
ptx.c Ivo van Poorten
|
||||
qcelp* Reynaldo H. Verdejo Pinochet
|
||||
qdm2.c, qdm2data.h Roberto Togni
|
||||
qsv* Mark Thompson, Zhong Li, Haihao Xiang
|
||||
qsv* Mark Thompson
|
||||
qtrle.c Mike Melanson
|
||||
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
|
||||
resample2.c Michael Niedermayer
|
||||
@@ -233,6 +230,7 @@ Codecs:
|
||||
rv10.c Michael Niedermayer
|
||||
s3tc* Ivo van Poorten
|
||||
smc.c Mike Melanson
|
||||
smvjpegdec.c Ash Hughes
|
||||
snow* Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
speedhq.c Steinar H. Gunderson
|
||||
@@ -263,14 +261,16 @@ Codecs:
|
||||
xan.c Mike Melanson
|
||||
xbm* Paul B Mahol
|
||||
xface Stefano Sabatini
|
||||
xvmc.c Ivan Kalvachev
|
||||
xwd* Paul B Mahol
|
||||
|
||||
Hardware acceleration:
|
||||
crystalhd.c Philip Langdale
|
||||
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
|
||||
d3d11va* Steve Lhomme
|
||||
mediacodec* Matthieu Bouron, Aman Gupta
|
||||
vaapi* Haihao Xiang
|
||||
vaapi_encode* Mark Thompson, Haihao Xiang
|
||||
vaapi* Gwenole Beauchesne
|
||||
vaapi_encode* Mark Thompson
|
||||
vdpau* Philip Langdale, Carl Eugen Hoyos
|
||||
videotoolbox* Rick Kern, Aman Gupta
|
||||
|
||||
@@ -333,7 +333,6 @@ Filters:
|
||||
vf_bwdif Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_chromakey.c Timo Rothenpieler
|
||||
vf_colorchannelmixer.c Paul B Mahol
|
||||
vf_colorconstancy.c Mina Sami (CC <minas.gorgy@gmail.com>)
|
||||
vf_colorbalance.c Paul B Mahol
|
||||
vf_colorkey.c Timo Rothenpieler
|
||||
vf_colorlevels.c Paul B Mahol
|
||||
@@ -349,7 +348,6 @@ Filters:
|
||||
vf_il.c Paul B Mahol
|
||||
vf_(t)interlace Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_libplacebo.c Niklas Haas
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_mestimate.c Davinder Singh
|
||||
vf_minterpolate.c Davinder Singh
|
||||
@@ -362,15 +360,12 @@ Filters:
|
||||
vf_ssim.c Paul B Mahol
|
||||
vf_stereo3d.c Paul B Mahol
|
||||
vf_telecine.c Paul B Mahol
|
||||
vf_tonemap_opencl.c Ruiling Song
|
||||
vf_yadif.c Michael Niedermayer
|
||||
vf_zoompan.c Paul B Mahol
|
||||
|
||||
Sources:
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
|
||||
dnn Yejun Guo
|
||||
|
||||
libavformat
|
||||
===========
|
||||
|
||||
@@ -389,13 +384,7 @@ Muxers/Demuxers:
|
||||
afc.c Paul B Mahol
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
alp.c Zane van Iperen
|
||||
amvenc.c Zane van Iperen
|
||||
apm.c Zane van Iperen
|
||||
apngdec.c Benoit Fouet
|
||||
argo_asf.c Zane van Iperen
|
||||
argo_brp.c Zane van Iperen
|
||||
argo_cvg.c Zane van Iperen
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
@@ -412,40 +401,36 @@ Muxers/Demuxers:
|
||||
dashdec.c Steven Liu
|
||||
dashenc.c Karthick Jeyapal
|
||||
daud.c Reimar Doeffinger
|
||||
dfpwmdec.c Jack Bruienne
|
||||
dss.c Oleksij Rempel
|
||||
dtsdec.c foo86
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
electronicarts.c Peter Ross
|
||||
epafdec.c Paul B Mahol
|
||||
evc* Samsung (Dawid Kozinski)
|
||||
ffm* Baptiste Coudurier
|
||||
flic.c Mike Melanson
|
||||
flvdec.c Michael Niedermayer
|
||||
flvenc.c Michael Niedermayer, Steven Liu
|
||||
gxf.c Reimar Doeffinger
|
||||
gxfenc.c Baptiste Coudurier
|
||||
hls.c Anssi Hannula
|
||||
hlsenc.c Christian Suloway, Steven Liu
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
iff.c Jaikrishnan Menon
|
||||
imf* Pierre-Anthony Lemieux
|
||||
img2*.c Michael Niedermayer
|
||||
ipmovie.c Mike Melanson
|
||||
ircam* Paul B Mahol
|
||||
iss.c Stefan Gehrer
|
||||
jpegxl* Leo Izen
|
||||
jvdec.c Peter Ross
|
||||
kvag.c Zane van Iperen
|
||||
libmodplug.c Clément Bœsch
|
||||
libopenmpt.c Josh de Kock
|
||||
lmlm4.c Ivo van Poorten
|
||||
lvfdec.c Paul B Mahol
|
||||
lxfdec.c Tomas Härdin
|
||||
matroska.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskadec.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskaenc.c David Conrad, Andreas Rheinhardt
|
||||
matroska.c Aurelien Jacobs
|
||||
matroskadec.c Aurelien Jacobs
|
||||
matroskaenc.c David Conrad
|
||||
matroska subtitles (matroskaenc.c) John Peebles
|
||||
metadata* Aurelien Jacobs
|
||||
mgsts.c Paul B Mahol
|
||||
@@ -460,7 +445,7 @@ Muxers/Demuxers:
|
||||
mpegtsenc.c Baptiste Coudurier
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier, Tomas Härdin
|
||||
mxf* Baptiste Coudurier
|
||||
nistspheredec.c Paul B Mahol
|
||||
nsvdec.c Francois Revol
|
||||
nut* Michael Niedermayer
|
||||
@@ -468,9 +453,9 @@ Muxers/Demuxers:
|
||||
oggdec.c, oggdec.h David Conrad
|
||||
oggenc.c Baptiste Coudurier
|
||||
oggparse*.c David Conrad
|
||||
oggparsedaala* Rostislav Pehlivanov
|
||||
oma.c Maxim Poliakovski
|
||||
paf.c Paul B Mahol
|
||||
pp_bnk.c Zane van Iperen
|
||||
psxstr.c Mike Melanson
|
||||
pva.c Ivo van Poorten
|
||||
pvfdec.c Paul B Mahol
|
||||
@@ -515,9 +500,7 @@ Protocols:
|
||||
bluray.c Petri Hintukainen
|
||||
ftp.c Lukasz Marek
|
||||
http.c Ronald S. Bultje
|
||||
libsrt.c Zhao Zhili
|
||||
libssh.c Lukasz Marek
|
||||
libzmq.c Andriy Gelman
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
icecast.c Marvin Scholz
|
||||
@@ -541,12 +524,9 @@ Operating systems / CPU architectures
|
||||
=====================================
|
||||
|
||||
Alpha Falk Hueffner
|
||||
MIPS Manojkumar Bhosale, Shiyou Yin
|
||||
LoongArch Shiyou Yin
|
||||
MIPS Manojkumar Bhosale
|
||||
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC Lauri Kasanen
|
||||
RISC-V Rémi Denis-Courmont
|
||||
Windows MinGW Alex Beregszaszi, Ramiro Polla
|
||||
Windows Cygwin Victor Paesa
|
||||
Windows MSVC Matthew Oliver, Hendrik Leppkes
|
||||
@@ -575,7 +555,6 @@ Joakim Plate
|
||||
Jun Zhao
|
||||
Kieran Kunhya
|
||||
Kirill Gavrilov
|
||||
Limin Wang
|
||||
Martin Storsjö
|
||||
Panagiotis Issaris
|
||||
Pedro Arthur
|
||||
@@ -588,21 +567,16 @@ wm4
|
||||
Releases
|
||||
========
|
||||
|
||||
7.0 Michael Niedermayer
|
||||
6.1 Michael Niedermayer
|
||||
5.1 Michael Niedermayer
|
||||
4.4 Michael Niedermayer
|
||||
3.4 Michael Niedermayer
|
||||
2.8 Michael Niedermayer
|
||||
2.7 Michael Niedermayer
|
||||
2.6 Michael Niedermayer
|
||||
2.5 Michael Niedermayer
|
||||
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
||||
GnuPG Fingerprints and IRC nicknames of maintainers and contributors
|
||||
====================================================================
|
||||
|
||||
IRC nicknames are in parentheses. These apply
|
||||
to the IRC channels listed on the website.
|
||||
GnuPG Fingerprints of maintainers and contributors
|
||||
==================================================
|
||||
|
||||
Alexander Strasser 1C96 78B7 83CB 8AA7 9AF5 D1EB A7D8 A57B A876 E58F
|
||||
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
|
||||
@@ -616,23 +590,17 @@ Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
|
||||
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Leo Izen (Traneptora) B6FD 3CFC 7ACF 83FC 9137 6945 5A71 C331 FD2F A19A
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
|
||||
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Niklas Haas (haasn) 1DDB 8076 B14D 5B48 32FC 99D9 EB52 DA9C 02BA 6FB4
|
||||
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
|
||||
Pierre-Anthony Lemieux (pal) F4B3 9492 E6F2 E4AF AEC8 46CB 698F A1F0 F8D4 EED4
|
||||
Ramiro Polla 7859 C65B 751B 1179 792E DAE8 8E95 8B2F 9B6C 5700
|
||||
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
|
||||
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
|
||||
@@ -641,9 +609,7 @@ Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
|
||||
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
|
||||
Steinar H. Gunderson C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
|
||||
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
|
||||
Thilo Borgmann (thilo) CE1D B7F4 4D20 FC3A DD9F FE5A 257C 5B8F 1D20 B92F
|
||||
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
|
||||
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
|
||||
Tomas Härdin (thardin) A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
|
||||
Zane van Iperen (zane) 61AE D40F 368B 6F26 9DAE 3892 6861 6B2D 8AC4 DCC5
|
||||
|
||||
Makefile: 45 lines changed
@@ -13,19 +13,17 @@ vpath %.v $(SRC_PATH)
|
||||
vpath %.texi $(SRC_PATH)
|
||||
vpath %.cu $(SRC_PATH)
|
||||
vpath %.ptx $(SRC_PATH)
|
||||
vpath %.metal $(SRC_PATH)
|
||||
vpath %/fate_config.sh.template $(SRC_PATH)
|
||||
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
|
||||
|
||||
# $(FFLIBS-yes) needs to be in linking order
|
||||
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_AVFORMAT) += avformat
|
||||
FFLIBS-$(CONFIG_AVCODEC) += avcodec
|
||||
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
@@ -47,47 +45,25 @@ FF_DEP_LIBS := $(DEP_LIBS)
|
||||
FF_STATIC_DEP_LIBS := $(STATIC_DEP_LIBS)
|
||||
|
||||
$(TOOLS): %$(EXESUF): %.o
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(filter-out $(FF_DEP_LIBS), $^) $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)
|
||||
|
||||
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
target_dem_%_fuzzer$(EXESUF): target_dem_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
|
||||
tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/enc_recon_frame_test$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/enc_recon_frame_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/scale_slice_test$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/scale_slice_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/target_dec_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/target_dem_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
|
||||
CONFIGURABLE_COMPONENTS = \
|
||||
$(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
|
||||
$(SRC_PATH)/libavcodec/bitstream_filters.c \
|
||||
$(SRC_PATH)/libavcodec/hwaccels.h \
|
||||
$(SRC_PATH)/libavcodec/parsers.c \
|
||||
$(SRC_PATH)/libavformat/protocols.c \
|
||||
|
||||
config_components.h: ffbuild/.config
|
||||
config.h: ffbuild/.config
|
||||
ffbuild/.config: $(CONFIGURABLE_COMPONENTS)
|
||||
@-tput bold 2>/dev/null
|
||||
@-printf '\nWARNING: $(?) newer than config_components.h, rerun configure\n\n'
|
||||
@-printf '\nWARNING: $(?) newer than config.h, rerun configure\n\n'
|
||||
@-tput sgr0 2>/dev/null
|
||||
|
||||
SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
@@ -95,8 +71,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
|
||||
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
|
||||
MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS \
|
||||
OBJS SLIBOBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS
|
||||
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
@@ -118,13 +93,12 @@ include $(SRC_PATH)/fftools/Makefile
|
||||
include $(SRC_PATH)/doc/Makefile
|
||||
include $(SRC_PATH)/doc/examples/Makefile
|
||||
|
||||
$(ALLFFLIBS:%=lib%/version.o): libavutil/ffversion.h
|
||||
libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
ifeq ($(STRIPTYPE),direct)
|
||||
$(STRIP) -o $@ $<
|
||||
else
|
||||
$(RM) $@
|
||||
$(CP) $< $@
|
||||
$(STRIP) $@
|
||||
endif
|
||||
@@ -160,17 +134,16 @@ uninstall-data:
|
||||
|
||||
clean::
|
||||
$(RM) $(CLEANSUFFIXES)
|
||||
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES)) $(addprefix compat/*/*/,$(CLEANSUFFIXES))
|
||||
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES))
|
||||
$(RM) -r coverage-html
|
||||
$(RM) -rf coverage.info coverage.info.in lcov
|
||||
|
||||
distclean:: clean
|
||||
$(RM) .version config.asm config.h config_components.h mapfile \
|
||||
$(RM) .version avversion.h config.asm config.h mapfile \
|
||||
ffbuild/.config ffbuild/config.* libavutil/avconfig.h \
|
||||
version.h libavutil/ffversion.h libavcodec/codec_names.h \
|
||||
libavcodec/bsf_list.c libavformat/protocol_list.c \
|
||||
libavcodec/codec_list.c libavcodec/parser_list.c \
|
||||
libavfilter/filter_list.c libavdevice/indev_list.c libavdevice/outdev_list.c \
|
||||
libavformat/muxer_list.c libavformat/demuxer_list.c
|
||||
ifeq ($(SRC_LINK),src)
|
||||
$(RM) src
|
||||
@@ -185,7 +158,7 @@ check: all alltools examples testprogs fate
|
||||
|
||||
include $(SRC_PATH)/tests/Makefile
|
||||
|
||||
$(sort $(OUTDIRS)):
|
||||
$(sort $(OBJDIRS)):
|
||||
$(Q)mkdir -p $@
|
||||
|
||||
# Dummy rule to stop make trying to rebuild removed or renamed headers
|
||||
|
||||
README.md
@@ -9,7 +9,7 @@ such as audio, video, subtitles and related metadata.
* `libavcodec` provides implementation of a wider range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access.
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides means to alter decoded audio and video through a directed graph of connected filters.
* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.
RELEASE_NOTES
@@ -1,15 +1,15 @@

┌──────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 6.1 "Heaviside" │
└──────────────────────────────────────────┘
┌───────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.0 "Wu" │
└───────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 6.1 "Heaviside", about 8
months after the release of FFmpeg 6.0.
The FFmpeg Project proudly presents FFmpeg 4.0 "Wu", about 6
months after the release of FFmpeg 3.4.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.
compat/atomics/win32/stdatomic.h
@@ -19,6 +19,7 @@
#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define COMPAT_ATOMICS_WIN32_STDATOMIC_H

#define WIN32_LEAN_AND_MEAN
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
@@ -95,7 +96,7 @@ do { \
atomic_load(object)

#define atomic_exchange(object, desired) \
InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
InterlockedExchangePointer(object, desired);

#define atomic_exchange_explicit(object, desired, order) \
atomic_exchange(object, desired)
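The hunk above shows the two variants of the `atomic_exchange` mapping in the Win32 shim for C11 `<stdatomic.h>`: one expands to `InterlockedExchangePointer` with explicit `PVOID` casts and no trailing semicolon, the other without the casts. A minimal usage sketch of the shim follows; it assumes the shim's `atomic_intptr_t` typedef and that the compat header is substituted for the standard one on pre-C11 MSVC builds, and `swap_context` is just an illustrative name:

```
#include <stdint.h>
#include "stdatomic.h"   /* stands in for C11 <stdatomic.h> on such builds */

static atomic_intptr_t current_ctx;

/* Atomically publish a new context pointer and return the previous one.
 * With the shim, atomic_exchange() expands to InterlockedExchangePointer();
 * the variant with the (PVOID volatile *) cast and no trailing ';' is the
 * one that can appear inside an expression such as this return statement. */
static void *swap_context(void *new_ctx)
{
    return (void *)atomic_exchange(&current_ctx, (intptr_t)new_ctx);
}
```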
compat/avisynth/avisynth_c.h: 1064 lines (new file); file diff suppressed because it is too large.
compat/avisynth/avs/capi.h: 62 lines (new file)
@@ -0,0 +1,62 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
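`AVSC_API` above either declares a dllimport entry point or, when `AVSC_NO_DECLSPEC` is defined, a function-pointer typedef for clients that load avisynth.dll at run time. A small sketch of the two expansions, using a hypothetical function name (`avs_example_call` is not part of the header):

```
#include "avs/capi.h"

/* Default expansion (AVSC_NO_DECLSPEC undefined):
 *   EXTERN_C __declspec(dllimport) int AVSC_CC avs_example_call(int level);
 * i.e. the symbol is imported from avisynth.dll at link time, using
 * __stdcall when AVSC_USE_STDCALL is set (see avs/config.h). */
AVSC_API(int, avs_example_call)(int level);

/* With AVSC_NO_DECLSPEC defined, the same line would instead produce
 *   typedef int (AVSC_CC *avs_example_call_func)(int level);
 * which the client fills in via GetProcAddress() after LoadLibrary(). */
```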
compat/avisynth/avs/config.h: 55 lines (new file)
@@ -0,0 +1,55 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 32
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
||||
compat/avisynth/avs/types.h: 51 lines (new file)
@@ -0,0 +1,51 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
||||
compat/avisynth/avxsynth_c.h: 728 lines (new file)
@@ -0,0 +1,728 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is layed out identicly to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
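/* Example: for a 640x480 AVS_CS_YV12 clip, avs_bits_per_pixel() is 12, so
 * avs_row_size() is 640 * (12 >> 3) = 640 bytes (luma plane only), and
 * avs_bmp_size() is 480 * 640 = 307200 plus half again for the two chroma
 * planes, i.e. 460800 bytes. */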
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
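/* Example: at 48000 Hz audio and 25 fps video (fps_numerator 25,
 * fps_denominator 1), avs_audio_samples_from_frames(&vi, 10) gives
 * 10 * 48000 * 1 / 25 = 19200, and avs_frames_from_audio_samples(&vi, 19200)
 * maps back to 19200 * 25 / 1 / 48000 = 10. */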
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
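/* Example: avs_set_fps(&vi, 30000, 1001) stores 30000/1001 unchanged (the gcd
 * is 1), while avs_set_fps(&vi, 50, 2) reduces to 25/1. */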
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
  unsigned char * data;
  int data_size;
  // sequence_number is incremented every time the buffer is changed, so
  // that stale views can tell they're no longer valid.
  long sequence_number;

  long refcount;
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
  int refcount;
  AVS_VideoFrameBuffer * vfb;
  int offset, pitch, row_size, height, offsetU, offsetV, pitchUV;  // U & V offsets are from top of picture.
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
        return (p->refcount == 1 && p->vfb->refcount == 1);}

AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
{
  if (avs_is_writable(p)) {
    ++p->vfb->sequence_number;
    return p->vfb->data + p->offset;
  } else
    return 0;
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
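
// Example (illustrative sketch): blanking the chroma of a writable planar
// frame by walking each plane with its pitch rather than its row size.  The
// helper name example_grey_out_chroma and the 8-bit neutral value 128 are
// assumptions, not part of the original header.
static void example_grey_out_chroma(AVS_VideoFrame * frame)
{
  static const int planes[2] = { AVS_PLANAR_U, AVS_PLANAR_V };
  int i, x, y;
  if (!avs_is_writable(frame))
    return;                        // real code would call avs_make_writable() first
  for (i = 0; i < 2; i++) {
    unsigned char * dst = avs_get_write_ptr_p(frame, planes[i]);
    const int pitch     = avs_get_pitch_p(frame, planes[i]);
    const int row_size  = avs_get_row_size_p(frame, planes[i]);
    const int height    = avs_get_height_p(frame, planes[i]);   // 0 for packed formats
    for (y = 0; y < height; y++) {
      for (x = 0; x < row_size; x++)
        dst[x] = 128;              // neutral chroma for 8-bit YUV
      dst += pitch;
    }
  }
}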
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer.  That is, use avs_copy_value
// and avs_release_value appropriately, just as you would if AVS_Value were
// a pointer.

// To maintain source code compatibility with future versions of the
// avisynth_c API, don't use AVS_Value directly.  Use the helper
// functions below.

// AVS_Value is laid out identically to AVSValue
typedef struct AVS_Value AVS_Value;
struct AVS_Value {
  short type;  // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong;
               // 'e'rror is used for the result of some functions
  short array_size;
  union {
    void * clip;      // do not use directly, use avs_take_clip
    char boolean;
    int integer;
    INT64 integer64;  // match addition of __int64 to avxplugin.h
    float floating_pt;
    const char * string;
    const AVS_Value * array;
  } d;
};
|
||||
|
||||
// An AVS_Value should be initialized with avs_void.
// It should also be set back to avs_void after the value has been
// released with avs_release_value.  Consider this the equivalent of
// setting a pointer to NULL.
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// Only use these functions on an AVS_Value that does not already have
// an active value.  Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
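
// Example (illustrative sketch): building an argument array, reading it back
// with the accessors above, and clearing it like a pointer afterwards.  The
// name example_value_round_trip and the literal values are hypothetical.
static void example_value_round_trip(void)
{
  AVS_Value args[2];
  AVS_Value arr;
  args[0] = avs_new_value_int(25);
  args[1] = avs_new_value_string("example");  // the string itself is borrowed, not copied
  arr = avs_new_value_array(args, 2);
  if (avs_is_int(avs_array_elt(arr, 0)))
    (void)avs_as_int(avs_array_elt(arr, 0));  // == 25
  avs_release_value(arr);                     // no clip inside, but keeps the fat-pointer discipline
  arr = avs_void;
}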
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set whenever there is an error to report.
// It is cleared before any of the above methods are called.
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter.
// fi is set to point to the AVS_FilterInfo so that you can
// modify it once it is initialized.
// store_child should generally be set to true.  If it is not
// set, then ALL methods (the function pointers) must be defined.
// If it is set, then you do not need to worry about freeing the
// child clip.
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
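
// Example (illustrative sketch): the smallest useful C filter, a pass-through
// that forwards frames of its child clip.  The names example_get_frame and
// example_create are hypothetical; such a creation callback would typically be
// registered from a plugin with avs_add_function().
static AVS_VideoFrame * AVSC_CC example_get_frame(AVS_FilterInfo * fi, int n)
{
  return avs_get_frame(fi->child, n);   // the caller releases the returned frame
}

static AVS_Value AVSC_CC example_create(AVS_ScriptEnvironment * env,
                                        AVS_Value args, void * user_data)
{
  AVS_FilterInfo * fi;
  AVS_Value ret;
  AVS_Clip * clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
  fi->get_frame = example_get_frame;    // other methods fall back to the stored child
  avs_set_to_clip(&ret, clip);
  avs_release_clip(clip);               // ret now holds its own reference
  return ret;
}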
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
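
// Example (illustrative sketch): gating an optimized code path on the host's
// reported CPU features; example_can_use_sse2 is a hypothetical helper name.
static int example_can_use_sse2(AVS_ScriptEnvironment * env)
{
  return (avs_get_cpu_flags(env) & AVS_CPU_SSE2) != 0;
}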
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
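
// Example (illustrative sketch): copying the luma plane of src into an already
// writable dst of the same dimensions; example_copy_luma is a hypothetical
// helper name.
static void example_copy_luma(AVS_ScriptEnvironment * env,
                              AVS_VideoFrame * dst, const AVS_VideoFrame * src)
{
  unsigned char * dstp = avs_get_write_ptr(dst);
  if (!dstp)
    return;                                   // dst was not writable
  avs_bit_blt(env, dstp, avs_get_pitch(dst),
              avs_get_read_ptr(src), avs_get_pitch(src),
              avs_get_row_size(src), avs_get_height(src));
}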
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
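
// Example (illustrative sketch): what a plugin's own source file -- not this
// header -- would define.  The script function name "ExampleAnswer", the
// callback example_answer and the empty parameter string (assumed here to
// mean "takes no arguments") are hypothetical.
static AVS_Value AVSC_CC example_answer(AVS_ScriptEnvironment * env,
                                        AVS_Value args, void * user_data)
{
  return avs_new_value_int(42);
}

AVSC_EXPORT
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment * env)
{
  avs_add_function(env, "ExampleAnswer", "", example_answer, 0);
  return "ExampleAnswer sample plugin";
}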
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
||||
compat/avisynth/windowsPorts/basicDataTypeConversions.h (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
||||
compat/avisynth/windowsPorts/windows2linux.h (new file, 77 lines)
@@ -0,0 +1,77 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
|
||||
#define MAKEWORD(a,b) (((a) << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
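
/* Example (illustrative): MulDiv(5, 1, 2) evaluates to (5*1 + 1)/2 == 3, i.e.
 * it rounds to nearest rather than truncating like plain 5*1/2 == 2, and the
 * intermediate product is widened to 64 bits so it cannot overflow. */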
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
||||
@@ -1,192 +0,0 @@
|
||||
/*
|
||||
* Minimum CUDA compatibility definitions header
|
||||
*
|
||||
* Copyright (c) 2019 rcombs
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
#define COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
|
||||
// Common macros
|
||||
#define __global__ __attribute__((global))
|
||||
#define __device__ __attribute__((device))
|
||||
#define __device_builtin__ __attribute__((device_builtin))
|
||||
#define __align__(N) __attribute__((aligned(N)))
|
||||
#define __inline__ __inline__ __attribute__((always_inline))
|
||||
|
||||
#define max(a, b) ((a) > (b) ? (a) : (b))
|
||||
#define min(a, b) ((a) < (b) ? (a) : (b))
|
||||
#define abs(x) ((x) < 0 ? -(x) : (x))
|
||||
|
||||
#define atomicAdd(a, b) (__atomic_fetch_add(a, b, __ATOMIC_SEQ_CST))
|
||||
|
||||
// Basic typedefs
|
||||
typedef __device_builtin__ unsigned long long cudaTextureObject_t;
|
||||
|
||||
typedef struct __device_builtin__ __align__(2) uchar2
|
||||
{
|
||||
unsigned char x, y;
|
||||
} uchar2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) ushort2
|
||||
{
|
||||
unsigned short x, y;
|
||||
} ushort2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) float2
|
||||
{
|
||||
float x, y;
|
||||
} float2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) int2
|
||||
{
|
||||
int x, y;
|
||||
} int2;
|
||||
|
||||
typedef struct __device_builtin__ uint3
|
||||
{
|
||||
unsigned int x, y, z;
|
||||
} uint3;
|
||||
|
||||
typedef struct uint3 dim3;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) uchar4
|
||||
{
|
||||
unsigned char x, y, z, w;
|
||||
} uchar4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) ushort4
|
||||
{
|
||||
unsigned short x, y, z, w;
|
||||
} ushort4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) int4
|
||||
{
|
||||
int x, y, z, w;
|
||||
} int4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) float4
|
||||
{
|
||||
float x, y, z, w;
|
||||
} float4;
|
||||
|
||||
// Accessors for special registers
|
||||
#define GETCOMP(reg, comp) \
|
||||
asm("mov.u32 %0, %%" #reg "." #comp ";" : "=r"(tmp)); \
|
||||
ret.comp = tmp;
|
||||
|
||||
#define GET(name, reg) static inline __device__ uint3 name() {\
|
||||
uint3 ret; \
|
||||
unsigned tmp; \
|
||||
GETCOMP(reg, x) \
|
||||
GETCOMP(reg, y) \
|
||||
GETCOMP(reg, z) \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
GET(getBlockIdx, ctaid)
|
||||
GET(getBlockDim, ntid)
|
||||
GET(getThreadIdx, tid)
|
||||
|
||||
// Instead of externs for these registers, we turn access to them into calls into trivial ASM
|
||||
#define blockIdx (getBlockIdx())
|
||||
#define blockDim (getBlockDim())
|
||||
#define threadIdx (getThreadIdx())
|
||||
|
||||
// Basic initializers (simple macros rather than inline functions)
|
||||
#define make_int2(a, b) ((int2){.x = a, .y = b})
|
||||
#define make_uchar2(a, b) ((uchar2){.x = a, .y = b})
|
||||
#define make_ushort2(a, b) ((ushort2){.x = a, .y = b})
|
||||
#define make_float2(a, b) ((float2){.x = a, .y = b})
|
||||
#define make_int4(a, b, c, d) ((int4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_uchar4(a, b, c, d) ((uchar4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_ushort4(a, b, c, d) ((ushort4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_float4(a, b, c, d) ((float4){.x = a, .y = b, .z = c, .w = d})
|
||||
|
||||
// Conversions from the tex instruction's 4-register output to various types
|
||||
#define TEX2D(type, ret) static inline __device__ void conv(type* out, unsigned a, unsigned b, unsigned c, unsigned d) {*out = (ret);}
|
||||
|
||||
TEX2D(unsigned char, a & 0xFF)
|
||||
TEX2D(unsigned short, a & 0xFFFF)
|
||||
TEX2D(float, a)
|
||||
TEX2D(uchar2, make_uchar2(a & 0xFF, b & 0xFF))
|
||||
TEX2D(ushort2, make_ushort2(a & 0xFFFF, b & 0xFFFF))
|
||||
TEX2D(float2, make_float2(a, b))
|
||||
TEX2D(uchar4, make_uchar4(a & 0xFF, b & 0xFF, c & 0xFF, d & 0xFF))
|
||||
TEX2D(ushort4, make_ushort4(a & 0xFFFF, b & 0xFFFF, c & 0xFFFF, d & 0xFFFF))
|
||||
TEX2D(float4, make_float4(a, b, c, d))
|
||||
|
||||
// Template calling tex instruction and converting the output to the selected type
|
||||
template<typename T>
|
||||
inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
T ret;
|
||||
unsigned ret1, ret2, ret3, ret4;
|
||||
asm("tex.2d.v4.u32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret1), "=r"(ret2), "=r"(ret3), "=r"(ret4) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
conv(&ret, ret1, ret2, ret3, ret4);
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float4 tex2D<float4>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret;
|
||||
asm("tex.2d.v4.f32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float tex2D<float>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
return tex2D<float4>(texObject, x, y).x;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float2 tex2D<float2>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret = tex2D<float4>(texObject, x, y);
|
||||
return make_float2(ret.x, ret.y);
|
||||
}
|
||||
|
||||
// Math helper functions
|
||||
static inline __device__ float floorf(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ float floor(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ double floor(double a) { return __builtin_floor(a); }
|
||||
static inline __device__ float ceilf(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ float ceil(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ double ceil(double a) { return __builtin_ceil(a); }
|
||||
static inline __device__ float truncf(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ float trunc(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ double trunc(double a) { return __builtin_trunc(a); }
|
||||
static inline __device__ float fabsf(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ float fabs(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ double fabs(double a) { return __builtin_fabs(a); }
|
||||
static inline __device__ float sqrtf(float a) { return __builtin_sqrtf(a); }
|
||||
|
||||
static inline __device__ float __saturatef(float a) { return __nvvm_saturate_f(a); }
|
||||
static inline __device__ float __sinf(float a) { return __nvvm_sin_approx_f(a); }
|
||||
static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a); }
|
||||
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
|
||||
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }
|
||||
|
||||
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
|
||||
@@ -16,8 +16,8 @@
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#define COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#ifndef AV_COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
#define AV_COMPAT_CUDA_DYNLINK_LOADER_H
|
||||
|
||||
#include "libavutil/log.h"
|
||||
#include "compat/w32dlfcn.h"
|
||||
@@ -30,4 +30,4 @@
|
||||
|
||||
#include <ffnvcodec/dynlink_loader.h>
|
||||
|
||||
#endif /* COMPAT_CUDA_DYNLINK_LOADER_H */
|
||||
#endif
|
||||
|
||||
compat/cuda/ptx2c.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
# DEALINGS IN THE SOFTWARE.
|
||||
|
||||
set -e
|
||||
|
||||
OUT="$1"
|
||||
IN="$2"
|
||||
NAME="$(basename "$IN" | sed 's/\..*//')"
|
||||
|
||||
printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
|
||||
while read LINE
|
||||
do
|
||||
printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
|
||||
done < "$IN"
|
||||
printf ";\n" >> "$OUT"
|
||||
|
||||
exit 0
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#define FUN(name, type, op) \
|
||||
type name(type x, type y) \
|
||||
{ \
|
||||
if (fpclassify(x) == FP_NAN) return y; \
|
||||
if (fpclassify(y) == FP_NAN) return x; \
|
||||
return x op y ? x : y; \
|
||||
}
|
||||
|
||||
FUN(fmin, double, <)
|
||||
FUN(fmax, double, >)
|
||||
FUN(fminf, float, <)
|
||||
FUN(fmaxf, float, >)
|
||||
|
||||
long double fmodl(long double x, long double y)
|
||||
{
|
||||
return fmod(x, y);
|
||||
}
|
||||
|
||||
long double scalbnl(long double x, int exp)
|
||||
{
|
||||
return scalbn(x, exp);
|
||||
}
|
||||
|
||||
long double copysignl(long double x, long double y)
|
||||
{
|
||||
return copysign(x, y);
|
||||
}
|
||||
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
double fmin(double, double);
|
||||
double fmax(double, double);
|
||||
float fminf(float, float);
|
||||
float fmaxf(float, float);
|
||||
long double fmodl(long double, long double);
|
||||
long double scalbnl(long double, int);
|
||||
long double copysignl(long double, long double);
|
||||
@@ -59,7 +59,7 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
|
||||
* recommends to provide _snprintf/_vsnprintf() a buffer size that
|
||||
* is one less than the actual buffer, and zero it before calling
|
||||
* _snprintf/_vsnprintf() to workaround this problem.
|
||||
* See https://web.archive.org/web/20151214111935/http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
memset(s, 0, n);
|
||||
va_copy(ap_copy, ap);
|
||||
ret = _vsnprintf(s, n - 1, fmt, ap_copy);
|
||||
|
||||
@@ -27,19 +27,15 @@
|
||||
#define COMPAT_OS2THREADS_H
|
||||
|
||||
#define INCL_DOS
|
||||
#define INCL_DOSERRORS
|
||||
#include <os2.h>
|
||||
|
||||
#undef __STRICT_ANSI__ /* for _beginthread() */
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include <sys/builtin.h>
|
||||
#include <sys/fmutex.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct {
|
||||
TID tid;
|
||||
@@ -167,28 +163,6 @@ static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_timedwait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
ULONG t = av_clip64(abs_milli - av_gettime() / 1000, 0, ULONG_MAX);
|
||||
|
||||
__atomic_increment(&cond->wait_count);
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
APIRET ret = DosWaitEventSem(cond->event_sem, t);
|
||||
|
||||
__atomic_decrement(&cond->wait_count);
|
||||
|
||||
DosPostEventSem(cond->ack_sem);
|
||||
|
||||
pthread_mutex_lock(mutex);
|
||||
|
||||
return (ret == ERROR_TIMEOUT) ? ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex)
|
||||
{
|
||||
|
||||
@@ -20,40 +20,11 @@
|
||||
#define COMPAT_W32DLFCN_H
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <stdint.h>
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "libavutil/macros.h"
|
||||
#if (_WIN32_WINNT < 0x0602) || HAVE_WINRT
|
||||
#include "libavutil/wchar_filename.h"
|
||||
|
||||
static inline wchar_t *get_module_filename(HMODULE module)
|
||||
{
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD path_size = 0, path_len;
|
||||
|
||||
do {
|
||||
path_size = path_size ? FFMIN(2 * path_size, INT16_MAX + 1) : MAX_PATH;
|
||||
new_path = av_realloc_array(path, path_size, sizeof *path);
|
||||
if (!new_path) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
path = new_path;
|
||||
// Returns path_size in case of insufficient buffer.
|
||||
// Whether the error is set or not and whether the output
|
||||
// is null-terminated or not depends on the version of Windows.
|
||||
path_len = GetModuleFileNameW(module, path, path_size);
|
||||
} while (path_len && path_size <= INT16_MAX && path_size <= path_len);
|
||||
|
||||
if (!path_len) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
#endif
|
||||
/**
|
||||
* Safe function used to open dynamic libs. This attempts to improve program security
|
||||
* by removing the current directory from the dll search path. Only dll's found in the
|
||||
@@ -63,53 +34,29 @@ static inline wchar_t *get_module_filename(HMODULE module)
|
||||
*/
|
||||
static inline HMODULE win32_dlopen(const char *name)
|
||||
{
|
||||
wchar_t *name_w;
|
||||
HMODULE module = NULL;
|
||||
if (utf8towchar(name, &name_w))
|
||||
name_w = NULL;
|
||||
#if _WIN32_WINNT < 0x0602
|
||||
// On Win7 and earlier we check if KB2533623 is available
|
||||
// Need to check if KB2533623 is available
|
||||
if (!GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetDefaultDllDirectories")) {
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD pathlen, pathsize, namelen;
|
||||
if (!name_w)
|
||||
HMODULE module = NULL;
|
||||
wchar_t *path = NULL, *name_w = NULL;
|
||||
DWORD pathlen;
|
||||
if (utf8towchar(name, &name_w))
|
||||
goto exit;
|
||||
namelen = wcslen(name_w);
|
||||
path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t));
|
||||
// Try local directory first
|
||||
path = get_module_filename(NULL);
|
||||
if (!path)
|
||||
pathlen = GetModuleFileNameW(NULL, path, MAX_PATH);
|
||||
pathlen = wcsrchr(path, '\\') - path;
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
new_path = wcsrchr(path, '\\');
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
pathlen = new_path - path;
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
if (module == NULL) {
|
||||
// Next try System32 directory
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
pathlen = GetSystemDirectoryW(path, MAX_PATH);
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
// Buffer is not enough in two cases:
|
||||
// 1. system directory + \ + module name
|
||||
// 2. system directory even without the module name.
|
||||
if (pathlen + namelen + 2 > pathsize) {
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
// Query again to handle the case #2.
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
goto exit;
|
||||
}
|
||||
path[pathlen] = L'\\';
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
}
|
||||
@@ -126,19 +73,16 @@ exit:
|
||||
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
|
||||
#endif
|
||||
#if HAVE_WINRT
|
||||
if (!name_w)
|
||||
wchar_t *name_w = NULL;
|
||||
int ret;
|
||||
if (utf8towchar(name, &name_w))
|
||||
return NULL;
|
||||
module = LoadPackagedLibrary(name_w, 0);
|
||||
#else
|
||||
#define LOAD_FLAGS (LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32)
|
||||
/* filename may be in CP_ACP */
|
||||
if (!name_w)
|
||||
return LoadLibraryExA(name, NULL, LOAD_FLAGS);
|
||||
module = LoadLibraryExW(name_w, NULL, LOAD_FLAGS);
|
||||
#undef LOAD_FLAGS
|
||||
#endif
|
||||
ret = LoadPackagedLibrary(name_w, 0);
|
||||
av_free(name_w);
|
||||
return module;
|
||||
return ret;
|
||||
#else
|
||||
return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32);
|
||||
#endif
|
||||
}
|
||||
#define dlopen(name, flags) win32_dlopen(name)
|
||||
#define dlclose FreeLibrary
|
||||
|
||||
@@ -35,15 +35,14 @@
|
||||
* As most functions here are used without checking return values,
|
||||
* only implement return values as necessary. */
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
@@ -62,17 +61,7 @@ typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
|
||||
#define PTHREAD_CANCEL_ENABLE 1
|
||||
#define PTHREAD_CANCEL_DISABLE 0
|
||||
|
||||
#if HAVE_WINRT
|
||||
#define THREADFUNC_RETTYPE DWORD
|
||||
#else
|
||||
#define THREADFUNC_RETTYPE unsigned
|
||||
#endif
|
||||
|
||||
static av_unused THREADFUNC_RETTYPE
|
||||
__stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = (pthread_t*)arg;
|
||||
h->ret = h->func(h->arg);
|
||||
@@ -167,31 +156,10 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
|
||||
|
||||
if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
|
||||
DWORD err = GetLastError();
|
||||
if (err == ERROR_TIMEOUT)
|
||||
return ETIMEDOUT;
|
||||
else
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_setcancelstate(int state, int *oldstate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* COMPAT_W32PTHREADS_H */
|
||||
|
||||
@@ -48,7 +48,7 @@ trap 'rm -f -- $libname' EXIT
|
||||
if [ -n "$AR" ]; then
|
||||
$AR rcs ${libname} $@ >/dev/null
|
||||
else
|
||||
lib.exe -out:${libname} $@ >/dev/null
|
||||
lib -out:${libname} $@ >/dev/null
|
||||
fi
|
||||
if [ $? != 0 ]; then
|
||||
echo "Could not create temporary library." >&2
|
||||
@@ -108,7 +108,7 @@ if [ -n "$NM" ]; then
|
||||
cut -d' ' -f3 |
|
||||
sed -e "s/^${prefix}//")
|
||||
else
|
||||
dump=$(dumpbin.exe -linkermember:1 ${libname} |
|
||||
dump=$(dumpbin -linkermember:1 ${libname} |
|
||||
sed -e '/public symbols/,$!d' -e '/^ \{1,\}Summary/,$d' -e "s/ \{1,\}${prefix}/ /" -e 's/ \{1,\}/ /g' |
|
||||
tail -n +2 |
|
||||
cut -d' ' -f3)
|
||||
|
||||
@@ -4,6 +4,6 @@ LINK_EXE_PATH=$(dirname "$(command -v cl)")/link
|
||||
if [ -x "$LINK_EXE_PATH" ]; then
|
||||
"$LINK_EXE_PATH" $@
|
||||
else
|
||||
link.exe $@
|
||||
link $@
|
||||
fi
|
||||
exit $?
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ "$1" = "--version" ]; then
|
||||
rc.exe -?
|
||||
exit $?
|
||||
fi
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: mswindres [-I/include/path ...] [-DSOME_DEFINE ...] [-o output.o] input.rc [output.o]" >&2
|
||||
exit 0
|
||||
fi
|
||||
|
||||
EXTRA_OPTS="-nologo"
|
||||
|
||||
while [ $# -gt 2 ]; do
|
||||
case $1 in
|
||||
-D*) EXTRA_OPTS="$EXTRA_OPTS -d$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-I*) EXTRA_OPTS="$EXTRA_OPTS -i$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-o) OPT_OUT="$2"; shift ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
IN="$1"
|
||||
if [ -z "$OPT_OUT" ]; then
|
||||
OUT="$2"
|
||||
else
|
||||
OUT="$OPT_OUT"
|
||||
fi
|
||||
|
||||
eval set -- $EXTRA_OPTS
|
||||
rc.exe "$@" -fo "$OUT" "$IN"
|
||||
doc/APIchanges (910 lines shown)
@@ -1,900 +1,20 @@
|
||||
The last version increases of all libraries were on 2023-02-09
|
||||
Never assume the API of libav* to be stable unless at least 1 month has passed
|
||||
since the last major version increase or the API was added.
|
||||
|
||||
The last version increases were:
|
||||
libavcodec: 2017-10-21
|
||||
libavdevice: 2017-10-21
|
||||
libavfilter: 2017-10-21
|
||||
libavformat: 2017-10-21
|
||||
libavresample: 2017-10-21
|
||||
libpostproc: 2017-10-21
|
||||
libswresample: 2017-10-21
|
||||
libswscale: 2017-10-21
|
||||
libavutil: 2017-10-21
|
||||
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 6.1 was cut here -------- 8< ---------
|
||||
|
||||
2023-10-27 - 52a97642604 - lavu 58.28.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_3POINT1POINT2 and AV_CHANNEL_LAYOUT_3POINT1POINT2.
|
||||
Add AV_CH_LAYOUT_5POINT1POINT2_BACK and AV_CHANNEL_LAYOUT_5POINT1POINT2_BACK.
|
||||
Add AV_CH_LAYOUT_5POINT1POINT4_BACK and AV_CHANNEL_LAYOUT_5POINT1POINT4_BACK.
|
||||
Add AV_CH_LAYOUT_7POINT1POINT2 and AV_CHANNEL_LAYOUT_7POINT1POINT2.
|
||||
Add AV_CH_LAYOUT_7POINT1POINT4_BACK and AV_CHANNEL_LAYOUT_7POINT1POINT4_BACK.
|
||||
|
||||
2023-10-06 - 804be7f9e3c - lavc 60.30.101 - avcodec.h
|
||||
AVCodecContext.coded_side_data may now be used during decoding, to be set
|
||||
by user before calling avcodec_open2() for initialization.
|
||||
|
||||
2023-10-06 - 5432d2aacad - lavc 60.15.100 - avformat.h
|
||||
Deprecate AVFormatContext.{nb_,}side_data, av_stream_add_side_data(),
|
||||
av_stream_new_side_data(), and av_stream_get_side_data(). Side data fields
|
||||
from AVFormatContext.codecpar should be used from now on.
|
||||
|
||||
2023-10-06 - 21d7cc6fa9a - lavc 60.30.100 - codec_par.h
|
||||
Added {nb_,}coded_side_data to AVCodecParameters.
|
||||
The AVCodecParameters helpers will copy it to and from its AVCodecContext
|
||||
namesake.
|
||||
|
||||
2023-10-06 - 74279227dd2 - lavc 60.29.100 - packet.h
|
||||
Added av_packet_side_data_new(), av_packet_side_data_add(),
|
||||
av_packet_side_data_get(), av_packet_side_data_remove, and
|
||||
av_packet_side_data_free().
|
||||
|
||||
2023-10-03 - ea14e8bc302 - lavc 60.28.100 - codec_par.h defs.h
|
||||
Move the definition of enum AVFieldOrder from codec_par.h to defs.h.
|
||||
|
||||
2023-10-03 - dd48e49d547 - lavf 60.14.100 - avformat.h
|
||||
Deprecate AVFMT_ALLOW_FLUSH without replacement. Users can always
|
||||
flush any muxer by sending a NULL packet.
|
||||
|
||||
2023-09-28 - 8e1ef7c38f6 - lavu 58.27.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_GBRAP14BE, AV_PIX_FMT_GBRAP14LE pixel formats.
|
||||
|
||||
2023-09-28 - 05f8b2ca0f7 - lavu 58.26.100 - hwcontext_cuda.h
|
||||
Add AV_CUDA_USE_CURRENT_CONTEXT.
|
||||
|
||||
2023-09-19 - ba9cd06c763 - lavu 58.25.100 - avutil.h
|
||||
Make AV_TIME_BASE_Q compatible with C++.
|
||||
|
||||
2023-09-18 - 85e075587dc - lavf 60 - avformat.h
|
||||
Deprecate AVFMT_FLAG_SHORTEST without replacement.
|
||||
|
||||
2023-09-07 - 423b6a7e493 - lavu 58.24.100 - imgutils.h
|
||||
Add av_image_copy2(), a wrapper around the av_image_copy()
|
||||
to overcome limitations of automatic conversions.
|
||||
|
||||
2023-09-07 - 5094d1f429e - lavu 58.23.100 - fifo.h
|
||||
Constify the AVFifo pointees in av_fifo_peek() and av_fifo_peek_to_cb().
|
||||
|
||||
2023-09-07 - fa4bf5793a0 - lavu 58.22.100 - audio_fifo.h
|
||||
Constify some pointees in av_audio_fifo_write(), av_audio_fifo_read(),
|
||||
av_audio_fifo_peek() and av_audio_fifo_peek_at().
|
||||
|
||||
2023-09-07 - 9bf31f60960 - lavu 58.21.100 - samplefmt.h
|
||||
Constify some pointees in av_samples_copy() and av_samples_set_silence().
|
||||
|
||||
2023-09-07 - 41285890e03 - lavu 58.20.100 - imgutils.h
|
||||
Constify some pointees in av_image_copy(), av_image_copy_uc_from() and
|
||||
av_image_fill_black().
|
||||
|
||||
2023-09-07 - 2a68d945cd7 - lavf 60.12.100 - avio.h
|
||||
Constify the buffer pointees in the write_packet and write_data_type
|
||||
callbacks of AVIOContext on the next major bump.
|
||||
|
||||
2023-09-07 - 8238bc0b5e3 - lavc 60.26.100 - defs.h
|
||||
Add AV_PROFILE_* and AV_LEVEL_* replacements in defs.h for the
|
||||
defines from avcodec.h. The latter are deprecated.
|
||||
|
||||
2023-09-06 - b6627a57f41 - lavc 60.25.101 - avcodec.h
|
||||
AVCodecContext.rc_buffer_size may now be set by decoders.
|
||||
|
||||
2023-09-02 - 25ecc94d58f - lavu 58.19.100 - executor.h
|
||||
Add AVExecutor API
|
||||
|
||||
2023-09-01 - 139e54911c8 - lavc 60.25.100 - avfft.h
|
||||
The entire header will be deprecated and removed in two major bumps.
|
||||
For a replacement to av_dct, av_rdft, av_fft and av_mdct, use
|
||||
the new API from libavutil/tx.h.
|
||||
|
||||
2023-09-01 - 11e22730e1e - lavu 58.18.100 - tx.h
|
||||
Add AV_TX_REAL_TO_REAL and AV_TX_REAL_TO_IMAGINARY
|
||||
|
||||
2023-08-18 - ff094f5ebbd - lavu 58.17.100 - channel_layout.h
|
||||
All AV_CHANNEL_LAYOUT_* macros are now compatible with C++ 17 and older.
|
||||
|
||||
2023-08-08 - 5012b4ab4ca - lavu 58.15.100 - video_hint.h
|
||||
Add AVVideoHint API.
|
||||
|
||||
2023-08-08 - 5012b4ab4ca - lavc 60 - avcodec.h
|
||||
Deprecate AV_CODEC_FLAG_DROPCHANGED without replacement.
|
||||
|
||||
2023-07-05 - d694c25b44c - lavu 58.14.100 - random_seed.h
|
||||
Add av_random_bytes()
|
||||
|
||||
2023-05-29 - 637afea88ed - lavc 60.16.100 - avcodec.h codec_id.h
|
||||
Add AV_CODEC_ID_EVC, FF_PROFILE_EVC_BASELINE, and FF_PROFILE_EVC_MAIN.
|
||||
|
||||
2023-05-29 - 75918016ab1 - lavu 58.12.100 - mathematics.h
|
||||
Add av_bessel_i0()
|
||||
|
||||
2023-05-29 - f3795e18574 - lavc 60.15.100 - avcodec.h
|
||||
Add AVHWAccel.update_thread_context, AVHWAccel.free_frame_priv,
|
||||
AVHWAccel.flush.
|
||||
|
||||
2023-05-29 - db1d0227812 - lavu 58.11.100 - hwcontext_vulkan.h
|
||||
Add AVVulkanDeviceContext.lock_queue, AVVulkanDeviceContext.unlock_queue,
|
||||
AVVulkanFramesContext.format, AVVulkanFramesContext.lock_frame,
|
||||
AVVulkanFramesContext.unlock_frame, AVVkFrame.queue_family.
|
||||
Deprecate AV_VK_FRAME_FLAG_CONTIGUOUS_MEMORY (use multiplane images instead).
|
||||
|
||||
2023-05-29 - bef86ba86cc - lavu 58.10.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_P212BE, AV_PIX_FMT_P212LE, AV_PIX_FMT_P412BE,
|
||||
AV_PIX_FMT_P412LE.
|
||||
|
||||
2023-05-18 - 01d444c077e - lavu 58.8.100 - frame.h
|
||||
Add av_frame_replace().
|
||||
|
||||
2023-05-18 - 63767b79a57 - lavu 58 - frame.h
|
||||
Deprecate AVFrame.palette_has_changed without replacement.
|
||||
|
||||
2023-05-15 - 7d1d61cc5f5 - lavc 60 - avcodec.h
|
||||
Deprecate AVCodecContext.ticks_per_frame in favor of
|
||||
AVCodecContext.framerate (encoding) and
|
||||
AV_CODEC_PROP_FIELDS (decoding).
|
||||
|
||||
2023-05-15 - 70433abf7fb - lavc 60.12.100 - codec_desc.h
|
||||
Add AV_CODEC_PROP_FIELDS.
|
||||
|
||||
2023-05-15 - 8b20d0dcb5c - lavc 60 - codec.h
|
||||
Deprecate AV_CODEC_CAP_SUBFRAMES without replacement.
|
||||
|
||||
2023-05-07 - c2ae8e30b7f - lavc 60.11.100 - codec_par.h
|
||||
Add AVCodecParameters.framerate.
|
||||
|
||||
2023-05-04 - 0fc9c1f6828 - lavu 58.7.100 - frame.h
|
||||
Deprecate AVFrame.interlaced_frame, AVFrame.top_field_first, and
|
||||
AVFrame.key_frame.
|
||||
Add AV_FRAME_FLAG_INTERLACED, AV_FRAME_FLAG_TOP_FIELD_FIRST, and
|
||||
AV_FRAME_FLAG_KEY flags as replacement.
|
||||
|
||||
2023-04-10 - 4eaaa38d3df - lavu 58.6.100 - frame.h
|
||||
av_frame_get_plane_buffer() now accepts const AVFrame*.
|
||||
|
||||
2023-04-04 - 61b27b15fc9 - lavu 58.6.100 - hdr_dynamic_metadata.h
|
||||
Add AV_HDR_PLUS_MAX_PAYLOAD_SIZE.
|
||||
av_dynamic_hdr_plus_create_side_data() now accepts a user provided
|
||||
buffer.
|
||||
|
||||
2023-03-24 - 632c3499319 - lavfi 9.5.100 - avfilter.h
|
||||
Add AVFILTER_FLAG_HWDEVICE.
|
||||
|
||||
2023-03-21 - 0a3ce5f7384 - lavu 58.5.100 - hdr_dynamic_metadata.h
|
||||
Add av_dynamic_hdr_plus_from_t35() and av_dynamic_hdr_plus_to_t35()
|
||||
functions to convert between raw T.35 payloads containing dynamic
|
||||
HDR10+ metadata and their parsed representations as AVDynamicHDRPlus.
|
||||
|
||||
2023-03-17 - 3be46ee7672 - lavu 58.4.100 - hdr_dynamic_vivid_metadata.h
|
||||
Add two group of three spline params.
|
||||
Deprecate previous define which only supports one group of params.
|
||||
|
||||
2023-03-02 - 373ef1c4fae - lavc 60.6.100 - avcodec.h
|
||||
Add FF_PROFILE_EAC3_DDP_ATMOS, FF_PROFILE_TRUEHD_ATMOS,
|
||||
FF_PROFILE_DTS_HD_MA_X and FF_PROFILE_DTS_HD_MA_X_IMAX.
|
||||
|
||||
2023-02-25 - f4593775436 - lavc 60.5.100 - avcodec.h
|
||||
Add FF_PROFILE_HEVC_SCC.
|
||||
|
||||
-------- 8< --------- FFmpeg 6.0 was cut here -------- 8< ---------
|
||||
|
||||
2023-02-16 - 927042b409 - lavf 60.2.100 - avformat.h
|
||||
Deprecate AVFormatContext io_close callback.
|
||||
The superior io_close2 callback should be used instead.
|
||||
|
||||
2023-02-13 - 2296078397 - lavu 58.1.100 - frame.h
|
||||
Deprecate AVFrame.coded_picture_number and display_picture_number.
|
||||
Their usefulness is questionable and very few decoders set them.
|
||||
|
||||
2023-02-13 - 6b6f7db819 - lavc 60.2.100 - avcodec.h
|
||||
Add AVCodecContext.frame_num as a 64bit version of frame_number.
|
||||
Deprecate AVCodecContext.frame_number.
|
||||
|
||||
2023-02-12 - d1b9a3ddb4 - lavfi 9.1.100 - avfilter.h
|
||||
Add filtergraph segment parsing API.
|
||||
New structs:
|
||||
- AVFilterGraphSegment
|
||||
- AVFilterChain
|
||||
- AVFilterParams
|
||||
- AVFilterPadParams
|
||||
New functions:
|
||||
- avfilter_graph_segment_parse()
|
||||
- avfilter_graph_segment_create_filters()
|
||||
- avfilter_graph_segment_apply_opts()
|
||||
- avfilter_graph_segment_init()
|
||||
- avfilter_graph_segment_link()
|
||||
- avfilter_graph_segment_apply()
|
||||
|
||||
2023-02-09 - 719a93f4e4 - lavu 58.0.100 - csp.h
|
||||
Add av_csp_approximate_trc_gamma() and av_csp_trc_func_from_id().
|
||||
Add av_csp_trc_function.
|
||||
|
||||
2023-02-09 - 868a31b42d - lavc 60.0.100 - avcodec.h
|
||||
avcodec_decode_subtitle2() now accepts const AVPacket*.
|
||||
|
||||
2023-02-04 - d02340b9e3 - lavc 59.63.100
|
||||
Allow AV_CODEC_FLAG_COPY_OPAQUE to be used with decoders.
|
||||
|
||||
2023-01-29 - a1a80f2e64 - lavc 59.59.100 - avcodec.h
|
||||
Add AV_CODEC_FLAG_COPY_OPAQUE and AV_CODEC_FLAG_FRAME_DURATION.
|
||||
|
||||
2023-01-13 - 002d0ec740 - lavu 57.44.100 - ambient_viewing_environment.h frame.h
|
||||
Adds a new structure for holding H.274 Ambient Viewing Environment metadata,
|
||||
AVAmbientViewingEnvironment.
|
||||
Adds a new AVFrameSideDataType entry AV_FRAME_DATA_AMBIENT_VIEWING_ENVIRONMENT
|
||||
for it.
|
||||
|
||||
2022-12-10 - 7a8d78f7e3 - lavc 59.55.100 - avcodec.h
|
||||
Add AV_HWACCEL_FLAG_UNSAFE_OUTPUT.
|
||||
|
||||
2022-11-24 - e97368eba5 - lavu 57.43.100 - tx.h
|
||||
Add AV_TX_FLOAT_DCT, AV_TX_DOUBLE_DCT and AV_TX_INT32_DCT.
|
||||
|
||||
2022-11-06 - 9dad237928 - lavu 57.42.100 - dict.h
|
||||
Add av_dict_iterate().
|
||||
|
||||
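A small sketch of iterating a dictionary with the new call (replacing the older av_dict_get() loop with AV_DICT_IGNORE_SUFFIX):

    #include <stdio.h>
    #include <libavutil/dict.h>

    int main(void)
    {
        AVDictionary *d = NULL;
        const AVDictionaryEntry *e = NULL;

        av_dict_set(&d, "title",  "example", 0);
        av_dict_set(&d, "artist", "someone", 0);

        while ((e = av_dict_iterate(d, e)))   /* pass previous entry, NULL to start */
            printf("%s=%s\n", e->key, e->value);

        av_dict_free(&d);
        return 0;
    }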
2022-11-03 - 6228ba141d - lavu 57.41.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_7POINT1_TOP_BACK and AV_CHANNEL_LAYOUT_7POINT1_TOP_BACK.
|
||||
|
||||
2022-10-30 - 83e918de71 - lavu 57.40.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_CUBE and AV_CHANNEL_LAYOUT_CUBE.
|
||||
|
||||
2022-10-11 - 479747645f - lavu 57.39.101 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGBF32 and AV_PIX_FMT_RGBAF32.
|
||||
|
||||
2022-10-05 - 37d5ddc317 - lavu 57.39.100 - cpu.h
|
||||
Add AV_CPU_FLAG_RVB_BASIC.
|
||||
|
||||
2022-10-03 - d09776d486 - lavf 59.34.100 - avio.h
|
||||
Make AVIODirContext an opaque type in a future major version bump.
|
||||
|
||||
2022-09-27 - 0c0a3deb18 - lavu 57.38.100 - cpu.h
|
||||
Add CPU flags for RISC-V vector extensions:
|
||||
AV_CPU_FLAG_RVV_I32, AV_CPU_FLAG_RVV_F32, AV_CPU_FLAG_RVV_I64,
|
||||
AV_CPU_FLAG_RVV_F64
|
||||
|
||||
2022-09-26 - a02a0e8db4 - lavc 59.48.100 - avcodec.h
|
||||
Deprecate avcodec_enum_to_chroma_pos() and avcodec_chroma_pos_to_enum().
|
||||
Use av_chroma_location_enum_to_pos() or av_chroma_location_pos_to_enum()
|
||||
instead.
|
||||
|
||||
2022-09-26 - xxxxxxxxxx - lavu 57.37.100 - pixdesc.h pixfmt.h
|
||||
Add av_chroma_location_enum_to_pos() and av_chroma_location_pos_to_enum().
|
||||
Add AV_PIX_FMT_RGBF32BE, AV_PIX_FMT_RGBF32LE, AV_PIX_FMT_RGBAF32BE,
|
||||
AV_PIX_FMT_RGBAF32LE.
|
||||
|
||||
2022-09-26 - cf856d8957 - lavc 59.47.100 - avcodec.h defs.h
|
||||
Move the AV_EF_* and FF_COMPLIANCE_* defines from avcodec.h to defs.h.
|
||||
|
||||
2022-09-03 - d75c4693fe - lavu 57.36.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_P012, AV_PIX_FMT_Y212, AV_PIX_FMT_XV30, AV_PIX_FMT_XV36
|
||||
|
||||
2022-09-03 - dea9744560 - lavu 57.35.100 - file.h
|
||||
Deprecate av_tempfile() without replacement.
|
||||
|
||||
2022-08-03 - cc5a5c9860 - lavu 57.34.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_VUYX.
|
||||
|
||||
2022-08-22 - 14726571dd - lavf 59 - avformat.h
|
||||
Deprecate av_stream_get_end_pts() without replacement.
|
||||
|
||||
2022-08-19 - 352799dca8 - lavc 59.42.102 - codec_id.h
|
||||
Deprecate AV_CODEC_ID_AYUV and ayuv decoder/encoder. The rawvideo codec
|
||||
and vuya pixel format combination will be used instead from now on.
|
||||
|
||||
2022-08-07 - e95b08a7dd - lavu 57.33.101 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGBAF16{BE,LE} pixel formats.
|
||||
|
||||
2022-08-12 - e0bbdbe0a6 - lavu 57.33.100 - hwcontext_qsv.h
|
||||
Add loader field to AVQSVDeviceContext
|
||||
|
||||
2022-08-03 - 6ab8a9d375 - lavu 57.32.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_VUYA.
|
||||
|
||||
2022-08-02 - e3838b856f - lavc 59.41.100 - avcodec.h codec.h
|
||||
Add AV_CODEC_FLAG_RECON_FRAME and AV_CODEC_CAP_ENCODER_RECON_FRAME.
|
||||
avcodec_receive_frame() may now be used on encoders when
|
||||
AV_CODEC_FLAG_RECON_FRAME is active.
|
||||
|
||||
2022-08-02 - eede1d2927 - lavu 57.31.100 - frame.h
|
||||
av_frame_make_writable() may now be called on non-refcounted
|
||||
frames and will make a refcounted copy out of them.
|
||||
Previously an error was returned in such cases.
|
||||
|
||||
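A sketch of the newly allowed case, wrapping caller-owned pixels in a non-refcounted frame (GRAY8 is chosen purely for illustration):

    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    static int wrap_and_own(AVFrame *frame, uint8_t *pixels, int w, int h)
    {
        frame->format      = AV_PIX_FMT_GRAY8;
        frame->width       = w;
        frame->height      = h;
        frame->data[0]     = pixels;  /* no AVBufferRef attached: non-refcounted */
        frame->linesize[0] = w;

        /* Now copies the data into refcounted buffers instead of failing. */
        return av_frame_make_writable(frame);
    }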
2022-07-30 - e1a0f2df3d - lavc 59.40.100 - avcodec.h
|
||||
Add the AV_CODEC_FLAG2_ICC_PROFILES flag to AVCodecContext, to enable
|
||||
automatic reading and writing of embedded ICC profiles in image files.
|
||||
The "flags2" option now supports the corresponding flag "icc_profiles".
|
||||
|
||||
2022-07-19 - 4397f9a5a0 - lavu 57.30.100 - frame.h
|
||||
Add AVFrame.duration, deprecate AVFrame.pkt_duration.
|
||||
|
||||
-------- 8< --------- FFmpeg 5.1 was cut here -------- 8< ---------
|
||||
|
||||
2022-06-12 - 7cae3d8b76 - lavf 59.25.100 - avio.h
|
||||
Add avio_vprintf(), similar to avio_printf() but allowing it to be used
from within a function taking a variable argument list as input.
|
||||
|
||||
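The kind of wrapper this enables, a variadic helper forwarding its argument list (sketch):

    #include <stdarg.h>
    #include <libavformat/avio.h>

    static int url_logf(AVIOContext *pb, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = avio_vprintf(pb, fmt, ap);  /* forward the caller's va_list */
        va_end(ap);
        return ret;
    }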
2022-06-12 - ff59ecc4de - lavu 57.27.100 - uuid.h
|
||||
Add UUID handling functions.
|
||||
Add av_uuid_parse(), av_uuid_urn_parse(), av_uuid_parse_range(),
|
||||
av_uuid_parse_range(), av_uuid_equal(), av_uuid_copy(), and av_uuid_nil().
|
||||
|
||||
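A short sketch parsing and comparing two representations of the same UUID (the sample UUID is arbitrary):

    #include <stdio.h>
    #include <libavutil/uuid.h>

    int main(void)
    {
        AVUUID a, b;

        if (av_uuid_parse("f81d4fae-7dec-11d0-a765-00a0c91e6bf6", a) < 0 ||
            av_uuid_urn_parse("urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6", b) < 0)
            return 1;

        printf("equal: %d\n", av_uuid_equal(a, b));  /* prints 1 */
        return 0;
    }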
2022-06-01 - d42b410e05 - lavu 57.26.100 - csp.h
|
||||
Add public API for colorspace structs.
|
||||
Add av_csp_luma_coeffs_from_avcsp(), av_csp_primaries_desc_from_id(),
|
||||
and av_csp_primaries_id_from_desc().
|
||||
|
||||
2022-05-23 - 4cdc14aa95 - lavu 57.25.100 - avutil.h
|
||||
Deprecate av_fopen_utf8() without replacement.
|
||||
|
||||
2022-03-16 - f3a0e2ee2b - all libraries - version_major.h
|
||||
Add lib<name>/version_major.h as new installed headers, which only
|
||||
contain the major version number (and corresponding API deprecation
|
||||
defines).
|
||||
|
||||
2022-03-15 - cdba98bb80 - swr 4.5.100 - swresample.h
|
||||
Add swr_alloc_set_opts2() and swr_build_matrix2().
|
||||
Deprecate swr_alloc_set_opts() and swr_build_matrix().
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavfi 8.28.100 - avfilter.h buffersink.h buffersrc.h
|
||||
Update AVFilterLink for the new channel layout API: add ch_layout,
|
||||
deprecate channel_layout.
|
||||
|
||||
Update the buffersink filter sink for the new channel layout API:
|
||||
add av_buffersink_get_ch_layout() and the ch_layouts option,
|
||||
deprecate av_buffersink_get_channel_layout() and the channel_layouts option.
|
||||
|
||||
Update AVBufferSrcParameters for the new channel layout API:
|
||||
add ch_layout, deprecate channel_layout.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavf 59.19.100 - avformat.h
|
||||
Add AV_DISPOSITION_NON_DIEGETIC.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavc 59.24.100 - avcodec.h codec_par.h
|
||||
Update AVCodecParameters for the new channel layout API: add ch_layout,
|
||||
deprecate channels/channel_layout.
|
||||
|
||||
Update AVCodecContext for the new channel layout API: add ch_layout,
|
||||
deprecate channels/channel_layout.
|
||||
|
||||
Update AVCodec for the new channel layout API: add ch_layouts,
|
||||
deprecate channel_layouts.
|
||||
|
||||
2022-03-15 - cdba98bb80 - lavu 57.24.100 - channel_layout.h frame.h opt.h
|
||||
Add new channel layout API based on the AVChannelLayout struct.
|
||||
Add support for Ambisonic audio.
|
||||
Deprecate previous channel layout API based on uint64 bitmasks.
|
||||
|
||||
Add AV_OPT_TYPE_CHLAYOUT option type, deprecate AV_OPT_TYPE_CHANNEL_LAYOUT.
|
||||
Update AVFrame for the new channel layout API: add ch_layout, deprecate
|
||||
channels/channel_layout.
|
||||
|
||||
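A minimal sketch of the struct-based API replacing the uint64 bitmask layouts:

    #include <stdio.h>
    #include <libavutil/channel_layout.h>

    int main(void)
    {
        AVChannelLayout layout = AV_CHANNEL_LAYOUT_5POINT1;
        char name[64];

        av_channel_layout_describe(&layout, name, sizeof(name));
        printf("%s (%d channels)\n", name, layout.nb_channels);

        av_channel_layout_uninit(&layout);  /* frees custom maps; a no-op here */
        return 0;
    }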
2022-03-10 - f629ea2e18 - lavu 57.23.100 - cpu.h
|
||||
Add AV_CPU_FLAG_AVX512ICL.
|
||||
|
||||
2022-02-07 - a10f1aec1f - lavu 57.21.100 - fifo.h
|
||||
Deprecate AVFifoBuffer and the API around it, namely av_fifo_alloc(),
|
||||
av_fifo_alloc_array(), av_fifo_free(), av_fifo_freep(), av_fifo_reset(),
|
||||
av_fifo_size(), av_fifo_space(), av_fifo_generic_peek_at(),
|
||||
av_fifo_generic_peek(), av_fifo_generic_read(), av_fifo_generic_write(),
|
||||
av_fifo_realloc2(), av_fifo_grow(), av_fifo_drain() and av_fifo_peek2().
|
||||
Users should switch to the AVFifo-API.
|
||||
|
||||
2022-02-07 - 7329b22c05 - lavu 57.20.100 - fifo.h
|
||||
Add a new FIFO API, which allows setting a FIFO element size.
|
||||
This API operates on these elements rather than on bytes.
|
||||
Add av_fifo_alloc2(), av_fifo_elem_size(), av_fifo_can_read(),
|
||||
av_fifo_can_write(), av_fifo_grow2(), av_fifo_drain2(), av_fifo_write(),
|
||||
av_fifo_write_from_cb(), av_fifo_read(), av_fifo_read_to_cb(),
|
||||
av_fifo_peek(), av_fifo_peek_to_cb(), av_fifo_drain2(), av_fifo_reset2(),
|
||||
av_fifo_freep2(), av_fifo_auto_grow_limit().
|
||||
|
||||
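A minimal sketch of the element-based FIFO (counts are elements, not bytes):

    #include <stdio.h>
    #include <libavutil/fifo.h>

    int main(void)
    {
        AVFifo *f = av_fifo_alloc2(8, sizeof(int), AV_FIFO_FLAG_AUTO_GROW);
        int in[3] = { 1, 2, 3 }, out;

        if (!f)
            return 1;

        av_fifo_write(f, in, 3);           /* write three ints */
        while (av_fifo_can_read(f)) {
            av_fifo_read(f, &out, 1);      /* read one int at a time */
            printf("%d\n", out);
        }

        av_fifo_freep2(&f);
        return 0;
    }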
2022-01-26 - af94ab7c7c0 - lavu 57.19.100 - tx.h
|
||||
Add AV_TX_FLOAT_RDFT, AV_TX_DOUBLE_RDFT and AV_TX_INT32_RDFT.
|
||||
|
||||
-------- 8< --------- FFmpeg 5.0 was cut here -------- 8< ---------
|
||||
|
||||
2022-01-04 - 78dc21b123e - lavu 57.16.100 - frame.h
|
||||
Add AV_FRAME_DATA_DOVI_METADATA.
|
||||
|
||||
2022-01-03 - 70f318e6b6c - lavf 59.13.100 - avformat.h
|
||||
Add AVFMT_EXPERIMENTAL flag.
|
||||
|
||||
2021-12-22 - b7e1ec7bda9 - lavu 57.13.100 - hwcontext_videotoolbox.h
|
||||
Add av_vt_pixbuf_set_attachments
|
||||
|
||||
2021-12-22 - 69bd95dcd8d - lavu 57.13.100 - hwcontext_videotoolbox.h
|
||||
Add av_map_videotoolbox_chroma_loc_from_av
|
||||
Add av_map_videotoolbox_color_matrix_from_av
|
||||
Add av_map_videotoolbox_color_primaries_from_av
|
||||
Add av_map_videotoolbox_color_trc_from_av
|
||||
|
||||
2021-12-21 - ffbab99f2c2 - lavu 57.12.100 - cpu.h
|
||||
Add AV_CPU_FLAG_SLOW_GATHER.
|
||||
|
||||
2021-12-20 - 278068dc60d - lavu 57.11.101 - display.h
|
||||
Modified the documentation of av_display_rotation_set()
|
||||
to match its longstanding actual behaviour of treating
|
||||
the angle as directed clockwise.
|
||||
|
||||
2021-12-12 - 64834bb86a1 - lavf 59.10.100 - avformat.h
|
||||
Add AVFormatContext io_close2 which returns an int
|
||||
|
||||
2021-12-10 - f45cbb775e4 - lavu 57.11.100 - hwcontext_vulkan.h
|
||||
Add AVVkFrame.offset and AVVulkanFramesContext.flags.
|
||||
|
||||
2021-12-04 - b9c928a486f - lavfi 8.19.100 - avfilter.h
|
||||
Add AVFILTER_FLAG_METADATA_ONLY.
|
||||
|
||||
2021-12-03 - b236ef0a594 - lavu 57.10.100 - frame.h
|
||||
Add AVFrame.time_base
|
||||
|
||||
2021-11-22 - b2cd1fb2ec6 - lavu 57.9.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_P210, AV_PIX_FMT_P410, AV_PIX_FMT_P216, and AV_PIX_FMT_P416.
|
||||
|
||||
2021-11-17 - 54e65aa38ab - lavu 57.9.100 - frame.h
|
||||
Add AV_FRAME_DATA_DOVI_RPU_BUFFER.
|
||||
|
||||
2021-11-16 - ed75a08d36c - lavf 59.9.100 - avformat.h
|
||||
Add av_stream_get_class(). Schedule adding AVStream.av_class at libavformat
|
||||
major version 60.
|
||||
Add av_disposition_to_string() and av_disposition_from_string().
|
||||
Add "disposition" AVOption to AVStream's class.
|
||||
|
||||
2021-11-12 - 8478d60d5b5 - lavu 57.8.100 - hwcontext_vulkan.h
|
||||
Added AVVkFrame.sem_value, AVVulkanDeviceContext.queue_family_encode_index,
|
||||
nb_encode_queues, queue_family_decode_index, and nb_decode_queues.
|
||||
|
||||
2021-10-18 - 682bafdb125 - lavf 59.8.100 - avio.h
|
||||
Introduce public bytes_{read,written} statistic fields to AVIOContext.
|
||||
|
||||
2021-10-13 - a5622ed16f8 - lavf 59.7.100 - avio.h
|
||||
Deprecate AVIOContext.written. Originally added as a private entry in
|
||||
commit 3f75e5116b900f1428aa13041fc7d6301bf1988a, its grouping with
|
||||
the comment noting its private state was missed during merging of the field
|
||||
from Libav (most likely due to an already existing field in between).
|
||||
|
||||
2021-09-21 - 0760d9153c3 - lavu 57.7.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_X2BGR10.
|
||||
|
||||
2021-09-20 - 8d5de914d31 - lavu 57.6.100 - mem.h
|
||||
Deprecate av_mallocz_array() as it is identical to av_calloc().
|
||||
|
||||
2021-09-20 - 176b8d785bf - lavc 59.9.100 - avcodec.h
|
||||
Deprecate AVCodecContext.sub_text_format and the corresponding
|
||||
AVOptions. It is unused since the last major bump.
|
||||
|
||||
2021-09-20 - dd846bc4a91 - lavc 59.8.100 - avcodec.h codec.h
|
||||
Deprecate AV_CODEC_FLAG_TRUNCATED and AV_CODEC_CAP_TRUNCATED,
|
||||
as they are redundant with parsers.
|
||||
|
||||
2021-09-17 - ccfdef79b13 - lavu 57.5.101 - buffer.h
|
||||
Constified the input parameters in av_buffer_replace(), av_buffer_ref(),
|
||||
and av_buffer_pool_buffer_get_opaque().
|
||||
|
||||
2021-09-08 - 4f78711f9c2 - lavu 57.5.100 - hwcontext_d3d11va.h
|
||||
Add AVD3D11VAFramesContext.texture_infos
|
||||
|
||||
2021-09-06 - 42cd64c1826 - lsws 6.1.100 - swscale.h
|
||||
Add AVFrame-based scaling API:
|
||||
- sws_scale_frame()
|
||||
- sws_frame_start()
|
||||
- sws_frame_end()
|
||||
- sws_send_slice()
|
||||
- sws_receive_slice()
|
||||
- sws_receive_slice_alignment()
|
||||
|
||||
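A sketch of the frame-based path (output size and formats are arbitrary; assumes src is a valid decoded frame):

    #include <libavutil/frame.h>
    #include <libswscale/swscale.h>

    static AVFrame *downscale(const AVFrame *src)
    {
        AVFrame *dst = av_frame_alloc();
        struct SwsContext *sws = NULL;

        if (!dst)
            return NULL;
        dst->format = AV_PIX_FMT_YUV420P;
        dst->width  = 640;
        dst->height = 360;

        if (av_frame_get_buffer(dst, 0) < 0)
            goto fail;
        sws = sws_getContext(src->width, src->height, src->format,
                             dst->width, dst->height, dst->format,
                             SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws || sws_scale_frame(sws, dst, src) < 0)
            goto fail;
        sws_freeContext(sws);
        return dst;

    fail:
        sws_freeContext(sws);
        av_frame_free(&dst);
        return NULL;
    }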
2021-09-02 - cbf111059d2 - lavc 59.7.100 - avcodec.h
|
||||
Incremented the number of elements of AVCodecParser.codec_ids to seven.
|
||||
|
||||
2021-08-24 - 590a7e02f04 - lavc 59.6.100 - avcodec.h
|
||||
Add FF_CODEC_PROPERTY_FILM_GRAIN
|
||||
|
||||
2021-08-20 - 7c5f998196d - lavfi 8.3.100 - avfilter.h
|
||||
Add avfilter_filter_pad_count() as a replacement for avfilter_pad_count().
|
||||
Deprecate avfilter_pad_count().
|
||||
|
||||
2021-08-17 - 8c53b145993 - lavu 57.4.101 - opt.h
|
||||
av_opt_copy() now guarantees that allocated src and dst options
|
||||
don't alias each other even on error.
|
||||
|
||||
2021-08-14 - d5de9965ef6 - lavu 57.4.100 - imgutils.h
|
||||
Add av_image_copy_plane_uc_from()
|
||||
|
||||
2021-08-02 - a1a0fddfd05 - lavc 59.4.100 - packet.h
|
||||
Add AVPacket.opaque, AVPacket.opaque_ref, AVPacket.time_base.
|
||||
|
||||
2021-07-23 - 2dd8acbe800 - lavu 57.3.100 - common.h macros.h
|
||||
Move several macros (AV_NE, FFDIFFSIGN, FFMAX, FFMAX3, FFMIN, FFMIN3,
|
||||
FFSWAP, FF_ARRAY_ELEMS, MKTAG, MKBETAG) from common.h to macros.h.
|
||||
|
||||
2021-07-22 - e3b5ff17c2e - lavu 57.2.100 - film_grain_params.h
|
||||
Add AV_FILM_GRAIN_PARAMS_H274, AVFilmGrainH274Params
|
||||
|
||||
2021-07-19 - c1bf56a526f - lavu 57.1.100 - cpu.h
|
||||
Add av_cpu_force_count()
|
||||
|
||||
2021-06-17 - aca923b3653 - lavc 59.2.100 - packet.h
|
||||
Add AV_PKT_DATA_DYNAMIC_HDR10_PLUS
|
||||
|
||||
2021-06-09 - 2cccab96f6f - lavf 59.3.100 - avformat.h
|
||||
Add pts_wrap_bits to AVStream
|
||||
|
||||
2021-06-10 - 7c9763070d9 - lavc 59.1.100 - avcodec.h codec.h
|
||||
Move av_get_profile_name() from avcodec.h to codec.h.
|
||||
|
||||
2021-06-10 - bb3648e6766 - lavc 59.1.100 - avcodec.h codec_par.h
|
||||
Move av_get_audio_frame_duration2() from avcodec.h to codec_par.h.
|
||||
|
||||
2021-06-10 - 881db34f6a0 - lavc 59.1.100 - avcodec.h codec_id.h
|
||||
Move av_get_bits_per_sample(), av_get_exact_bits_per_sample(),
|
||||
avcodec_profile_name(), and av_get_pcm_codec() from avcodec.h
|
||||
to codec_id.h.
|
||||
|
||||
2021-06-10 - ff0a96046d8 - lavc 59.1.100 - avcodec.h defs.h
|
||||
Add new installed header defs.h. The following definitions are moved
|
||||
into it from avcodec.h:
|
||||
- AVDiscard
|
||||
- AVAudioServiceType
|
||||
- AVPanScan
|
||||
- AVCPBProperties and av_cpb_properties_alloc()
|
||||
- AVProducerReferenceTime
|
||||
- av_xiphlacing()
|
||||
|
||||
2021-04-27 - cb3ac722f4 - lavc 59.0.100 - avcodec.h
|
||||
Constified AVCodecParserContext.parser.
|
||||
|
||||
2021-04-27 - 8b3e6ce5f4 - lavd 59.0.100 - avdevice.h
|
||||
The av_*_device_next API functions now accept and return
|
||||
pointers to const AVInputFormat resp. AVOutputFormat.
|
||||
|
||||
2021-04-27 - d7e0d428fa - lavd 59.0.100 - avdevice.h
|
||||
avdevice_list_input_sources and avdevice_list_output_sinks now accept
|
||||
pointers to const AVInputFormat resp. const AVOutputFormat.
|
||||
|
||||
2021-04-27 - 46dac8cf3d - lavf 59.0.100 - avformat.h
|
||||
av_find_best_stream now uses a const AVCodec ** parameter
|
||||
for the returned decoder.
|
||||
|
||||
2021-04-27 - 626535f6a1 - lavc 59.0.100 - codec.h
|
||||
avcodec_find_encoder_by_name(), avcodec_find_encoder(),
|
||||
avcodec_find_decoder_by_name() and avcodec_find_decoder()
|
||||
now return a pointer to const AVCodec.
|
||||
|
||||
2021-04-27 - 14fa0a4efb - lavf 59.0.100 - avformat.h
|
||||
Constified AVFormatContext.*_codec.
|
||||
|
||||
2021-04-27 - 56450a0ee4 - lavf 59.0.100 - avformat.h
|
||||
Constified the pointers to AVInputFormats and AVOutputFormats
|
||||
in AVFormatContext, avformat_alloc_output_context2(),
|
||||
av_find_input_format(), av_probe_input_format(),
|
||||
av_probe_input_format2(), av_probe_input_format3(),
|
||||
av_probe_input_buffer2(), av_probe_input_buffer(),
|
||||
avformat_open_input(), av_guess_format() and av_guess_codec().
|
||||
Furthermore, constified the AVProbeData in av_probe_input_format(),
|
||||
av_probe_input_format2() and av_probe_input_format3().
|
||||
|
||||
2021-04-19 - 18af1ea8d1 - lavu 56.74.100 - tx.h
|
||||
Add AV_TX_FULL_IMDCT and AV_TX_UNALIGNED.
|
||||
|
||||
2021-04-17 - f1bf465aa0 - lavu 56.73.100 - frame.h detection_bbox.h
|
||||
Add AV_FRAME_DATA_DETECTION_BBOXES
|
||||
|
||||
2021-04-06 - 557953a397 - lavf 58.78.100 - avformat.h
|
||||
Add avformat_index_get_entries_count(), avformat_index_get_entry(),
|
||||
and avformat_index_get_entry_from_timestamp().
|
||||
|
||||
2021-03-21 - a77beea6c8 - lavu 56.72.100 - frame.h
|
||||
Deprecated av_get_colorspace_name().
|
||||
Use av_color_space_name() instead.
|
||||
|
||||
-------- 8< --------- FFmpeg 4.4 was cut here -------- 8< ---------
|
||||
|
||||
2021-03-19 - e8c0bca6bd - lavu 56.69.100 - adler32.h
|
||||
Added a typedef for the type of the Adler-32 checksums
|
||||
used by av_adler32_update(). It will be changed to uint32_t
|
||||
at the next major bump.
|
||||
The type of the parameter for the length of the input buffer
|
||||
will also be changed to size_t at the next major bump.
|
||||
|
||||
2021-03-19 - e318438f2f - lavf 58.75.100 - avformat.h
|
||||
AVChapter.id will be changed from int to int64_t
|
||||
on the next major version bump.
|
||||
|
||||
2021-03-17 - f7db77bd87 - lavc 58.133.100 - codec.h
|
||||
Deprecated av_init_packet(). Once removed, sizeof(AVPacket) will
|
||||
no longer be a part of the public ABI.
|
||||
Deprecated AVPacketList.
|
||||
|
||||
2021-03-16 - 7d09579190 - lavc 58.132.100 - codec.h
|
||||
Add AV_CODEC_CAP_OTHER_THREADS as a new name for
|
||||
AV_CODEC_CAP_AUTO_THREADS. AV_CODEC_CAP_AUTO_THREADS
|
||||
is now deprecated.
|
||||
|
||||
2021-03-12 - 6e7e3a3820 - lavc 58.131.100 - avcodec.h codec.h
|
||||
Add a get_encode_buffer callback to AVCodecContext, similar to
|
||||
get_buffer2 but for encoders.
|
||||
Add avcodec_default_get_encode_buffer().
|
||||
Add AV_GET_ENCODE_BUFFER_FLAG_REF.
|
||||
Encoders may now be flagged as AV_CODEC_CAP_DR1 capable.
|
||||
|
||||
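A sketch of a custom allocator that merely logs the requested size and defers to the default implementation (installed on the context before avcodec_open2()):

    #include <libavcodec/avcodec.h>

    static int my_get_encode_buffer(AVCodecContext *avctx, AVPacket *pkt, int flags)
    {
        av_log(avctx, AV_LOG_DEBUG, "encoder asked for %d bytes\n", pkt->size);
        return avcodec_default_get_encode_buffer(avctx, pkt, flags);
    }

    /* ... avctx->get_encode_buffer = my_get_encode_buffer; before avcodec_open2() */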
2021-03-10 - 42e68fe015 - lavf 58.72.100 - avformat.h
|
||||
Change AVBufferRef related AVStream function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-10 - d79e0fe65c - lavc 58.130.100 - packet.h
|
||||
Change AVBufferRef related AVPacket function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-10 - 14040a1d91 - lavu 56.68.100 - buffer.h frame.h
|
||||
Change AVBufferRef and relevant AVFrame function and struct size
|
||||
parameter and fields type to size_t at next major bump.
|
||||
|
||||
2021-03-04 - a0eec776b6 - lavc 58.128.101 - avcodec.h
|
||||
Enable err_recognition to be set for encoders.
|
||||
|
||||
2021-03-03 - 2ff40b98ec - lavf 58.70.100 - avformat.h
|
||||
Deprecate AVFMT_FLAG_PRIV_OPT. It will do nothing
|
||||
as soon as av_demuxer_open() is removed.
|
||||
|
||||
2021-02-27 - dd9227e48f - lavc 58.126.100 - avcodec.h
|
||||
Deprecated avcodec_get_frame_class().
|
||||
|
||||
2021-02-21 - 5ca40d6d94 - lavu 56.66.100 - tx.h
|
||||
Add enum AVTXFlags and AVTXFlags.AV_TX_INPLACE
|
||||
|
||||
2021-02-14 - 4f49ca7bbc - lavd 58.12.100 - avdevice.h
|
||||
Deprecated avdevice_capabilities_create() and
|
||||
avdevice_capabilities_free().
|
||||
|
||||
2021-02-10 - 1bda9bb68a - lavu 56.65.100 - common.h
|
||||
Add FFABS64U()
|
||||
|
||||
2021-01-26 - 5dd9567080 - lavu 56.64.100 - common.h
|
||||
Add FFABSU()
|
||||
|
||||
2021-01-25 - 56709ca8aa - lavc 58.119.100 - avcodec.h
|
||||
Deprecate AVCodecContext.debug_mv, FF_DEBUG_VIS_MV_P_FOR, FF_DEBUG_VIS_MV_B_FOR,
|
||||
FF_DEBUG_VIS_MV_B_BACK
|
||||
|
||||
2021-01-11 - ebdd33086a - lavc 58.116.100 - avcodec.h
|
||||
Add FF_PROFILE_VVC_MAIN_10 and FF_PROFILE_VVC_MAIN_10_444.
|
||||
|
||||
2021-01-01 - baecaa16c1 - lavu 56.63.100 - video_enc_params.h
|
||||
Add AV_VIDEO_ENC_PARAMS_MPEG2
|
||||
|
||||
2020-12-03 - eca12f4d5a - lavu 56.62.100 - timecode.h
|
||||
Add av_timecode_init_from_components.
|
||||
|
||||
2020-11-27 - a83098ab03 - lavc 58.114.100 - avcodec.h
|
||||
Deprecate AVCodecContext.thread_safe_callbacks. Starting with
|
||||
LIBAVCODEC_VERSION_MAJOR=60, user callbacks must always be
|
||||
thread-safe when frame threading is used.
|
||||
|
||||
2020-11-25 - d243dd540a - lavc 58.113.100 - avcodec.h
|
||||
Adds a new flag AV_CODEC_EXPORT_DATA_FILM_GRAIN for export_side_data.
|
||||
|
||||
2020-11-25 - 4f9ee87253 - lavu 56.61.100 - film_grain_params.h
|
||||
Adds a new API for extracting codec film grain parameters as side data.
|
||||
Adds a new AVFrameSideDataType entry AV_FRAME_DATA_FILM_GRAIN_PARAMS for it.
|
||||
|
||||
2020-10-28 - f95d9510ff - lavf 58.64.100 - avformat.h
|
||||
Add AVSTREAM_EVENT_FLAG_NEW_PACKETS.
|
||||
|
||||
2020-09-28 - 68918d3b7f - lavu 56.60.100 - buffer.h
|
||||
Add a av_buffer_replace() convenience function.
|
||||
|
||||
2020-09-13 - 837b6eb90e - lavu 56.59.100 - timecode.h
|
||||
Add av_timecode_make_smpte_tc_string2.
|
||||
|
||||
2020-08-21 - 06f2651204 - lavu 56.58.100 - avstring.h
|
||||
Deprecate av_d2str(). Use av_asprintf() instead.
|
||||
|
||||
2020-08-04 - 34de0abbe7 - lavu 56.58.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_22POINT2 together with its newly required pieces:
|
||||
AV_CH_TOP_SIDE_LEFT, AV_CH_TOP_SIDE_RIGHT, AV_CH_BOTTOM_FRONT_CENTER,
|
||||
AV_CH_BOTTOM_FRONT_LEFT, AV_CH_BOTTOM_FRONT_RIGHT.
|
||||
|
||||
2020-07-23 - 84655b7101 - lavu 56.57.100 - cpu.h
|
||||
Add AV_CPU_FLAG_MMI and AV_CPU_FLAG_MSA.
|
||||
|
||||
2020-07-22 - 3a8e927176 - lavu 56.56.100 - imgutils.h
|
||||
Add av_image_fill_plane_sizes().
|
||||
|
||||
2020-07-15 - 448a9aaa78 - lavc 58.96.100 - packet.h
|
||||
Add AV_PKT_DATA_S12M_TIMECODE.
|
||||
|
||||
2020-06-12 - b09fb030c1 - lavu 56.55.100 - pixdesc.h
|
||||
Add AV_PIX_FMT_X2RGB10.
|
||||
|
||||
2020-06-11 - bc8ab084fb - lavu 56.54.100 - frame.h
|
||||
Add AV_FRAME_DATA_SEI_UNREGISTERED.
|
||||
|
||||
2020-06-10 - 1b4a98b029 - lavu 56.53.100 - log.h opt.h
|
||||
Add av_opt_child_class_iterate() and AVClass.child_class_iterate().
|
||||
Deprecate av_opt_child_class_next() and AVClass.child_class_next().
|
||||
|
||||
-------- 8< --------- FFmpeg 4.3 was cut here -------- 8< ---------
|
||||
|
||||
2020-06-05 - ec39c2276a - lavu 56.50.100 - buffer.h
|
||||
Passing NULL as alloc argument to av_buffer_pool_init2() is now allowed.
|
||||
|
||||
2020-05-27 - ba6cada92e - lavc 58.88.100 - avcodec.h codec.h
|
||||
Move AVCodec-related public API to new header codec.h.
|
||||
|
||||
2020-05-23 - 064b875e89 - lavu 56.49.100 - video_enc_params.h
|
||||
Add AV_VIDEO_ENC_PARAMS_H264.
|
||||
|
||||
2020-05-23 - 2e08b39444 - lavu 56.48.100 - hwcontext.h
|
||||
Add av_hwdevice_ctx_create_derived_opts.
|
||||
|
||||
2020-05-23 - 6b65c4ec54 - lavu 56.47.100 - rational.h
|
||||
Add av_gcd_q().
|
||||
|
||||
2020-05-22 - af9e622776 - lavu 56.46.101 - opt.h
|
||||
Add AV_OPT_FLAG_CHILD_CONSTS.
|
||||
|
||||
2020-05-22 - 9d443c3e68 - lavc 58.87.100 - avcodec.h codec_par.h
|
||||
Move AVBitstreamFilter-related public API to new header bsf.h.
|
||||
Move AVCodecParameters-related public API to new header codec_par.h.
|
||||
|
||||
2020-05-21 - 13b1bbff0b - lavc 58.86.101 - avcodec.h
|
||||
Deprecated AV_CODEC_CAP_INTRA_ONLY and AV_CODEC_CAP_LOSSLESS.
|
||||
|
||||
2020-05-17 - 84af196c65 - lavu 56.46.100 - common.h
|
||||
Add av_sat_add64() and av_sat_sub64()
|
||||
|
||||
2020-05-12 - 991d417692 - lavu 56.45.100 - video_enc_params.h
|
||||
lavc 58.84.100 - avcodec.h
|
||||
Add a new API for exporting video encoding information.
|
||||
Replaces the deprecated API for exporting QP tables from decoders.
|
||||
Add AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS to request this information from
|
||||
decoders.
|
||||
|
||||
2020-05-10 - dccd07f66d - lavu 56.44.100 - hwcontext_vulkan.h
|
||||
Add enabled_inst_extensions, num_enabled_inst_extensions, enabled_dev_extensions
|
||||
and num_enabled_dev_extensions fields to AVVulkanDeviceContext
|
||||
|
||||
2020-04-22 - 0e1db79e37 - lavc 58.81.100 - packet.h
|
||||
- lavu 56.43.100 - dovi_meta.h
|
||||
Add AV_PKT_DATA_DOVI_CONF and AVDOVIDecoderConfigurationRecord.
|
||||
|
||||
2020-04-15 - 22b25b3ea5 - lavc 58.79.100 - avcodec.h
|
||||
Add formal support for calling avcodec_flush_buffers() on encoders.
|
||||
Encoders that set the cap AV_CODEC_CAP_ENCODER_FLUSH will be flushed.
|
||||
For all other encoders, the call is now a no-op rather than undefined
|
||||
behaviour.
|
||||
|
||||
2020-04-10 - 672946c7fe - lavc 58.78.100 - avcodec.h codec_desc.h codec_id.h packet.h
|
||||
Move AVCodecDesc-related public API to new header codec_desc.h.
|
||||
Move AVCodecID enum to new header codec_id.h.
|
||||
Move AVPacket-related public API to new header packet.h.
|
||||
|
||||
2020-03-29 - 4cb0dda555 - lavf 58.42.100 - avformat.h
|
||||
av_read_frame() now guarantees to handle uninitialized input packets
|
||||
and to return refcounted packets on success.
|
||||
|
||||
2020-03-27 - c52ec0367d - lavc 58.77.100 - avcodec.h
|
||||
av_packet_ref() now guarantees to return the destination packet
|
||||
in a blank state on error.
|
||||
|
||||
2020-03-10 - 05d27f342b - lavc 58.75.100 - avcodec.h
|
||||
Add AV_PKT_DATA_ICC_PROFILE.
|
||||
|
||||
2020-02-21 - d005a7cdfd - lavc 58.73.101 - avcodec.h
|
||||
Add AV_CODEC_EXPORT_DATA_PRFT.
|
||||
|
||||
2020-02-21 - c666689491 - lavc 58.73.100 - avcodec.h
|
||||
Add AVCodecContext.export_side_data and AV_CODEC_EXPORT_DATA_MVS.
|
||||
|
||||
2020-02-13 - e8f054b095 - lavu 56.41.100 - tx.h
|
||||
Add AV_TX_INT32_FFT and AV_TX_INT32_MDCT
|
||||
|
||||
2020-02-12 - 3182114f88 - lavu 56.40.100 - log.h
|
||||
Add av_log_once().
|
||||
|
||||
2020-02-04 - a88449ffb2 - lavu 56.39.100 - hwcontext.h
|
||||
Add AV_PIX_FMT_VULKAN
|
||||
Add AV_HWDEVICE_TYPE_VULKAN and implementation.
|
||||
|
||||
2020-01-30 - 27529eeb27 - lavf 58.37.100 - avio.h
|
||||
Add avio_protocol_get_class().
|
||||
|
||||
2020-01-15 - 717b2074ec - lavc 58.66.100 - avcodec.h
|
||||
Add AV_PKT_DATA_PRFT and AVProducerReferenceTime.
|
||||
|
||||
2019-12-27 - 45259a0ee4 - lavu 56.38.100 - eval.h
|
||||
Add av_expr_count_func().
|
||||
|
||||
2019-12-26 - 16685114d5 - lavu 56.37.100 - buffer.h
|
||||
Add av_buffer_pool_buffer_get_opaque().
|
||||
|
||||
2019-11-17 - 1c23abc88f - lavu 56.36.100 - eval API
|
||||
Add av_expr_count_vars().
|
||||
|
||||
2019-10-14 - f3746d31f9 - lavu 56.35.101 - opt.h
|
||||
Add AV_OPT_FLAG_RUNTIME_PARAM.
|
||||
|
||||
2019-09-25 - f8406ab4b9 - lavc 58.59.100 - avcodec.h
|
||||
Add max_samples
|
||||
|
||||
2019-09-04 - 2a9d461abc - lavu 56.35.100 - hwcontext_videotoolbox.h
|
||||
Add av_map_videotoolbox_format_from_pixfmt2() for full range pixfmt
|
||||
|
||||
2019-09-01 - 8821d1f56e - lavu 56.34.100 - pixfmt.h
|
||||
Add EBU Tech. 3213-E AVColorPrimaries value
|
||||
|
||||
2019-08-17 - 95fa73a2b4 - lavf 58.31.101 - avio.h
|
||||
4K limit removed from avio_printf.
|
||||
|
||||
2019-08-17 - a82f8f2f10 - lavf 58.31.100 - avio.h
|
||||
Add avio_print_string_array and avio_print.
|
||||
|
||||
2019-07-27 - 42e2319ba9 - lavu 56.33.100 - tx.h
|
||||
Add AV_TX_DOUBLE_FFT and AV_TX_DOUBLE_MDCT
|
||||
|
||||
-------- 8< --------- FFmpeg 4.2 was cut here -------- 8< ---------
|
||||
|
||||
2019-06-21 - a30e44098a - lavu 56.30.100 - frame.h
|
||||
Add FF_DECODE_ERROR_DECODE_SLICES
|
||||
|
||||
2019-06-14 - edfced8c04 - lavu 56.29.100 - frame.h
|
||||
Add FF_DECODE_ERROR_CONCEALMENT_ACTIVE
|
||||
|
||||
2019-05-15 - b79b29ddb1 - lavu 56.28.100 - tx.h
|
||||
Add av_tx_init(), av_tx_uninit() and related definitions.
|
||||
|
||||
2019-04-20 - 3153a6502a - lavc 58.52.100 - avcodec.h
|
||||
Add AV_CODEC_FLAG_DROPCHANGED to allow avcodec_receive_frame to drop
|
||||
frames whose parameters differ from first decoded frame in stream.
|
||||
|
||||
2019-04-12 - abfeba9724 - lavf 58.27.102
|
||||
Rename hls,applehttp demuxer to hls
|
||||
|
||||
2019-01-27 - 5bcefceec8 - lavc 58.46.100 - avcodec.h
|
||||
Add discard_damaged_percentage
|
||||
|
||||
2019-01-08 - 1ef4828276 - lavu 56.26.100 - frame.h
|
||||
Add AV_FRAME_DATA_REGIONS_OF_INTEREST
|
||||
|
||||
2018-12-21 - 2744d6b364 - lavu 56.25.100 - hdr_dynamic_metadata.h
|
||||
Add AV_FRAME_DATA_DYNAMIC_HDR_PLUS enum value, av_dynamic_hdr_plus_alloc(),
|
||||
av_dynamic_hdr_plus_create_side_data() functions, and related structs.
|
||||
|
||||
-------- 8< --------- FFmpeg 4.1 was cut here -------- 8< ---------
|
||||
|
||||
2018-10-27 - 718044dc19 - lavu 56.21.100 - pixdesc.h
|
||||
Add av_read_image_line2(), av_write_image_line2()
|
||||
|
||||
2018-10-24 - f9d4126f28 - lavu 56.20.100 - frame.h
|
||||
Add AV_FRAME_DATA_S12M_TIMECODE
|
||||
|
||||
2018-10-11 - f6d48b618a - lavc 58.33.100 - mediacodec.h
|
||||
Add av_mediacodec_render_buffer_at_time().
|
||||
|
||||
2018-09-09 - 35498c124a - lavc 58.29.100 - avcodec.h
|
||||
Add AV_PKT_DATA_AFD
|
||||
|
||||
2018-08-16 - b33f5299a5 - lavc 58.23.100 - avcodec.h
|
||||
Add av_bsf_flush().
|
||||
|
||||
2018-05-18 - 2b2f2f65f3 - lavf 58.15.100 - avformat.h
|
||||
Add pmt_version field to AVProgram
|
||||
|
||||
2018-05-17 - 5dfeb7f081 - lavf 58.14.100 - avformat.h
|
||||
Add AV_DISPOSITION_STILL_IMAGE
|
||||
|
||||
2018-05-10 - c855683427 - lavu 56.18.101 - hwcontext_cuda.h
|
||||
Add AVCUDADeviceContext.stream.
|
||||
|
||||
2018-04-30 - 56b081da57 - lavu 56.18.100 - pixdesc.h
|
||||
Add AV_PIX_FMT_FLAG_ALPHA to AV_PIX_FMT_PAL8.
|
||||
|
||||
2018-04-26 - 5be0410cb3 - lavu 56.17.100 - opt.h
|
||||
Add AV_OPT_FLAG_DEPRECATED.
|
||||
|
||||
2018-04-26 - 71fa82bed6 - lavu 56.16.100 - threadmessage.h
|
||||
Add av_thread_message_queue_nb_elems().
|
||||
|
||||
-------- 8< --------- FFmpeg 4.0 was cut here -------- 8< ---------
|
||||
|
||||
2018-04-03 - d6fc031caf - lavu 56.13.100 - pixdesc.h
|
||||
@@ -1982,7 +1102,7 @@ API changes, most recent first:
|
||||
2014-04-15 - ef818d8 - lavf 55.37.101 - avformat.h
|
||||
Add av_format_inject_global_side_data()
|
||||
|
||||
2014-04-12 - 4f698be8f - lavu 52.76.100 - log.h
|
||||
2014-04-12 - 4f698be - lavu 52.76.100 - log.h
|
||||
Add av_log_get_flags()
|
||||
|
||||
2014-04-11 - 6db42a2b - lavd 55.12.100 - avdevice.h
|
||||
|
||||
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = 6.1.4
|
||||
PROJECT_NUMBER = 4.0.3
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
@@ -1980,7 +1980,6 @@ PREDEFINED = __attribute__(x)= \
|
||||
av_alloc_size(...)= \
|
||||
AV_GCC_VERSION_AT_LEAST(x,y)=1 \
|
||||
AV_GCC_VERSION_AT_MOST(x,y)=0 \
|
||||
"FF_PAD_STRUCTURE(name,size,...)=typedef struct name { __VA_ARGS__ } name;" \
|
||||
__GNUC__
|
||||
|
||||
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
|
||||
|
||||
@@ -19,7 +19,6 @@ MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
|
||||
MANPAGES = $(MANPAGES1) $(MANPAGES3)
|
||||
PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
doc/community.html \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
@@ -28,9 +27,6 @@ HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMP
|
||||
doc/mailing-list-faq.html \
|
||||
doc/nut.html \
|
||||
doc/platform.html \
|
||||
$(SRC_PATH)/doc/bootstrap.min.css \
|
||||
$(SRC_PATH)/doc/style.min.css \
|
||||
$(SRC_PATH)/doc/default.css \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
@@ -106,7 +102,7 @@ DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
|
||||
|
||||
doc/doxy/html: TAG = DOXY
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
|
||||
$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(https://git.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
@command{git log} in the FFmpeg source directory, or browsing the
|
||||
online repository at @url{https://git.ffmpeg.org/ffmpeg}.
|
||||
online repository at @url{http://source.ffmpeg.org}.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
@file{MAINTAINERS} in the source code tree.
|
||||
|
||||
@@ -37,61 +37,6 @@ raw ADTS AAC or an MPEG-TS container to MP4A-LATM, to an FLV file, or
|
||||
to MOV/MP4 files and related formats such as 3GP or M4A. Please note
|
||||
that it is auto-inserted for MP4A-LATM and MOV/MP4 and related formats.
|
||||
|
||||
@section av1_metadata
|
||||
|
||||
Modify metadata embedded in an AV1 stream.
|
||||
|
||||
@table @option
|
||||
@item td
|
||||
Insert or remove temporal delimiter OBUs in all temporal units of the
|
||||
stream.
|
||||
|
||||
@table @samp
|
||||
@item insert
|
||||
Insert a TD at the beginning of every TU which does not already have one.
|
||||
@item remove
|
||||
Remove the TD from the beginning of every TU which has one.
|
||||
@end table
|
||||
|
||||
@item color_primaries
|
||||
@item transfer_characteristics
|
||||
@item matrix_coefficients
|
||||
Set the color description fields in the stream (see AV1 section 6.4.2).
|
||||
|
||||
@item color_range
|
||||
Set the color range in the stream (see AV1 section 6.4.2; note that
|
||||
this cannot be set for streams using BT.709 primaries, sRGB transfer
|
||||
characteristic and identity (RGB) matrix coefficients).
|
||||
@table @samp
|
||||
@item tv
|
||||
Limited range.
|
||||
@item pc
|
||||
Full range.
|
||||
@end table
|
||||
|
||||
@item chroma_sample_position
|
||||
Set the chroma sample location in the stream (see AV1 section 6.4.2).
|
||||
This can only be set for 4:2:0 streams.
|
||||
|
||||
@table @samp
|
||||
@item vertical
|
||||
Left position (matching the default in MPEG-2 and H.264).
|
||||
@item colocated
|
||||
Top-left position.
|
||||
@end table
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (@emph{time_scale / num_units_in_display_tick}) in
|
||||
the timing info in the sequence header.
|
||||
@item num_ticks_per_picture
|
||||
Set the number of ticks in each picture, to indicate that the stream
|
||||
has a fixed framerate. Ignored if @option{tick_rate} is not also set.
|
||||
|
||||
@item delete_padding
|
||||
Deletes Padding OBUs.
|
||||
|
||||
@end table
|
||||
|
||||
@section chomp
|
||||
|
||||
Remove zero padding at the end of a packet.
|
||||
@@ -103,9 +48,7 @@ DTS-HD.
|
||||
|
||||
@section dump_extra
|
||||
|
||||
Add extradata to the beginning of the filtered packets except when
|
||||
said packets already exactly begin with the extradata that is intended
|
||||
to be added.
|
||||
Add extradata to the beginning of the filtered packets.
|
||||
|
||||
@table @option
|
||||
@item freq
|
||||
@@ -122,7 +65,7 @@ add extradata to all packets
|
||||
@end table
|
||||
@end table
|
||||
|
||||
If not specified it is assumed @samp{k}.
|
||||
If not specified it is assumed @samp{e}.
|
||||
|
||||
For example the following @command{ffmpeg} command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
@@ -132,36 +75,6 @@ the header stored in extradata to the key packets:
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
@end example
|
||||
|
||||
@section dv_error_marker
|
||||
|
||||
Blocks in DV which are marked as damaged are replaced by blocks of the specified color.
|
||||
|
||||
@table @option
|
||||
@item color
|
||||
The color to replace damaged blocks by
|
||||
@item sta
|
||||
A 16 bit mask which specifies which of the 16 possible error status values are
|
||||
to be replaced by colored blocks. 0xFFFE is the default which replaces all non 0
|
||||
error status values.
|
||||
@table @samp
|
||||
@item ok
|
||||
No error, no concealment
|
||||
@item err
|
||||
Error, No concealment
|
||||
@item res
|
||||
Reserved
|
||||
@item notok
|
||||
Error or concealment
|
||||
@item notres
|
||||
Not reserved
|
||||
@item Aa, Ba, Ca, Ab, Bb, Cb, A, B, C, a, b, erri, erru
|
||||
The specific error status code
|
||||
@end table
|
||||
see page 44-46 or section 5.5 of
|
||||
@url{http://web.archive.org/web/20060927044735/http://www.smpte.org/smpte_store/standards/pdf/s314m.pdf}
|
||||
|
||||
@end table
|
||||
|
||||
@section eac3_core
|
||||
|
||||
Extract the core from a E-AC-3 stream, dropping extra channels.
|
||||
@@ -247,20 +160,12 @@ Modify metadata embedded in an H.264 stream.
|
||||
Insert or remove AUD NAL units in all access units of the stream.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
@item sample_aspect_ratio
|
||||
Set the sample aspect ratio of the stream in the VUI parameters.
|
||||
See H.264 table E-1.
|
||||
|
||||
@item overscan_appropriate_flag
|
||||
Set whether the stream is suitable for display using overscan
|
||||
or not (see H.264 section E.2.1).
|
||||
|
||||
@item video_format
|
||||
@item video_full_range_flag
|
||||
@@ -278,7 +183,7 @@ Set the chroma sample location in the stream (see H.264 section
|
||||
E.2.1 and figure E-1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (time_scale / num_units_in_tick) in the VUI
|
||||
Set the tick rate (num_units_in_tick / time_scale) in the VUI
|
||||
parameters. This is the smallest time unit representable in the
|
||||
stream, and in many cases represents the field rate of the stream
|
||||
(double the frame rate).
|
||||
@@ -287,11 +192,6 @@ Set whether the stream has fixed framerate - typically this indicates
|
||||
that the framerate is exactly half the tick rate, but the exact
|
||||
meaning is dependent on interlacing and the picture structure (see
|
||||
H.264 section E.2.1 and table E-6).
|
||||
@item zero_new_constraint_set_flags
|
||||
Zero constraint_set4_flag and constraint_set5_flag in the SPS. These
|
||||
bits were reserved in a previous version of the H.264 spec, and thus
|
||||
some hardware decoders require these to be zero. The result of zeroing
|
||||
this is still a valid bitstream.
|
||||
|
||||
@item crop_left
|
||||
@item crop_right
|
||||
@@ -315,46 +215,6 @@ insert the string ``hello'' associated with the given UUID.
|
||||
@item delete_filler
|
||||
Deletes both filler NAL units and filler SEI messages.
|
||||
|
||||
@item display_orientation
|
||||
Insert, extract or remove Display orientation SEI messages.
|
||||
See H.264 section D.1.27 and D.2.27 for syntax and semantics.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@item extract
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
Insert mode works in conjunction with @code{rotate} and @code{flip} options.
|
||||
Any pre-existing Display orientation messages will be removed in insert or remove mode.
|
||||
Extract mode attaches the display matrix to the packet as side data.
|
||||
|
||||
@item rotate
|
||||
Set rotation in display orientation SEI (anticlockwise angle in degrees).
|
||||
Range is -360 to +360. Default is NaN.
|
||||
|
||||
@item flip
|
||||
Set flip in display orientation SEI.
|
||||
|
||||
@table @samp
|
||||
@item horizontal
|
||||
@item vertical
|
||||
@end table
|
||||
|
||||
Default is unset.
|
||||
|
||||
@item level
|
||||
Set the level in the SPS. Refer to H.264 section A.3 and tables A-1
|
||||
to A-5.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{4.2}), a
|
||||
level_idc value (for example, @samp{42}), or the special name @samp{auto}
|
||||
indicating that the filter should attempt to guess the level from the
|
||||
input stream properties.
|
||||
|
||||
@end table
|
||||
|
||||
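For instance (an illustrative command, assuming an H.264 input), a display orientation SEI specifying a 90 degree anticlockwise rotation can be inserted while stream-copying the video:
@example
ffmpeg -i INPUT.mp4 -c copy -bsf:v h264_metadata=display_orientation=insert:rotate=90 OUTPUT.mp4
@end example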
@section h264_mp4toannexb
|
||||
@@ -382,6 +242,9 @@ This applies a specific fixup to some Blu-ray streams which contain
|
||||
redundant PPSs modifying irrelevant parameters of the stream which
|
||||
confuse other transformations which require correct extradata.
|
||||
|
||||
A new single global PPS is created, and all of the redundant PPSs
|
||||
within the stream are removed.
|
||||
|
||||
@section hevc_metadata
|
||||
|
||||
Modify metadata embedded in an HEVC stream.
|
||||
@@ -414,8 +277,8 @@ Set the chroma sample location in the stream (see H.265 section
|
||||
E.3.1 and figure E.1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate in the VPS and VUI parameters (time_scale /
|
||||
num_units_in_tick). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
Set the tick rate in the VPS and VUI parameters (num_units_in_tick /
|
||||
time_scale). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
set a constant framerate in the stream. Note that it is likely to be
|
||||
overridden by container parameters when the stream is in a container.
|
||||
|
||||
@@ -434,15 +297,6 @@ will replace the current ones if the stream is already cropped.
|
||||
These fields are set in pixels. Note that some sizes may not be
|
||||
representable if the chroma is subsampled (H.265 section 7.4.3.2.1).
|
||||
|
||||
@item level
|
||||
Set the level in the VPS and SPS. See H.265 section A.4 and tables
|
||||
A.6 and A.7.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{5.1}), a
|
||||
@emph{general_level_idc} value (for example, @samp{153} for level 5.1),
|
||||
or the special name @samp{auto} indicating that the filter should
|
||||
attempt to guess the level from the input stream properties.
|
||||
|
||||
@end table
|
||||
|
||||
@section hevc_mp4toannexb
|
||||
@@ -596,185 +450,25 @@ container. Can be used for fuzzing or testing error resilience/concealment.
|
||||
Parameters:
|
||||
@table @option
|
||||
@item amount
|
||||
Accepts an expression whose evaluation per-packet determines how often bytes in that
|
||||
packet will be modified. A value below 0 will result in a variable frequency.
|
||||
Default is 0 which results in no modification. However, if neither amount nor drop is specified,
|
||||
amount will be set to @var{-1}. See below for accepted variables.
|
||||
@item drop
|
||||
Accepts an expression evaluated per-packet whose value determines whether that packet is dropped.
|
||||
Evaluation to a positive value results in the packet being dropped. Evaluation to a negative
|
||||
value results in a variable chance of it being dropped, roughly inverse in proportion to the magnitude
|
||||
of the value. Default is 0 which results in no drops. See below for accepted variables.
|
||||
A numeral string, whose value is related to how often output bytes will
|
||||
be modified. Therefore, values below or equal to 0 are forbidden, and
|
||||
the lower the more frequent bytes will be modified, with 1 meaning
|
||||
every byte is modified.
|
||||
@item dropamount
|
||||
Accepts a non-negative integer, which assigns a variable chance of it being dropped, roughly inverse
|
||||
in proportion to the value. Default is 0 which results in no drops. This option is kept for backwards
|
||||
compatibility and is equivalent to setting drop to a negative value with the same magnitude
|
||||
i.e. @code{dropamount=4} is the same as @code{drop=-4}. Ignored if drop is also specified.
|
||||
A numeral string, whose value is related to how often packets will be dropped.
|
||||
Therefore, values below or equal to 0 are forbidden, and the lower the more
|
||||
frequent packets will be dropped, with 1 meaning every packet is dropped.
|
||||
@end table
|
||||
|
||||
Both @code{amount} and @code{drop} accept expressions containing the following variables:
|
||||
|
||||
@table @samp
|
||||
@item n
|
||||
The index of the packet, starting from zero.
|
||||
@item tb
|
||||
The timebase for packet timestamps.
|
||||
@item pts
|
||||
Packet presentation timestamp.
|
||||
@item dts
|
||||
Packet decoding timestamp.
|
||||
@item nopts
|
||||
Constant representing AV_NOPTS_VALUE.
|
||||
@item startpts
|
||||
First non-AV_NOPTS_VALUE PTS seen in the stream.
|
||||
@item startdts
|
||||
First non-AV_NOPTS_VALUE DTS seen in the stream.
|
||||
@item duration
|
||||
@itemx d
|
||||
Packet duration, in timebase units.
|
||||
@item pos
|
||||
Packet position in input; may be -1 when unknown or not set.
|
||||
@item size
|
||||
Packet size, in bytes.
|
||||
@item key
|
||||
Whether packet is marked as a keyframe.
|
||||
@item state
|
||||
A pseudo random integer, primarily derived from the content of packet payload.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
Apply modification to every byte but don't drop any packets.
|
||||
The following example applies the modification to every byte but does not drop
|
||||
any packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
|
||||
@end example
|
||||
|
||||
Drop every video packet not marked as a keyframe after timestamp 30s but do not
|
||||
modify any of the remaining packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
|
||||
@end example
|
||||
|
||||
Drop one second of audio every 10 seconds and add some random noise to the rest.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
@end example
|
||||
|
||||
@section null
|
||||
This bitstream filter passes the packets through unchanged.
|
||||
|
||||
@section pcm_rechunk
|
||||
|
||||
Repacketize PCM audio to a fixed number of samples per packet or a fixed packet
|
||||
rate per second. This is similar to the @ref{asetnsamples,,asetnsamples audio
|
||||
filter,ffmpeg-filters} but works on audio packets instead of audio frames.
|
||||
|
||||
@table @option
|
||||
@item nb_out_samples, n
|
||||
Set the number of samples per each output audio packet. The number is intended
|
||||
as the number of samples @emph{per each channel}. Default value is 1024.
|
||||
|
||||
@item pad, p
|
||||
If set to 1, the filter will pad the last audio packet with silence, so that it
|
||||
will contain the same number of samples (or roughly the same number of samples,
|
||||
see @option{frame_rate}) as the previous ones. Default value is 1.
|
||||
|
||||
@item frame_rate, r
|
||||
This option makes the filter output a fixed number of packets per second instead
|
||||
of a fixed number of samples per packet. If the audio sample rate is not
|
||||
divisible by the frame rate then the number of samples will not be constant but
|
||||
will vary slightly so that each packet will start as close to the frame
|
||||
boundary as possible. Using this option has precedence over @option{nb_out_samples}.
|
||||
@end table
|
||||
|
||||
You can generate the well known 1602-1601-1602-1601-1602 pattern of 48kHz audio
|
||||
for NTSC frame rate using the @option{frame_rate} option.
|
||||
@example
|
||||
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc -
|
||||
@end example
|
||||
|
||||
@section pgs_frame_merge
|
||||
|
||||
Merge a sequence of PGS Subtitle segments ending with an "end of display set"
|
||||
segment into a single packet.
|
||||
|
||||
This is required by some containers that support PGS subtitles
|
||||
(muxer @code{matroska}).
|
||||
|
||||
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
|
||||
@table @option
|
||||
@item color_primaries
|
||||
Set the color primaries.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same color primaries property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item bt470bg
|
||||
BT601 625
|
||||
|
||||
@item smpte170m
|
||||
BT601 525
|
||||
|
||||
@item bt2020
|
||||
@item smpte431
|
||||
DCI P3
|
||||
|
||||
@item smpte432
|
||||
P3 D65
|
||||
|
||||
@end table
|
||||
|
||||
@item transfer_characteristics
|
||||
Set the color transfer.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same transfer characteristics property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
BT 601, BT 709, BT 2020
|
||||
@item smpte2084
|
||||
SMPTE ST 2084
|
||||
@item arib-std-b67
|
||||
ARIB STD-B67
|
||||
@end table
|
||||
|
||||
|
||||
@item matrix_coefficients
|
||||
Set the matrix coefficient.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same colorspace property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item smpte170m
|
||||
BT 601
|
||||
|
||||
@item bt2020nc
|
||||
@end table
|
||||
@end table
|
||||
|
||||
Set Rec709 colorspace for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov
|
||||
@end example
|
||||
|
||||
Set Hybrid Log-Gamma parameters for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt2020:color_trc=arib-std-b67:colorspace=bt2020nc output.mov
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
Remove extradata from packets.
|
||||
@@ -797,91 +491,6 @@ Remove extradata from all frames.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section setts
|
||||
Set PTS and DTS in packets.
|
||||
|
||||
It accepts the following parameters:
|
||||
@table @option
|
||||
@item ts
|
||||
@item pts
|
||||
@item dts
|
||||
Set expressions for PTS, DTS or both.
|
||||
@item duration
|
||||
Set expression for duration.
|
||||
@item time_base
|
||||
Set output time base.
|
||||
@end table
|
||||
|
||||
The expressions are evaluated through the eval API and can contain the following
|
||||
constants:
|
||||
|
||||
@table @option
|
||||
@item N
|
||||
The count of the input packet. Starting from 0.
|
||||
|
||||
@item TS
|
||||
The demux timestamp in input in case of @code{ts} or @code{dts} option or presentation
|
||||
timestamp in case of @code{pts} option.
|
||||
|
||||
@item POS
|
||||
The original position of the packet in the file, or undefined if it is
unavailable for the current packet.
|
||||
|
||||
@item DTS
|
||||
The demux timestamp in input.
|
||||
|
||||
@item PTS
|
||||
The presentation timestamp in input.
|
||||
|
||||
@item DURATION
|
||||
The duration in input.
|
||||
|
||||
@item STARTDTS
|
||||
The DTS of the first packet.
|
||||
|
||||
@item STARTPTS
|
||||
The PTS of the first packet.
|
||||
|
||||
@item PREV_INDTS
|
||||
The previous input DTS.
|
||||
|
||||
@item PREV_INPTS
|
||||
The previous input PTS.
|
||||
|
||||
@item PREV_INDURATION
|
||||
The previous input duration.
|
||||
|
||||
@item PREV_OUTDTS
|
||||
The previous output DTS.
|
||||
|
||||
@item PREV_OUTPTS
|
||||
The previous output PTS.
|
||||
|
||||
@item PREV_OUTDURATION
|
||||
The previous output duration.
|
||||
|
||||
@item NEXT_DTS
|
||||
The next input DTS.
|
||||
|
||||
@item NEXT_PTS
|
||||
The next input PTS.
|
||||
|
||||
@item NEXT_DURATION
|
||||
The next input duration.
|
||||
|
||||
@item TB
|
||||
The timebase of the stream the packet belongs to.
|
||||
|
||||
@item TB_OUT
|
||||
The output timebase.
|
||||
|
||||
@item SR
|
||||
The sample rate of the stream the packet belongs to.
|
||||
|
||||
@item NOPTS
|
||||
The AV_NOPTS_VALUE constant.
|
||||
@end table
|
||||
|
||||
@anchor{text2movsub}
|
||||
@section text2movsub
|
||||
|
||||
@@ -896,40 +505,7 @@ Log trace output containing all syntax elements in the coded stream
|
||||
headers (everything above the level of individual coded blocks).
|
||||
This can be useful for debugging low-level stream issues.
|
||||
|
||||
Supports AV1, H.264, H.265, (M)JPEG, MPEG-2 and VP9, but depending
|
||||
on the build only a subset of these may be available.
|
||||
|
||||
@section truehd_core
|
||||
|
||||
Extract the core from a TrueHD stream, dropping ATMOS data.
|
||||
|
||||
@section vp9_metadata
|
||||
|
||||
Modify metadata embedded in a VP9 stream.
|
||||
|
||||
@table @option
|
||||
@item color_space
|
||||
Set the color space value in the frame header. Note that any frame
|
||||
set to RGB will be implicitly set to PC range and that RGB is
|
||||
incompatible with profiles 0 and 2.
|
||||
@table @samp
|
||||
@item unknown
|
||||
@item bt601
|
||||
@item bt709
|
||||
@item smpte170
|
||||
@item smpte240
|
||||
@item bt2020
|
||||
@item rgb
|
||||
@end table
|
||||
|
||||
@item color_range
|
||||
Set the color range value in the frame header. Note that any value
|
||||
imposed by the color space will take precedence over this value.
|
||||
@table @samp
|
||||
@item tv
|
||||
@item pc
|
||||
@end table
|
||||
@end table
|
||||
Supports H.264, H.265 and MPEG-2.
|
||||
|
||||
@section vp9_superframe
|
||||
|
||||
|
||||
doc/bootstrap.min.css (vendored, 2 changed lines): file diff suppressed because one or more lines are too long.
@@ -36,11 +36,11 @@ install
|
||||
examples
|
||||
Build all examples located in doc/examples.
|
||||
|
||||
checkheaders
|
||||
Check headers dependencies.
|
||||
libavformat/output-example
|
||||
Build the libavformat basic example.
|
||||
|
||||
alltools
|
||||
Build all tools in tools directory.
|
||||
libswscale/swscale-test
|
||||
Build the swscale self-test (useful also as an example).
|
||||
|
||||
config
|
||||
Reconfigure the project with the current configuration.
|
||||
@@ -48,8 +48,6 @@ config
|
||||
tools/target_dec_<decoder>_fuzzer
|
||||
Build fuzzer to fuzz the specified decoder.
|
||||
|
||||
tools/target_bsf_<filter>_fuzzer
|
||||
Build fuzzer to fuzz the specified bitstream filter.
|
||||
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
|
||||
doc/codecs.texi (363 changed lines):
@@ -50,13 +50,11 @@ Use internal 2pass ratecontrol in first pass mode.
|
||||
Use internal 2pass ratecontrol in second pass mode.
|
||||
@item gray
|
||||
Only decode/encode grayscale.
|
||||
@item emu_edge
|
||||
Do not draw edges.
|
||||
@item psnr
|
||||
Set error[?] variables during encoding.
|
||||
@item truncated
|
||||
Input bitstream might be randomly truncated.
|
||||
@item drop_changed
|
||||
Don't output frames whose parameters differ from first decoded frame in stream.
|
||||
Error AVERROR_INPUT_CHANGED is returned when a frame is dropped.
|
||||
|
||||
@item ildct
|
||||
Use interlaced DCT.
|
||||
@@ -70,14 +68,50 @@ This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item aic
|
||||
Apply H263 advanced intra coding / mpeg4 ac prediction.
|
||||
@item cbp
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item qprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item ilme
|
||||
Apply interlaced motion estimation.
|
||||
@item cgop
|
||||
Use closed gop.
|
||||
@item output_corrupt
|
||||
Output even potentially corrupted frames.
|
||||
@end table
|
||||
|
||||
@item me_method @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item zero
|
||||
zero motion estimation (fastest)
|
||||
@item full
|
||||
full motion estimation (slowest)
|
||||
@item epzs
|
||||
EPZS motion estimation (default)
|
||||
@item esa
|
||||
esa motion estimation (alias for full)
|
||||
@item tesa
|
||||
tesa motion estimation
|
||||
@item dia
|
||||
dia motion estimation (alias for epzs)
|
||||
@item log
|
||||
log motion estimation
|
||||
@item phods
|
||||
phods motion estimation
|
||||
@item x1
|
||||
X1 motion estimation
|
||||
@item hex
|
||||
hex motion estimation
|
||||
@item umh
|
||||
umh motion estimation
|
||||
@item iter
|
||||
iter motion estimation
|
||||
@end table
|
||||
|
||||
@item extradata_size @var{integer}
|
||||
Set extradata size.
|
||||
|
||||
@item time_base @var{rational number}
|
||||
Set codec time base.
|
||||
|
||||
@@ -144,6 +178,24 @@ Default value is 0.
|
||||
@item b_qfactor @var{float} (@emph{encoding,video})
|
||||
Set qp factor between P and B frames.
|
||||
|
||||
@item rc_strategy @var{integer} (@emph{encoding,video})
|
||||
Set ratecontrol method.
|
||||
|
||||
@item b_strategy @var{integer} (@emph{encoding,video})
|
||||
Set strategy to choose between I/P/B-frames.
|
||||
|
||||
@item ps @var{integer} (@emph{encoding,video})
|
||||
Set RTP payload size in bytes.
|
||||
|
||||
@item mv_bits @var{integer}
|
||||
@item header_bits @var{integer}
|
||||
@item i_tex_bits @var{integer}
|
||||
@item p_tex_bits @var{integer}
|
||||
@item i_count @var{integer}
|
||||
@item p_count @var{integer}
|
||||
@item skip_count @var{integer}
|
||||
@item misc_bits @var{integer}
|
||||
@item frame_bits @var{integer}
|
||||
@item codec_tag @var{integer}
|
||||
@item bug @var{flags} (@emph{decoding,video})
|
||||
Work around encoder bugs that are not automatically detected.
|
||||
@@ -152,6 +204,8 @@ Possible values:
|
||||
@table @samp
|
||||
@item autodetect
|
||||
|
||||
@item old_msmpeg4
|
||||
some old lavc generated msmpeg4v3 files (no autodetection)
|
||||
@item xvid_ilace
|
||||
Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
@item ump4
|
||||
@@ -160,6 +214,8 @@ Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
padding bug (autodetected)
|
||||
@item amv
|
||||
|
||||
@item ac_vlc
|
||||
illegal vlc bug (autodetected per fourcc)
|
||||
@item qpel_chroma
|
||||
|
||||
@item std_qpel
|
||||
@@ -180,6 +236,14 @@ Workaround various bugs in microsoft broken decoders.
|
||||
truncated frames
|
||||
@end table
|
||||
|
||||
@item lelim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for luminance (negative
|
||||
values also consider DC coefficient).
|
||||
|
||||
@item celim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for chrominance (negative
|
||||
values also consider the DC coefficient).
|
||||
|
||||
@item strict @var{integer} (@emph{decoding/encoding,audio,video})
|
||||
Specify how strictly to follow the standards.
|
||||
|
||||
@@ -233,8 +297,29 @@ consider things that a sane encoder should not do as an error
|
||||
|
||||
@item block_align @var{integer}
|
||||
|
||||
@item mpeg_quant @var{integer} (@emph{encoding,video})
|
||||
Use MPEG quantizers instead of H.263.
|
||||
|
||||
@item qsquish @var{float} (@emph{encoding,video})
|
||||
How to keep quantizer between qmin and qmax (0 = clip, 1 = use
|
||||
differentiable function).
|
||||
|
||||
@item rc_qmod_amp @var{float} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_qmod_freq @var{integer} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_override_count @var{integer}
|
||||
|
||||
@item rc_eq @var{string} (@emph{encoding,video})
|
||||
Set rate control equation. When computing the expression, besides the
|
||||
standard functions defined in the section 'Expression Evaluation', the
|
||||
following functions are available: bits2qp(bits), qp2bits(qp). Also
|
||||
the following constants are available: iTex pTex tex mv fCode iCount
|
||||
mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
|
||||
avgTex.
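
For illustration, a hypothetical invocation passing the default-style equation to an mpegvideo-family encoder could look like the following (this is a sketch; @file{input.avi}, the output name and the bitrate are placeholders, not part of the original documentation):
@example
ffmpeg -i input.avi -c:v mpeg4 -b:v 1000k -rc_eq 'tex^qComp' output.avi
@end example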
|
||||
|
||||
@item maxrate @var{integer} (@emph{encoding,audio,video})
|
||||
Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
|
||||
|
||||
@@ -245,12 +330,18 @@ encode. It is of little use elsewise.
|
||||
@item bufsize @var{integer} (@emph{encoding,audio,video})
|
||||
Set ratecontrol buffer size (in bits).
|
||||
|
||||
@item rc_buf_aggressivity @var{float} (@emph{encoding,video})
|
||||
Currently useless.
|
||||
|
||||
@item i_qfactor @var{float} (@emph{encoding,video})
|
||||
Set QP factor between P and I frames.
|
||||
|
||||
@item i_qoffset @var{float} (@emph{encoding,video})
|
||||
Set QP offset between P and I frames.
|
||||
|
||||
@item rc_init_cplx @var{float} (@emph{encoding,video})
|
||||
Set initial complexity for 1-pass encoding.
|
||||
|
||||
@item dct @var{integer} (@emph{encoding,video})
|
||||
Set DCT algorithm.
|
||||
|
||||
@@ -315,7 +406,11 @@ Automatically pick a IDCT compatible with the simple one
|
||||
|
||||
@item simpleneon
|
||||
|
||||
@item xvid
|
||||
@item simplealpha
|
||||
|
||||
@item ipp
|
||||
|
||||
@item xvidmmx
|
||||
|
||||
@item faani
|
||||
floating point AAN IDCT
|
||||
@@ -338,6 +433,19 @@ favor predicting from the previous frame instead of the current
|
||||
|
||||
@item bits_per_coded_sample @var{integer}
|
||||
|
||||
@item pred @var{integer} (@emph{encoding,video})
|
||||
Set prediction method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item left
|
||||
|
||||
@item plane
|
||||
|
||||
@item median
|
||||
|
||||
@end table
|
||||
|
||||
@item aspect @var{rational number} (@emph{encoding,video})
|
||||
Set sample aspect ratio.
|
||||
|
||||
@@ -532,28 +640,13 @@ noise preserving sum of squared differences
|
||||
|
||||
@item dia_size @var{integer} (@emph{encoding,video})
|
||||
Set diamond type & size for motion estimation.
|
||||
@table @samp
|
||||
@item (1024, INT_MAX)
|
||||
full motion estimation (slowest)
|
||||
@item (768, 1024]
|
||||
umh motion estimation
|
||||
@item (512, 768]
|
||||
hex motion estimation
|
||||
@item (256, 512]
|
||||
l2s diamond motion estimation
|
||||
@item [2,256]
|
||||
var diamond motion estimation
|
||||
@item (-1, 2)
|
||||
small diamond motion estimation
|
||||
@item -1
|
||||
funny diamond motion estimation
|
||||
@item (INT_MIN, -1)
|
||||
sab diamond motion estimation
|
||||
@end table
|
||||
|
||||
@item last_pred @var{integer} (@emph{encoding,video})
|
||||
Set amount of motion predictors from the previous frame.
|
||||
|
||||
@item preme @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation.
|
||||
|
||||
@item precmp @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation compare function.
|
||||
|
||||
@@ -597,11 +690,40 @@ Set diamond type & size for motion estimation pre-pass.
|
||||
@item subq @var{integer} (@emph{encoding,video})
|
||||
Set sub pel motion estimation quality.
|
||||
|
||||
@item dtg_active_format @var{integer}
|
||||
|
||||
@item me_range @var{integer} (@emph{encoding,video})
|
||||
Set the motion vector range limit (1023 for DivX player).
|
||||
|
||||
@item ibias @var{integer} (@emph{encoding,video})
|
||||
Set intra quant bias.
|
||||
|
||||
@item pbias @var{integer} (@emph{encoding,video})
|
||||
Set inter quant bias.
|
||||
|
||||
@item color_table_id @var{integer}
|
||||
|
||||
@item global_quality @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
@item coder @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item vlc
|
||||
variable length coder / huffman coder
|
||||
@item ac
|
||||
arithmetic coder
|
||||
@item raw
|
||||
raw (no encoding)
|
||||
@item rle
|
||||
run-length coder
|
||||
@item deflate
|
||||
deflate-based coder
|
||||
@end table
|
||||
|
||||
@item context @var{integer} (@emph{encoding,video})
|
||||
Set context model.
|
||||
|
||||
@item slice_flags @var{integer}
|
||||
|
||||
@item mbd @var{integer} (@emph{encoding,video})
|
||||
@@ -617,16 +739,32 @@ use fewest bits
|
||||
use best rate distortion
|
||||
@end table
|
||||
|
||||
@item stream_codec_tag @var{integer}
|
||||
|
||||
@item sc_threshold @var{integer} (@emph{encoding,video})
|
||||
Set scene change threshold.
|
||||
|
||||
@item lmin @var{integer} (@emph{encoding,video})
|
||||
Set min lagrange factor (VBR).
|
||||
|
||||
@item lmax @var{integer} (@emph{encoding,video})
|
||||
Set max lagrange factor (VBR).
|
||||
|
||||
@item nr @var{integer} (@emph{encoding,video})
|
||||
Set noise reduction.
|
||||
|
||||
@item rc_init_occupancy @var{integer} (@emph{encoding,video})
|
||||
Set number of bits which should be loaded into the rc buffer before
|
||||
decoding starts.
|
||||
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item fast
|
||||
Allow non-spec-compliant speedup tricks.
|
||||
@item sgop
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item noout
|
||||
Skip bitstream encoding.
|
||||
@item ignorecrop
|
||||
@@ -637,34 +775,17 @@ Place global headers at every keyframe instead of in extradata.
|
||||
Frame data might be split into multiple chunks.
|
||||
@item showall
|
||||
Show all frames before the first keyframe.
|
||||
@item skiprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c} and the sketch after this table.
|
||||
@item skip_manual
|
||||
Do not skip samples and export skip information as frame side data.
|
||||
@item ass_ro_flush_noop
|
||||
Do not reset ASS ReadOrder field on flush.
|
||||
@item icc_profiles
|
||||
Generate/parse embedded ICC profiles from/to colorimetry tags.
|
||||
@end table
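
As a sketch of the @samp{export_mvs} flag listed above (assuming @file{input.mp4} as a placeholder input and a build that includes the @code{codecview} filter), the exported motion vectors can be visualized with:
@example
ffmpeg -flags2 +export_mvs -i input.mp4 -vf codecview=mv=pf+bf+bb output.mp4
@end example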
|
||||
|
||||
@item export_side_data @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
@item error @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item prft
|
||||
Export encoder Producer Reference Time into packet side-data (see @code{AV_PKT_DATA_PRFT})
|
||||
for codecs that support it.
|
||||
@item venc_params
|
||||
Export video encoding parameters through frame side data (see @code{AV_FRAME_DATA_VIDEO_ENC_PARAMS})
|
||||
for codecs that support it. At present, those are H.264 and VP9.
|
||||
@item film_grain
|
||||
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
|
||||
Supported at present by AV1 decoders.
|
||||
@end table
|
||||
@item qns @var{integer} (@emph{encoding,video})
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
|
||||
@item threads @var{integer} (@emph{decoding/encoding,video})
|
||||
Set the number of threads to be used, in case the selected codec
|
||||
@@ -678,6 +799,12 @@ automatically select the number of threads to set
|
||||
|
||||
Default value is @samp{auto}.
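
For example, a hypothetical command that overrides the automatic choice and uses four threads (assuming a libx264-enabled build; file names are placeholders):
@example
ffmpeg -i input.mp4 -c:v libx264 -threads 4 output.mp4
@end example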
|
||||
|
||||
@item me_threshold @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation threshold.
|
||||
|
||||
@item mb_threshold @var{integer} (@emph{encoding,video})
|
||||
Set macroblock threshold.
|
||||
|
||||
@item dc @var{integer} (@emph{encoding,video})
|
||||
Set intra_dc_precision.
|
||||
|
||||
@@ -692,29 +819,122 @@ Set number of macroblock rows at the bottom which are skipped.
|
||||
|
||||
@item profile @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set encoder codec profile. Default value is @samp{unknown}. Encoder specific
|
||||
profiles are documented in the relevant encoder documentation.
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@item aac_main
|
||||
|
||||
@item aac_low
|
||||
|
||||
@item aac_ssr
|
||||
|
||||
@item aac_ltp
|
||||
|
||||
@item aac_he
|
||||
|
||||
@item aac_he_v2
|
||||
|
||||
@item aac_ld
|
||||
|
||||
@item aac_eld
|
||||
|
||||
@item mpeg2_aac_low
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
||||
@item dts_96_24
|
||||
|
||||
@item dts_hd_hra
|
||||
|
||||
@item dts_hd_ma
|
||||
|
||||
@end table
|
||||
|
||||
@item level @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set the encoder level. This level depends on the specific codec, and
|
||||
might correspond to the profile level. It is set by default to
|
||||
@samp{unknown}.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@end table
|
||||
|
||||
@item lowres @var{integer} (@emph{decoding,audio,video})
|
||||
Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
|
||||
|
||||
@item skip_threshold @var{integer} (@emph{encoding,video})
|
||||
Set frame skip threshold.
|
||||
|
||||
@item skip_factor @var{integer} (@emph{encoding,video})
|
||||
Set frame skip factor.
|
||||
|
||||
@item skip_exp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip exponent.
|
||||
Negative values behave identically to the corresponding positive ones, except
|
||||
that the score is normalized.
|
||||
Positive values exist primarily for compatibility reasons and are not so useful.
|
||||
|
||||
@item skipcmp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip compare function.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item w53
|
||||
5/3 wavelet, only used in snow
|
||||
@item w97
|
||||
9/7 wavelet, only used in snow
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
|
||||
@item border_mask @var{float} (@emph{encoding,video})
|
||||
Increase the quantizer for macroblocks close to borders.
|
||||
|
||||
@item mblmin @var{integer} (@emph{encoding,video})
|
||||
Set min macroblock lagrange factor (VBR).
|
||||
|
||||
@item mblmax @var{integer} (@emph{encoding,video})
|
||||
Set max macroblock lagrange factor (VBR).
|
||||
|
||||
@item mepc @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation bitrate penalty compensation (1.0 = 256).
|
||||
|
||||
@item skip_loop_filter @var{integer} (@emph{decoding,video})
|
||||
@item skip_idct @var{integer} (@emph{decoding,video})
|
||||
@item skip_frame @var{integer} (@emph{decoding,video})
|
||||
@@ -742,9 +962,6 @@ Discard all bidirectional frames.
|
||||
@item nokey
|
||||
Discard all frames except keyframes.
|
||||
|
||||
@item nointra
|
||||
Discard all frames except I frames.
|
||||
|
||||
@item all
|
||||
Discard all frames.
|
||||
@end table
|
||||
@@ -754,17 +971,38 @@ Default value is @samp{default}.
|
||||
@item bidir_refine @var{integer} (@emph{encoding,video})
|
||||
Refine the two motion vectors used in bidirectional macroblocks.
|
||||
|
||||
@item brd_scale @var{integer} (@emph{encoding,video})
|
||||
Downscale frames for dynamic B-frame decision.
|
||||
|
||||
@item keyint_min @var{integer} (@emph{encoding,video})
|
||||
Set minimum interval between IDR-frames.
|
||||
|
||||
@item refs @var{integer} (@emph{encoding,video})
|
||||
Set reference frames to consider for motion compensation.
|
||||
|
||||
@item chromaoffset @var{integer} (@emph{encoding,video})
|
||||
Set chroma qp offset from luma.
|
||||
|
||||
@item trellis @var{integer} (@emph{encoding,audio,video})
|
||||
Set rate-distortion optimal quantization.
|
||||
|
||||
@item sc_factor @var{integer} (@emph{encoding,video})
|
||||
Set value multiplied by qscale for each frame and added to
|
||||
scene_change_score.
|
||||
|
||||
@item mv0_threshold @var{integer} (@emph{encoding,video})
|
||||
@item b_sensitivity @var{integer} (@emph{encoding,video})
|
||||
Adjust sensitivity of b_frame_strategy 1.
|
||||
|
||||
@item compression_level @var{integer} (@emph{encoding,audio,video})
|
||||
@item min_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item max_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item timecode_frame_start @var{integer} (@emph{encoding,video})
|
||||
Set GOP timecode frame start number, in non-drop-frame format.
|
||||
|
||||
@item request_channels @var{integer} (@emph{decoding,audio})
|
||||
Set desired number of audio channels.
|
||||
|
||||
@item bits_per_raw_sample @var{integer}
|
||||
@item channel_layout @var{integer} (@emph{decoding/encoding,audio})
|
||||
|
||||
@@ -778,6 +1016,7 @@ Possible values:
|
||||
@end table
|
||||
@item rc_max_vbv_use @var{float} (@emph{encoding,video})
|
||||
@item rc_min_vbv_use @var{float} (@emph{encoding,video})
|
||||
@item ticks_per_frame @var{integer} (@emph{decoding/encoding,audio,video})
|
||||
|
||||
@item color_primaries @var{integer} (@emph{decoding/encoding,video})
|
||||
Possible values:
|
||||
@@ -877,12 +1116,6 @@ BT.2020 NCL
|
||||
BT.2020 CL
|
||||
@item smpte2085
|
||||
SMPTE 2085
|
||||
@item chroma-derived-nc
|
||||
Chroma-derived NCL
|
||||
@item chroma-derived-c
|
||||
Chroma-derived CL
|
||||
@item ictcp
|
||||
ICtCp
|
||||
@end table
|
||||
|
||||
@item color_range @var{integer} (@emph{decoding/encoding,video})
|
||||
@@ -892,11 +1125,9 @@ Possible values:
|
||||
@table @samp
|
||||
@item tv
|
||||
@item mpeg
|
||||
@item limited
|
||||
MPEG (219*2^(n-8))
|
||||
@item pc
|
||||
@item jpeg
|
||||
@item full
|
||||
JPEG (2^n-1)
|
||||
@end table
|
||||
|
||||
@@ -1005,7 +1236,7 @@ instead of alpha. Default is 0.
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
stream parameters.
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
|
||||
@@ -1,175 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Community
|
||||
@titlepage
|
||||
@center @titlefont{Community}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@anchor{Organisation}
|
||||
@chapter Organisation
|
||||
|
||||
The FFmpeg project is organized through a community working on global consensus.
|
||||
|
||||
Decisions are taken by the ensemble of active members through voting, and are aided by two committees.
|
||||
|
||||
@anchor{General Assembly}
|
||||
@chapter General Assembly
|
||||
|
||||
The ensemble of active members is called the General Assembly (GA).
|
||||
|
||||
The General Assembly is sovereign and legitimate for all its decisions regarding the FFmpeg project.
|
||||
|
||||
The General Assembly is made up of active contributors.
|
||||
|
||||
Contributors are considered "active contributors" if they have pushed more than 20 patches in the last 36 months in the main FFmpeg repository, or if they have been voted in by the GA.
|
||||
|
||||
Additional members are added to the General Assembly through a vote after proposal by a member of the General Assembly. They are part of the GA for two years, after which they need a confirmation by the GA.
|
||||
|
||||
A script to generate the current members of the general assembly (minus members voted in) can be found in `tools/general_assembly.pl`.
|
||||
|
||||
@anchor{Voting}
|
||||
@chapter Voting
|
||||
|
||||
Voting is done using a ranked voting system, currently running on https://vote.ffmpeg.org/ .
|
||||
|
||||
Majority vote means more than 50% of the expressed ballots.
|
||||
|
||||
@anchor{Technical Committee}
|
||||
@chapter Technical Committee
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project. They will consider the merits of all the positions, judge them and make a decision.
|
||||
|
||||
The TC resolves technical conflicts but is not a technical steering committee.
|
||||
|
||||
Decisions by the TC are binding for all the contributors.
|
||||
|
||||
Decisions made by the TC can be re-opened after 1 year or by a majority vote of the General Assembly, requested by one of the members of the GA.
|
||||
|
||||
The TC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the TC.
|
||||
|
||||
The members of the TC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The conflict resolution process is detailed in the resolution process document.
|
||||
|
||||
The TC can be contacted at <tc@@ffmpeg>.
|
||||
|
||||
@anchor{Resolution Process}
|
||||
@section Resolution Process
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project.
|
||||
|
||||
The TC's main role is to resolve technical conflicts. It is therefore not a technical steering committee, but it is understood that some decisions might impact the future of the project.
|
||||
|
||||
@subsection Seizing
|
||||
|
||||
The TC can take possession of any technical matter that it sees fit.
|
||||
|
||||
To involve the TC in a matter, email tc@ or CC them on an ongoing discussion.
|
||||
|
||||
As members of the TC are developers, they can also email tc@ to raise an issue.
|
||||
@subsection Announcement
|
||||
|
||||
The TC, once seized, must announce itself on the main mailing list, with a [TC] tag.
|
||||
|
||||
The TC has 2 modes of operation: a RFC one and an internal one.
|
||||
|
||||
If the TC thinks it needs the input from the larger community, the TC can call for a RFC. Else, it can decide by itself.
|
||||
|
||||
If the disagreement involves a member of the TC, that member should recuse themselves from the decision.
|
||||
|
||||
The decision to use a RFC process or an internal discussion is a discretionary decision of the TC.
|
||||
|
||||
The TC can also reject a seizure for a few reasons such as: the matter was not discussed enough previously; it lacks expertise to reach a beneficial decision on the matter; or the matter is too trivial.
|
||||
@subsection RFC call
|
||||
|
||||
In the RFC mode, one person from the TC posts on the mailing list the technical question and will request input from the community.
|
||||
|
||||
The mail will have the following specification:
|
||||
|
||||
a precise title
|
||||
a specific tag [TC RFC]
|
||||
a top-level email
|
||||
contain a precise question that does not exceed 100 words and that is answerable by developers
|
||||
may have an extra description, or a link to a previous discussion, if deemed necessary,
|
||||
contain a precise end date for the answers.
|
||||
|
||||
The answers from the community must be on the main mailing list and must have the following specification:
|
||||
|
||||
keep the tag and the title unchanged
|
||||
limited to 400 words
|
||||
a first-level reply, answering directly to the main email
|
||||
answering the question.
|
||||
|
||||
Further replies to answers are permitted, as long as they conform to the community standards of politeness, they are limited to 100 words, and are not nested more than once. (max-depth=2)
|
||||
|
||||
After the end-date, mails on the thread will be ignored.
|
||||
|
||||
Violations of those rules will be escalated through the Community Committee.
|
||||
|
||||
After all the emails are in, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay, that will be notified on the mailing list.
|
||||
@subsection Within TC
|
||||
|
||||
In the internal case, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay.
|
||||
@subsection Decisions
|
||||
|
||||
The decisions from the TC will be sent on the mailing list, with the [TC] tag.
|
||||
|
||||
Internally, the TC should take decisions with a majority, or using ranked-choice voting.
|
||||
|
||||
The decision from the TC should be published with a summary of the reasons that led to this decision.
|
||||
|
||||
The decisions from the TC are final, until the matters are reopened after no less than one year.
|
||||
|
||||
@anchor{Community Committee}
|
||||
@chapter Community Committee
|
||||
|
||||
The Community Committee (CC) is here to arbitrate and make decisions when inter-personal conflicts occur in the project. It will decide quickly and take action, for the sake of the project.
|
||||
|
||||
The CC can remove privileges of offending members, including removal of commit access and temporary ban from the community.
|
||||
|
||||
Decisions made by the CC can be re-opened after 1 year or by a majority vote of the General Assembly. Indefinite bans from the community must be confirmed by the General Assembly, in a majority vote.
|
||||
|
||||
The CC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the CC.
|
||||
|
||||
The members of the CC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The CC is governed by and responsible for enforcing the Code of Conduct.
|
||||
|
||||
The CC can be contacted at <cc@@ffmpeg>.
|
||||
|
||||
@anchor{Code of Conduct}
|
||||
@chapter Code of Conduct
|
||||
|
||||
Be friendly and respectful towards others and third parties.
|
||||
Treat others the way you yourself want to be treated.
|
||||
|
||||
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
|
||||
Different opinions and interpretations help the project.
|
||||
Looking at issues from a different perspective assists development.
|
||||
|
||||
Do not assume malice for things that can be attributed to incompetence. Even if
|
||||
it is malice, it's rarely good to start with that as the initial assumption.
|
||||
|
||||
Stay friendly even if someone acts contrarily. Everyone has a bad day
|
||||
once in a while.
|
||||
If you yourself have a bad day or are angry then try to take a break and reply
|
||||
once you are calm and without anger if you have to.
|
||||
|
||||
Try to help other team members and cooperate if you can.
|
||||
|
||||
The goal of software development is to create technical excellence, not for any
|
||||
individual to be better and "win" against the others. Large software projects
|
||||
are only possible and successful through teamwork.
|
||||
|
||||
If someone struggles, do not put them down. Give them a helping hand
|
||||
instead and point them in the right direction.
|
||||
|
||||
Finally, keep in mind the immortal words of Bill and Ted,
|
||||
"Be excellent to each other."
|
||||
|
||||
@bye
|
||||
@@ -25,19 +25,6 @@ enabled decoders.
|
||||
A description of some of the currently available video decoders
|
||||
follows.
|
||||
|
||||
@section av1
|
||||
|
||||
AOMedia Video 1 (AV1) decoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item operating_point
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Default is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video decoder.
|
||||
@@ -60,133 +47,6 @@ top-field-first is assumed
|
||||
|
||||
@end table
|
||||
|
||||
@section libdav1d
|
||||
|
||||
dav1d AV1 decoder.
|
||||
|
||||
libdav1d allows libavcodec to decode the AOMedia Video 1 (AV1) codec.
|
||||
Requires the presence of the libdav1d headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libdav1d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libdav1d wrapper.
|
||||
|
||||
@table @option
|
||||
|
||||
@item framethreads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
option @code{max_frame_delay} and the global option @code{threads} instead.
|
||||
|
||||
@item tilethreads
|
||||
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
global option @code{threads} instead.
|
||||
|
||||
@item max_frame_delay
|
||||
Set max amount of frames the decoder may buffer internally. The default value is 0
|
||||
(autodetect).
|
||||
|
||||
@item filmgrain
|
||||
Apply film grain to the decoded video if present in the bitstream. Defaults to the
|
||||
internal default of the library.
|
||||
This option is deprecated and will be removed in the future. See the global option
|
||||
@code{export_side_data} to export Film Grain parameters instead of applying it.
|
||||
|
||||
@item oppoint
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
|
||||
internal default of the library.
|
||||
|
||||
@item alllayers
|
||||
Output all spatial layers of a scalable AV1 bitstream. The default value is false.
|
||||
|
||||
@end table
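
As a usage sketch (assuming a build configured with @code{--enable-libdav1d}; @file{input.ivf} is a placeholder), the decoder can be selected explicitly and limited to buffering a single frame:
@example
ffmpeg -c:v libdav1d -max_frame_delay 1 -i input.ivf -f null -
@end example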
|
||||
|
||||
@section libdavs2
|
||||
|
||||
AVS2-P2/IEEE1857.4 video decoder wrapper.
|
||||
|
||||
This decoder allows libavcodec to decode AVS2 streams with davs2 library.
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@section libuavs3d
|
||||
|
||||
AVS3-P2/IEEE1857.10 video decoder.
|
||||
|
||||
libuavs3d allows libavcodec to decode AVS3 streams.
|
||||
Requires the presence of the libuavs3d headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libuavs3d}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libuavs3d wrapper.
|
||||
|
||||
@table @option
|
||||
|
||||
@item frame_threads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
|
||||
@end table
|
||||
|
||||
@section QSV Decoders
|
||||
|
||||
The family of Intel QuickSync Video decoders (VC1, MPEG-2, H.264, HEVC,
|
||||
JPEG/MJPEG, VP8, VP9, AV1).
|
||||
|
||||
@subsection Common Options
|
||||
|
||||
The following options are supported by all qsv decoders.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{async_depth}
|
||||
Internal parallelization depth; the higher the value, the higher the latency.
|
||||
|
||||
@item @var{gpu_copy}
|
||||
A GPU-accelerated copy between video and system memory
|
||||
@table @samp
|
||||
@item default
|
||||
@item on
|
||||
@item off
|
||||
@end table
|
||||
|
||||
@end table
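
A hypothetical decoding command using these common options might look as follows (it assumes an Intel GPU with a working QuickSync setup; file names are placeholders):
@example
ffmpeg -gpu_copy on -c:v h264_qsv -i input.mp4 -f null -
@end example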
|
||||
|
||||
@subsection HEVC Options
|
||||
Extra options for hevc_qsv.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{load_plugin}
|
||||
A user plugin to load in an internal session
|
||||
@table @samp
|
||||
@item none
|
||||
@item hevc_sw
|
||||
@item hevc_hw
|
||||
@end table
|
||||
|
||||
@item @var{load_plugins}
|
||||
A :-separated list of hexadecimal plugin UIDs to load in an internal session
|
||||
|
||||
@end table
|
||||
|
||||
@section v210
|
||||
|
||||
Uncompressed 4:2:2 10-bit decoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item custom_stride
|
||||
Set the line size of the v210 data in bytes. The default value is 0
|
||||
(autodetect). You can use the special -1 value for a strideless v210 as seen in
|
||||
BOXX files.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@@ -208,7 +68,7 @@ the undocumented RealAudio 3 (a.k.a. dnet).
|
||||
|
||||
@item -drc_scale @var{value}
|
||||
Dynamic Range Scale Factor. The factor to apply to dynamic range values
|
||||
from the AC-3 stream. This factor is applied exponentially. The default value is 1.
|
||||
from the AC-3 stream. This factor is applied exponentially.
|
||||
There are 3 notable scale factor ranges:
|
||||
@table @option
|
||||
@item drc_scale == 0
|
||||
@@ -328,194 +188,6 @@ without this library.
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTILES DECODERS
|
||||
|
||||
@section libaribb24
|
||||
|
||||
ARIB STD-B24 caption decoder.
|
||||
|
||||
Implements profiles A and C of the ARIB STD-B24 standard.
|
||||
|
||||
@subsection libaribb24 Decoder Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -aribb24-base-path @var{path}
|
||||
Sets the base path for the libaribb24 library. This is utilized for reading of
|
||||
configuration files (for custom unicode conversions), and for dumping of
|
||||
non-text symbols as images under that location.
|
||||
|
||||
Unset by default.
|
||||
|
||||
@item -aribb24-skip-ruby-text @var{boolean}
|
||||
Tells the decoder wrapper to skip text blocks that contain half-height ruby
|
||||
text.
|
||||
|
||||
Enabled by default.
|
||||
|
||||
@end table
|
||||
|
||||
@section libaribcaption
|
||||
|
||||
Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption}
|
||||
library.
|
||||
|
||||
Implements profiles A and C of the Japanese ARIB STD-B24 standard,
|
||||
Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T.
|
||||
|
||||
Requires the presence of the libaribcaption headers and library
|
||||
(@url{https://github.com/xqq/libaribcaption}) during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libaribcaption}.
|
||||
If both @dfn{libaribb24} and @dfn{libaribcaption} are enabled, @dfn{libaribcaption}
|
||||
decoder takes precedence.
|
||||
|
||||
@subsection libaribcaption Decoder Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -sub_type @var{subtitle_type}
|
||||
Specifies the format of the decoded subtitles.
|
||||
|
||||
@table @samp
|
||||
@item bitmap
|
||||
Graphical image.
|
||||
@item ass
|
||||
ASS formatted text.
|
||||
@item text
|
||||
Simple text based output without formatting.
|
||||
@end table
|
||||
|
||||
The default is @dfn{ass}, the same as the @dfn{libaribb24} decoder.
|
||||
Some players (e.g., @dfn{mpv}) expect the ASS format for ARIB captions.
|
||||
|
||||
@item -caption_encoding @var{encoding_scheme}
|
||||
Specifies the encoding scheme of input subtitle text.
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Automatically detect text encoding (default).
|
||||
@item jis
|
||||
8bit-char JIS encoding defined in ARIB STD B24.
|
||||
This encoding is used in Japan for ISDB captions.
|
||||
@item utf8
|
||||
UTF-8 encoding defined in ARIB STD B24.
|
||||
This encoding is used in Philippines for ISDB-T captions.
|
||||
@item latin
|
||||
Latin character encoding defined in ABNT NBR 15606-1.
|
||||
This encoding is used in South America for SBTVD / ISDB-Tb captions.
|
||||
@end table
|
||||
|
||||
@item -font @var{font_name[,font_name2,...]}
|
||||
Specify a comma-separated list of font family names to be used for @dfn{bitmap}
|
||||
or @dfn{ass} type subtitle rendering.
|
||||
Only the first font name is used for @dfn{ass}-type subtitles.
|
||||
|
||||
If not specified, the internally defined default font family is used.
|
||||
|
||||
@item -ass_single_rect @var{boolean}
|
||||
ARIB STD-B24 specifies that some captions may be displayed at different
|
||||
positions at a time (multi-rectangle subtitle).
|
||||
Since some players (e.g., old @dfn{mpv}) can't handle multiple ASS rectangles
|
||||
in a single AVSubtitle, or multiple ASS rectangles of indeterminate duration
|
||||
with the same start timestamp, this option can change the behavior so that
|
||||
all the texts are displayed in a single ASS rectangle.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
If your player cannot handle AVSubtitles with multiple ASS rectangles properly,
|
||||
set this option to @var{true} or define @env{ASS_SINGLE_RECT=1} to change
|
||||
default behavior at compilation.
|
||||
|
||||
@item -force_outline_text @var{boolean}
|
||||
Specify whether to always render outline text for all characters regardless of
|
||||
the indication by character style.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -outline_width @var{number} (0.0 - 3.0)
|
||||
Specify width for outline text, in dots (relative).
|
||||
|
||||
The default is @var{1.5}.
|
||||
|
||||
@item -ignore_background @var{boolean}
|
||||
Specify whether to ignore background color rendering.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -ignore_ruby @var{boolean}
|
||||
Specify whether to ignore rendering for ruby-like (furigana) characters.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -replace_drcs @var{boolean}
|
||||
Specify whether to render replaced DRCS characters as Unicode characters.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_ascii @var{boolean}
|
||||
Specify whether to replace MSZ (Middle Size; half width) fullwidth
|
||||
alphanumerics with halfwidth alphanumerics.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_japanese @var{boolean}
|
||||
Specify whether to replace some MSZ (Middle Size; half width) fullwidth
|
||||
Japanese special characters with halfwidth ones.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_glyph @var{boolean}
|
||||
Specify whether to replace MSZ (Middle Size; half width) characters
|
||||
with halfwidth glyphs if the font supports it.
|
||||
This option works under FreeType or DirectWrite renderer
|
||||
with Adobe-Japan1 compliant fonts.
|
||||
e.g., IBM Plex Sans JP, Morisawa BIZ UDGothic, Morisawa BIZ UDMincho,
|
||||
Yu Gothic, Yu Mincho, and Meiryo.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -canvas_size @var{image_size}
|
||||
Specify the resolution of the canvas to render subtitles to; usually, this
|
||||
should be the frame size of the input video.
|
||||
This only applies when @code{-subtitle_type} is set to @var{bitmap}.
|
||||
|
||||
The libaribcaption decoder assumes the following input frame sizes for bitmap rendering:
|
||||
@enumerate
|
||||
@item
|
||||
PROFILE_A : 1440 x 1080 with SAR (PAR) 4:3
|
||||
@item
|
||||
PROFILE_C : 320 x 180 with SAR (PAR) 1:1
|
||||
@end enumerate
|
||||
|
||||
If the actual frame size of the input video does not match the above assumption,
|
||||
the rendered captions may be distorted.
|
||||
To render the captions undistorted, add the @code{-canvas_size} option to specify
|
||||
the actual input video size.
|
||||
|
||||
Note that the @code{-canvas_size} option is not required for video with a
|
||||
different size but the same aspect ratio.
|
||||
In such cases, the caption will be stretched or shrunk to the actual video size
|
||||
if the @code{-canvas_size} option is not specified.
|
||||
If the @code{-canvas_size} option is specified with a different size,
|
||||
the caption will be stretched or shrunk to the specified size with the calculated SAR.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection libaribcaption decoder usage examples
|
||||
|
||||
Display an MPEG-TS file with ARIB subtitles using the @code{ffplay} tool:
|
||||
@example
|
||||
ffplay -sub_type bitmap MPEG.TS
|
||||
@end example
|
||||
|
||||
Display an MPEG-TS file with input frame size 1920x1080 using the @code{ffplay} tool:
|
||||
@example
|
||||
ffplay -sub_type bitmap -canvas_size 1920x1080 MPEG.TS
|
||||
@end example
|
||||
|
||||
Embed ARIB subtitles in transcoded video:
|
||||
@example
|
||||
ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h264 dest.mp4
|
||||
@end example
|
||||
|
||||
@section dvbsub
|
||||
|
||||
@subsection Options
|
||||
@@ -523,8 +195,6 @@ ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h
|
||||
@table @option
|
||||
@item compute_clut
|
||||
@table @option
|
||||
@item -2
|
||||
Compute clut once if no matching CLUT is in the stream.
|
||||
@item -1
|
||||
Compute clut if no matching CLUT is in the stream.
|
||||
@item 0
|
||||
@@ -553,7 +223,7 @@ palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bits hexadecimal
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
|
||||
@@ -578,30 +248,18 @@ configuration. You need to explicitly configure the build with
|
||||
|
||||
@table @option
|
||||
@item txt_page
|
||||
List of teletext page numbers to decode. Pages that do not match the specified
|
||||
list are dropped. You may use the special @code{*} string to match all pages,
|
||||
or @code{subtitle} to match all subtitle pages.
|
||||
List of teletext page numbers to decode. You may use the special * string to
|
||||
match all pages. Pages that do not match the specified list are dropped.
|
||||
Default value is *.
|
||||
@item txt_default_region
|
||||
Set default character set used for decoding, a value between 0 and 87 (see
|
||||
ETS 300 706, Section 15, Table 32). Default value is -1, which does not
|
||||
override the libzvbi default. This option is needed for some legacy level 1.0
|
||||
transmissions which cannot signal the proper charset.
|
||||
@item txt_chop_top
|
||||
Discards the top teletext line. Default value is 1.
|
||||
@item txt_format
|
||||
Specifies the format of the decoded subtitles.
|
||||
@table @option
|
||||
@item bitmap
|
||||
The default format, you should use this for teletext pages, because certain
|
||||
graphics and colors cannot be expressed in simple text or even ASS.
|
||||
@item text
|
||||
Simple text based output without formatting.
|
||||
@item ass
|
||||
Formatted ASS output, subtitle pages and teletext pages are returned in
|
||||
different styles, subtitle pages are stripped down to text, but an effort is
|
||||
made to keep the text alignment and the formatting.
|
||||
@end table
|
||||
Specifies the format of the decoded subtitles. The teletext decoder is capable
|
||||
of decoding the teletext pages to bitmaps or to simple text, you should use
|
||||
"bitmap" for teletext pages, because certain graphics and colors cannot be
|
||||
expressed in simple text. You might use "text" for teletext based subtitles if
|
||||
your application can handle simple text based subtitles. Default value is
|
||||
bitmap.
|
||||
@item txt_left
|
||||
X offset of generated bitmaps, default is 0.
|
||||
@item txt_top
|
||||
@@ -614,8 +272,7 @@ present between the subtitle lines because of double-sized teletext characters.
|
||||
Default value is 1.
|
||||
@item txt_duration
|
||||
Sets the display duration of the decoded teletext pages or subtitles in
|
||||
milliseconds. Default value is -1 which means infinity or until the next
|
||||
subtitle event comes.
|
||||
milliseconds. Default value is 30000 which is 30 seconds.
|
||||
@item txt_transparent
|
||||
Force transparent background of the generated teletext bitmaps. Default value
|
||||
is 0 which means an opaque background.
|
||||
|
||||
@@ -25,12 +25,16 @@ Audible Format 2, 3, and 4 demuxer.
|
||||
|
||||
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
|
||||
|
||||
@section aac
|
||||
@section applehttp
|
||||
|
||||
Raw Audio Data Transport Stream AAC demuxer.
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
|
||||
This demuxer is used to demux an ADTS input containing a single AAC stream
|
||||
along with any ID3v1/2 or APE tags in it.
|
||||
This demuxer presents all AVStreams from all variant streams.
|
||||
The id field is set to the bitrate variant index number. By setting
|
||||
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
|
||||
the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section apng
|
||||
|
||||
@@ -44,15 +48,12 @@ between the last fcTL and IEND chunks.
|
||||
|
||||
@table @option
|
||||
@item -ignore_loop @var{bool}
|
||||
Ignore the loop variable in the file if set. Default is enabled.
|
||||
|
||||
Ignore the loop variable in the file if set.
|
||||
@item -max_fps @var{int}
|
||||
Maximum framerate in frames per second. Default of 0 imposes no limit.
|
||||
|
||||
Maximum framerate in frames per second (0 for no limit).
|
||||
@item -default_fps @var{int}
|
||||
Default framerate in frames per second when none is specified in the file
|
||||
(0 meaning as fast as possible). Default is 15.
|
||||
|
||||
(0 meaning as fast as possible).
|
||||
@end table
|
||||
|
||||
@section asf
|
||||
@@ -103,7 +104,8 @@ backslash or single quotes.
|
||||
All subsequent file-related directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version.
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
to 1 if it was -1.
|
||||
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appear exactly as is (no extra space or byte-order-mark) on the very first
|
||||
@@ -157,16 +159,6 @@ directive) will be reduced based on their specified Out point.
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
This directive is deprecated, use @code{file_packet_meta} instead.
|
||||
|
||||
@item @code{file_packet_meta @var{key} @var{value}}
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
|
||||
@item @code{option @var{key} @var{value}}
|
||||
Option to access, open and probe the file.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
@@ -184,20 +176,6 @@ subfiles will be used.
|
||||
This is especially useful for MPEG-PS (VOB) files, where the order of the
|
||||
streams is not reliable.
|
||||
|
||||
@item @code{stream_meta @var{key} @var{value}}
|
||||
Metadata for the stream.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream_codec @var{value}}
|
||||
Codec for the stream.
|
||||
|
||||
@item @code{stream_extradata @var{hex_string}}
|
||||
Extradata for the string, encoded in hexadecimal.
|
||||
|
||||
@item @code{chapter @var{id} @var{start} @var{end}}
|
||||
Add a chapter. @var{id} is a unique identifier, possibly small and
|
||||
consecutive.
|
||||
|
||||
@end table
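
To illustrate the directives above, a minimal ffconcat script might look like this (file names are placeholders):
@example
ffconcat version 1.0
file intro.ts
file main.ts
@end example
which could then be concatenated without re-encoding with, for example:
@example
ffmpeg -f concat -i list.ffconcat -c copy output.ts
@end example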
|
||||
|
||||
@subsection Options
|
||||
@@ -207,8 +185,7 @@ This demuxer accepts the following option:
|
||||
@table @option
|
||||
|
||||
@item safe
|
||||
If set to 1, reject unsafe file paths and directives.
|
||||
A file path is considered safe if it
|
||||
If set to 1, reject unsafe file paths. A file path is considered safe if it
|
||||
does not contain a protocol specification and is relative and all components
|
||||
only contain characters from the portable character set (letters, digits,
|
||||
period, underscore and hyphen) and have no period at the beginning of a
|
||||
@@ -218,6 +195,9 @@ If set to 0, any file name is accepted.
|
||||
|
||||
The default is 1.
|
||||
|
||||
-1 is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
@@ -274,55 +254,11 @@ which streams to actually receive.
|
||||
Each stream mirrors the @code{id} and @code{bandwidth} properties from the
|
||||
@code{<Representation>} as metadata keys named "id" and "variant_bitrate" respectively.
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following option:
|
||||
|
||||
@table @option
|
||||
|
||||
@item cenc_decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@end table
|
||||
|
||||
@section ea
|
||||
|
||||
Electronic Arts Multimedia format demuxer.
|
||||
|
||||
This format is used by various Electronic Arts games.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item merge_alpha @var{bool}
|
||||
|
||||
Normally the VP6 alpha channel (if it exists) is returned as a secondary video
|
||||
stream; by setting this option you can make the demuxer return a single video
|
||||
stream which contains the alpha channel in addition to the ordinary video.
|
||||
|
||||
@end table
|
||||
|
||||
@section imf
|
||||
|
||||
Interoperable Master Format demuxer.
|
||||
|
||||
This demuxer presents audio and video streams found in an IMF Composition, as
|
||||
specified in @url{https://doi.org/10.5594/SMPTE.ST2067-2.2020, SMPTE ST 2067-2}.
|
||||
|
||||
@example
|
||||
ffmpeg [-assetmaps <path of ASSETMAP1>,<path of ASSETMAP2>,...] -i <path of CPL> ...
|
||||
@end example
|
||||
|
||||
If @code{-assetmaps} is not specified, the demuxer looks for a file called
|
||||
@file{ASSETMAP.xml} in the same directory as the CPL.
|
||||
|
||||
@section flv, live_flv, kux
|
||||
@section flv, live_flv
|
||||
|
||||
Adobe Flash Video Format demuxer.
|
||||
|
||||
This demuxer is used to demux FLV files and RTMP network streams. In the case of live network streams, if you force the format, you may use the live_flv option instead of flv to survive timestamp discontinuities.
|
||||
KUX is an FLV variant used on the Youku platform.
|
||||
|
||||
@example
|
||||
ffmpeg -f flv -i myfile.flv ...
|
||||
@@ -333,12 +269,6 @@ ffmpeg -f live_flv -i rtmp://<any.server>/anything/key ....
|
||||
@table @option
|
||||
@item -flv_metadata @var{bool}
|
||||
Allocate the streams according to the onMetaData array content.
|
||||
|
||||
@item -flv_ignore_prevtag @var{bool}
|
||||
Ignore the size of previous tag value.
|
||||
|
||||
@item -flv_full_metadata @var{bool}
|
||||
Output all context of the onMetadata.
|
||||
@end table
|
||||
|
||||
@section gif
|
||||
@@ -384,42 +314,19 @@ infinitely.
|
||||
|
||||
HLS demuxer
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
|
||||
This demuxer presents all AVStreams from all variant streams.
|
||||
The id field is set to the bitrate variant index number. By setting
|
||||
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
|
||||
the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item live_start_index
|
||||
segment index to start live streams at (negative values are from the end).
|
||||
|
||||
@item prefer_x_start
|
||||
Prefer to use #EXT-X-START if it is present in the playlist, instead of live_start_index.
|
||||
|
||||
@item allowed_extensions
|
||||
',' separated list of file extensions that hls is allowed to access.
|
||||
|
||||
@item extension_picky
|
||||
This blocks disallowed extensions from probing.
|
||||
It also requires all available segments to have extensions matching the format,
|
||||
except mpegts, which is always allowed.
|
||||
It is recommended to set the whitelists correctly instead of depending on extensions.
|
||||
Enabled by default.
|
||||
|
||||
@item max_reload
|
||||
Maximum number of times an insufficient list is attempted to be reloaded.
|
||||
Default value is 1000.
|
||||
|
||||
@item m3u8_hold_counters
|
||||
The maximum number of times to load m3u8 when it refreshes without new segments.
|
||||
Default value is 1000.
|
||||
|
||||
@item http_persistent
|
||||
Use persistent HTTP connections. Applicable only for HTTP streams.
|
||||
Enabled by default.
|
||||
@@ -427,17 +334,6 @@ Enabled by default.
|
||||
@item http_multiple
|
||||
Use multiple HTTP connections for downloading HTTP segments.
|
||||
Enabled by default for HTTP/1.1 servers.
|
||||
|
||||
@item http_seekable
|
||||
Use HTTP partial requests for downloading HTTP segments.
|
||||
0 = disable, 1 = enable, -1 = auto, Default is auto.
|
||||
|
||||
@item seg_format_options
|
||||
Set options for the demuxer of media segments using a list of key=value pairs separated by @code{:}.
|
||||
|
||||
@item seg_max_retry
|
||||
Maximum number of times to reload a segment on error, useful when segment skip on network error is not desired.
|
||||
Default value is 0.
|
||||
@end table
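
For instance, a hypothetical command that starts reading a live playlist three segments from the end and records one minute of it (the URL is a placeholder):
@example
ffmpeg -live_start_index -3 -i https://example.com/live/stream.m3u8 -c copy -t 60 output.ts
@end example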
|
||||
|
||||
@section image2
|
||||
@@ -548,17 +444,6 @@ nanosecond precision.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@item export_path_metadata
|
||||
If set to 1, will add two extra fields to the metadata found in input, making them
|
||||
also available for other filters (see @var{drawtext} filter for examples). Default
|
||||
value is 0. The extra fields are described below:
|
||||
@table @option
|
||||
@item lavf.image2dec.source_path
|
||||
Corresponds to the full path to the input file being read.
|
||||
@item lavf.image2dec.source_basename
|
||||
Corresponds to the name of the file being read.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -590,84 +475,14 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
|
||||
|
||||
The Game Music Emu library is a collection of video game music file emulators.
|
||||
|
||||
See @url{https://bitbucket.org/mpyne/game-music-emu/overview} for more information.
|
||||
See @url{http://code.google.com/p/game-music-emu/} for more information.
|
||||
|
||||
It accepts the following options:
|
||||
Some files have multiple tracks. The demuxer will pick the first track by
|
||||
default. The @option{track_index} option can be used to select a different
|
||||
track. Track indexes start at 0. The demuxer exports the number of tracks as
|
||||
the @var{tracks} metadata entry.
|
||||
|
||||
@table @option
|
||||
|
||||
@item track_index
|
||||
Set the index of which track to demux. The demuxer can only export one track.
|
||||
Track indexes start at 0. Default is to pick the first track. Number of tracks
|
||||
is exported as @var{tracks} metadata entry.
|
||||
|
||||
@item sample_rate
|
||||
Set the sampling rate of the exported track. Range is 1000 to 999999. Default is 44100.
|
||||
|
||||
@item max_size @emph{(bytes)}
|
||||
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
|
||||
which, in turn, acts as a ceiling for the size of files that can be read.
|
||||
Default is 50 MiB.
|
||||
|
||||
@end table
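
A usage sketch (assuming a libgme-enabled build; the file name is a placeholder) that demuxes the second track and renders two minutes of it:
@example
ffmpeg -track_index 1 -i game_music.nsf -t 120 output.flac
@end example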
|
||||
|
||||
@section libmodplug
|
||||
|
||||
ModPlug based module demuxer
|
||||
|
||||
See @url{https://github.com/Konstanty/libmodplug}
|
||||
|
||||
It will export one 2-channel 16-bit 44.1 kHz audio stream.
|
||||
Optionally, a @code{pal8} 16-color video stream can be exported with or without printed metadata.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item noise_reduction
|
||||
Apply a simple low-pass filter. Can be 1 (on) or 0 (off). Default is 0.
|
||||
|
||||
@item reverb_depth
|
||||
Set amount of reverb. Range 0-100. Default is 0.
|
||||
|
||||
@item reverb_delay
|
||||
Set delay in ms, clamped to 40-250 ms. Default is 0.
|
||||
|
||||
@item bass_amount
|
||||
Apply bass expansion a.k.a. XBass or megabass. Range is 0 (quiet) to 100 (loud). Default is 0.
|
||||
|
||||
@item bass_range
|
||||
Set cutoff i.e. upper-bound for bass frequencies. Range is 10-100 Hz. Default is 0.
|
||||
|
||||
@item surround_depth
|
||||
Apply a Dolby Pro-Logic surround effect. Range is 0 (quiet) to 100 (heavy). Default is 0.
|
||||
|
||||
@item surround_delay
|
||||
Set surround delay in ms, clamped to 5-40 ms. Default is 0.
|
||||
|
||||
@item max_size
|
||||
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
|
||||
which, in turn, acts as a ceiling for the size of files that can be read. Range is 0 to 100 MiB.
|
||||
0 removes buffer size limit (not recommended). Default is 5 MiB.
|
||||
|
||||
@item video_stream_expr
|
||||
String which is evaluated using the eval API to assign colors to the generated video stream.
|
||||
Variables which can be used are @code{x}, @code{y}, @code{w}, @code{h}, @code{t}, @code{speed},
|
||||
@code{tempo}, @code{order}, @code{pattern} and @code{row}.
|
||||
|
||||
@item video_stream
|
||||
Generate video stream. Can be 1 (on) or 0 (off). Default is 0.
|
||||
|
||||
@item video_stream_w
|
||||
Set video frame width in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.
|
||||
|
||||
@item video_stream_h
|
||||
Set video frame height in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.
|
||||
|
||||
@item video_stream_ptxt
|
||||
Print metadata on video stream. Includes @code{speed}, @code{tempo}, @code{order}, @code{pattern},
|
||||
@code{row} and @code{ts} (time in ms). Can be 1 (on) or 0 (off). Default is 1.
|
||||
|
||||
@end table
|
||||
For very large files, the @option{max_size} option may have to be adjusted.
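As an illustration (file names are hypothetical), the following decodes a module to audio while also generating the optional video stream:
@example
ffmpeg -video_stream 1 -video_stream_w 40 -i song.it out.mkv
@end example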
|
||||
|
||||
@section libopenmpt
|
||||
|
||||
@@ -696,13 +511,9 @@ Set the sample rate for libopenmpt to output.
|
||||
Range is from 1000 to INT_MAX. The default value is 48000.
|
||||
@end table
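For instance, to have libopenmpt render a module at 48 kHz and store it losslessly, something along these lines could be used (file names are illustrative):
@example
ffmpeg -sample_rate 48000 -i tune.xm -c:a flac tune.flac
@end example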
|
||||
|
||||
@section mov/mp4/3gp
|
||||
@section mov/mp4/3gp/QuickTime
|
||||
|
||||
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
|
||||
|
||||
Registered extensions: mov, mp4, m4a, 3gp, 3g2, mj2, psp, m4b, ism, ismv, isma, f4v
|
||||
|
||||
@subsection Options
|
||||
QuickTime / MP4 demuxer.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@@ -713,95 +524,10 @@ Enabling this can theoretically leak information in some use cases.
|
||||
@item use_absolute_path
|
||||
Allows loading of external tracks via absolute paths, disabled by default.
|
||||
Enabling this poses a security risk. It should only be enabled if the source
|
||||
is known to be non-malicious.
|
||||
|
||||
@item seek_streams_individually
|
||||
When seeking, identify the closest point in each stream individually and demux packets in
|
||||
that stream from the identified point. This can lead to a different sequence of packets compared
|
||||
to demuxing linearly from the beginning. Default is true.
|
||||
|
||||
@item ignore_editlist
|
||||
Ignore any edit list atoms. The demuxer, by default, modifies the stream index to reflect the
|
||||
timeline described by the edit list. Default is false.
|
||||
|
||||
@item advanced_editlist
|
||||
Modify the stream index to reflect the timeline described by the edit list. @code{ignore_editlist}
|
||||
must be set to false for this option to be effective.
|
||||
If both @code{ignore_editlist} and this option are set to false, then only the
|
||||
start of the stream index is modified to reflect initial dwell time or starting timestamp
|
||||
described by the edit list. Default is true.
|
||||
|
||||
@item ignore_chapters
|
||||
Don't parse chapters. This includes GoPro 'HiLight' tags/moments. Note that chapters are
|
||||
only parsed when input is seekable. Default is false.
|
||||
|
||||
@item use_mfra_for
|
||||
For seekable fragmented input, set fragment's starting timestamp from media fragment random access box, if present.
|
||||
|
||||
The following options are available:
|
||||
@table @samp
|
||||
@item auto
|
||||
Auto-detect whether to set mfra timestamps as PTS or DTS @emph{(default)}
|
||||
|
||||
@item dts
|
||||
Set mfra timestamps as DTS
|
||||
|
||||
@item pts
|
||||
Set mfra timestamps as PTS
|
||||
|
||||
@item 0
|
||||
Don't use mfra box to set timestamps
|
||||
@end table
|
||||
|
||||
@item use_tfdt
|
||||
For fragmented input, set fragment's starting timestamp to @code{baseMediaDecodeTime} from the @code{tfdt} box.
|
||||
Default is enabled, which will prefer to use the @code{tfdt} box to set DTS. Disable to use the @code{earliest_presentation_time} from the @code{sidx} box.
|
||||
In either case, the timestamp from the @code{mfra} box will be used if it's available and @code{use_mfra_for} is
|
||||
set to pts or dts.
|
||||
|
||||
@item export_all
|
||||
Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
|
||||
characters of the box type are set as the key. Default is false.
|
||||
|
||||
@item export_xmp
|
||||
Export entire contents of @var{XMP_} box and @var{uuid} box as a string with key @code{xmp}. Note that
|
||||
if @code{export_all} is set and this option isn't, the contents of @var{XMP_} box are still exported
|
||||
but with key @code{XMP_}. Default is false.
|
||||
|
||||
@item activation_bytes
|
||||
4-byte key required to decrypt Audible AAX and AAX+ files. See Audible AAX subsection below.
|
||||
|
||||
@item audible_fixed_key
|
||||
Fixed key used for handling Audible AAX/AAX+ files. It has been pre-set so should not be necessary to
|
||||
specify.
|
||||
|
||||
@item decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@item max_stts_delta
|
||||
Very high sample deltas written in a trak's stts box may occasionally be intended but usually they are written in
error or used to store a negative value for dts correction when treated as signed 32-bit integers. This option lets
the user set an upper limit, beyond which the delta is clamped to 1. Values greater than the limit, if negative when
cast to int32, are used to adjust onward dts.
|
||||
|
||||
Unit is the track time scale. Range is 0 to UINT_MAX. Default is @code{UINT_MAX - 48000*10}, which allows up to
a 10 second dts correction for 48 kHz audio streams while accommodating 99.9% of the @code{uint32} range.
|
||||
|
||||
@item interleaved_read
|
||||
Interleave packets from multiple tracks at demuxer level. For badly interleaved files, this prevents playback issues
|
||||
caused by large gaps between packets in different tracks, as MOV/MP4 do not have packet placement requirements.
|
||||
However, this can cause excessive seeking on very badly interleaved files, due to seeking between tracks, so disabling
|
||||
it may prevent I/O issues, at the expense of playback.
|
||||
is known to be non malicious.
|
||||
|
||||
@end table
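As a sketch of the decryption options above, a CENC-encrypted file could be decrypted and remuxed roughly as follows; the key and file names are placeholders:
@example
ffmpeg -decryption_key 00112233445566778899aabbccddeeff -i encrypted.mp4 -c copy decrypted.mp4
@end example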
|
||||
|
||||
@subsection Audible AAX
|
||||
|
||||
Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
|
||||
@example
|
||||
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
|
||||
@end example
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG-2 transport stream demuxer.
|
||||
@@ -812,9 +538,6 @@ This demuxer accepts the following options:
|
||||
Set size limit for looking up a new synchronization. Default value is
|
||||
65536.
|
||||
|
||||
@item skip_unknown_pmt
|
||||
Skip PMTs for programs not defined in the PAT. Default value is 0.
|
||||
|
||||
@item fix_teletext_pts
|
||||
Override teletext packet PTS and DTS values with the timestamps calculated
|
||||
from the PCR of the first program which the teletext stream is part of and is
|
||||
@@ -829,14 +552,6 @@ Show the detected raw packet size, cannot be set by the user.
|
||||
Scan and combine all PMTs. The value is an integer ranging from -1
|
||||
to 1 (-1 means automatic setting, 1 means enabled, 0 means
|
||||
disabled). Default value is -1.
|
||||
|
||||
@item merge_pmt_versions
|
||||
Re-use existing streams when a PMT's version is updated and elementary
|
||||
streams move to different PIDs. Default value is 0.
|
||||
|
||||
@item max_packet_size
|
||||
Set maximum size, in bytes, of packet emitted by the demuxer. Payloads above this size
|
||||
are split across multiple packets. Range is 1 to INT_MAX/2. Default is 204800 bytes.
|
||||
@end table
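For example, to keep stream identities across PMT version changes while remuxing a transport stream, an invocation along these lines could be used (file names are illustrative):
@example
ffmpeg -merge_pmt_versions 1 -i input.ts -c copy output.mkv
@end example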
|
||||
|
||||
@section mpjpeg
|
||||
@@ -934,20 +649,4 @@ Example: convert the captions to a format most players understand:
|
||||
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
|
||||
@end example
|
||||
|
||||
@section vapoursynth
|
||||
|
||||
Vapoursynth wrapper.
|
||||
|
||||
Due to security concerns, Vapoursynth scripts will not
|
||||
be autodetected, so the input format has to be forced. For ff* CLI tools,
|
||||
add @code{-f vapoursynth} before the input @code{-i yourscript.vpy}.
|
||||
|
||||
This demuxer accepts the following option:
|
||||
@table @option
|
||||
@item max_script_size
|
||||
The demuxer buffers the entire script into memory. Adjust this value to set the maximum buffer size,
which, in turn, acts as a ceiling for the size of scripts that can be read.
|
||||
Default is 1 MiB.
|
||||
@end table
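A minimal usage sketch, forcing the input format as described above (script and output names are placeholders):
@example
ffmpeg -f vapoursynth -i yourscript.vpy -c:v libx264 output.mkv
@end example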
|
||||
|
||||
@c man end DEMUXERS
|
||||
|
||||
@@ -10,115 +10,41 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
@chapter Notes for external developers
|
||||
|
||||
This text is concerned with the development @emph{of} FFmpeg itself. Information
|
||||
on using the FFmpeg libraries in other programs can be found elsewhere, e.g. in:
|
||||
@itemize @bullet
|
||||
@item
|
||||
the installed header files
|
||||
@item
|
||||
@url{http://ffmpeg.org/doxygen/trunk/index.html, the Doxygen documentation}
|
||||
generated from the headers
|
||||
@item
|
||||
the examples under @file{doc/examples}
|
||||
@end itemize
|
||||
This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in @file{doc/examples} and in the source code to
|
||||
see how the public API is employed.
|
||||
|
||||
You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to @emph{publish any patch you make}. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
|
||||
For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the @file{LICENSE} file in the source tree and
|
||||
consult @url{https://ffmpeg.org/legal.html}.
|
||||
|
||||
If you modify FFmpeg code for your own use case, you are highly encouraged to
|
||||
@emph{submit your changes back to us}, using this document as a guide. There are
|
||||
both pragmatic and ideological reasons to do so:
|
||||
@chapter Contributing
|
||||
|
||||
There are 2 ways by which code gets into FFmpeg:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Maintaining external changes to keep up with upstream development is
|
||||
time-consuming and error-prone. With your code in the main tree, it will be
|
||||
maintained by FFmpeg developers.
|
||||
@item
|
||||
FFmpeg developers include leading experts in the field who can find bugs or
|
||||
design flaws in your code.
|
||||
@item
|
||||
By supporting the project you find useful you ensure it continues to be
|
||||
maintained and developed.
|
||||
@item Submitting patches to the ffmpeg-devel mailing list.
|
||||
See @ref{Submitting patches} for details.
|
||||
@item Directly committing changes to the main tree.
|
||||
@end itemize
|
||||
|
||||
All proposed code changes should be submitted for review to
|
||||
@url{mailto:ffmpeg-devel@@ffmpeg.org, the development mailing list}, as
|
||||
described in more detail in the @ref{Submitting patches} chapter. The code
|
||||
should comply with the @ref{Development Policy} and follow the @ref{Coding Rules}.
|
||||
Whichever way, changes should be reviewed by the maintainer of the code
before they are committed, and they should follow the @ref{Coding Rules}.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
|
||||
@anchor{Coding Rules}
|
||||
@chapter Coding Rules
|
||||
|
||||
@section Language
|
||||
|
||||
FFmpeg is mainly programmed in the ISO C99 language, extended with:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Atomic operations from C11 @file{stdatomic.h}. They are emulated on
|
||||
architectures/compilers that do not support them, so all FFmpeg-internal code
|
||||
may use atomics without any extra checks. However, @file{stdatomic.h} must not
|
||||
be included in public headers, so they stay C99-compatible.
|
||||
@end itemize
|
||||
|
||||
Compiler-specific extensions may be used with good reason, but must not be
|
||||
depended on, i.e. the code must still compile and work with compilers lacking
|
||||
the extension.
|
||||
|
||||
The following C99 features must not be used anywhere in the codebase:
|
||||
@itemize @bullet
|
||||
@item
|
||||
variable-length arrays;
|
||||
|
||||
@item
|
||||
complex numbers;
|
||||
|
||||
@item
|
||||
mixed statements and declarations.
|
||||
@end itemize
|
||||
|
||||
@subsection SIMD/DSP
|
||||
@anchor{SIMD/DSP}
|
||||
|
||||
As modern compilers are unable to generate efficient SIMD or other
|
||||
performance-critical DSP code from plain C, handwritten assembly is used.
|
||||
Usually such code is isolated in a separate function. Then the standard approach
|
||||
is writing multiple versions of this function – a plain C one that works
|
||||
everywhere and may also be useful for debugging, and potentially multiple
|
||||
architecture-specific optimized implementations. Initialization code then
|
||||
chooses the best available version at runtime and loads it into a function
|
||||
pointer; the function in question is then always called through this pointer.
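As a rough illustration of this pattern (all names below are hypothetical, not actual FFmpeg APIs), the initialization code might look like this:
@example
#include <stdint.h>
#include "libavutil/cpu.h"

typedef struct FooDSPContext @{
    void (*add_pixels)(uint8_t *dst, const uint8_t *src, int len);
@} FooDSPContext;

/* plain C reference version: works everywhere and is useful for debugging */
static void add_pixels_c(uint8_t *dst, const uint8_t *src, int len)
@{
    int i;
    for (i = 0; i < len; i++)
        dst[i] += src[i];
@}

/* hypothetical hand-written assembly implementation */
void ff_add_pixels_sse2(uint8_t *dst, const uint8_t *src, int len);

void ff_foodsp_init(FooDSPContext *c, int cpu_flags)
@{
    c->add_pixels = add_pixels_c;           /* safe default */
    if (cpu_flags & AV_CPU_FLAG_SSE2)
        c->add_pixels = ff_add_pixels_sse2; /* best available version, chosen at runtime */
@}
@end example
All later callers go through @code{c->add_pixels()}, so the dispatch cost is paid only once at initialization.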
|
||||
|
||||
The specific syntax used for writing assembly is:
|
||||
@itemize @bullet
|
||||
@item
|
||||
NASM on x86;
|
||||
|
||||
@item
|
||||
GAS on ARM.
|
||||
@end itemize
|
||||
|
||||
A unit testing framework for assembly called @code{checkasm} lives under
|
||||
@file{tests/checkasm}. All new assembly should come with @code{checkasm} tests;
|
||||
adding tests for existing assembly that lacks them is also strongly encouraged.
|
||||
|
||||
@subsection Other languages
|
||||
|
||||
Other languages than C may be used in special cases:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Compiler intrinsics or inline assembly when the code in question cannot be
|
||||
written in the standard way described in the @ref{SIMD/DSP} section. This
|
||||
typically applies to code that needs to be inlined.
|
||||
|
||||
@item
|
||||
Objective-C where required for interacting with macOS-specific interfaces.
|
||||
@end itemize
|
||||
|
||||
@section Code formatting conventions
|
||||
|
||||
There are the following guidelines regarding the indentation in files:
|
||||
@@ -141,39 +67,8 @@ K&R coding style is used.
|
||||
@end itemize
|
||||
The presentation is one inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
@subsection Vim configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
@subsection Emacs configuration
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
|
||||
@section Comments
|
||||
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
@@ -215,52 +110,86 @@ int myfunc(int my_parameter)
|
||||
...
|
||||
@end example
|
||||
|
||||
@anchor{Naming conventions}
|
||||
@section Naming conventions
|
||||
@section C language features
|
||||
|
||||
Names of functions, variables, and struct members must be lowercase, using
|
||||
underscores (_) to separate words. For example, @samp{avfilter_get_video_buffer}
|
||||
is an acceptable function name and @samp{AVFilterGetVideo} is not.
|
||||
FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
|
||||
Struct, union, enum, and typedeffed type names must use CamelCase. All structs
|
||||
and unions should be typedeffed to the same name as the struct/union tag, e.g.
|
||||
@code{typedef struct AVFoo @{ ... @} AVFoo;}. Enums are typically not
|
||||
typedeffed.
|
||||
|
||||
Enumeration constants and macros must be UPPERCASE, except for macros
|
||||
masquerading as functions, which should use the function naming convention.
|
||||
|
||||
All identifiers in the libraries should be namespaced as follows:
|
||||
@itemize @bullet
|
||||
@item
|
||||
No namespacing for identifiers with file and lower scope (e.g. local variables,
|
||||
static functions), and struct and union members,
|
||||
the @samp{inline} keyword;
|
||||
|
||||
@item
|
||||
The @code{ff_} prefix must be used for variables and functions visible outside
|
||||
of file scope, but only used internally within a single library, e.g.
|
||||
@samp{ff_w64_demuxer}. This prevents name collisions when FFmpeg is statically
|
||||
linked.
|
||||
@samp{//} comments;
|
||||
|
||||
@item
|
||||
designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
|
||||
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
|
||||
@item
|
||||
Implementation-defined behavior for signed integers is assumed to match the
expected behavior for two's complement. Non-representable values in integer
casts are binary truncated. Shift right of signed values uses sign extension.
|
||||
@end itemize
|
||||
|
||||
These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
|
||||
All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
mixing statements and declarations;
|
||||
|
||||
@item
|
||||
@samp{long long} (use @samp{int64_t} instead);
|
||||
|
||||
@item
|
||||
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
|
||||
|
||||
@item
|
||||
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@section Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exceptions to this are type names, like
for example structs and enums; they should always be in CamelCase.
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_report_missing_feature}.
|
||||
|
||||
@item
|
||||
All other internal identifiers, like private type or macro names, should be
|
||||
namespaced only to avoid possible internal conflicts. E.g. @code{H264_NAL_SPS}
|
||||
vs. @code{HEVC_NAL_SPS}.
|
||||
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
|
||||
@item
|
||||
Other public identifiers (struct, union, enum, macro, type names) must use their
|
||||
library's public prefix (@code{AV}, @code{Sws}, or @code{Swr}).
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
@end itemize
|
||||
|
||||
Furthermore, the name space reserved for the system should not be invaded.
|
||||
@@ -274,50 +203,50 @@ symbols. If in doubt, just avoid names starting with @code{_} altogether.
|
||||
@section Miscellaneous conventions
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
please use av_log() instead.
|
||||
|
||||
@item
|
||||
Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don't make the code easier to understand.
|
||||
@end itemize
|
||||
|
||||
@anchor{Development Policy}
|
||||
@section Editor configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
|
||||
@chapter Development Policy
|
||||
|
||||
@section Code behaviour
|
||||
|
||||
@subheading Correctness
|
||||
The code must be valid. It must not crash, abort, access invalid pointers, leak
|
||||
memory, cause data races or signed integer overflow, or otherwise cause
|
||||
undefined behaviour. Error codes should be checked and, when applicable,
|
||||
forwarded to the caller.
|
||||
|
||||
@subheading Thread- and library-safety
|
||||
Our libraries may be called by multiple independent callers in the same process.
|
||||
These calls may happen from any number of threads and the different call sites
|
||||
may not be aware of each other - e.g. a user program may be calling our
|
||||
libraries directly, and use one or more libraries that also call our libraries.
|
||||
The code must behave correctly under such conditions.
|
||||
|
||||
@subheading Robustness
|
||||
The code must treat as untrusted any bytestream received from a caller or read
|
||||
from a file, network, etc. It must not misbehave when arbitrary data is sent to
|
||||
it - typically it should print an error message and return
|
||||
@code{AVERROR_INVALIDDATA} on encountering invalid input data.
|
||||
|
||||
@subheading Memory allocation
|
||||
The code must use the @code{av_malloc()} family of functions from
|
||||
@file{libavutil/mem.h} to perform all memory allocation, except in special cases
|
||||
(e.g. when interacting with an external library that requires a specific
|
||||
allocator to be used).
|
||||
|
||||
All allocations should be checked and @code{AVERROR(ENOMEM)} returned on
|
||||
failure. A common mistake is that error paths leak memory - make sure that does
|
||||
not happen.
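A minimal sketch of the expected pattern (the helper name is made up):
@example
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int alloc_table(uint8_t **out, size_t size)
@{
    uint8_t *buf = av_mallocz(size);
    if (!buf)
        return AVERROR(ENOMEM);   /* never continue with a NULL buffer */
    *out = buf;
    return 0;
@}
@end example
On error paths, anything already allocated must be released (e.g. with @code{av_freep()}) before returning.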
|
||||
|
||||
@subheading stdio
|
||||
Our libraries must not access the stdio streams stdin/stdout/stderr directly
|
||||
(e.g. via @code{printf()} family of functions), as that is not library-safe. For
|
||||
logging, use @code{av_log()}.
|
||||
|
||||
@section Patches/Committing
|
||||
@subheading Licenses for patches must be compatible with FFmpeg.
|
||||
Contributions should be licensed under the
|
||||
@@ -340,24 +269,13 @@ missing samples or an implementation with a small subset of features.
|
||||
Always check the mailing list for any reviewers with issues and test
|
||||
FATE before you push.
|
||||
|
||||
@subheading Commit messages
|
||||
Commit messages are highly important tools for informing other developers on
|
||||
what a given change does and why. Every commit must always have a properly
|
||||
filled out commit message with the following format:
|
||||
@example
|
||||
area changed: short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
If the commit addresses a known bug on our bug tracker or other external issue
|
||||
(e.g. CVE), the commit message should include the relevant bug ID(s) or other
|
||||
external identifiers. Note that this should be done in addition to a proper
|
||||
explanation and not instead of it. Comments such as "fixed!" or "Changed it."
|
||||
are not acceptable.
|
||||
|
||||
When applying patches that have been discussed at length on the mailing list,
|
||||
reference the thread in the commit message.
|
||||
@subheading Keep the main commit message short with an extended description below.
|
||||
The commit message should have a short first line in the form of
|
||||
a @samp{topic: short description} as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
@subheading Testing must be adequate but not excessive.
|
||||
If it works for you, others, and passes FATE then it should be OK to commit
|
||||
@@ -376,6 +294,15 @@ later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
@subheading Ask before you change the build system (configure, etc).
|
||||
Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
@subheading Cosmetic changes should be kept in separate patches.
|
||||
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes; such commits will be rejected and removed. Every
|
||||
@@ -390,15 +317,27 @@ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
|
||||
then either do NOT change the indentation of the inner part within (do not
move it to the right), or do so in a separate commit.
|
||||
|
||||
@subheading Commit messages should always be filled out properly.
|
||||
Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
@example
|
||||
area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
@subheading Credit the author of the patch.
|
||||
Make sure the author of the commit is set correctly. (see git commit --author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
@subheading Credit any researchers
|
||||
If a commit/patch fixes an issue found by some researcher, always credit the
|
||||
researcher in the commit message for finding/reporting the issue.
|
||||
@subheading Complex patches should refer to discussion surrounding them.
|
||||
When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
@subheading Always wait long enough before pushing changes
|
||||
Do NOT commit to code actively maintained by others without permission.
|
||||
@@ -408,6 +347,22 @@ time-frame (12h for build failures and security fixes, 3 days small changes,
|
||||
Also note, the maintainer can simply ask for more time to review!
|
||||
|
||||
@section Code
|
||||
@subheading API/ABI changes should be discussed before they are made.
|
||||
Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove widely used functionality or features (redundant code can be removed).
|
||||
|
||||
@subheading Remember to check if you need to bump versions for libav*.
|
||||
Depending on the change, you may need to change the version integer.
|
||||
Incrementing the first component means no backward compatibility to
|
||||
previous versions (e.g. removal of a function from the public API).
|
||||
Incrementing the second component means backward compatible change
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
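As a hypothetical illustration (the numbers are made up), adding a new public function to libavformat would be accompanied by a minor bump in @file{libavformat/version.h}:
@example
#define LIBAVFORMAT_VERSION_MAJOR  58
#define LIBAVFORMAT_VERSION_MINOR  13   /* was 12 before the new API was added */
#define LIBAVFORMAT_VERSION_MICRO 100
@end example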
|
||||
|
||||
@subheading Warnings for correct code may be disabled if there is no other option.
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
@@ -417,150 +372,10 @@ If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
@section Library public interfaces
|
||||
Every library in FFmpeg provides a set of public APIs in its installed headers,
|
||||
which are those listed in the variable @code{HEADERS} in that library's
|
||||
@file{Makefile}. All identifiers defined in those headers (except for those
|
||||
explicitly documented otherwise), and corresponding symbols exported from
|
||||
compiled shared or static libraries are considered public interfaces and must
|
||||
comply with the API and ABI compatibility rules described in this section.
|
||||
|
||||
Public APIs must be backward compatible within a given major version. I.e. any
|
||||
valid user code that compiles and works with a given library version must still
|
||||
compile and work with any later version, as long as the major version number is
|
||||
unchanged. "Valid user code" here means code that is calling our APIs in a
|
||||
documented and/or intended manner and is not relying on any undefined behavior.
|
||||
Incrementing the major version may break backward compatibility, but only to the
|
||||
extent described in @ref{Major version bumps}.
|
||||
|
||||
We also guarantee backward ABI compatibility for shared and static libraries.
|
||||
I.e. it should be possible to replace a shared or static build of our library
|
||||
with a build of any later version (re-linking the user binary in the static
|
||||
case) without breaking any valid user binaries, as long as the major version
|
||||
number remains unchanged.
|
||||
|
||||
@subsection Adding new interfaces
|
||||
Any new public identifiers in installed headers are considered new API - this
|
||||
includes new functions, structs, macros, enum values, typedefs, new fields in
|
||||
existing structs, new installed headers, etc. Consider the following
|
||||
guidelines when adding new APIs.
|
||||
|
||||
@subsubheading Motivation
|
||||
While new APIs can be added relatively easily, changing or removing them is much
|
||||
harder due to the above-mentioned compatibility requirements. You should then
|
||||
consider carefully whether the functionality you are adding really needs to be
|
||||
exposed to our callers as new public API.
|
||||
|
||||
Your new API should have at least one well-established use case outside of the
|
||||
library that cannot be easily achieved with existing APIs. Every library in
|
||||
FFmpeg also has a defined scope - your new API must fit within it.
|
||||
|
||||
@subsubheading Replacing existing APIs
|
||||
If your new API is replacing an existing one, it should be strictly superior to
|
||||
it, so that the advantages of using the new API outweigh the cost to the
|
||||
callers of changing their code. After adding the new API you should then
|
||||
deprecate the old one and schedule it for removal, as described in
|
||||
@ref{Removing interfaces}.
|
||||
|
||||
If you deem an existing API deficient and want to fix it, the preferred approach
|
||||
in most cases is to add a differently-named replacement and deprecate the
|
||||
existing API rather than modify it. It is important to make the changes visible
|
||||
to our callers (e.g. through compile- or run-time deprecation warnings) and make
|
||||
it clear how to transition to the new API (e.g. in the Doxygen documentation or
|
||||
on the wiki).
|
||||
|
||||
@subsubheading API design
|
||||
The FFmpeg libraries are used by a variety of callers to perform a wide range of
|
||||
multimedia-related processing tasks. You should therefore - within reason - try
|
||||
to design your new API for the broadest feasible set of use cases and avoid
|
||||
unnecessarily limiting it to a specific type of callers (e.g. just media
|
||||
playback or just transcoding).
|
||||
|
||||
@subsubheading Consistency
|
||||
Check whether similar APIs already exist in FFmpeg. If they do, try to model
|
||||
your new addition on them to achieve better overall consistency.
|
||||
|
||||
The naming of your new identifiers should follow the @ref{Naming conventions}
|
||||
and be aligned with other similar APIs, if applicable.
|
||||
|
||||
@subsubheading Extensibility
|
||||
You should also consider how your API might be extended in the future in a
|
||||
backward-compatible way. If you are adding a new struct @code{AVFoo}, the
|
||||
standard approach is requiring the caller to always allocate it through a
|
||||
constructor function, typically named @code{av_foo_alloc()}. This way new fields
|
||||
may be added to the end of the struct without breaking ABI compatibility.
|
||||
Typically you will also want a destructor - @code{av_foo_free(AVFoo**)} that
|
||||
frees the indirectly supplied object (and its contents, if applicable) and
|
||||
writes @code{NULL} to the supplied pointer, thus eliminating the potential
|
||||
dangling pointer in the caller's memory.
|
||||
|
||||
If you are adding new functions, consider whether it might be desirable to tweak
|
||||
their behavior in the future - you may want to add a flags argument, even though
|
||||
it would be unused initially.
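A rough sketch of the described constructor/destructor pair, reusing the generic @code{AVFoo} name from above (not a real API):
@example
#include "libavutil/mem.h"

AVFoo *av_foo_alloc(void)
@{
    AVFoo *foo = av_mallocz(sizeof(*foo));
    if (!foo)
        return NULL;
    /* set non-zero defaults here; new members can later be appended to the
     * struct without breaking ABI, because callers never allocate it themselves */
    return foo;
@}

void av_foo_free(AVFoo **pfoo)
@{
    if (!pfoo || !*pfoo)
        return;
    /* release any contents owned by (*pfoo) here */
    av_freep(pfoo);   /* frees the struct and sets the caller's pointer to NULL */
@}
@end example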
|
||||
|
||||
@subsubheading Documentation
|
||||
All new APIs must be documented as Doxygen-formatted comments above the
|
||||
identifiers you add to the public headers. You should also briefly mention the
|
||||
change in @file{doc/APIchanges}.
|
||||
|
||||
@subsubheading Bump the version
|
||||
Backward-incompatible API or ABI changes require incrementing (bumping) the
|
||||
major version number, as described in @ref{Major version bumps}. Major
|
||||
bumps are significant events that happen on a schedule - so if your change
|
||||
strictly requires one, you should add it under @code{#if} preprocessor guards that
|
||||
disable it until the next major bump happens.
|
||||
|
||||
New APIs that can be added without breaking API or ABI compatibility require
|
||||
bumping the minor version number.
|
||||
|
||||
Incrementing the third (micro) version component means a noteworthy binary
|
||||
compatible change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
@anchor{Removing interfaces}
|
||||
@subsection Removing interfaces
|
||||
Due to the above-mentioned compatibility guarantees, removing APIs is an involved
|
||||
process that should only be undertaken with good reason. Typically a deficient,
|
||||
restrictive, or otherwise inadequate API is replaced by a superior one, though
|
||||
it does at times happen that we remove an API without any replacement (e.g. when
|
||||
the feature it provides is deemed not worth the maintenance effort, out of scope
|
||||
of the project, fundamentally flawed, etc.).
|
||||
|
||||
The removal has two steps - first the API is deprecated and scheduled for
|
||||
removal, but remains present and functional. The second step is actually
|
||||
removing the API - this is described in @ref{Major version bumps}.
|
||||
|
||||
To deprecate an API you should signal to our users that they should stop using
|
||||
it. E.g. if you intend to remove struct members or functions, you should mark
|
||||
them with @code{attribute_deprecated}. When this cannot be done, it may be
|
||||
possible to detect the use of the deprecated API at runtime and print a warning
|
||||
(though take care not to print it too often). You should also document the
|
||||
deprecation (and the replacement, if applicable) in the relevant Doxygen
|
||||
documentation block.
|
||||
|
||||
Finally, you should define a deprecation guard along the lines of
|
||||
@code{#define FF_API_<FOO> (LIBAVBAR_VERSION_MAJOR < XX)} (where XX is the major
|
||||
version in which the API will be removed) in @file{libavbar/version_major.h}
|
||||
(@file{version.h} in case of @code{libavutil}). Then wrap all uses of the
|
||||
deprecated API in @code{#if FF_API_<FOO> .... #endif}, so that the code will
|
||||
automatically get disabled once the major version reaches XX. You can also use
|
||||
@code{FF_DISABLE_DEPRECATION_WARNINGS} and @code{FF_ENABLE_DEPRECATION_WARNINGS}
|
||||
to suppress compiler deprecation warnings inside these guards. You should test
|
||||
that the code compiles and works with the guard macro evaluating to both true
|
||||
and false.
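A condensed sketch of the whole mechanism, assuming a field is to be removed from a hypothetical libavbar in major version 60 (all names are illustrative):
@example
/* libavbar/version_major.h */
#define FF_API_OLD_FIELD (LIBAVBAR_VERSION_MAJOR < 60)

/* public header */
typedef struct AVBarContext @{
#if FF_API_OLD_FIELD
    attribute_deprecated
    int old_field;          /* deprecated, use new_field instead */
#endif
    int new_field;
@} AVBarContext;

/* use site inside the library */
#if FF_API_OLD_FIELD
FF_DISABLE_DEPRECATION_WARNINGS
    ctx->old_field = value; /* compiled out automatically once the major version reaches 60 */
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@end example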
|
||||
|
||||
@anchor{Major version bumps}
|
||||
@subsection Major version bumps
|
||||
A major version bump signifies an API and/or ABI compatibility break. To reduce
|
||||
the negative effects on our callers, who are required to adapt their code,
|
||||
backward-incompatible changes during a major bump should be limited to:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Removing previously deprecated APIs.
|
||||
|
||||
@item
|
||||
Performing ABI- but not API-breaking changes, like reordering struct contents.
|
||||
@end itemize
|
||||
@subheading Check untrusted input properly.
|
||||
Never write to unallocated memory, never write over the end of arrays,
|
||||
always check values read from some untrusted source before using them
|
||||
as array index or other risky things.
|
||||
|
||||
@section Documentation/Other
|
||||
@subheading Subscribe to the ffmpeg-devel mailing list.
|
||||
@@ -604,6 +419,35 @@ finding a new maintainer and also don't forget to update the @file{MAINTAINERS}
|
||||
|
||||
We think our rules are not too hard. If you have comments, contact us.
|
||||
|
||||
@chapter Code of conduct
|
||||
|
||||
Be friendly and respectful towards others and third parties.
|
||||
Treat others the way you yourself want to be treated.
|
||||
|
||||
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
|
||||
Different opinions and interpretations help the project.
|
||||
Looking at issues from a different perspective assists development.
|
||||
|
||||
Do not assume malice for things that can be attributed to incompetence. Even if
|
||||
it is malice, it's rarely good to start with that as the initial assumption.
|
||||
|
||||
Stay friendly even if someone acts contrarily. Everyone has a bad day
|
||||
once in a while.
|
||||
If you yourself have a bad day or are angry then try to take a break and reply
|
||||
once you are calm and without anger if you have to.
|
||||
|
||||
Try to help other team members and cooperate if you can.
|
||||
|
||||
The goal of software development is to create technical excellence, not for any
|
||||
individual to be better and "win" against the others. Large software projects
|
||||
are only possible and successful through teamwork.
|
||||
|
||||
If someone struggles, do not put them down. Give them a helping hand
|
||||
instead and point them in the right direction.
|
||||
|
||||
Finally, keep in mind the immortal words of Bill and Ted,
|
||||
"Be excellent to each other."
|
||||
|
||||
@anchor{Submitting patches}
|
||||
@chapter Submitting patches
|
||||
|
||||
@@ -644,27 +488,6 @@ patch is inline or attached per mail.
|
||||
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show up, its MIME type
was likely wrong.
|
||||
|
||||
@subheading How to setup git send-email?
|
||||
|
||||
Please see @url{https://git-send-email.io/}.
|
||||
For gmail additionally see @url{https://shallowsky.com/blog/tech/email/gmail-app-passwds.html}.
|
||||
|
||||
@subheading Sending patches from email clients
|
||||
Using @code{git send-email} might not be desirable for everyone. The
|
||||
following trick allows you to send patches via email clients in a safe
|
||||
way. It has been tested with Outlook and Thunderbird (with X-Unsent
|
||||
extension) and might work with other applications.
|
||||
|
||||
Create your patch like this:
|
||||
|
||||
@verbatim
|
||||
git format-patch -s -o "outputfolder" --add-header "X-Unsent: 1" --suffix .eml --to ffmpeg-devel@ffmpeg.org -1 1a2b3c4d
|
||||
@end verbatim
|
||||
|
||||
Now you'll just need to open the eml file with the email application
|
||||
and execute 'Send'.
|
||||
|
||||
@subheading Reviews
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
@@ -796,7 +619,7 @@ If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note please do not attach samples >100k to mails but rather provide a
|
||||
URL, you can upload to @url{https://streams.videolan.org/upload/}.
|
||||
URL, you can upload to ftp://upload.ffmpeg.org.
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary about what the patch does change?
|
||||
@@ -825,14 +648,16 @@ Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
@item
|
||||
Consider adding a regression test for your code. All new modules
|
||||
should be covered by tests. That includes demuxers, muxers, decoders, encoders
|
||||
filters, bitstream filters, and parsers. If it's not possible to do that, add
an explanation why to your patchset; it's OK not to test if there's a reason.
|
||||
Consider adding a regression test for your code.
|
||||
|
||||
@item
|
||||
If you added YASM code, please check that things still work with --disable-yasm.
|
||||
|
||||
@item
|
||||
Make sure you check the return values of functions and return appropriate
|
||||
error codes. Especially memory allocation functions like @code{av_malloc()}
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
|
||||
@item
|
||||
Test your code with valgrind and/or Address Sanitizer to ensure it's free
of leaks, out-of-array accesses, etc.
|
||||
@@ -882,8 +707,6 @@ accordingly].
|
||||
|
||||
@section Adding files to the fate-suite dataset
|
||||
|
||||
If you need a sample uploaded, send a mail to samples-request.
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
|
||||
specific test then the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
@@ -933,25 +756,6 @@ In case you need finer control over how valgrind is invoked, use the
|
||||
@code{--target-exec='valgrind <your_custom_valgrind_options>} option in
|
||||
your configure line instead.
|
||||
|
||||
@anchor{Maintenance}
|
||||
@chapter Maintenance process
|
||||
|
||||
@anchor{MAINTAINERS}
|
||||
@section MAINTAINERS
|
||||
|
||||
The developers maintaining each part of the codebase are listed in @file{MAINTAINERS}.
|
||||
Being listed in @file{MAINTAINERS} gives one the right to have git write access to
|
||||
the specific repository.
|
||||
|
||||
@anchor{Becoming a maintainer}
|
||||
@section Becoming a maintainer
|
||||
|
||||
People add themselves to @file{MAINTAINERS} by sending a patch like any other code
|
||||
change. These get reviewed by the community like any other patch. It is expected
|
||||
that, if someone has an objection to a new maintainer, she is willing to object
|
||||
in public with her full name and is willing to take over maintainership for the area.
|
||||
|
||||
|
||||
@anchor{Release process}
|
||||
@chapter Release process
|
||||
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
OUT_DIR="${1}"
|
||||
SRC_DIR="${2}"
|
||||
DOXYFILE="${3}"
|
||||
DOXYGEN="${4}"
|
||||
DOXYFILE="${2}"
|
||||
DOXYGEN="${3}"
|
||||
|
||||
shift 4
|
||||
|
||||
cd ${SRC_DIR}
|
||||
shift 3
|
||||
|
||||
if [ -e "VERSION" ]; then
|
||||
VERSION=`cat "VERSION"`
|
||||
|
||||
doc/encoders.texi (1745 changes): file diff suppressed because it is too large
doc/examples/.gitignore (5 changes, vendored)
@@ -1,4 +1,4 @@
|
||||
/avio_list_dir
|
||||
/avio_dir_cmd
|
||||
/avio_reading
|
||||
/decode_audio
|
||||
/decode_video
|
||||
@@ -20,6 +20,3 @@
|
||||
/scaling_video
|
||||
/transcode_aac
|
||||
/transcoding
|
||||
/vaapi_encode
|
||||
/vaapi_transcode
|
||||
/qsv_transcode
|
||||
|
||||
@@ -1,27 +1,26 @@
|
||||
EXAMPLES-$(CONFIG_AVIO_HTTP_SERVE_FILES) += avio_http_serve_files
|
||||
EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
|
||||
EXAMPLES-$(CONFIG_AVIO_READ_CALLBACK_EXAMPLE) += avio_read_callback
|
||||
EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
|
||||
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_AUDIO_EXAMPLE) += decode_filter_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_VIDEO_EXAMPLE) += decode_filter_video
|
||||
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
|
||||
EXAMPLES-$(CONFIG_DEMUX_DECODE_EXAMPLE) += demux_decode
|
||||
EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
|
||||
EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
|
||||
EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
|
||||
EXAMPLES-$(CONFIG_HW_DECODE_EXAMPLE) += hw_decode
|
||||
EXAMPLES-$(CONFIG_MUX_EXAMPLE) += mux
|
||||
EXAMPLES-$(CONFIG_QSV_DECODE_EXAMPLE) += qsv_decode
|
||||
EXAMPLES-$(CONFIG_REMUX_EXAMPLE) += remux
|
||||
EXAMPLES-$(CONFIG_RESAMPLE_AUDIO_EXAMPLE) += resample_audio
|
||||
EXAMPLES-$(CONFIG_SCALE_VIDEO_EXAMPLE) += scale_video
|
||||
EXAMPLES-$(CONFIG_SHOW_METADATA_EXAMPLE) += show_metadata
|
||||
EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_EXAMPLE) += transcode
|
||||
EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
|
||||
EXAMPLES-$(CONFIG_VAAPI_ENCODE_EXAMPLE) += vaapi_encode
|
||||
EXAMPLES-$(CONFIG_VAAPI_TRANSCODE_EXAMPLE) += vaapi_transcode
|
||||
EXAMPLES-$(CONFIG_QSV_TRANSCODE_EXAMPLE) += qsv_transcode
|
||||
|
||||
EXAMPLES := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
EXAMPLES_G := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
|
||||
@@ -38,7 +37,7 @@ $(EXAMPLES_G): %$(PROGSSUF)_g$(EXESUF): %.o
|
||||
examples: $(EXAMPLES)
|
||||
|
||||
$(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.o): | doc/examples
|
||||
OUTDIRS += doc/examples
|
||||
OBJDIRS += doc/examples
|
||||
|
||||
DOXY_INPUT += $(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.c)
|
||||
|
||||
|
||||
@@ -11,40 +11,33 @@ CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
# missing the following targets, since they need special options in the FFmpeg build:
|
||||
# qsv_decode
|
||||
# qsv_transcode
|
||||
# vaapi_encode
|
||||
# vaapi_transcode
|
||||
|
||||
EXAMPLES=\
|
||||
avio_http_serve_files \
|
||||
avio_list_dir \
|
||||
avio_read_callback \
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
avio_reading \
|
||||
decode_audio \
|
||||
decode_filter_audio \
|
||||
decode_filter_video \
|
||||
decode_video \
|
||||
demux_decode \
|
||||
demuxing_decoding \
|
||||
encode_audio \
|
||||
encode_video \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
http_multiclient \
|
||||
hw_decode \
|
||||
mux \
|
||||
remux \
|
||||
resample_audio \
|
||||
scale_video \
|
||||
show_metadata \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcode
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
encode_audio: LDLIBS += -lm
|
||||
mux: LDLIBS += -lm
|
||||
resample_audio: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
|
||||
@@ -7,10 +7,8 @@ that you have them installed and working on your system.
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and run:
|
||||
make -f Makefile.example
|
||||
|
||||
It will link to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
@@ -22,4 +20,4 @@ examples using "make examplesclean"
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make -f Makefile.example
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
||||
|
||||
@@ -20,13 +20,6 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext list directory API usage example
|
||||
* @example avio_list_dir.c
|
||||
*
|
||||
* Show how to list directories through the libavformat AVIOContext API.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
@@ -109,15 +102,38 @@ static int list_op(const char *input_dir)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s input_dir\n"
|
||||
"API example program to show how to list files in directory "
|
||||
"accessed through AVIOContext.\n", program_name);
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
@@ -129,7 +145,32 @@ int main(int argc, char *argv[])
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
ret = list_op(argv[1]);
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avformat_network_deinit();
|
||||
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext read callback API usage example
|
||||
* @example avio_read_callback.c
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
@@ -95,7 +96,6 @@ int main(int argc, char *argv[])
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
av_freep(&avio_ctx_buffer);
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
@@ -117,12 +117,11 @@ int main(int argc, char *argv[])
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx)
|
||||
if (avio_ctx) {
|
||||
av_freep(&avio_ctx->buffer);
|
||||
avio_context_free(&avio_ctx);
|
||||
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec audio decoding API usage example
|
||||
* @example decode_audio.c
|
||||
* @file
|
||||
* audio decoding with libavcodec API example
|
||||
*
|
||||
* Decode data from an MP2 input file and generate a raw audio file to
|
||||
* be played with ffplay.
|
||||
* @example decode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -40,35 +39,6 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
FILE *outfile)
|
||||
{
|
||||
@@ -98,7 +68,7 @@ static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
exit(1);
|
||||
}
|
||||
for (i = 0; i < frame->nb_samples; i++)
|
||||
for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++)
|
||||
for (ch = 0; ch < dec_ctx->channels; ch++)
|
||||
fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
}
|
||||
@@ -116,9 +86,6 @@ int main(int argc, char **argv)
|
||||
size_t data_size;
|
||||
AVPacket *pkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
enum AVSampleFormat sfmt;
|
||||
int n_channels = 0;
|
||||
const char *fmt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@@ -128,10 +95,6 @@ int main(int argc, char **argv)
|
||||
outfilename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1); /* or proper cleanup and returning */
|
||||
}
|
||||
|
||||
/* find the MPEG audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
@@ -165,7 +128,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
fprintf(stderr, "Could not open %s\n", outfilename);
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -209,26 +172,6 @@ int main(int argc, char **argv)
|
||||
pkt->size = 0;
|
||||
decode(c, pkt, decoded_frame, outfile);
|
||||
|
||||
/* print output PCM information, because raw PCM carries no metadata */
|
||||
sfmt = c->sample_fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
}
|
||||
|
||||
n_channels = c->ch_layout.nb_channels;
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, c->sample_rate,
|
||||
outfilename);
|
||||
end:
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavcodec video decoding API usage example
 * @example decode_video.c
 * @file
 * video decoding with libavcodec API example
 *
 * Read from an MPEG1 video file, decode frames, and generate PGM images as
 * output.
 * @example decode_video.c
 */
#include <stdio.h>
|
||||
@@ -42,7 +41,7 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"wb");
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
@@ -70,12 +69,12 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
printf("saving frame %3"PRId64"\n", dec_ctx->frame_num);
|
||||
printf("saving frame %3d\n", dec_ctx->frame_number);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), "%s-%"PRId64, filename, dec_ctx->frame_num);
|
||||
snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
}
|
||||
@@ -93,12 +92,10 @@ int main(int argc, char **argv)
|
||||
uint8_t *data;
|
||||
size_t data_size;
|
||||
int ret;
|
||||
int eof;
|
||||
AVPacket *pkt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n"
|
||||
"And check your input file is encoded by mpeg1video please.\n", argv[0]);
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
exit(0);
|
||||
}
|
||||
filename = argv[1];
|
||||
@@ -152,16 +149,15 @@ int main(int argc, char **argv)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
do {
|
||||
while (!feof(f)) {
|
||||
/* read raw data from the input file */
|
||||
data_size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (ferror(f))
|
||||
if (!data_size)
|
||||
break;
|
||||
eof = !data_size;
|
||||
|
||||
/* use the parser to split the data into frames */
|
||||
data = inbuf;
|
||||
while (data_size > 0 || eof) {
|
||||
while (data_size > 0) {
|
||||
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
|
||||
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
|
||||
if (ret < 0) {
|
||||
@@ -173,10 +169,8 @@ int main(int argc, char **argv)
|
||||
|
||||
if (pkt->size)
|
||||
decode(c, frame, pkt, outfilename);
|
||||
else if (eof)
|
||||
break;
|
||||
}
|
||||
} while (!eof);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
decode(c, frame, NULL, outfilename);
|
||||
|
||||
@@ -21,18 +21,17 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavformat and libavcodec demuxing and decoding API usage example
 * @example demux_decode.c
 * @file
 * Demuxing and decoding example.
 *
 * Show how to use the libavformat and libavcodec API to demux and decode audio
 * and video data. Write the output as raw audio and raw video files to be played by
 * ffplay.
 * Show how to use the libavformat and libavcodec API to demux and
 * decode audio and video data.
 * @example demuxing_decoding.c
 */
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -52,95 +51,99 @@ static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket *pkt = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
/* Enable or disable frame reference counting. You are not supposed to support
|
||||
* both paths in your application but pick the one most appropriate to your
|
||||
* needs. Look for the use of refcount in this example to see what are the
|
||||
* differences of API usage between them. */
|
||||
static int refcount = 0;
|
||||
|
||||
printf("video_frame n:%d\n",
|
||||
video_frame_count++);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy2(video_dst_data, video_dst_linesize,
|
||||
frame->data, frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
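As the comment above notes, only the first plane is written for planar sample formats. A hedged sketch of interleaving the frame with libswresample instead, using the pre-5.1 uint64_t channel_layout fields that match the n4.0 side of this diff; the helper name is illustrative and error handling is trimmed:

#include <libswresample/swresample.h>

/* Convert one decoded (possibly planar) frame to packed S16 before writing.
 * Assumes dec_ctx->channel_layout is set and the sample rate is unchanged. */
static int write_packed(AVCodecContext *dec_ctx, AVFrame *frame, FILE *out)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
            dec_ctx->channel_layout, AV_SAMPLE_FMT_S16, dec_ctx->sample_rate,
            dec_ctx->channel_layout, dec_ctx->sample_fmt, dec_ctx->sample_rate,
            0, NULL);
    uint8_t *out_buf = NULL;
    int out_linesize, n;

    if (!swr || swr_init(swr) < 0)
        return -1;
    av_samples_alloc(&out_buf, &out_linesize, dec_ctx->channels,
                     frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
    n = swr_convert(swr, &out_buf, frame->nb_samples,
                    (const uint8_t **)frame->extended_data, frame->nb_samples);
    if (n > 0)
        fwrite(out_buf, 1, n * dec_ctx->channels * 2, out); /* 2 bytes per S16 sample */
    av_freep(&out_buf);
    swr_free(&swr);
    return n < 0 ? n : 0;
}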
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
*got_frame = 0;
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
if (*got_frame) {
|
||||
|
||||
av_frame_unref(frame);
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
/* If we use frame reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && refcount)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
@@ -148,7 +151,8 @@ static int open_codec_context(int *stream_idx,
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
@@ -182,8 +186,9 @@ static int open_codec_context(int *stream_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
|
||||
/* Init the decoders, with or without reference counting */
|
||||
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -225,17 +230,24 @@ static int get_format_from_sample_fmt(const char **fmt,
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
|
||||
refcount = 1;
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
@@ -301,12 +313,10 @@ int main (int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
@@ -314,23 +324,24 @@ int main (int argc, char **argv)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, pkt);
|
||||
else if (pkt->stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, pkt);
|
||||
av_packet_unref(pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_packet_unref(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
@@ -343,7 +354,7 @@ int main (int argc, char **argv)
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->ch_layout.nb_channels;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
@@ -372,7 +383,6 @@ end:
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_packet_free(&pkt);
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavcodec encoding audio API usage examples
 * @example encode_audio.c
 * @file
 * audio encoding with libavcodec API example.
 *
 * Generate a synthetic audio signal and encode it to an output MP2 file.
 * @example encode_audio.c
 */
#include <stdint.h>
@@ -70,25 +70,26 @@ static int select_sample_rate(const AVCodec *codec)
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst)
|
||||
static int select_channel_layout(const AVCodec *codec)
|
||||
{
|
||||
const AVChannelLayout *p, *best_ch_layout;
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->ch_layouts)
|
||||
return av_channel_layout_copy(dst, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->ch_layouts;
|
||||
while (p->nb_channels) {
|
||||
int nb_channels = p->nb_channels;
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = p;
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return av_channel_layout_copy(dst, best_ch_layout);
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
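Much of the churn in this comparison is the switch between the AVChannelLayout API on the release/6.x side and the uint64_t channel-mask fields on the n4.0.3 side. A minimal sketch of the two ways to put a stereo layout on a codec context c; the snippets target different FFmpeg versions and are not meant to coexist in one program:

/* FFmpeg >= 5.1: AVChannelLayout API (the release/6.x side of this diff) */
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
/* the channel count is then c->ch_layout.nb_channels */

/* FFmpeg <= 4.x: uint64_t channel mask (the n4.0.3 side of this diff) */
c->channel_layout = AV_CH_LAYOUT_STEREO;
c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);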
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
|
||||
@@ -163,9 +164,8 @@ int main(int argc, char **argv)
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
ret = select_channel_layout(codec, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
@@ -195,9 +195,7 @@ int main(int argc, char **argv)
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
ret = av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* allocate the data buffers */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
@@ -220,7 +218,7 @@ int main(int argc, char **argv)
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->ch_layout.nb_channels; k++)
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavcodec encoding video API usage example
 * @example encode_video.c
 * @file
 * video encoding with libavcodec API example
 *
 * Generate synthetic video data and encode it to an output file.
 * @example encode_video.c
 */
#include <stdio.h>
@@ -145,7 +145,7 @@ int main(int argc, char **argv)
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
ret = av_frame_get_buffer(frame, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate the video frame data\n");
|
||||
exit(1);
|
||||
@@ -155,25 +155,12 @@ int main(int argc, char **argv)
|
||||
for (i = 0; i < 25; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
/* Make sure the frame data is writable.
|
||||
On the first round, the frame is fresh from av_frame_get_buffer()
|
||||
and therefore we know it is writable.
|
||||
But on the next rounds, encode() will have called
|
||||
avcodec_send_frame(), and the codec may have kept a reference to
|
||||
the frame in its internal structures, that makes the frame
|
||||
unwritable.
|
||||
av_frame_make_writable() checks that and allocates a new buffer
|
||||
for the frame only if necessary.
|
||||
*/
|
||||
/* make sure the frame data is writable */
|
||||
ret = av_frame_make_writable(frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* Prepare a dummy image.
|
||||
In real code, this is where you would have your own logic for
|
||||
filling the frame. FFmpeg does not care what you put in the
|
||||
frame.
|
||||
*/
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
@@ -198,14 +185,8 @@ int main(int argc, char **argv)
|
||||
/* flush the encoder */
|
||||
encode(c, NULL, pkt, f);
|
||||
|
||||
/* Add sequence end code to have a real MPEG file.
   It only makes sense because this tiny example writes packets
   directly. This is called "elementary stream" and only works for some
   codecs. To create a valid file, you usually need to write packets
   into a proper file format or protocol; see mux.c.
 */
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
/* add sequence end code to have a real MPEG file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_free_context(&c);
|
||||
|
||||
@@ -21,16 +21,7 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
 * @file libavcodec motion vectors extraction API usage example
 * @example extract_mvs.c
 *
 * Read from input file, decode video stream and print a motion vectors
 * representation to stdout.
 */
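The extra columns printed on the 6.x side come from AVMotionVector's motion_x, motion_y and motion_scale fields, which (as far as the side-data API documents) express the vector in fractional 1/motion_scale pel units. A hedged sketch of reading the side data and converting to pixels:

AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
if (sd) {
    const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
    for (size_t i = 0; i < sd->size / sizeof(*mvs); i++) {
        const AVMotionVector *mv = &mvs[i];
        /* motion_x/motion_y are in 1/motion_scale pel units (motion_scale > 0) */
        double dx = mv->motion_x / (double)mv->motion_scale;
        double dy = mv->motion_y / (double)mv->motion_scale;
        /* (mv->dst_x + dx, mv->dst_y + dy) approximates the predicted source position */
    }
}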
#include <libavutil/motion_vector.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -69,11 +60,10 @@ static int decode_packet(const AVPacket *pkt)
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64",%4d,%4d,%4d\n",
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags,
|
||||
mv->motion_x, mv->motion_y, mv->motion_scale);
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
@@ -88,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
|
||||
@@ -114,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
ret = avcodec_open2(dec_ctx, dec, &opts);
|
||||
av_dict_free(&opts);
|
||||
if (ret < 0) {
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -133,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
@@ -168,20 +156,13 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags,motion_x,motion_y,motion_scale\n");
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(pkt);
|
||||
av_packet_unref(pkt);
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(&pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -193,6 +174,5 @@ end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_packet_free(&pkt);
|
||||
return ret < 0;
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavfilter audio filtering API usage example
 * @example filter_audio.c
 * @file
 * libavfilter API usage example.
 *
 * This example will generate a sine wave audio, pass it through a simple filter
 * chain, and then compute the MD5 checksum of the output data.
 * @example filter_audio.c
 * This example will generate a sine wave audio,
 * pass it through a simple filter chain, and then compute the MD5 checksum of
 * the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
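As a hedged alternative to creating and linking the volume and aformat filters by hand, roughly the same inner chain can be described as a string and expanded with avfilter_graph_parse_ptr(). The sketch below assumes the filter_graph, abuffer_ctx and abuffersink_ctx contexts from the example's init_filter_graph() and is illustrative only:

AVFilterInOut *outputs = avfilter_inout_alloc();  /* open output pad of abuffer     */
AVFilterInOut *inputs  = avfilter_inout_alloc();  /* open input pad of abuffersink  */

outputs->name       = av_strdup("in");
outputs->filter_ctx = abuffer_ctx;
outputs->pad_idx    = 0;
outputs->next       = NULL;

inputs->name        = av_strdup("out");
inputs->filter_ctx  = abuffersink_ctx;
inputs->pad_idx     = 0;
inputs->next        = NULL;

err = avfilter_graph_parse_ptr(filter_graph,
        "volume=0.90,aformat=sample_fmts=s16:sample_rates=44100:channel_layouts=stereo",
        &inputs, &outputs, NULL);
if (err >= 0)
    err = avfilter_graph_config(filter_graph, NULL);
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);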
@@ -53,7 +55,7 @@
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
@@ -98,7 +100,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
@@ -152,8 +154,9 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
@@ -212,7 +215,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = frame->ch_layout.nb_channels;
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
@@ -245,7 +248,7 @@ static int get_input(AVFrame *frame, int frame_num)
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
|
||||
@@ -23,11 +23,9 @@
|
||||
*/
|
||||
|
||||
/**
 * @file audio decoding and filtering usage example
 * @example decode_filter_audio.c
 *
 * Demux, decode and filter audio input file, generate a raw audio
 * file to be played with ffplay.
 * @file
 * API example for audio decoding and filtering
 * @example filtering_audio.c
 */
#include <unistd.h>
|
||||
@@ -36,7 +34,6 @@
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
@@ -51,8 +48,8 @@ static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -77,6 +74,7 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
@@ -96,6 +94,7 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
@@ -107,13 +106,12 @@ static int init_filters(const char *filters_descr)
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
ret = snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=",
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt));
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, args + ret, sizeof(args) - ret);
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
@@ -136,7 +134,7 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
@@ -187,7 +185,7 @@ static int init_filters(const char *filters_descr)
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_channel_layout_describe(&outlink->ch_layout, args, sizeof(args));
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
@@ -202,7 +200,7 @@ end:
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * frame->ch_layout.nb_channels;
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
@@ -217,12 +215,12 @@ static void print_frame(const AVFrame *frame)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = av_packet_alloc();
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (!packet || !frame || !filt_frame) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
@@ -237,11 +235,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -277,13 +275,12 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_packet_free(&packet);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
@@ -24,13 +24,11 @@
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example decode_filter_video.c
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
@@ -53,8 +51,8 @@ static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -79,6 +77,7 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
@@ -210,20 +209,16 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet;
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
packet = av_packet_alloc();
|
||||
if (!frame || !filt_frame || !packet) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -234,11 +229,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -253,28 +248,30 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
if (ret >= 0) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
@@ -282,7 +279,6 @@ end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
av_packet_free(&packet);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavformat multi-client network API usage example
 * @example avio_http_serve_files.c
 * @file
 * libavformat multi-client network API usage example.
 *
 * Serve a file without decoding or demuxing it over the HTTP protocol. Multiple
 * clients can connect and will receive the same file.
 * @example http_multiclient.c
 * This example will serve a file without decoding or demuxing it over http.
 * Multiple clients can connect and will receive the same file.
 */
#include <libavformat/avformat.h>
|
||||
@@ -4,31 +4,30 @@
|
||||
*
|
||||
* HW Acceleration API (video decoding) decode sample
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
 * @file HW-accelerated decoding API usage example
 * @example hw_decode.c
 * @file
 * HW-Accelerated decoding example.
 *
 * Perform HW-accelerated decoding with output frames from HW video
 * surfaces.
 * @example hw_decode.c
 * This example shows how to do HW-accelerated decoding with output
 * frames from the HW video surfaces.
 */
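decode_write(), referenced further down but not shown in this hunk, essentially copies each decoded frame out of the HW surface before dumping it. A reduced sketch of that step, with hw_pix_fmt, frame and ret assumed from the example and error paths trimmed:

AVFrame *sw_frame = av_frame_alloc();
AVFrame *out      = frame;

if (frame->format == hw_pix_fmt) {
    /* frame->data[] points into GPU memory; copy it into a plain system-memory frame */
    if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0)
        fprintf(stderr, "Error transferring the data to system memory\n");
    else
        out = sw_frame;
}
/* ... write out->data[] / out->linesize[] to the dump file, then av_frame_free(&sw_frame) ... */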
#include <stdio.h>
|
||||
@@ -151,8 +150,8 @@ int main(int argc, char *argv[])
|
||||
int video_stream, ret;
|
||||
AVStream *video = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVPacket *packet = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVPacket packet;
|
||||
enum AVHWDeviceType type;
|
||||
int i;
|
||||
|
||||
@@ -171,12 +170,6 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
}
|
||||
|
||||
packet = av_packet_alloc();
|
||||
if (!packet) {
|
||||
fprintf(stderr, "Failed to allocate AVPacket\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
|
||||
@@ -218,6 +211,7 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
|
||||
decoder_ctx->get_format = get_hw_format;
|
||||
av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
if (hw_decoder_init(decoder_ctx, type) < 0)
|
||||
return -1;
|
||||
@@ -228,25 +222,27 @@ int main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
/* open the file to dump raw data */
|
||||
output_file = fopen(argv[3], "w+b");
|
||||
output_file = fopen(argv[3], "w+");
|
||||
|
||||
/* actual decoding and dump the raw data */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(input_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(input_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == packet->stream_index)
|
||||
ret = decode_write(decoder_ctx, packet);
|
||||
if (video_stream == packet.stream_index)
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_write(decoder_ctx, NULL);
|
||||
packet.data = NULL;
|
||||
packet.size = 0;
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
av_packet_unref(&packet);
|
||||
|
||||
if (output_file)
|
||||
fclose(output_file);
|
||||
av_packet_free(&packet);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avformat_close_input(&input_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
|
||||
@@ -21,10 +21,9 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavformat metadata extraction API usage example
 * @example show_metadata.c
 *
 * Show metadata from an input file.
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example metadata.c
 */
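Both iteration idioms in this hunk (av_dict_iterate() on the 6.x side, av_dict_get() with AV_DICT_IGNORE_SUFFIX on the n4.0 side) walk the same dictionary; writing a tag uses the same API. A one-line sketch, purely illustrative:

/* metadata can be written with the same dictionary API, e.g. before muxing */
av_dict_set(&fmt_ctx->metadata, "comment", "processed by the metadata example", 0);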
#include <stdio.h>
|
||||
@@ -35,7 +34,7 @@
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
const AVDictionaryEntry *tag = NULL;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
@@ -48,12 +47,7 @@ int main (int argc, char **argv)
|
||||
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
|
||||
return ret;
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
while ((tag = av_dict_iterate(fmt_ctx->metadata, tag)))
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
 * @file libavformat muxing API usage example
 * @example mux.c
 * @file
 * libavformat API example.
 *
 * Generate a synthetic audio and video signal and mux them to a media file in
 * any supported libavformat format. The default codecs are used.
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */
#include <stdlib.h>
|
||||
@@ -38,7 +39,6 @@
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
@@ -61,8 +61,6 @@ typedef struct OutputStream {
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
AVPacket *tmp_pkt;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
@@ -80,50 +78,20 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame, AVPacket *pkt)
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(c, pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, c->time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
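The rewritten write_frame() above leans on av_interleaved_write_frame() taking ownership of the packet and resetting it. A small sketch of the difference from av_write_frame(), where the caller keeps ownership (fmt_ctx and pkt as in the example):

/* av_interleaved_write_frame(): pkt is consumed and reset by the call */
ret = av_interleaved_write_frame(fmt_ctx, pkt);

/* av_write_frame(): the packet is left untouched, so unreference it yourself */
ret = av_write_frame(fmt_ctx, pkt);
av_packet_unref(pkt);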
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
const AVCodec **codec,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
@@ -137,12 +105,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->tmp_pkt = av_packet_alloc();
|
||||
if (!ost->tmp_pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, NULL);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
@@ -169,7 +131,16 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
@@ -199,7 +170,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
@@ -214,22 +185,25 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
const AVChannelLayout *channel_layout,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
av_channel_layout_copy(&frame->ch_layout, channel_layout);
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
if (av_frame_get_buffer(frame, 0) < 0) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -238,8 +212,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
@@ -268,9 +241,9 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
@@ -281,25 +254,25 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout (ost->swr_ctx, "in_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_chlayout (ost->swr_ctx, "out_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
@@ -312,12 +285,12 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
|
||||
for (i = 0; i < ost->enc->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
@@ -336,19 +309,23 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples;
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
@@ -372,37 +349,51 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
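Note: in the newer revision the explicit avcodec_encode_audio2() call is replaced by write_frame(), which drives the send/receive encoder API. A rough sketch of the loop that sits behind it, with hypothetical names and error handling trimmed (not the example's actual write_frame()):

    /* Encode one frame (or flush with frame == NULL) and mux every packet the
     * encoder produces. Assumes enc, st, oc and pkt are already initialized. */
    static int encode_and_mux(AVFormatContext *oc, AVCodecContext *enc,
                              AVStream *st, AVFrame *frame, AVPacket *pkt)
    {
        int ret = avcodec_send_frame(enc, frame);
        if (ret < 0)
            return ret;

        while (ret >= 0) {
            ret = avcodec_receive_packet(enc, pkt);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                 /* needs more input, or fully flushed */
            if (ret < 0)
                return ret;               /* real encoding error */

            /* convert the packet timestamps to the stream time base and mux */
            av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
            pkt->stream_index = st->index;
            ret = av_interleaved_write_frame(oc, pkt);
        }
        return ret;
    }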
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *frame;
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame)
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
frame->format = pix_fmt;
|
||||
frame->width = width;
|
||||
frame->height = height;
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return frame;
|
||||
return picture;
|
||||
}
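Note: the only functional difference between the two versions of this helper is the alignment argument of av_frame_get_buffer(); newer examples pass 0 and let libavutil choose, the n4.0 code asks for 32 bytes explicitly. A minimal sketch of the allocation pattern (format and size are illustrative only):

    AVFrame *f = av_frame_alloc();
    if (f) {
        f->format = AV_PIX_FMT_YUV420P;      /* example values only */
        f->width  = 640;
        f->height = 480;
        if (av_frame_get_buffer(f, 0) < 0)   /* 0 = pick a suitable alignment */
            av_frame_free(&f);
    }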
|
||||
|
||||
static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->enc;
|
||||
@@ -419,7 +410,7 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
@@ -430,9 +421,9 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_frame(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary video frame\n");
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
@@ -473,7 +464,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
@@ -515,7 +506,37 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
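Note: both the audio and the video generator stop by comparing the stream's next pts against STREAM_DURATION with av_compare_ts(), which compares two timestamps expressed in different time bases without rounding through floating point. A small worked illustration (numbers are assumptions, not taken from the example):

    /* Is frame 300 at 25 fps (time base 1/25) past a 10 second limit?
     * av_compare_ts() returns -1, 0 or 1, like strcmp().              */
    int past_end = av_compare_ts(300, (AVRational){ 1, 25 },     /* 300/25 = 12 s */
                                 10,  (AVRational){ 1, 1 }) > 0; /* 12 s > 10 s -> 1 */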
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
@@ -523,7 +544,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
avcodec_free_context(&ost->enc);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
av_packet_free(&ost->tmp_pkt);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
@@ -534,10 +554,10 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const AVOutputFormat *fmt;
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
const AVCodec *audio_codec, *video_codec;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
@@ -624,6 +644,10 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts that were open when you wrote the header;
* otherwise av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
@@ -1,435 +0,0 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
* @file Intel QSV-accelerated video transcoding API usage example
* @example qsv_transcode.c
*
* Perform QSV-accelerated transcoding and show how to dynamically change
* the encoder's options.
*
* Usage: qsv_transcode input_stream codec output_stream initial option
* { frame_number new_option }
* e.g.: - qsv_transcode input.mp4 h264_qsv output_h264.mp4 "g 60"
* - qsv_transcode input.mp4 hevc_qsv output_hevc.mp4 "g 60 async_depth 1"
* 100 "g 120"
* (initialize the codec with gop_size 60 and change it to 120 after 100
* frames)
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
static AVBufferRef *hw_device_ctx = NULL;
|
||||
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
|
||||
static int video_stream = -1;
|
||||
|
||||
typedef struct DynamicSetting {
|
||||
int frame_number;
|
||||
char* optstr;
|
||||
} DynamicSetting;
|
||||
static DynamicSetting *dynamic_setting;
|
||||
static int setting_number;
|
||||
static int current_setting_number;
|
||||
|
||||
static int str_to_dict(char* optstr, AVDictionary **opt)
|
||||
{
|
||||
char *key, *value;
|
||||
if (strlen(optstr) == 0)
|
||||
return 0;
|
||||
key = strtok(optstr, " ");
|
||||
if (key == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
do {
|
||||
key = strtok(NULL, " ");
|
||||
if (key == NULL)
|
||||
return 0;
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(ENAVAIL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
} while(1);
|
||||
}
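Note: str_to_dict() splits a space-separated "key value" string into an AVDictionary that is later applied with av_opt_set_dict(). A small usage sketch (the option string is only an example; assumes <libavutil/dict.h> and <stdio.h> are available):

    char optstr[] = "g 60 async_depth 1";   /* strtok() modifies the buffer */
    AVDictionary *opts = NULL;
    if (str_to_dict(optstr, &opts) == 0) {
        const AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            printf("%s = %s\n", e->key, e->value);   /* g = 60, async_depth = 1 */
    }
    av_dict_free(&opts);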
|
||||
|
||||
static int dynamic_set_parameter(AVCodecContext *avctx)
|
||||
{
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
static int frame_number = 0;
|
||||
frame_number++;
|
||||
if (current_setting_number < setting_number &&
|
||||
frame_number == dynamic_setting[current_setting_number].frame_number) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
ret = str_to_dict(dynamic_setting[current_setting_number++].optstr, &opts);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "The dynamic parameter is wrong\n");
|
||||
goto fail;
|
||||
}
|
||||
/* Set common option. The dictionary will be freed and replaced
|
||||
* by a new one containing all options not found in common option list.
|
||||
* Then this new dictionary is used to set private option. */
|
||||
if ((ret = av_opt_set_dict(avctx, &opts)) < 0)
|
||||
goto fail;
|
||||
/* Set codec specific option */
|
||||
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
|
||||
goto fail;
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to set
|
||||
* framerate, which is compatible with ffmpeg commandline. The video is
|
||||
* assumed to be average frame rate, so set time_base to 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
avctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
}
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
return ret;
|
||||
}
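Note: the comment inside dynamic_set_parameter() describes a two-pass option application: av_opt_set_dict() consumes the entries it recognizes on the AVCodecContext and leaves the rest in the dictionary, which is then applied to priv_data for codec-private options. A minimal hedged illustration ("look_ahead" is assumed here to be an h264_qsv private option):

    AVDictionary *opts = NULL;
    av_dict_set(&opts, "b", "4M", 0);             /* generic AVCodecContext option */
    av_dict_set(&opts, "look_ahead", "0", 0);     /* known only to the private ctx */
    if (av_opt_set_dict(avctx, &opts) >= 0)       /* consumes "b", keeps the rest  */
        av_opt_set_dict(avctx->priv_data, &opts); /* consumes "look_ahead"         */
    av_dict_free(&opts);                          /* anything left was unknown     */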
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
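Note: get_format() is the decoder's pixel-format negotiation callback; returning AV_PIX_FMT_QSV selects GPU surfaces. A hedged variant that falls back to the first software format when QSV is not offered, a common pattern although not what this example does:

    static enum AVPixelFormat get_format_with_fallback(AVCodecContext *avctx,
                                                       const enum AVPixelFormat *pix_fmts)
    {
        for (const enum AVPixelFormat *p = pix_fmts; *p != AV_PIX_FMT_NONE; p++)
            if (*p == AV_PIX_FMT_QSV)
                return *p;          /* preferred: hardware surfaces */
        return pix_fmts[0];         /* fallback: first software format offered */
    }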
|
||||
|
||||
static int open_input_file(char *filename)
|
||||
{
|
||||
int ret;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVStream *video = NULL;
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
|
||||
filename, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot find a video stream in the input file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
video_stream = ret;
|
||||
video = ifmt_ctx->streams[video_stream];
|
||||
|
||||
switch(video->codecpar->codec_id) {
|
||||
case AV_CODEC_ID_H264:
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_HEVC:
|
||||
decoder = avcodec_find_decoder_by_name("hevc_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP9:
|
||||
decoder = avcodec_find_decoder_by_name("vp9_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP8:
|
||||
decoder = avcodec_find_decoder_by_name("vp8_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_AV1:
|
||||
decoder = avcodec_find_decoder_by_name("av1_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MPEG2VIDEO:
|
||||
decoder = avcodec_find_decoder_by_name("mpeg2_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MJPEG:
|
||||
decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Codec is not supportted by qsv\n");
|
||||
return AVERROR(ENAVAIL);
|
||||
}
|
||||
|
||||
if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
|
||||
fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
decoder_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video, NULL);
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
|
||||
if (!decoder_ctx->hw_device_ctx) {
|
||||
fprintf(stderr, "A hardware device reference create failed.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
decoder_ctx->get_format = get_format;
|
||||
decoder_ctx->pkt_timebase = video->time_base;
|
||||
if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
|
||||
fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
av_packet_unref(enc_pkt);
|
||||
|
||||
if((ret = dynamic_set_parameter(encoder_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to set dynamic parameter. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
if ((ret = avcodec_receive_packet(encoder_ctx, enc_pkt)))
|
||||
break;
|
||||
enc_pkt->stream_index = 0;
|
||||
av_packet_rescale_ts(enc_pkt, encoder_ctx->time_base,
|
||||
ofmt_ctx->streams[0]->time_base);
|
||||
if ((ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt)) < 0) {
|
||||
fprintf(stderr, "Error during writing data to output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
if (ret == AVERROR_EOF)
|
||||
return 0;
|
||||
ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret = 0;
|
||||
|
||||
ret = avcodec_send_packet(decoder_ctx, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
if (!(frame = av_frame_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = avcodec_receive_frame(decoder_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
av_frame_free(&frame);
|
||||
return 0;
|
||||
} else if (ret < 0) {
|
||||
fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
AVStream *ost;
|
||||
/* We need to ref the decoder's hw_frames_ctx to initialize the encoder's codec;
it only becomes available after the first frame has been decoded. */
|
||||
encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
/* Set the AVCodecContext parameters for the encoder; here we keep them
* the same as the decoder's.
*/
|
||||
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
|
||||
encoder_ctx->pix_fmt = AV_PIX_FMT_QSV;
|
||||
encoder_ctx->width = decoder_ctx->width;
|
||||
encoder_ctx->height = decoder_ctx->height;
|
||||
if ((ret = str_to_dict(optstr, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to set encoding parameter.\n");
|
||||
goto fail;
|
||||
}
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to
|
||||
* set framerate, which is compatible with ffmpeg commandline. The
|
||||
* video is assumed to be average frame rate, so set time_base to
|
||||
* 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
encoder_ctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
if ((ret = avcodec_open2(encoder_ctx, enc_codec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
av_dict_free(&opts);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
|
||||
if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
|
||||
fprintf(stderr, "Failed to allocate stream for output format.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ost->time_base = encoder_ctx->time_base;
|
||||
ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy the stream parameters. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* write the stream header */
|
||||
if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Error while writing stream header. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
frame->pts = av_rescale_q(frame->pts, decoder_ctx->pkt_timebase,
|
||||
encoder_ctx->time_base);
|
||||
if ((ret = encode_write(pkt, frame)) < 0)
|
||||
fprintf(stderr, "Error during encoding and writing.\n");
|
||||
|
||||
fail:
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVCodec *enc_codec;
|
||||
int ret = 0;
|
||||
AVPacket *dec_pkt = NULL;
|
||||
|
||||
if (argc < 5 || (argc - 5) % 2) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <encoder> <output file>"
|
||||
" <\"encoding option set 0\"> [<frame_number> <\"encoding options set 1\">]...\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
setting_number = (argc - 5) / 2;
|
||||
dynamic_setting = av_malloc(setting_number * sizeof(*dynamic_setting));
|
||||
current_setting_number = 0;
|
||||
for (int i = 0; i < setting_number; i++) {
|
||||
dynamic_setting[i].frame_number = atoi(argv[i*2 + 5]);
|
||||
dynamic_setting[i].optstr = argv[i*2 + 6];
|
||||
}
|
||||
|
||||
ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to create a QSV device. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
dec_pkt = av_packet_alloc();
|
||||
if (!dec_pkt) {
|
||||
fprintf(stderr, "Failed to allocate decode packet\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
|
||||
if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
|
||||
fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
|
||||
fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
|
||||
"%s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* read all packets and transcode only the video stream */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == dec_pkt->stream_index)
|
||||
ret = dec_enc(dec_pkt, enc_codec, argv[4]);
|
||||
|
||||
av_packet_unref(dec_pkt);
|
||||
}
|
||||
|
||||
/* flush decoder */
|
||||
av_packet_unref(dec_pkt);
|
||||
if ((ret = dec_enc(dec_pkt, enc_codec, argv[4])) < 0) {
|
||||
fprintf(stderr, "Failed to flush decoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
if ((ret = encode_write(dec_pkt, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to flush encoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* write the trailer for output stream */
|
||||
if ((ret = av_write_trailer(ofmt_ctx)) < 0)
|
||||
fprintf(stderr, "Failed to write trailer %s\n", av_err2str(ret));
|
||||
|
||||
end:
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
avformat_close_input(&ofmt_ctx);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avcodec_free_context(&encoder_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
av_packet_free(&dec_pkt);
|
||||
av_freep(&dynamic_setting);
|
||||
return ret;
|
||||
}
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated H.264 decoding API usage example
|
||||
* @example qsv_decode.c
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* Perform QSV-accelerated H.264 decoding with output frames in the
|
||||
* GPU video surfaces, write the decoded frames to an output file.
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the GPU video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
@@ -43,10 +44,38 @@
|
||||
#include "libavutil/hwcontext_qsv.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
AVBufferRef *hw_device_ref;
|
||||
} DecodeContext;
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVHWFramesContext *frames_ctx;
|
||||
AVQSVFramesContext *frames_hwctx;
|
||||
int ret;
|
||||
|
||||
/* create a pool of surfaces to be used by the decoder */
|
||||
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
|
||||
if (!avctx->hw_frames_ctx)
|
||||
return AV_PIX_FMT_NONE;
|
||||
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
|
||||
frames_hwctx = frames_ctx->hwctx;
|
||||
|
||||
frames_ctx->format = AV_PIX_FMT_QSV;
|
||||
frames_ctx->sw_format = avctx->sw_pix_fmt;
|
||||
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
|
||||
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
|
||||
frames_ctx->initial_pool_size = 32;
|
||||
|
||||
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
|
||||
|
||||
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
|
||||
if (ret < 0)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
@@ -58,7 +87,7 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *decoder_ctx,
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVFrame *sw_frame,
|
||||
AVPacket *pkt, AVIOContext *output_ctx)
|
||||
{
|
||||
@@ -112,15 +141,15 @@ int main(int argc, char **argv)
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
AVFrame *frame = NULL, *sw_frame = NULL;
|
||||
|
||||
DecodeContext decode = { NULL };
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i;
|
||||
|
||||
AVBufferRef *device_ref = NULL;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
@@ -148,7 +177,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* open the hardware device */
|
||||
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
"auto", NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open the hardware device\n");
|
||||
@@ -180,8 +209,7 @@ int main(int argc, char **argv)
|
||||
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
|
||||
}
|
||||
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
|
||||
decoder_ctx->opaque = &decode;
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
@@ -199,26 +227,27 @@ int main(int argc, char **argv)
|
||||
|
||||
frame = av_frame_alloc();
|
||||
sw_frame = av_frame_alloc();
|
||||
pkt = av_packet_alloc();
|
||||
if (!frame || !sw_frame || !pkt) {
|
||||
if (!frame || !sw_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, pkt);
|
||||
ret = av_read_frame(input_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt->stream_index == video_st->index)
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
|
||||
if (pkt.stream_index == video_st->index)
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
av_packet_unref(pkt);
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0) {
|
||||
@@ -231,11 +260,10 @@ finish:
|
||||
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&sw_frame);
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
av_buffer_unref(&device_ref);
|
||||
av_buffer_unref(&decode.hw_device_ref);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
@@ -21,11 +21,11 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat/libavcodec demuxing and muxing API usage example
|
||||
* @example remux.c
|
||||
* @file
|
||||
* libavformat/libavcodec demuxing and muxing API example.
|
||||
*
|
||||
* Remux streams from one container format to another. Data is copied from the
|
||||
* input to the output without transcoding.
|
||||
* Remux streams from one container format to another.
|
||||
* @example remuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/timestamp.h>
|
||||
@@ -45,9 +45,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, cons
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVOutputFormat *ofmt = NULL;
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
int stream_index = 0;
|
||||
@@ -65,12 +65,6 @@ int main(int argc, char **argv)
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
@@ -91,7 +85,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
stream_mapping_size = ifmt_ctx->nb_streams;
|
||||
stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping));
|
||||
stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
|
||||
if (!stream_mapping) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
@@ -146,39 +140,38 @@ int main(int argc, char **argv)
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, pkt);
|
||||
ret = av_read_frame(ifmt_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt->stream_index];
|
||||
if (pkt->stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt->stream_index] < 0) {
|
||||
av_packet_unref(pkt);
|
||||
in_stream = ifmt_ctx->streams[pkt.stream_index];
|
||||
if (pkt.stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt.stream_index] < 0) {
|
||||
av_packet_unref(&pkt);
|
||||
continue;
|
||||
}
|
||||
|
||||
pkt->stream_index = stream_mapping[pkt->stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt->stream_index];
|
||||
log_packet(ifmt_ctx, pkt, "in");
|
||||
pkt.stream_index = stream_mapping[pkt.stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt.stream_index];
|
||||
log_packet(ifmt_ctx, &pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
|
||||
pkt->pos = -1;
|
||||
log_packet(ofmt_ctx, pkt, "out");
|
||||
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
|
||||
pkt.pos = -1;
|
||||
log_packet(ofmt_ctx, &pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
av_packet_unref(&pkt);
|
||||
}
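Note: the single av_packet_rescale_ts() call in the newer version covers pts, dts and duration in one step; the three explicit rescales above are its manual equivalent. A worked value, assuming an MPEG-TS style 1/90000 input time base and a 1/1000 output time base (illustrative numbers only):

    /* 90000 ticks at 1/90000 s is 1.0 s; in a 1/1000 time base that is 1000 ticks. */
    AVRational in_tb = { 1, 90000 }, out_tb = { 1, 1000 };
    int64_t pts = av_rescale_q_rnd(90000, in_tb, out_tb,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); /* 1000 */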
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
@@ -21,12 +21,8 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio resampling API usage example
|
||||
* @example resample_audio.c
|
||||
*
|
||||
* Generate a synthetic audio signal, and use the libswresample API to perform
* audio resampling. The output is written to a raw audio file to be played
* with ffplay.
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
@@ -84,7 +80,7 @@ static void fill_samples(double *dst, int nb_samples, int nb_channels, int sampl
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_STEREO, dst_ch_layout = AV_CHANNEL_LAYOUT_SURROUND;
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
@@ -96,7 +92,6 @@ int main(int argc, char **argv)
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
char buf[64];
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
@@ -125,11 +120,11 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout(swr_ctx, "in_chlayout", &src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
@@ -141,7 +136,7 @@ int main(int argc, char **argv)
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = src_ch_layout.nb_channels;
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
@@ -156,7 +151,7 @@ int main(int argc, char **argv)
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
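Note: av_rescale_rnd(a, b, c, rnd) computes a * b / c without intermediate overflow, so the line above sizes the destination buffer as src_nb_samples * dst_rate / src_rate rounded up. A worked example with assumed rates:

    /* Resampling 1024 samples from 48 kHz to 44.1 kHz needs at most
     * ceil(1024 * 44100 / 48000) = 941 output samples.               */
    int64_t out_samples = av_rescale_rnd(1024, 44100, 48000, AV_ROUND_UP); /* 941 */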
|
||||
|
||||
/* buffer is going to be directly written to a raw audio file, no alignment */
|
||||
dst_nb_channels = dst_ch_layout.nb_channels;
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
@@ -199,10 +194,9 @@ int main(int argc, char **argv)
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
av_channel_layout_describe(&dst_ch_layout, buf, sizeof(buf));
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %s -channels %d -ar %d %s\n",
|
||||
fmt, buf, dst_nb_channels, dst_rate, dst_filename);
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
@@ -21,10 +21,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libswscale API usage example
|
||||
* @example scale_video.c
|
||||
*
|
||||
* Generate a synthetic video signal and use libswscale to perform rescaling.
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2022 Andreas Unterweger
|
||||
* Copyright (c) 2013-2018 Andreas Unterweger
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@@ -19,11 +19,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio transcoding to MPEG/AAC API usage example
|
||||
* @example transcode_aac.c
|
||||
* @file
|
||||
* Simple audio converter
|
||||
*
|
||||
* Convert an input audio file to AAC in an MP4 container. Formats other than
|
||||
* MP4 are supported based on the output file extension.
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* Formats other than MP4 are supported based on the output file extension.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
@@ -37,7 +38,6 @@
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
@@ -60,8 +60,7 @@ static int open_input_file(const char *filename,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodecContext *avctx;
|
||||
const AVCodec *input_codec;
|
||||
const AVStream *stream;
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/* Open the input file to read from it. */
|
||||
@@ -89,10 +88,8 @@ static int open_input_file(const char *filename,
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
stream = (*input_format_context)->streams[0];
|
||||
|
||||
/* Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder(stream->codecpar->codec_id))) {
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
@@ -107,7 +104,7 @@ static int open_input_file(const char *filename,
|
||||
}
|
||||
|
||||
/* Initialize the stream parameters with demuxer information. */
|
||||
error = avcodec_parameters_to_context(avctx, stream->codecpar);
|
||||
error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
|
||||
if (error < 0) {
|
||||
avformat_close_input(input_format_context);
|
||||
avcodec_free_context(&avctx);
|
||||
@@ -123,9 +120,6 @@ static int open_input_file(const char *filename,
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Set the packet timebase for the decoder. */
|
||||
avctx->pkt_timebase = stream->time_base;
|
||||
|
||||
/* Save the decoder context for easier access later. */
|
||||
*input_codec_context = avctx;
|
||||
|
||||
@@ -150,7 +144,7 @@ static int open_output_file(const char *filename,
|
||||
AVCodecContext *avctx = NULL;
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
const AVCodec *output_codec = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/* Open the output file to write to it. */
|
||||
@@ -205,11 +199,15 @@ static int open_output_file(const char *filename,
|
||||
|
||||
/* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion. */
|
||||
av_channel_layout_default(&avctx->ch_layout, OUTPUT_CHANNELS);
|
||||
avctx->channels = OUTPUT_CHANNELS;
|
||||
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
avctx->sample_rate = input_codec_context->sample_rate;
|
||||
avctx->sample_fmt = output_codec->sample_fmts[0];
|
||||
avctx->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/* Allow the use of the experimental AAC encoder. */
|
||||
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
|
||||
|
||||
/* Set the sample rate for the container. */
|
||||
stream->time_base.den = input_codec_context->sample_rate;
|
||||
stream->time_base.num = 1;
|
||||
@@ -247,16 +245,14 @@ cleanup:
|
||||
|
||||
/**
|
||||
* Initialize one data packet for reading or writing.
|
||||
* @param[out] packet Packet to be initialized
|
||||
* @return Error code (0 if successful)
|
||||
* @param packet Packet to be initialized
|
||||
*/
|
||||
static int init_packet(AVPacket **packet)
|
||||
static void init_packet(AVPacket *packet)
|
||||
{
|
||||
if (!(*packet = av_packet_alloc())) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
av_init_packet(packet);
|
||||
/* Set the packet data and size so that it is recognized as being empty. */
|
||||
packet->data = NULL;
|
||||
packet->size = 0;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -291,18 +287,21 @@ static int init_resampler(AVCodecContext *input_codec_context,
|
||||
/*
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
error = swr_alloc_set_opts2(resample_context,
|
||||
&output_codec_context->ch_layout,
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
&input_codec_context->ch_layout,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (error < 0) {
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return error;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/*
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
@@ -330,7 +329,7 @@ static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
|
||||
{
|
||||
/* Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
|
||||
output_codec_context->ch_layout.nb_channels, 1))) {
|
||||
output_codec_context->channels, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
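Note: the FIFO decouples the decoder's variable frame size from the encoder's fixed frame_size: decoded samples are appended, and exactly frame_size samples are pulled out once enough have accumulated. A hedged sketch of that push/pull cycle using this example's identifiers (error checks trimmed):

    /* push: grow the FIFO and append the decoded samples */
    av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples);
    av_audio_fifo_write(fifo, (void **)input_frame->extended_data,
                        input_frame->nb_samples);

    /* pull: once a full encoder frame is buffered, read it back out */
    if (av_audio_fifo_size(fifo) >= output_codec_context->frame_size)
        av_audio_fifo_read(fifo, (void **)output_frame->data,
                           output_codec_context->frame_size);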
|
||||
@@ -372,33 +371,28 @@ static int decode_audio_frame(AVFrame *frame,
|
||||
int *data_present, int *finished)
|
||||
{
|
||||
/* Packet used for temporary storage. */
|
||||
AVPacket *input_packet;
|
||||
AVPacket input_packet;
|
||||
int error;
|
||||
init_packet(&input_packet);
|
||||
|
||||
error = init_packet(&input_packet);
|
||||
if (error < 0)
|
||||
return error;
|
||||
|
||||
*data_present = 0;
|
||||
*finished = 0;
|
||||
/* Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, input_packet)) < 0) {
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/* If we are at the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
fprintf(stderr, "Could not read frame (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/* Send the audio frame stored in the temporary packet to the decoder.
|
||||
* The input audio stream decoder is used to do this. */
|
||||
if ((error = avcodec_send_packet(input_codec_context, input_packet)) < 0) {
|
||||
if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
|
||||
fprintf(stderr, "Could not send packet for decoding (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Receive one frame from the decoder. */
|
||||
@@ -424,7 +418,7 @@ static int decode_audio_frame(AVFrame *frame,
|
||||
}
|
||||
|
||||
cleanup:
|
||||
av_packet_free(&input_packet);
|
||||
av_packet_unref(&input_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -447,17 +441,26 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
|
||||
int error;
|
||||
|
||||
/* Allocate as many pointers as there are audio channels.
|
||||
* Each pointer will point to the audio samples of the corresponding
|
||||
* Each pointer will later point to the audio samples of the corresponding
|
||||
* channels (although it may be NULL for interleaved formats).
|
||||
* Allocate memory for the samples of all channels in one consecutive
|
||||
*/
|
||||
if (!(*converted_input_samples = calloc(output_codec_context->channels,
|
||||
sizeof(**converted_input_samples)))) {
|
||||
fprintf(stderr, "Could not allocate converted input sample pointers\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Allocate memory for the samples of all channels in one consecutive
|
||||
* block for convenience. */
|
||||
if ((error = av_samples_alloc_array_and_samples(converted_input_samples, NULL,
|
||||
output_codec_context->ch_layout.nb_channels,
|
||||
if ((error = av_samples_alloc(*converted_input_samples, NULL,
|
||||
output_codec_context->channels,
|
||||
frame_size,
|
||||
output_codec_context->sample_fmt, 0)) < 0) {
|
||||
fprintf(stderr,
|
||||
"Could not allocate converted input samples (error '%s')\n",
|
||||
av_err2str(error));
|
||||
av_freep(&(*converted_input_samples)[0]);
|
||||
free(*converted_input_samples);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
@@ -550,7 +553,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
AVFrame *input_frame = NULL;
|
||||
/* Temporary storage for the converted input samples. */
|
||||
uint8_t **converted_input_samples = NULL;
|
||||
int data_present;
|
||||
int data_present = 0;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
/* Initialize temporary storage for one input frame. */
|
||||
@@ -589,9 +592,10 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (converted_input_samples)
|
||||
if (converted_input_samples) {
|
||||
av_freep(&converted_input_samples[0]);
|
||||
av_freep(&converted_input_samples);
|
||||
free(converted_input_samples);
|
||||
}
|
||||
av_frame_free(&input_frame);
|
||||
|
||||
return ret;
|
||||
@@ -623,7 +627,7 @@ static int init_output_frame(AVFrame **frame,
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity. */
|
||||
(*frame)->nb_samples = frame_size;
|
||||
av_channel_layout_copy(&(*frame)->ch_layout, &output_codec_context->ch_layout);
|
||||
(*frame)->channel_layout = output_codec_context->channel_layout;
|
||||
(*frame)->format = output_codec_context->sample_fmt;
|
||||
(*frame)->sample_rate = output_codec_context->sample_rate;
|
||||
|
||||
@@ -657,12 +661,9 @@ static int encode_audio_frame(AVFrame *frame,
|
||||
int *data_present)
|
||||
{
|
||||
/* Packet used for temporary storage. */
|
||||
AVPacket *output_packet;
|
||||
AVPacket output_packet;
|
||||
int error;
|
||||
|
||||
error = init_packet(&output_packet);
|
||||
if (error < 0)
|
||||
return error;
|
||||
init_packet(&output_packet);
|
||||
|
||||
/* Set a timestamp based on the sample rate for the container. */
|
||||
if (frame) {
|
||||
@@ -670,20 +671,21 @@ static int encode_audio_frame(AVFrame *frame,
|
||||
pts += frame->nb_samples;
|
||||
}
|
||||
|
||||
*data_present = 0;
|
||||
/* Send the audio frame stored in the temporary packet to the encoder.
|
||||
* The output audio stream encoder is used to do this. */
|
||||
error = avcodec_send_frame(output_codec_context, frame);
|
||||
/* Check for errors, but proceed with fetching encoded samples if the
|
||||
* encoder signals that it has nothing more to encode. */
|
||||
if (error < 0 && error != AVERROR_EOF) {
|
||||
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
/* The encoder signals that it has nothing more to encode. */
|
||||
if (error == AVERROR_EOF) {
|
||||
error = 0;
|
||||
goto cleanup;
|
||||
} else if (error < 0) {
|
||||
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
|
||||
av_err2str(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Receive one encoded frame from the encoder. */
|
||||
error = avcodec_receive_packet(output_codec_context, output_packet);
|
||||
error = avcodec_receive_packet(output_codec_context, &output_packet);
|
||||
/* If the encoder asks for more data to be able to provide an
|
||||
* encoded frame, return indicating that no data is present. */
|
||||
if (error == AVERROR(EAGAIN)) {
|
||||
@@ -704,14 +706,14 @@ static int encode_audio_frame(AVFrame *frame,
|
||||
|
||||
/* Write one audio frame from the temporary packet to the output file. */
|
||||
if (*data_present &&
|
||||
(error = av_write_frame(output_format_context, output_packet)) < 0) {
|
||||
(error = av_write_frame(output_format_context, &output_packet)) < 0) {
|
||||
fprintf(stderr, "Could not write frame (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
cleanup:
|
||||
av_packet_free(&output_packet);
|
||||
av_packet_unref(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
@@ -850,6 +852,7 @@ int main(int argc, char **argv)
|
||||
int data_written;
|
||||
/* Flush the encoder as it may have delayed frames. */
|
||||
do {
|
||||
data_written = 0;
|
||||
if (encode_audio_frame(NULL, output_format_context,
|
||||
output_codec_context, &data_written))
|
||||
goto cleanup;
|
||||
|
||||
@@ -23,18 +23,15 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file demuxing, decoding, filtering, encoding and muxing API usage example
|
||||
* @example transcode.c
|
||||
*
|
||||
* Convert input to output file, applying some hard-coded filter-graph on both
|
||||
* audio and video streams.
|
||||
* @file
|
||||
* API example for demuxing, decoding, filtering, encoding and muxing
|
||||
* @example transcoding.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
@@ -44,17 +41,12 @@ typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
|
||||
AVPacket *enc_pkt;
|
||||
AVFrame *filtered_frame;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
typedef struct StreamContext {
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
AVFrame *dec_frame;
|
||||
} StreamContext;
|
||||
static StreamContext *stream_ctx;
|
||||
|
||||
@@ -74,13 +66,13 @@ static int open_input_file(const char *filename)
|
||||
return ret;
|
||||
}
|
||||
|
||||
stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
|
||||
stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
|
||||
if (!stream_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream = ifmt_ctx->streams[i];
|
||||
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
|
||||
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
|
||||
AVCodecContext *codec_ctx;
|
||||
if (!dec) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
|
||||
@@ -97,11 +89,6 @@ static int open_input_file(const char *filename)
|
||||
"for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Inform the decoder about the timebase for the packet timestamps.
|
||||
* This is highly recommended, but not mandatory. */
|
||||
codec_ctx->pkt_timebase = stream->time_base;
|
||||
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
@@ -115,10 +102,6 @@ static int open_input_file(const char *filename)
|
||||
}
|
||||
}
|
||||
stream_ctx[i].dec_ctx = codec_ctx;
|
||||
|
||||
stream_ctx[i].dec_frame = av_frame_alloc();
|
||||
if (!stream_ctx[i].dec_frame)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
@@ -130,7 +113,7 @@ static int open_output_file(const char *filename)
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream;
|
||||
AVCodecContext *dec_ctx, *enc_ctx;
|
||||
const AVCodec *encoder;
|
||||
AVCodec *encoder;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
@@ -182,9 +165,8 @@ static int open_output_file(const char *filename)
|
||||
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
|
||||
} else {
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
enc_ctx->channel_layout = dec_ctx->channel_layout;
|
||||
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
@@ -271,7 +253,7 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num,
|
||||
dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
@@ -297,7 +279,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
char buf[64];
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
buffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
@@ -306,14 +287,14 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf));
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout =
|
||||
av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s",
|
||||
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den, dec_ctx->sample_rate,
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt),
|
||||
buf);
|
||||
dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
@@ -336,9 +317,9 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf));
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts",
|
||||
buf, AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
|
||||
(uint8_t*)&enc_ctx->channel_layout,
|
||||
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
@@ -417,67 +398,54 @@ static int init_filters(void)
|
||||
stream_ctx[i].enc_ctx, filter_spec);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
filter_ctx[i].enc_pkt = av_packet_alloc();
|
||||
if (!filter_ctx[i].enc_pkt)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
filter_ctx[i].filtered_frame = av_frame_alloc();
|
||||
if (!filter_ctx[i].filtered_frame)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encode_write_frame(unsigned int stream_index, int flush)
|
||||
{
|
||||
StreamContext *stream = &stream_ctx[stream_index];
|
||||
FilteringContext *filter = &filter_ctx[stream_index];
|
||||
AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
|
||||
AVPacket *enc_pkt = filter->enc_pkt;
|
||||
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
|
||||
int ret;
|
||||
int got_frame_local;
|
||||
AVPacket enc_pkt;
|
||||
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
|
||||
(ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
|
||||
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
|
||||
|
||||
if (!got_frame)
|
||||
got_frame = &got_frame_local;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
|
||||
/* encode filtered frame */
|
||||
av_packet_unref(enc_pkt);
|
||||
|
||||
if (filt_frame && filt_frame->pts != AV_NOPTS_VALUE)
|
||||
filt_frame->pts = av_rescale_q(filt_frame->pts, filt_frame->time_base,
|
||||
stream->enc_ctx->time_base);
|
||||
|
||||
ret = avcodec_send_frame(stream->enc_ctx, filt_frame);
|
||||
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
av_init_packet(&enc_pkt);
|
||||
ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
|
||||
filt_frame, got_frame);
|
||||
av_frame_free(&filt_frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (!(*got_frame))
|
||||
return 0;
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);
|
||||
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
return 0;
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt->stream_index = stream_index;
|
||||
av_packet_rescale_ts(enc_pkt,
|
||||
stream->enc_ctx->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
|
||||
}
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt.stream_index = stream_index;
|
||||
av_packet_rescale_ts(&enc_pkt,
|
||||
stream_ctx[stream_index].enc_ctx->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
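The encode_write_frame() hunk above interleaves the old avcodec_encode_video2()/avcodec_encode_audio2() path with the newer send/receive API. As a minimal sketch of the newer pattern only (function and variable names here are illustrative, not the example's exact code):

/* Illustrative sketch: encode one frame (or flush with frame == NULL) and
 * mux every packet the encoder produces. */
static int encode_and_mux(AVCodecContext *enc_ctx, AVFormatContext *ofmt_ctx,
                          AVFrame *frame, AVPacket *pkt, int stream_index)
{
    int ret = avcodec_send_frame(enc_ctx, frame);   /* NULL frame == flush */
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;               /* needs more input, or fully drained */
        if (ret < 0)
            return ret;             /* real error */

        /* prepare the packet for muxing; av_interleaved_write_frame()
         * takes ownership of the packet's data */
        pkt->stream_index = stream_index;
        av_packet_rescale_ts(pkt, enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);
        ret = av_interleaved_write_frame(ofmt_ctx, pkt);
    }
    return ret;
}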
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
FilteringContext *filter = &filter_ctx[stream_index];
|
||||
int ret;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
|
||||
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
@@ -486,9 +454,14 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!filt_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter->buffersink_ctx,
|
||||
filter->filtered_frame);
|
||||
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
|
||||
filt_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
@@ -496,13 +469,12 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
av_frame_free(&filt_frame);
|
||||
break;
|
||||
}
|
||||
|
||||
filter->filtered_frame->time_base = av_buffersink_get_time_base(filter->buffersink_ctx);
|
||||
filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(stream_index, 0);
|
||||
av_frame_unref(filter->filtered_frame);
|
||||
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(filt_frame, stream_index, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -512,20 +484,34 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
int got_frame;
|
||||
|
||||
if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
|
||||
AV_CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
return encode_write_frame(stream_index, 1);
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
ret = encode_write_frame(NULL, stream_index, &got_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (!got_frame)
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = NULL;
|
||||
AVPacket packet = { .data = NULL, .size = 0 };
|
||||
AVFrame *frame = NULL;
|
||||
enum AVMediaType type;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
int got_frame;
|
||||
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@@ -538,85 +524,63 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
if (!(packet = av_packet_alloc()))
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
stream_index = packet->stream_index;
|
||||
stream_index = packet.stream_index;
|
||||
type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
StreamContext *stream = &stream_ctx[stream_index];
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
|
||||
ret = avcodec_send_packet(stream->dec_ctx, packet);
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
stream_ctx[stream_index].dec_ctx->time_base);
|
||||
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
|
||||
avcodec_decode_audio4;
|
||||
ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
|
||||
&got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, stream_index);
|
||||
if (got_frame) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(frame, stream_index);
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
} else {
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
av_packet_rescale_ts(packet,
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, packet);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
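The demux loop above likewise replaces avcodec_decode_video2()/avcodec_decode_audio4() with the send/receive decoding calls. A minimal sketch of that pattern, assuming a dec_ctx/frame pair like the example's StreamContext fields:

/* Illustrative sketch: feed one packet (or NULL to flush) to the decoder and
 * process every frame it returns. */
static int decode_and_process(AVCodecContext *dec_ctx, const AVPacket *pkt,
                              AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);    /* NULL pkt == flush */
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;
        if (ret < 0)
            return ret;

        frame->pts = frame->best_effort_timestamp;
        /* ...push the frame into the filtergraph / encoder here... */
        av_frame_unref(frame);
    }
    return 0;
}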
/* flush decoders, filters and encoders */
|
||||
/* flush filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
StreamContext *stream;
|
||||
|
||||
/* flush filter */
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
|
||||
stream = &stream_ctx[i];
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream %u decoder\n", i);
|
||||
|
||||
/* flush decoder */
|
||||
ret = avcodec_send_packet(stream->dec_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing decoding failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, i);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush filter */
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
@@ -633,18 +597,14 @@ int main(int argc, char **argv)
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&packet);
|
||||
av_packet_unref(&packet);
|
||||
av_frame_free(&frame);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_free_context(&stream_ctx[i].dec_ctx);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
|
||||
avcodec_free_context(&stream_ctx[i].enc_ctx);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph) {
|
||||
if (filter_ctx && filter_ctx[i].filter_graph)
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
av_packet_free(&filter_ctx[i].enc_pkt);
|
||||
av_frame_free(&filter_ctx[i].filtered_frame);
|
||||
}
|
||||
|
||||
av_frame_free(&stream_ctx[i].dec_frame);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
av_free(stream_ctx);
|
||||
@@ -1,30 +1,31 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* Video Acceleration API (video encoding) encode sample
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel VAAPI-accelerated encoding API usage example
|
||||
* @example vaapi_encode.c
|
||||
* @file
|
||||
* Intel VAAPI-accelerated encoding example.
|
||||
*
|
||||
* @example vaapi_encode.c
|
||||
* This example shows how to do VAAPI-accelerated encoding. It currently supports
* only NV12 raw input; usage: vaapi_encode 1920 1080 input.yuv output.h264
|
||||
*
|
||||
* Perform VAAPI-accelerated encoding. Read input from an NV12 raw
|
||||
* file, and write the H.264 encoded data to an output raw file.
|
||||
* Usage: vaapi_encode 1920 1080 input.yuv output.h264
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -71,31 +72,27 @@ static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
|
||||
static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *enc_pkt;
|
||||
AVPacket enc_pkt;
|
||||
|
||||
if (!(enc_pkt = av_packet_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
av_init_packet(&enc_pkt);
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
|
||||
if ((ret = avcodec_send_frame(avctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
ret = avcodec_receive_packet(avctx, enc_pkt);
|
||||
ret = avcodec_receive_packet(avctx, &enc_pkt);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
enc_pkt->stream_index = 0;
|
||||
ret = fwrite(enc_pkt->data, enc_pkt->size, 1, fout);
|
||||
av_packet_unref(enc_pkt);
|
||||
if (ret != enc_pkt->size) {
|
||||
ret = AVERROR(errno);
|
||||
break;
|
||||
}
|
||||
enc_pkt.stream_index = 0;
|
||||
ret = fwrite(enc_pkt.data, enc_pkt.size, 1, fout);
|
||||
av_packet_unref(&enc_pkt);
|
||||
}
|
||||
|
||||
end:
|
||||
av_packet_free(&enc_pkt);
|
||||
ret = ((ret == AVERROR(EAGAIN)) ? 0 : -1);
|
||||
return ret;
|
||||
}
|
||||
@@ -106,7 +103,7 @@ int main(int argc, char *argv[])
|
||||
FILE *fin = NULL, *fout = NULL;
|
||||
AVFrame *sw_frame = NULL, *hw_frame = NULL;
|
||||
AVCodecContext *avctx = NULL;
|
||||
const AVCodec *codec = NULL;
|
||||
AVCodec *codec = NULL;
|
||||
const char *enc_name = "h264_vaapi";
|
||||
|
||||
if (argc < 5) {
|
||||
@@ -173,7 +170,7 @@ int main(int argc, char *argv[])
|
||||
sw_frame->width = width;
|
||||
sw_frame->height = height;
|
||||
sw_frame->format = AV_PIX_FMT_NV12;
|
||||
if ((err = av_frame_get_buffer(sw_frame, 0)) < 0)
|
||||
if ((err = av_frame_get_buffer(sw_frame, 32)) < 0)
|
||||
goto close;
|
||||
if ((err = fread((uint8_t*)(sw_frame->data[0]), size, 1, fin)) <= 0)
|
||||
break;
|
||||
|
||||
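Right after the fread() above, vaapi_encode.c uploads the software NV12 frame into a VAAPI surface before sending it to the encoder. A minimal sketch of that step, assuming the avctx, sw_frame, hw_frame and err variables of the example:

/* Illustrative sketch: take a surface from the encoder's hw frames context
 * and copy the software frame's data into it. */
if (!(hw_frame = av_frame_alloc())) {
    err = AVERROR(ENOMEM);
    goto close;
}
if ((err = av_hwframe_get_buffer(avctx->hw_frames_ctx, hw_frame, 0)) < 0)
    goto close;
if ((err = av_hwframe_transfer_data(hw_frame, sw_frame, 0)) < 0) {
    fprintf(stderr, "Error while transferring frame data to surface.\n");
    goto close;
}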
@@ -1,28 +1,29 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* Video Acceleration API (video transcoding) transcode sample
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel VAAPI-accelerated transcoding API usage example
|
||||
* @example vaapi_transcode.c
|
||||
* @file
|
||||
* Intel VAAPI-accelerated transcoding example.
|
||||
*
|
||||
* Perform VAAPI-accelerated transcoding.
|
||||
* @example vaapi_transcode.c
|
||||
* This example shows how to do VAAPI-accelerated transcoding.
|
||||
* Usage: vaapi_transcode input_stream codec output_stream
|
||||
* e.g.: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
|
||||
* - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
|
||||
@@ -59,7 +60,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVStream *video = NULL;
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
@@ -106,25 +107,28 @@ static int open_input_file(const char *filename)
|
||||
return ret;
|
||||
}
|
||||
|
||||
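The hunk header above references get_vaapi_format(); a get_format callback of that kind typically just picks AV_PIX_FMT_VAAPI out of the list the decoder offers. A minimal sketch (not necessarily the example's exact body):

static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
                                           const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    /* pick the VAAPI hardware pixel format if the decoder offers it */
    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    }

    fprintf(stderr, "Unable to decode this file using VA-API.\n");
    return AV_PIX_FMT_NONE;
}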
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
|
||||
static int encode_write(AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket enc_pkt;
|
||||
|
||||
av_packet_unref(enc_pkt);
|
||||
av_init_packet(&enc_pkt);
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
|
||||
if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
ret = avcodec_receive_packet(encoder_ctx, enc_pkt);
|
||||
ret = avcodec_receive_packet(encoder_ctx, &enc_pkt);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
enc_pkt->stream_index = 0;
|
||||
av_packet_rescale_ts(enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
|
||||
enc_pkt.stream_index = 0;
|
||||
av_packet_rescale_ts(&enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
|
||||
ofmt_ctx->streams[0]->time_base);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during writing data to output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
@@ -139,7 +143,7 @@ end:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
|
||||
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret = 0;
|
||||
@@ -210,20 +214,22 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
|
||||
initialized = 1;
|
||||
}
|
||||
|
||||
if ((ret = encode_write(pkt, frame)) < 0)
|
||||
if ((ret = encode_write(frame)) < 0)
|
||||
fprintf(stderr, "Error during encoding and writing.\n");
|
||||
|
||||
fail:
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVCodec *enc_codec;
|
||||
int ret = 0;
|
||||
AVPacket *dec_pkt;
|
||||
AVPacket dec_pkt;
|
||||
AVCodec *enc_codec;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
|
||||
@@ -238,12 +244,6 @@ int main(int argc, char **argv)
|
||||
return -1;
|
||||
}
|
||||
|
||||
dec_pkt = av_packet_alloc();
|
||||
if (!dec_pkt) {
|
||||
fprintf(stderr, "Failed to allocate decode packet\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
|
||||
@@ -273,21 +273,23 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets and transcode only the video stream */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
|
||||
if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == dec_pkt->stream_index)
|
||||
ret = dec_enc(dec_pkt, enc_codec);
|
||||
if (video_stream == dec_pkt.stream_index)
|
||||
ret = dec_enc(&dec_pkt, enc_codec);
|
||||
|
||||
av_packet_unref(dec_pkt);
|
||||
av_packet_unref(&dec_pkt);
|
||||
}
|
||||
|
||||
/* flush decoder */
|
||||
av_packet_unref(dec_pkt);
|
||||
ret = dec_enc(dec_pkt, enc_codec);
|
||||
dec_pkt.data = NULL;
|
||||
dec_pkt.size = 0;
|
||||
ret = dec_enc(&dec_pkt, enc_codec);
|
||||
av_packet_unref(&dec_pkt);
|
||||
|
||||
/* flush encoder */
|
||||
ret = encode_write(dec_pkt, NULL);
|
||||
ret = encode_write(NULL);
|
||||
|
||||
/* write the trailer for output stream */
|
||||
av_write_trailer(ofmt_ctx);
|
||||
@@ -298,6 +300,5 @@ end:
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avcodec_free_context(&encoder_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
av_packet_free(&dec_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
12
doc/faq.texi
@@ -76,7 +76,7 @@ the gcc developers. Note that we will not add workarounds for gcc bugs.
|
||||
|
||||
Also note that (some of) the gcc developers believe this is not a bug or
|
||||
not a bug they should fix:
|
||||
@url{https://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@@ -257,13 +257,13 @@ default.
|
||||
@section Which are good parameters for encoding high quality MPEG-4?
|
||||
|
||||
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
|
||||
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
|
||||
|
||||
'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2'
|
||||
but beware the '-g 100' might cause problems with some decoders.
|
||||
Things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
|
||||
@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong?
|
||||
|
||||
@@ -516,7 +516,7 @@ in the ffmpeg invocation. This is effective whether you run ffmpeg in a shell
|
||||
or invoke ffmpeg in its own process via an operating system API.
|
||||
|
||||
As an alternative, when you are running ffmpeg in a shell, you can redirect
|
||||
standard input to @code{/dev/null} (on Linux and macOS)
|
||||
standard input to @code{/dev/null} (on Linux and Mac OS)
|
||||
or @code{NUL} (on Windows). You can do this redirect either
|
||||
on the ffmpeg invocation, or from a shell script which calls ffmpeg.
|
||||
|
||||
@@ -526,7 +526,7 @@ For example:
|
||||
ffmpeg -nostdin -i INPUT OUTPUT
|
||||
@end example
|
||||
|
||||
or (on Linux, macOS, and other UNIX-like shells):
|
||||
or (on Linux, Mac OS, and other UNIX-like shells):
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT OUTPUT </dev/null
|
||||
@@ -601,7 +601,7 @@ No. These tools are too bloated and they complicate the build.
|
||||
FFmpeg is already organized in a highly modular manner and does not need to
|
||||
be rewritten in a formal object language. Further, many of the developers
|
||||
favor straight C; it works for them. For more arguments on this matter,
|
||||
read @uref{https://web.archive.org/web/20111004021423/http://kernel.org/pub/linux/docs/lkml/#s15, "Programming Religion"}.
|
||||
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
|
||||
|
||||
@section Why are the ffmpeg programs devoid of debugging symbols?
|
||||
|
||||
|
||||
@@ -79,21 +79,6 @@ Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
To get the complete list of tests, run the command:
@example
make fate-list
@end example

You can specify a subset of tests to run by specifying the
corresponding elements from the list with the @code{fate-} prefix,
e.g. as in:
@example
make fate-ffprobe_compact fate-ffprobe_xml
@end example

This makes it easier to run a few tests in case of failure without
running the complete test suite.

To use a custom wrapper to run the test, pass @option{--target-exec} to
@command{configure} or set the @var{TARGET_EXEC} Make variable.
|
||||
|
||||
@@ -164,18 +149,12 @@ the synchronisation of the samples directory.
|
||||
|
||||
@chapter Uploading new samples to the fate suite

If you need a sample uploaded, send a mail to samples-request.

This is for developers who have an account on the fate suite server.
If you upload new samples, please make sure they are as small as possible;
disk space on each client, network bandwidth and so on all benefit from
smaller test cases.
Also keep in mind that older checkouts use existing sample files, which in
practice means you should generally not replace, remove or overwrite files,
as doing so would likely break older checkouts or releases.
All samples needed for a commit should also be uploaded, ideally 24
hours before the push.
If you need an account for frequently uploading samples, or you wish to help
others by doing that, send a mail to ffmpeg-devel.
|
||||
|
||||
@example
|
||||
#First update your local samples copy:
|
||||
@@ -223,14 +202,6 @@ meaning only while running the regression tests.
|
||||
Specify how many threads to use while running regression tests; it is
quite useful to detect thread-related regressions.

This variable may be set to the string "random", optionally followed by a
number, like "random99". This will cause each test to use a random number of
threads. If a number is specified, it is used as the maximum number of threads,
otherwise 16 is the maximum.

In case a test fails, the thread count used for it will be written into the
errfile.
|
||||
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy test, either @samp{slice} or @samp{frame},
|
||||
by default @samp{slice+frame}
|
||||
@@ -251,11 +222,6 @@ Set to @samp{1} to generate the missing or mismatched references.
|
||||
Specify which hardware acceleration to use while running regression tests,
|
||||
by default @samp{none} is used.
|
||||
|
||||
@item KEEP
|
||||
Set to @samp{1} to keep temp files generated by fate test(s) when test is successful.
|
||||
Default is @samp{0}, which removes these files. Files are always kept when a test
|
||||
fails.
|
||||
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
slot= # some unique identifier
|
||||
repo=https://git.ffmpeg.org/ffmpeg.git # the source repository
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
#branch=release/2.6 # the branch to test
|
||||
samples= # path to samples directory
|
||||
workdir= # directory in which to do all the work
|
||||
|
||||
1145
doc/ffmpeg.texi
File diff suppressed because it is too large
@@ -34,6 +34,10 @@ various FFmpeg APIs.
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -fs
|
||||
Start in fullscreen mode.
|
||||
@item -an
|
||||
@@ -56,14 +60,10 @@ Play @var{duration} seconds of audio/video.
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -seek_interval
|
||||
Set custom interval, in seconds, for seeking using left/right keys. Default is 10 seconds.
|
||||
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -noborder
|
||||
Borderless window.
|
||||
@item -alwaysontop
|
||||
Window always on top. Available on: X11 with SDL >= 2.0.5, Windows SDL >= 2.0.6.
|
||||
@item -volume
|
||||
Set the startup volume. 0 means silence, 100 means no volume reduction or
|
||||
amplification. Negative values are treated as 0, values above 100 are treated
|
||||
@@ -72,10 +72,6 @@ as 100.
|
||||
Force format.
|
||||
@item -window_title @var{title}
|
||||
Set window title (default is the input filename).
|
||||
@item -left @var{title}
|
||||
Set the x position for the left of the window (default is a centered window).
|
||||
@item -top @var{title}
|
||||
Set the y position for the top of the window (default is a centered window).
|
||||
@item -loop @var{number}
|
||||
Loops movie playback <number> times. 0 means forever.
|
||||
@item -showmode @var{mode}
|
||||
@@ -122,12 +118,15 @@ Read @var{input_url}.
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is shown by default, unless the
|
||||
log level is lower than @code{info}. Its display can be forced by manually
|
||||
specifying this option. To disable it, you need to specify @code{-nostats}.
|
||||
the audio/video synchronisation drift. It is on by default; to
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@@ -190,12 +189,6 @@ input as soon as possible. Enabled by default for realtime streams, where data
|
||||
may be dropped if not read in time. Use this option to enable infinite buffers
|
||||
for all inputs, use @option{-noinfbuf} to disable it.
|
||||
|
||||
@item -filter_threads @var{nb_threads}
|
||||
Defines how many threads are used to process a filter pipeline. Each pipeline
|
||||
will produce a thread pool with this many threads available for parallel
|
||||
processing. The default is 0 which means that the thread count will be
|
||||
determined by the number of available CPUs.
|
||||
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
@@ -214,6 +207,8 @@ Pause.
|
||||
Toggle mute.
|
||||
|
||||
@item 9, 0
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@item /, *
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@@ -285,7 +280,6 @@ Toggle full screen.
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] @file{input_url}
|
||||
ffprobe [@var{options}] [@file{input_url}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -28,9 +28,6 @@ If a url is specified in input, ffprobe will try to open and
|
||||
probe the url content. If the url cannot be opened or recognized as
|
||||
a multimedia file, a positive exit code is returned.
|
||||
|
||||
If no output is specified with @option{o}, ffprobe will write
|
||||
to stdout.
|
||||
|
||||
ffprobe may be employed both as a standalone application or in
|
||||
combination with a textual filter, which may perform more
|
||||
sophisticated processing, e.g. statistical processing or plotting.
|
||||
@@ -41,7 +38,7 @@ ffprobe will show it.
|
||||
|
||||
ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{output_format} option.
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and an unique
|
||||
@@ -83,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
Prettify the format of the displayed values, it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -output_format, -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@@ -91,7 +88,7 @@ Set the output printing format.
|
||||
|
||||
For example for printing the output in JSON format, specify:
|
||||
@example
|
||||
-output_format json
|
||||
-print_format json
|
||||
@end example
|
||||
|
||||
For more details on the available output printing formats, see the
|
||||
@@ -338,12 +335,6 @@ Show information about all pixel formats supported by FFmpeg.
|
||||
Pixel format information for each format is printed within a section
|
||||
with name "PIXEL_FORMAT".
|
||||
|
||||
@item -show_optional_fields @var{value}
|
||||
Some writers, viz. JSON and XML, omit the printing of fields with invalid or non-applicable values,
|
||||
while other writers always print them. This option enables one to control this behaviour.
|
||||
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
|
||||
Default is @var{auto}.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
@@ -351,10 +342,6 @@ on the specific build.
|
||||
@item -i @var{input_url}
|
||||
Read @var{input_url}.
|
||||
|
||||
@item -o @var{output_url}
|
||||
Write output to @var{output_url}. If not specified, the output is sent
|
||||
to stdout.
|
||||
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@@ -438,7 +425,7 @@ The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
If no option is specified, the output has the form:
|
||||
@example
|
||||
section|key1=val1| ... |keyN=valN
|
||||
@end example
|
||||
@@ -597,14 +584,14 @@ value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_strict, x
|
||||
@item xsd_compliant, x
|
||||
If set to 1 perform more checks for ensuring that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{https://www.w3.org/XML/}.
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@@ -655,7 +642,6 @@ DV, GXF and AVI timecodes are available in format metadata
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
698
doc/ffprobe.xsd
@@ -1,409 +1,379 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_datum" type="ffprobe:packetSideDatumType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<xsd:attribute name="type" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<xsd:complexType name="packetSideDatumType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
<xsd:complexType name="logsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="logType">
|
||||
<xsd:attribute name="context" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int" />
|
||||
<xsd:attribute name="category" type="xsd:int" />
|
||||
<xsd:attribute name="parent_context" type="xsd:string"/>
|
||||
<xsd:attribute name="parent_category" type="xsd:int" />
|
||||
<xsd:attribute name="message" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="crop_top" type="xsd:long" />
|
||||
<xsd:attribute name="crop_bottom" type="xsd:long" />
|
||||
<xsd:attribute name="crop_left" type="xsd:long" />
|
||||
<xsd:attribute name="crop_right" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="logsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="logType">
|
||||
<xsd:attribute name="context" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int" />
|
||||
<xsd:attribute name="category" type="xsd:int" />
|
||||
<xsd:attribute name="parent_context" type="xsd:string"/>
|
||||
<xsd:attribute name="parent_category" type="xsd:int" />
|
||||
<xsd:attribute name="message" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecodes" type="ffprobe:frameSideDataTimecodeList" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecode" type="ffprobe:frameSideDataTimecodeType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeType">
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="non_diegetic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="captions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="descriptions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="metadata" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dependent" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="still_image" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_size" type="xsd:int" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="closed_captions" type="xsd:boolean"/>
|
||||
<xsd:attribute name="film_grain" type="xsd:boolean"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="initial_padding" type="xsd:int"/>
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
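For reference, XML following this schema can be obtained from @command{ffprobe}; a minimal sketch (the input name is only illustrative) is:
@example
ffprobe -print_format xml -show_format -show_streams input.mkv
@end example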
|
||||
|
||||
@@ -34,24 +34,27 @@ Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
|
||||
thread count for the second stream to 4. If @var{stream_index} is used as an
|
||||
additional stream specifier (see below), then it selects stream number
|
||||
@var{stream_index} from the matching streams. Stream numbering is based on the
|
||||
order of the streams as detected by libavformat except when a program ID is
|
||||
also specified. In this case it is based on the ordering of the streams in the
|
||||
program.
|
||||
@item @var{stream_type}[:@var{additional_stream_specifier}]
|
||||
thread count for the second stream to 4.
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of the following: 'v' or 'V' for video, 'a' for audio, 's'
|
||||
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
|
||||
streams, 'V' only matches video streams which are not attached pictures, video
|
||||
thumbnails or cover arts. If @var{additional_stream_specifier} is used, then
|
||||
it matches streams which both have this type and match the
|
||||
@var{additional_stream_specifier}. Otherwise, it matches all streams of the
|
||||
specified type.
|
||||
@item p:@var{program_id}[:@var{additional_stream_specifier}]
|
||||
Matches streams which are in the program with the id @var{program_id}. If
|
||||
@var{additional_stream_specifier} is used, then it matches streams which both
|
||||
are part of the program and match the @var{additional_stream_specifier}.
|
||||
thumbnails or cover arts. If @var{stream_index} is given, then it matches
|
||||
stream number @var{stream_index} of this type. Otherwise, it matches all
|
||||
streams of this type.
|
||||
@item p:@var{program_id}[:@var{stream_index}] or p:@var{program_id}[:@var{stream_type}[:@var{stream_index}]] or
|
||||
p:@var{program_id}:m:@var{key}[:@var{value}]
|
||||
In the first version, if @var{stream_index} is given, then it matches the stream with number @var{stream_index}
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
program. In the second version, @var{stream_type} is one of the following: 'v' for video, 'a' for audio, 's'
for subtitle, 'd' for data. If @var{stream_index} is also given, then it matches
stream number @var{stream_index} of this type in the program with the id @var{program_id}.
Otherwise, if only @var{stream_type} is given, it matches all
streams of this type in the program with the id @var{program_id}.
In the third version, it matches streams in the program with the id @var{program_id} with the metadata
tag @var{key} having the specified value. If
@var{value} is not given, it matches streams that contain the given tag with any
value.
|
||||
|
||||
@item #@var{stream_id} or i:@var{stream_id}
|
||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
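For instance, assuming an MPEG-TS input whose desired stream has the (hypothetical) PID 0x101, it could be selected with:
@example
ffmpeg -i input.ts -map 0:i:0x101 -c copy output.ts
@end example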
|
||||
@@ -107,24 +110,13 @@ Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@option{-formats} option to get a list of all muxers and demuxers.
|
||||
|
||||
@item filter=@var{filter_name}
|
||||
Print detailed information about the filter named @var{filter_name}. Use the
|
||||
Print detailed information about the filter name @var{filter_name}. Use the
|
||||
@option{-filters} option to get a list of all filters.
|
||||
|
||||
@item bsf=@var{bitstream_filter_name}
|
||||
Print detailed information about the bitstream filter named @var{bitstream_filter_name}.
|
||||
Use the @option{-bsfs} option to get a list of all bitstream filters.
|
||||
|
||||
@item protocol=@var{protocol_name}
|
||||
Print detailed information about the protocol named @var{protocol_name}.
|
||||
Use the @option{-protocols} option to get a list of all protocols.
|
||||
@end table
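As a short illustration of these forms (the names below are just common examples):
@example
ffmpeg -h muxer=matroska
ffmpeg -h filter=scale
ffmpeg -h protocol=http
@end example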
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
|
||||
@item -buildconf
|
||||
Show the build configuration, one option per line.
|
||||
|
||||
@item -formats
|
||||
Show available formats (including devices).
|
||||
|
||||
@@ -167,9 +159,6 @@ Show available sample formats.
|
||||
@item -layouts
|
||||
Show channel names and standard channel layouts.
|
||||
|
||||
@item -dispositions
|
||||
Show stream dispositions.
|
||||
|
||||
@item -colors
|
||||
Show recognized color names.
|
||||
|
||||
@@ -246,15 +235,17 @@ ffmpeg [...] -loglevel +repeat
|
||||
By default the program logs to stderr. If coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled by setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR}, or can be forced by setting
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced by setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a future FFmpeg version.
|
||||
|
||||
@item -report
|
||||
Dump full command line and log output to a file named
|
||||
Dump full command line and console output to a file named
|
||||
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel debug}.
|
||||
It also implies @code{-loglevel verbose}.
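A minimal sketch of such a run (file names here are only illustrative):
@example
ffmpeg -report -i input.avi -f null -
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input.avi -f null -
@end example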
|
||||
|
||||
Setting the environment variable @env{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
@@ -355,19 +346,6 @@ Possible flags for this option are:
|
||||
@item k8
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@item -cpucount @var{count} (@emph{global})
|
||||
Override detection of CPU count. This option is intended
|
||||
for testing. Do not use it unless you know what you're doing.
|
||||
@example
|
||||
ffmpeg -cpucount 2
|
||||
@end example
|
||||
|
||||
@item -max_alloc @var{bytes}
|
||||
Set the maximum size limit for allocating a block on the heap by ffmpeg's
|
||||
family of malloc functions. Exercise @strong{extreme caution} when using
|
||||
this option. Don't use it if you do not understand the full consequences of doing so.
|
||||
Default is INT_MAX.
|
||||
@end table
|
||||
|
||||
@section AVOptions
|
||||
@@ -393,15 +371,7 @@ ffmpeg -i input.flac -id3v2_version 3 out.mp3
|
||||
@end example
|
||||
|
||||
All codec AVOptions are per-stream, and thus a stream specifier
|
||||
should be attached to them:
|
||||
@example
|
||||
ffmpeg -i multichannel.mxf -map 0:v:0 -map 0:a:0 -map 0:a:0 -c:a:0 ac3 -b:a:0 640k -ac:a:1 2 -c:a:1 aac -b:2 128k out.mp4
|
||||
@end example
|
||||
|
||||
In the above example, a multichannel audio stream is mapped twice for output.
|
||||
The first instance is encoded with codec ac3 and bitrate 640k.
|
||||
The second instance is downmixed to 2 channels and encoded with codec aac. A bitrate of 128k is specified for it using
|
||||
the absolute index of the output stream.
|
||||
should be attached to them.
|
||||
|
||||
Note: the @option{-nooption} syntax cannot be used for boolean
|
||||
AVOptions, use @option{-option 0}/@option{-option 1}.
|
||||
|
||||
doc/filters.texi (13506 lines): file diff suppressed because it is too large.
@@ -27,48 +27,40 @@ stream information. A higher value will enable detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not less than 32. It is 5000000 by default.
|
||||
|
||||
@item max_probe_packets @var{integer} (@emph{input})
|
||||
Set the maximum number of buffered packets when probing a codec.
|
||||
Default is 2500 packets.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags}
|
||||
Set format flags. Some are implemented for a limited number of formats.
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
|
||||
Possible values for input files:
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item discardcorrupt
|
||||
Discard corrupted packets.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item fastseek
|
||||
Enable fast, but inaccurate seeks for some formats.
|
||||
@item genpts
|
||||
Generate missing PTS if DTS is present.
|
||||
@item igndts
|
||||
Ignore DTS if PTS is set. Inert when nofillin is set.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by buffering during initial input streams analysis.
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
Do not fill in missing values in packet fields that can be exactly calculated.
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers; this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS. At present, available only for AVIs with an index.
|
||||
@end table
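For instance, two of the input flags described above could be combined as:
@example
ffmpeg -fflags +genpts+discardcorrupt -i input.ts -c copy output.mkv
@end example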
|
||||
|
||||
Possible values for output files:
|
||||
@table @samp
|
||||
@item autobsf
|
||||
Automatically apply bitstream filters as required by the output format. Enabled by default.
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data.
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item flush_packets
|
||||
Write out packets immediately.
|
||||
@item shortest
|
||||
Stop muxing at the end of the shortest stream.
|
||||
It may be needed to increase max_interleave_delta to avoid flushing the longer
|
||||
@@ -141,7 +133,7 @@ Consider things that a sane encoder should not do as an error.
|
||||
|
||||
@item max_interleave_delta @var{integer} (@emph{output})
|
||||
Set maximum buffering duration for interleaving. The duration is
|
||||
expressed in microseconds, and defaults to 10000000 (10 seconds).
|
||||
expressed in microseconds, and defaults to 1000000 (1 second).
|
||||
|
||||
To ensure all the streams are interleaved correctly, libavformat will
|
||||
wait until it has at least one packet for each stream before actually
|
||||
@@ -213,7 +205,7 @@ is @code{0} (meaning that no offset is applied).
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@@ -222,32 +214,6 @@ ffprobe -dump_separator "
|
||||
@item max_streams @var{integer} (@emph{input})
|
||||
Specifies the maximum number of streams. This can be used to reject files that
|
||||
would require too many resources due to a large number of streams.
|
||||
|
||||
@item skip_estimate_duration_from_pts @var{bool} (@emph{input})
|
||||
Skip estimation of input duration when calculated using PTS.
|
||||
At present, applicable for MPEG-PS and MPEG-TS.
|
||||
|
||||
@item strict, f_strict @var{integer} (@emph{input/output})
|
||||
Specify how strictly to follow the standards. @code{f_strict} is deprecated and
|
||||
should be used only via the @command{ffmpeg} tool.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item very
|
||||
strictly conform to an older more strict version of the spec or reference software
|
||||
@item strict
|
||||
strictly conform to all the things in the spec no matter what consequences
|
||||
@item normal
|
||||
|
||||
@item unofficial
|
||||
allow unofficial extensions
|
||||
@item experimental
|
||||
allow non-standardized experimental things: experimental
(unfinished, work-in-progress, or not well tested) decoders and encoders.
Note: experimental decoders can pose a security risk; do not use this for
decoding untrusted input.
|
||||
@end table
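For example, to allow experimental features when encoding or muxing (use with care):
@example
ffmpeg -i input.mov -strict experimental output.mp4
@end example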
|
||||
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
@@ -258,10 +224,30 @@ decoding untrusted input.
|
||||
Format stream specifiers allow selection of one or more streams that
|
||||
match specific properties.
|
||||
|
||||
Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index.
|
||||
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of the following: 'v' for video, 'a' for audio,
|
||||
's' for subtitle, 'd' for data, and 't' for attachments. If
|
||||
@var{stream_index} is given, then it matches the stream number
|
||||
@var{stream_index} of this type. Otherwise, it matches all streams of
|
||||
this type.
|
||||
|
||||
@item p:@var{program_id}[:@var{stream_index}]
|
||||
If @var{stream_index} is given, then it matches the stream with number
|
||||
@var{stream_index} in the program with the id
|
||||
@var{program_id}. Otherwise, it matches all streams in the program.
|
||||
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by a format-specific ID.
|
||||
@end table
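For instance, the audio-stream form above can be exercised with @command{ffprobe}:
@example
ffprobe -select_streams a:0 -show_streams input.mkv
@end example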
|
||||
|
||||
The exact semantics of stream specifiers is defined by the
|
||||
@code{avformat_match_stream_specifier()} function declared in the
|
||||
@file{libavformat/avformat.h} header and documented in the
|
||||
@ref{Stream specifiers,,Stream specifiers section in the ffmpeg(1) manual,ffmpeg}.
|
||||
@file{libavformat/avformat.h} header.
|
||||
|
||||
@ifclear config-writeonly
|
||||
@include demuxers.texi
|
||||
|
||||
doc/general.texi (1289 lines): file diff suppressed because it is too large.
File diff suppressed because it is too large.
@@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it.
|
||||
@section Cloning the source tree
|
||||
|
||||
@example
|
||||
git clone https://git.ffmpeg.org/ffmpeg.git <target>
|
||||
git clone git://source.ffmpeg.org/ffmpeg <target>
|
||||
@end example
|
||||
|
||||
This will put the FFmpeg sources into the directory @var{<target>}.
|
||||
@@ -143,7 +143,7 @@ git log <filename(s)>
|
||||
@end example
|
||||
|
||||
You may also use the graphical tools like @command{gitview} or @command{gitk}
|
||||
or the web interface available at @url{https://git.ffmpeg.org/ffmpeg.git}.
|
||||
or the web interface available at @url{http://source.ffmpeg.org/}.
|
||||
|
||||
@section Checking source tree status
|
||||
|
||||
@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions.
|
||||
git add [-i|-p|-A] <filenames/dirnames>
|
||||
@end example
|
||||
|
||||
Make sure you have told Git your name, email address and GPG key
|
||||
Make sure you have told Git your name and email address
|
||||
|
||||
@example
|
||||
git config --global user.name "My Name"
|
||||
git config --global user.email my@@email.invalid
|
||||
git config --global user.signingkey ABCDEF0123245
|
||||
@end example
|
||||
|
||||
Enable signing all commits or use -S
|
||||
|
||||
@example
|
||||
git config --global commit.gpgsign true
|
||||
@end example
|
||||
|
||||
Use @option{--global} to set the global configuration for all your Git checkouts.
|
||||
@@ -224,46 +217,16 @@ git config --global core.editor
|
||||
or set by one of the following environment variables:
|
||||
@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.
|
||||
|
||||
@section Writing a commit message
|
||||
Log messages should be concise but descriptive. Explain why you made a change;
what you did will be obvious from the changes themselves most of the time.
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
levels look at and educate themselves while reading through your code. Don't
include filenames in log messages; Git provides that information.
|
||||
|
||||
Log messages should be concise but descriptive.
|
||||
|
||||
The first line must contain the context, a colon and a very short
|
||||
summary of what the commit does. Details can be added, if necessary,
|
||||
separated by an empty line. These details should not exceed 60-72 characters
|
||||
per line, except when containing code.
|
||||
|
||||
Example of a good commit message:
|
||||
|
||||
@example
|
||||
avcodec/cbs: add a helper to read extradata within packet side data
|
||||
|
||||
Using ff_cbs_read() on the raw buffer will not parse it as extradata,
|
||||
resulting in parsing errors for example when handling ISOBMFF avcC.
|
||||
This helper works around that.
|
||||
@end example
|
||||
|
||||
@example
|
||||
ptr might be NULL
|
||||
@end example
|
||||
|
||||
If the summary on the first line is not enough, in the body of the message,
explain why you made a change; what you did will be obvious from the changes
themselves most of the time. Saying just "bug fix" or "10l" is bad. Remember
that people of varying skill levels look at and educate themselves while
reading through your code. Don't include filenames in log messages except in
the context; Git provides that information.
|
||||
|
||||
If the commit fixes a registered issue, state it in a separate line of the
|
||||
body: @code{Fix Trac ticket #42.}
|
||||
|
||||
The first line will be used to name
|
||||
Possibly make the commit message have a terse, descriptive first line, an
|
||||
empty line and then a full description. The first line will be used to name
|
||||
the patch by @command{git format-patch}.
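For instance, a series of the last three commits (a purely illustrative count) can be exported with:
@example
git format-patch -o patches/ HEAD~3
@end example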
|
||||
|
||||
Common mistakes for the first line, as seen in @command{git log --oneline}
|
||||
include: missing context at the beginning; description of what the code did
|
||||
before the patch; line too long or wrapped to the second line.
|
||||
|
||||
@section Preparing a patchset
|
||||
|
||||
@example
|
||||
@@ -430,19 +393,6 @@ git checkout -b svn_23456 $SHA1
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter gpg key generation
|
||||
|
||||
If you have no gpg key yet, we recommend that you create an ed25519-based key, as it
is small, fast and secure. In particular, it results in small signatures in git.
|
||||
|
||||
@example
|
||||
gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com"
|
||||
@end example
|
||||
|
||||
When generating a key, make sure the email specified matches the email used in git, as some sites like
GitHub consider mismatches a reason to declare such commits unverified. After generating a key you
|
||||
can add it to the MAINTAINER file and upload it to a keyserver.
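One possible way to publish the key (assuming the keys.openpgp.org keyserver) is:
@example
gpg --keyserver hkps://keys.openpgp.org --send-keys <fingerprint>
@end example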
|
||||
|
||||
@chapter Pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
|
||||
doc/indevs.texi (334 lines changed)
@@ -178,9 +178,6 @@ Capture the mouse pointer. Default is 0.
|
||||
@item -capture_mouse_clicks
|
||||
Capture the screen mouse clicks. Default is 0.
|
||||
|
||||
@item -capture_raw_data
|
||||
Capture the raw device data. Default is 0.
|
||||
Using this option may result in receiving the underlying data delivered to the AVFoundation framework. E.g. for muxed devices that send raw DV data to the framework (like tape-based camcorders), setting this option to false results in extracted video frames captured in the designated pixel format only. Setting this option to true results in receiving the raw DV stream untouched.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -211,13 +208,6 @@ Record video from the system default video device using the pixel format bgr0 an
|
||||
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record raw DV data from a suitable input device and write the output into out.dv:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -capture_raw_data true -i "zr100:none" out.dv
|
||||
@end example
|
||||
|
||||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
@@ -277,8 +267,7 @@ audio track.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. This option is deprecated; please use the
|
||||
@code{-sources} option of ffmpeg to list the available input devices.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@@ -292,35 +281,25 @@ as @option{pal} (3 letters).
|
||||
Default behavior is autodetection of the input video format, if the hardware
|
||||
supports it.
|
||||
|
||||
@item bm_v210
|
||||
This is a deprecated option; you can use @option{raw_format} instead.
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item raw_format
|
||||
Set the pixel format of the captured video.
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item auto
|
||||
|
||||
This is the default which means 8-bit YUV 422 or 8-bit ARGB if format
|
||||
autodetection is used, 8-bit YUV 422 otherwise.
|
||||
|
||||
@item uyvy422
|
||||
|
||||
8-bit YUV 422.
|
||||
|
||||
@item yuv422p10
|
||||
|
||||
10-bit YUV 422.
|
||||
|
||||
@item argb
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item bgra
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item rgb10
|
||||
|
||||
10-bit RGB.
|
||||
|
||||
@end table
|
||||
|
||||
@item teletext_lines
|
||||
@@ -344,34 +323,9 @@ Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp
|
||||
Defaults to @samp{2}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}.
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
Note: DeckLink SDK 11.0 has replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices. So the DeckLink 8K Pro supports four profiles.
|
||||
|
||||
Valid profile modes for DeckLink 8K Pro (with DeckLink SDK >= 11.0):
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
|
||||
Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
|
||||
@samp{half}, @samp{full}
|
||||
|
||||
@item timecode_format
|
||||
Timecode type to include in the frame and video stream metadata. Must be
|
||||
@samp{none}, @samp{rp188vitc}, @samp{rp188vitc2}, @samp{rp188ltc},
|
||||
@samp{rp188hfr}, @samp{rp188any}, @samp{vitc}, @samp{vitc2}, or @samp{serial}.
|
||||
Defaults to @samp{none} (not included).
|
||||
|
||||
In order to properly support 50/60 fps timecodes, the ordering of the queried
|
||||
timecode types for @samp{rp188any} is HFR, VITC1, VITC2 and LTC for >30 fps
|
||||
content. Note that this is slightly different to the ordering used by the
|
||||
DeckLink API, which is HFR, VITC1, LTC, VITC2.
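For example (the device name below is only a placeholder):
@example
ffmpeg -timecode_format rp188any -f decklink -i 'DeckLink Mini Recorder' -c:a copy -c:v copy output.avi
@end example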
|
||||
|
||||
@item video_input
|
||||
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
|
||||
@samp{optical_sdi}, @samp{component}, @samp{composite} or @samp{s_video}.
|
||||
@@ -410,34 +364,6 @@ If set to @option{true}, timestamps are forwarded as they are without removing
|
||||
the initial offset.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item timestamp_align
|
||||
Capture start time alignment in seconds. If set to nonzero, input frames are
dropped until the system timestamp aligns with the configured value.
Alignment difference of up to one frame duration is tolerated.
This is useful for maintaining input synchronization across N different
hardware devices deployed for 'N-way' redundancy. The system time of different
hardware devices should be synchronized with protocols such as NTP or PTP,
before using this option.
Note that this method is not foolproof. In some borderline cases input
synchronization may not happen due to thread scheduling jitter in the OS.
Either sync could be off by 1 frame or, in a rarer case, by
@option{timestamp_align} seconds.
|
||||
Defaults to @samp{0}.
|
||||
|
||||
@item wait_for_tc (@emph{bool})
|
||||
Drop frames until a frame with timecode is received. Sometimes serial timecode
isn't received with the first input frame. If that happens, the stored stream
timecode will be inaccurate. If this option is set to @option{true}, input frames
are dropped until a frame with timecode is received.
The option @var{timecode_format} must be specified.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item enable_klv (@emph{bool})
|
||||
If set to @option{true}, extracts KLV data from VANC and outputs KLV packets.
|
||||
KLV VANC packets are joined based on MID and PSC fields and aggregated into
|
||||
one KLV packet.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -447,7 +373,7 @@ Defaults to @option{false}.
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -sources decklink
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -465,7 +391,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -476,6 +402,116 @@ ffmpeg -channels 16 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder'
|
||||
|
||||
@end itemize
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. Defaults to @option{bgr0}.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs, but can't be autodetected. See the libdrm documentation for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Capture from the first active plane, download the result to normal frames and encode.
|
||||
This will only work if the framebuffer is both linear and mappable - if not, the result
|
||||
may be scrambled or fail to download.
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
|
||||
@example
|
||||
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libndi_newtek
|
||||
|
||||
The libndi_newtek input device provides capture capabilities for using NDI (Network
|
||||
Device Interface, standard created by NewTek).
|
||||
|
||||
The input filename is an NDI source name, which can be found by passing -find_sources 1
on the command line; it has no specific syntax, but it is formatted to be human-readable.
|
||||
|
||||
To enable this input device, you need the NDI SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item find_sources
|
||||
If set to @option{true}, print a list of found/available NDI sources and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item wait_sources
|
||||
Override the time to wait until the number of online sources has changed.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item allow_video_fields
|
||||
When this flag is @option{false}, all video that you receive will be progressive.
|
||||
Defaults to @option{true}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -find_sources 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
Restream to NDI:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -i "DEV-5.INTERNAL.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@@ -625,12 +661,6 @@ Save the currently used video capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@item use_video_device_timestamps
|
||||
If set to @option{false}, the timestamp for video frames will be
|
||||
derived from the wallclock instead of the timestamp provided by
|
||||
the capture device. This allows working around devices that
|
||||
provide unreliable timestamps.
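A sketch of such a capture (the device name is hypothetical):
@example
ffmpeg -f dshow -use_video_device_timestamps false -i video="Integrated Camera" output.mkv
@end example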
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -846,7 +876,7 @@ ffplay -f iec61883 -i auto
|
||||
Grab and record the input of a FireWire DV/HDV device,
|
||||
using a packet buffer of 100000 packets if the source is HDV.
|
||||
@example
|
||||
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
|
||||
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
@@ -909,80 +939,6 @@ Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. This can be autodetected if you are running Linux 5.7
|
||||
or later, but needs to be provided for earlier versions. Defaults to @option{bgr0},
|
||||
which is the most common format used by the Linux console and Xorg X server.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs. It can be autodetected if you are running Linux 5.7 or later, but will need
|
||||
to be provided explicitly when needed in earlier versions. See the libdrm documentation
|
||||
for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Capture from the first active plane, download the result to normal frames and encode.
|
||||
This will only work if the framebuffer is both linear and mappable - if not, the result
|
||||
may be scrambled or fail to download.
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
|
||||
@example
|
||||
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
To capture only part of a plane the output can be cropped - this can be used to capture
|
||||
a single window, as long as it has a known absolute position and size. For example, to
|
||||
capture and encode the middle quarter of a 1920x1080 plane:
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,crop=960:540:480:270,scale_vaapi=960:540:nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section lavfi
|
||||
|
||||
Libavfilter input virtual device.
|
||||
@@ -991,8 +947,9 @@ This input device reads data from the open output pads of a libavfilter
|
||||
filtergraph.
|
||||
|
||||
For each filtergraph open output, the input device will create a
|
||||
corresponding stream which is mapped to the generated output.
|
||||
The filtergraph is specified through the option @option{graph}.
|
||||
corresponding stream which is mapped to the generated output. Currently
|
||||
only video data is supported. The filtergraph is specified through the
|
||||
option @option{graph}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@@ -1120,21 +1077,6 @@ IIDC1394 input device, based on libdc1394 and libraw1394.
|
||||
|
||||
Requires the configure option @code{--enable-libdc1394}.
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate. Default is @code{ntsc}, corresponding to a frame
|
||||
rate of @code{30000/1001}.
|
||||
|
||||
@item pixel_format
|
||||
Select the pixel format. Default is @code{uyvy422}.
|
||||
|
||||
@item video_size
|
||||
Set the video size given as a string such as @code{640x480} or @code{hd720}.
|
||||
Default is @code{qvga}.
|
||||
@end table
|
||||
|
||||
@section openal
|
||||
|
||||
The OpenAL input device provides audio capture on all systems with a
|
||||
@@ -1253,6 +1195,7 @@ Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
|
||||
@section pulse
|
||||
|
||||
PulseAudio input device.
|
||||
@@ -1288,11 +1231,11 @@ Specify the samplerate in Hz, by default 48kHz is used.
|
||||
Specify the channels in use, by default 2 (stereo) is set.
|
||||
|
||||
@item frame_size
|
||||
This option does nothing and is deprecated.
|
||||
Specify the number of bytes per frame, by default it is set to 1024.
|
||||
|
||||
@item fragment_size
|
||||
Specify the size in bytes of the minimal buffering fragment in PulseAudio; it
will affect the audio latency. By default it is set to 50 ms worth of data.
|
||||
Specify the minimal buffering fragment in PulseAudio; it will affect the
|
||||
audio latency. By default it is unset.
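For instance (the value is only illustrative), a smaller fragment for lower latency:
@example
ffmpeg -f pulse -fragment_size 1024 -i default out.wav
@end example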
|
||||
|
||||
@item wallclock
|
||||
Set the initial PTS using the current time. Default is 1.
|
||||
@@ -1527,14 +1470,6 @@ ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item select_region
|
||||
Specify whether to select the grabbing area graphically using the pointer.
|
||||
A value of @code{1} prompts the user to select the grabbing area graphically
|
||||
by clicking and dragging. A single click with no dragging will select the
|
||||
whole screen. A region with zero width or height will also select the whole
|
||||
screen. This option overwrites the @var{video_size}, @var{grab_x}, and
|
||||
@var{grab_y} options. Default value is @code{0}.
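For example, to choose the region interactively before grabbing:
@example
ffmpeg -f x11grab -select_region 1 -i :0.0 out.mpg
@end example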
|
||||
|
||||
@item draw_mouse
|
||||
Specify whether to draw the mouse pointer. A value of @code{0} specifies
|
||||
not to draw the pointer. Default value is @code{1}.
|
||||
@@ -1583,21 +1518,8 @@ With @var{follow_mouse}:
|
||||
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
|
||||
@end example
|
||||
|
||||
@item window_id
|
||||
Grab this window, instead of the whole screen. Default value is 0, which maps to
|
||||
the whole screen (root window).
|
||||
|
||||
The id of a window can be found using the @command{xwininfo} program, possibly with options -tree and
|
||||
-root.
|
||||
|
||||
If the window is later enlarged, the new area is not recorded. Video ends when
|
||||
the window is closed, unmapped (i.e., iconified) or shrunk beyond the video
|
||||
size (which defaults to the initial window size).
|
||||
|
||||
This option disables options @option{follow_mouse} and @option{select_region}.
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default is the full desktop or window.
|
||||
Set the video frame size. Default value is @code{vga}.
|
||||
|
||||
@item grab_x
|
||||
@item grab_y
|
||||
|
||||
@@ -95,16 +95,17 @@ Stuff that didn't reach the codebase:
|
||||
- 0cef06df0 checkasm: add HEVC MC tests
|
||||
- e7078e842 hevcdsp: add x86 SIMD for MC
|
||||
- 7993ec19a hevc: Add hevc_get_pixel_4/8/12/16/24/32/48/64
|
||||
- new bitstream reader (see http://ffmpeg.org/pipermail/ffmpeg-devel/2017-April/209609.html)
|
||||
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
|
||||
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
|
||||
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
|
||||
- Support recovery from an already present HLS playlist (see 16cb06bb30)
|
||||
- Remove all output devices (see 8e7e042d41, 8d3db95f20, 6ce13070bd, d46cd24986 and https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/216904.html)
|
||||
- avcodec/libaomenc: export the Sequence Header OBU as extradata (See a024c3ce9a)
|
||||
|
||||
Collateral damage that needs work locally:
|
||||
------------------------------------------
|
||||
|
||||
- Merge proresdec2.c and proresdec_lgpl.c
|
||||
- Merge proresenc_anatoliy.c and proresenc_kostya.c
|
||||
- Fix MIPS AC3 downmix
|
||||
|
||||
|
||||
@@ -47,8 +47,7 @@ We cannot provide help for scripts and/or third-party tools.
|
||||
@anchor{How do I ask a question or send a message to a mailing list?}
|
||||
@section How do I ask a question or send a message to a mailing list?
|
||||
|
||||
First you must @ref{How do I subscribe?, subscribe}. Then all you have to do is
|
||||
send an email:
|
||||
All you have to do is send an email:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@@ -58,14 +57,49 @@ ffmpeg-user mailing list.
|
||||
@item
|
||||
Email @email{libav-user@@ffmpeg.org} to send a message to the
|
||||
libav-user mailing list.
|
||||
|
||||
@item
|
||||
Email @email{ffmpeg-devel@@ffmpeg.org} to send a message to the
|
||||
ffmpeg-devel mailing list.
|
||||
@end itemize
|
||||
|
||||
If you are not subscribed to the mailing list then your question must be
|
||||
manually approved. Approval may take several days, but the wait is
|
||||
usually less. If you want the message to be sent with no delay then you
|
||||
must subscribe first. See @ref{How do I subscribe?}
|
||||
|
||||
Please do not send a message, subscribe, and re-send the message: this
|
||||
results in duplicates, causes more work for the admins, and may lower
|
||||
your chance at getting an answer. However, you may do so if you first
|
||||
@ref{How do I delete my message in the moderation queue?, delete your original message from the moderation queue}.
|
||||
|
||||
@chapter Subscribing / Unsubscribing
|
||||
|
||||
@section What does subscribing do?
|
||||
|
||||
Subscribing allows two things:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Your messages will show up in the mailing list without waiting in the
|
||||
moderation queue and needing to be manually approved by a mailing list
|
||||
admin.
|
||||
|
||||
@item
|
||||
You will receive all messages to the mailing list including replies to
|
||||
your messages. Non-subscribed users do not receive any messages.
|
||||
@end itemize
|
||||
|
||||
@section Do I need to subscribe?
|
||||
|
||||
No. You can still send a message to the mailing list without
|
||||
subscribing. See @ref{How do I ask a question or send a message to a mailing list?}
|
||||
|
||||
However, your message will need to be manually approved by a mailing
|
||||
list admin, and you will not receive any mailing list messages or
|
||||
replies.
|
||||
|
||||
You can ask to be CCd in your message, but replying users will
|
||||
sometimes forget to do so.
|
||||
|
||||
You may also view and reply to messages via the @ref{Where are the archives?, archives}.
|
||||
|
||||
@anchor{How do I subscribe?}
|
||||
@section How do I subscribe?
|
||||
|
||||
@@ -90,9 +124,6 @@ The process is the same for the other mailing lists.
|
||||
Please avoid asking a mailing list admin to unsubscribe you unless you
|
||||
are absolutely unable to do so by yourself. See @ref{Who do I contact if I have a problem with the mailing list?}
|
||||
|
||||
Note that it is possible to temporarily halt message delivery (vacation mode).
|
||||
See @ref{How do I disable mail delivery without unsubscribing?}
|
||||
|
||||
@chapter Moderation Queue
|
||||
@anchor{Why is my message awaiting moderator approval?}
|
||||
@section Why is my message awaiting moderator approval?
|
||||
@@ -103,6 +134,8 @@ must be manually approved by a mailing list admin:
|
||||
These are:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Messages from users who are @strong{not} subscribed.
|
||||
|
||||
@item
|
||||
Messages that exceed the @ref{What is the message size limit?, message size limit}.
|
||||
@@ -115,13 +148,13 @@ or is abusive towards others).
|
||||
|
||||
@section How long does it take for my message in the moderation queue to be approved?
|
||||
|
||||
The queue is not checked on a regular basis. You can ask on the
|
||||
@t{#ffmpeg-devel} IRC channel on Libera Chat for someone to approve your message.
|
||||
The queue is usually checked once or twice a day, but on occasion
|
||||
several days may pass before someone checks the queue.
|
||||
|
||||
@anchor{How do I delete my message in the moderation queue?}
|
||||
@section How do I delete my message in the moderation queue?
|
||||
|
||||
You should have received an email with the subject @emph{Your message to <mailing list name> awaits moderator approval}.
|
||||
You should have received an email with the subject @emph{Your message to ffmpeg-user awaits moderator approval}.
|
||||
A link is in the message that will allow you to delete your message
|
||||
unless a mailing list admin already approved or rejected it.
|
||||
|
||||
@@ -142,9 +175,6 @@ Click the email link at the top of the message just under the subject
|
||||
title. The link will provide the proper headers to keep the message
|
||||
within the thread.
|
||||
|
||||
Note that you must be subscribed to send a message to the ffmpeg-user or
|
||||
libav-user mailing lists.
|
||||
|
||||
@section How do I search the archives?
|
||||
|
||||
Perform a site search using your favorite search engine. Example:
|
||||
@@ -155,14 +185,13 @@ Perform a site search using your favorite search engine. Example:
|
||||
|
||||
@section Is there an alternative to the mailing list?
|
||||
|
||||
You can ask for help in the official @t{#ffmpeg} IRC channel on Libera Chat.
|
||||
You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.
|
||||
|
||||
Some users prefer the third-party @url{http://www.ffmpeg-archive.org/, Nabble}
|
||||
interface which presents the mailing lists in a typical forum layout.
|
||||
Some users prefer the third-party Nabble interface which presents the
|
||||
mailing lists in a typical forum layout.
|
||||
|
||||
There are also numerous third-party help sites such as
|
||||
@url{https://superuser.com/tags/ffmpeg, Super User} and
|
||||
@url{https://www.reddit.com/r/ffmpeg/, r/ffmpeg on reddit}.
|
||||
There are also numerous third-party help sites such as Super User and
|
||||
r/ffmpeg on reddit.
|
||||
|
||||
@anchor{What is top-posting?}
|
||||
@section What is top-posting?
|
||||
@@ -174,15 +203,16 @@ Instead, use trimmed interleaved/inline replies (@url{https://lists.ffmpeg.org/p
|
||||
@anchor{What is the message size limit?}
|
||||
@section What is the message size limit?
|
||||
|
||||
The message size limit is 1000 kilobytes. Please provide links to larger files
|
||||
instead of attaching them.
|
||||
The message size limit is 500 kilobytes for the user lists and 1000
|
||||
kilobytes for ffmpeg-devel. Please provide links to larger files instead
|
||||
of attaching them.
|
||||
|
||||
@section Where can I upload sample files?
|
||||
|
||||
Anywhere that is not too annoying for us to use.
|
||||
|
||||
Google Drive and Dropbox are acceptable if you need a file host, and
|
||||
@url{https://0x0.st/, 0x0.st} is good for files under 256 MiB.
|
||||
0x0.st is good for files under 256 MiB.
|
||||
|
||||
Small, short samples are preferred if possible.
|
||||
|
||||
@@ -229,54 +259,6 @@ or headers.
|
||||
|
||||
You can then filter the mailing list messages to their own folder.
|
||||
|
||||
@anchor{How do I disable mail delivery without unsubscribing?}
|
||||
@section How do I disable mail delivery without unsubscribing?
|
||||
|
||||
Sometimes you may want to temporarily stop receiving all mailing list
|
||||
messages. This "vacation mode" is simple to do:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Go to the @url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user mailing list info page}
|
||||
|
||||
@item
|
||||
Enter your email address in the box at the very bottom of the page and click the
@emph{Unsubscribe or edit options} button.
|
||||
|
||||
@item
|
||||
Enter your password and click the @emph{Log in} button.
|
||||
|
||||
@item
|
||||
Look for the @emph{Mail delivery} option. Here you can disable/enable mail
|
||||
delivery. If you check @emph{Set globally} it will apply your choice to all
|
||||
other FFmpeg mailing lists you are subscribed to.
|
||||
@end enumerate
|
||||
|
||||
Alternatively, from your subscribed address, send a message to @email{ffmpeg-user-request@@ffmpeg.org}
|
||||
with the subject @emph{set delivery off}. To re-enable mail delivery send a
|
||||
message to @email{ffmpeg-user-request@@ffmpeg.org} with the subject
|
||||
@emph{set delivery on}.
|
||||
|
||||
@anchor{Why is the mailing list munging my address?}
|
||||
@section Why is the mailing list munging my address?
|
||||
|
||||
This is due to subscribers who use an email service with a DMARC reject policy,
which adds difficulties for mailing list operators.
|
||||
|
||||
The mailing list must re-write (munge) the @emph{From:} header for such users;
|
||||
otherwise their email service will reject and bounce the message resulting in
|
||||
automatic unsubscribing from the mailing list.
|
||||
|
||||
When sending a message these users will see @emph{via <mailing list name>}
|
||||
added to their name and the @emph{From:} address munged to the address of
|
||||
the particular mailing list.
|
||||
|
||||
If you want to avoid this then please use a different email service.
|
||||
|
||||
Note that ffmpeg-devel does not apply any munging as it causes issues with
|
||||
patch authorship. As a result users with an email service with a DMARC reject
|
||||
policy may be automatically unsubscribed due to rejected and bounced messages.
|
||||
|
||||
@chapter Rules and Etiquette
|
||||
|
||||
@section What are the rules and the proper etiquette?
|
||||
@@ -344,7 +326,7 @@ recommended.
|
||||
Avoid sending the same message to multiple mailing lists.
|
||||
|
||||
@item
|
||||
Please follow our @url{https://ffmpeg.org/community.html#Code-of-conduct, Code of Conduct}.
|
||||
Please follow our @url{https://ffmpeg.org/developer.html#Code-of-conduct, Code of Conduct}.
|
||||
@end itemize
|
||||
|
||||
@chapter Help
|
||||
@@ -375,15 +357,6 @@ form a multi-part message is recommended by email standards.
|
||||
Check your spam folder.
|
||||
@end itemize
|
||||
|
||||
@anchor{Why do I keep getting unsubscribed from ffmpeg-devel?}
|
||||
@section Why do I keep getting unsubscribed from ffmpeg-devel?
|
||||
|
||||
Users with an email service that has a DMARC reject or quarantine policy may be
|
||||
automatically unsubscribed from the ffmpeg-devel mailing list due to the mailing
|
||||
list messages being continuously rejected and bounced back.
|
||||
|
||||
Consider using a different email service.
|
||||
|
||||
@anchor{Who do I contact if I have a problem with the mailing list?}
|
||||
@section Who do I contact if I have a problem with the mailing list?
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ At the beginning of a chapter section there may be an optional timebase to be
|
||||
used for start/end values. It must be in the form
|
||||
@samp{TIMEBASE=@var{num}/@var{den}}, where @var{num} and @var{den} are
|
||||
integers. If the timebase is missing then start/end times are assumed to
|
||||
be in nanoseconds.
|
||||
be in milliseconds.
|
||||
|
||||
Next a chapter section must contain chapter start and end times in form
|
||||
@samp{START=@var{num}}, @samp{END=@var{num}}, where @var{num} is a positive
|
||||
|
||||
@@ -48,6 +48,11 @@ Files that have MIPS copyright notice in them:
|
||||
float_dsp_mips.c
|
||||
libm_mips.h
|
||||
softfloat_tables.h
|
||||
* libavcodec/
|
||||
fft_fixed_32.c
|
||||
fft_init_table.c
|
||||
fft_table.h
|
||||
mdct_fixed_32.c
|
||||
* libavcodec/mips/
|
||||
aacdec_fixed.c
|
||||
aacsbr_fixed.c
|
||||
@@ -65,6 +70,9 @@ Files that have MIPS copyright notice in them:
|
||||
compute_antialias_float.h
|
||||
lsp_mips.h
|
||||
dsputil_mips.c
|
||||
fft_mips.c
|
||||
fft_table.h
|
||||
fft_init_table.c
|
||||
fmtconvert_mips.c
|
||||
iirfilter_mips.c
|
||||
mpegaudiodsp_mips_fixed.c
|
||||
|
||||
@@ -20,7 +20,8 @@ Slice threading -
|
||||
|
||||
Frame threading -
|
||||
* Restrictions with slice threading also apply.
|
||||
* Custom get_buffer2() and get_format() callbacks must be thread-safe.
|
||||
* For best performance, the client should set thread_safe_callbacks if it
|
||||
provides a thread-safe get_buffer() callback.
|
||||
* There is one frame of delay added for every thread beyond the first one.
|
||||
Clients must be able to handle this; the pkt_dts and pkt_pts fields in
|
||||
AVFrame will work as usual.
|
||||
@@ -50,13 +51,16 @@ the decode process starts. Call ff_thread_finish_setup() afterwards. If
|
||||
some code can't be moved, have update_thread_context() run it in the next
|
||||
thread.
|
||||
|
||||
If the codec allocates writable tables in its init(), add an init_thread_copy()
|
||||
which re-allocates them for other threads.
|
||||
|
||||
Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
|
||||
speed gain at this point but it should work.
|
||||
|
||||
If there are inter-frame dependencies, so the codec calls
|
||||
ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in
|
||||
FFCodec.caps_internal and use ff_thread_get_buffer() to allocate frames.
|
||||
Otherwise decode directly into the user-supplied frames.
|
||||
ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
|
||||
frames must then be freed with ff_thread_release_buffer().
|
||||
Otherwise leave it at zero and decode directly into the user-supplied frames.
|
||||
|
||||
Call ff_thread_report_progress() after some part of the current picture has decoded.
|
||||
A good place to put this is where draw_horiz_band() is called - add this if it isn't
|
||||
|
||||
1340 doc/muxers.texi
File diff suppressed because it is too large
@@ -267,11 +267,6 @@ CELL/SPU:
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf
|
||||
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf
|
||||
|
||||
RISC-V-specific:
|
||||
----------------
|
||||
The RISC-V Instruction Set Manual, Volume 1, Unprivileged ISA:
|
||||
https://riscv.org/technical/specifications/
|
||||
|
||||
GCC asm links:
|
||||
--------------
|
||||
official doc but quite ugly
|
||||
|
||||
172 doc/outdevs.texi
@@ -38,52 +38,6 @@ ffmpeg -i INPUT -f alsa hw:1,7
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section AudioToolbox
|
||||
|
||||
AudioToolbox output device.
|
||||
|
||||
Allows native output to CoreAudio devices on OSX.
|
||||
|
||||
The output filename can be empty (or @code{-}) to refer to the default system output device or a number that refers to the device index as shown using: @code{-list_devices true}.
|
||||
|
||||
Alternatively, the audio output device can be chosen by index using
@option{-audio_device_index <INDEX>}, overriding any device name or index
given in the output filename.
|
||||
|
||||
All available devices can be enumerated by using @option{-list_devices true}, listing
|
||||
all device names, UIDs and corresponding indices.
|
||||
|
||||
@subsection Options
|
||||
|
||||
AudioToolbox supports the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item -audio_device_index <INDEX>
|
||||
Specify the audio device by its index. Overrides anything given in the output filename.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Print the list of supported devices and output a sine wave to the default device:
|
||||
@example
|
||||
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -list_devices true -
|
||||
@end example
|
||||
|
||||
@item
|
||||
Output a sine wave to the device with the index 2, overriding any output filename:
|
||||
@example
|
||||
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -audio_device_index 2 -
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section caca
|
||||
|
||||
CACA output device.
|
||||
@@ -186,8 +140,7 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sinks} option of ffmpeg to list the available output devices.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@@ -197,49 +150,6 @@ Defaults to @option{false}.
|
||||
Amount of time to preroll video in seconds.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
Note: DeckLink SDK 11.0 has replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices, so the DeckLink 8K Pro supports four profiles.
|
||||
|
||||
Valid profile modes for the DeckLink 8K Pro (with DeckLink SDK >= 11.0):
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
|
||||
Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
|
||||
@samp{half}, @samp{full}
|
||||
|
||||
@item timing_offset
|
||||
Sets the genlock timing pixel offset on the used output.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item link
|
||||
Sets the SDI video link configuration on the used output. Must be
|
||||
@samp{unset}, @samp{single} link SDI, @samp{dual} link SDI or @samp{quad} link
|
||||
SDI.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
@item sqd
|
||||
Enable Square Division Quad Split mode for Quad-link SDI output.
|
||||
Must be @samp{unset}, @samp{true} or @samp{false}.
|
||||
Defaults to @option{unset}.
|
||||
|
||||
@item level_a
|
||||
Enable SMPTE Level A mode on the used output.
|
||||
Must be @samp{unset}, @samp{true} or @samp{false}.
|
||||
Defaults to @option{unset}.
|
||||
|
||||
@item vanc_queue_size
|
||||
Sets maximum output buffer size in bytes for VANC data. If the buffering reaches this value,
|
||||
outgoing VANC data will be dropped.
|
||||
Defaults to @samp{1048576}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -249,7 +159,7 @@ Defaults to @samp{1048576}.
|
||||
@item
|
||||
List output devices:
|
||||
@example
|
||||
ffmpeg -sinks decklink
|
||||
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -272,6 +182,51 @@ ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLi
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libndi_newtek
|
||||
|
||||
The libndi_newtek output device provides playback capabilities for NDI (Network
Device Interface, a standard created by NewTek).

The output filename is an NDI name.
|
||||
|
||||
To enable this output device, you need the NDI SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
|
||||
NDI uses uyvy422 pixel format natively, but also supports bgra, bgr0, rgba and
|
||||
rgb0.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item reference_level
|
||||
The audio reference level in dB. This specifies how many dB above the
|
||||
reference level (+4dBU) is the full range of 16 bit audio.
|
||||
Defaults to @option{0}.
|
||||
|
||||
@item clock_video
|
||||
Specifies whether the video stream "clocks" itself.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item clock_audio
|
||||
Specifies whether the audio stream "clocks" itself.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Play video clip:
|
||||
@example
|
||||
ffmpeg -i "udp://@@239.1.1.1:10480?fifo_size=1000000&overrun_nonfatal=1" -vf "scale=720:576,fps=fps=25,setdar=dar=16/9,format=pix_fmts=uyvy422" -f libndi_newtek NEW_NDI1
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section fbdev
|
||||
|
||||
Linux framebuffer output device.
|
||||
@@ -410,8 +365,6 @@ ffmpeg -i INPUT -f pulse "stream name"
|
||||
|
||||
SDL (Simple DirectMedia Layer) output device.
|
||||
|
||||
"sdl2" can be used as alias for "sdl".
|
||||
|
||||
This output device allows one to show a video stream in an SDL
|
||||
window. Only one SDL window is allowed per application, so you can
|
||||
have only one instance of this output device in an application.
|
||||
@@ -426,18 +379,13 @@ For more information about SDL, check:
|
||||
|
||||
@table @option
|
||||
|
||||
@item window_borderless
|
||||
Set SDL window border off.
|
||||
Default value is 0 (enable window border).
|
||||
@item window_title
|
||||
Set the SDL window title. If not specified it defaults to the filename
specified for the output device.
|
||||
|
||||
@item window_enable_quit
|
||||
Enable quit action (using window button or keyboard key)
|
||||
when non-zero value is provided.
|
||||
Default value is 1 (enable quit action).
|
||||
|
||||
@item window_fullscreen
|
||||
Set fullscreen mode when non-zero value is provided.
|
||||
Default value is zero.
|
||||
@item icon_title
|
||||
Set the name of the iconified SDL window. If not specified it is set
to the same value as @var{window_title}.
|
||||
|
||||
@item window_size
|
||||
Set the SDL window size, can be a string of the form
|
||||
@@ -445,13 +393,9 @@ Set the SDL window size, can be a string of the form
|
||||
If not specified it defaults to the size of the input video,
|
||||
downscaled according to the aspect ratio.
|
||||
|
||||
@item window_title
|
||||
Set the SDL window title. If not specified it defaults to the filename
specified for the output device.
|
||||
|
||||
@item window_x
|
||||
@item window_y
|
||||
Set the position of the window on the screen.
|
||||
@item window_fullscreen
|
||||
Set fullscreen mode when non-zero value is provided.
|
||||
Default value is zero.
|
||||
@end table
|
||||
|
||||
@subsection Interactive commands
|
||||
@@ -476,10 +420,6 @@ ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL out
|
||||
|
||||
sndio audio output device.
|
||||
|
||||
@section v4l2
|
||||
|
||||
Video4Linux2 output device.
|
||||
|
||||
@section xv
|
||||
|
||||
XV (XVideo) output device.
|
||||
|
||||
@@ -92,6 +92,9 @@ For information about compiling FFmpeg on OS/2 see
|
||||
|
||||
@chapter Windows
|
||||
|
||||
To get help and instructions for building FFmpeg under Windows, check out
|
||||
the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.
|
||||
|
||||
@section Native Windows compilation using MinGW or MinGW-w64
|
||||
|
||||
FFmpeg can be built to run natively on Windows using the MinGW-w64
|
||||
@@ -145,11 +148,16 @@ To target 32 bits replace @code{x86_64} with @code{i686} in the command above.
|
||||
|
||||
@section Microsoft Visual C++ or Intel C++ Compiler for Windows
|
||||
|
||||
FFmpeg can be built with MSVC 2013 or later.
|
||||
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
|
||||
and wrapper, or with MSVC 2013 and ICL natively.
|
||||
|
||||
You will need the following prerequisites:
|
||||
|
||||
@itemize
|
||||
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
|
||||
(if using MSVC 2012 or earlier)
|
||||
@item @uref{http://msys2.github.io/, MSYS2}
|
||||
@item @uref{http://www.nasm.us/, NASM}
|
||||
(Also available via MSYS2's package manager.)
|
||||
@@ -158,13 +166,16 @@ You will need the following prerequisites:
|
||||
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}.
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
|
||||
earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your
|
||||
@code{PATH} as well.
|
||||
|
||||
Next, make sure any other headers and libs you want to use, such as zlib, are
|
||||
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
|
||||
and @code{INCLUDE} environment variables to include the @strong{Windows-style}
|
||||
paths to these directories. Alternatively, you can try to use the
|
||||
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
|
||||
@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC
|
||||
2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too.
|
||||
|
||||
Finally, run:
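A typical sequence, assuming the MSVC toolchain is selected explicitly with
@option{--toolchain=msvc} (adjust the configure flags to your setup), is:

@example
./configure --toolchain=msvc
make
make install
@end example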
|
||||
|
||||
@@ -206,6 +217,8 @@ can see.
|
||||
|
||||
@item FFmpeg has been tested with the following on i686 and x86_64:
|
||||
@itemize
|
||||
@item Visual Studio 2010 Pro and Express
|
||||
@item Visual Studio 2012 Pro and Express
|
||||
@item Visual Studio 2013 Pro and Express
|
||||
@item Intel Composer XE 2013
|
||||
@item Intel Composer XE 2013 SP1
|
||||
|
||||
@@ -51,82 +51,6 @@ in microseconds.
|
||||
|
||||
A description of the currently available protocols follows.
|
||||
|
||||
@section amqp
|
||||
|
||||
Advanced Message Queueing Protocol (AMQP) version 0-9-1 is a broker based
|
||||
publish-subscribe communication protocol.
|
||||
|
||||
FFmpeg must be compiled with --enable-librabbitmq to support AMQP. A separate
|
||||
AMQP broker must also be run. An example open-source AMQP broker is RabbitMQ.
|
||||
|
||||
After starting the broker, an FFmpeg client may stream data to the broker using
|
||||
the command:
|
||||
|
||||
@example
|
||||
ffmpeg -re -i input -f mpegts amqp://[[user]:[password]@@]hostname[:port][/vhost]
|
||||
@end example
|
||||
|
||||
Where hostname and port (default is 5672) are the address of the broker. The
client may also set a user/password for authentication. The default for both
fields is "guest". The name of the virtual host on the broker can be set with
vhost. The default value is "/".
|
||||
|
||||
Multiple subscribers may stream from the broker using the command:
|
||||
@example
|
||||
ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost]
|
||||
@end example
|
||||
|
||||
In RabbitMQ all data published to the broker flows through a specific exchange,
|
||||
and each subscribing client has an assigned queue/buffer. When a packet arrives
|
||||
at an exchange, it may be copied to a client's queue depending on the exchange
|
||||
and routing_key fields.
|
||||
|
||||
The following options are supported:
|
||||
|
||||
@table @option
|
||||
|
||||
@item exchange
|
||||
Sets the exchange to use on the broker. RabbitMQ has several predefined
|
||||
exchanges: "amq.direct" is the default exchange, where the publisher and
|
||||
subscriber must have a matching routing_key; "amq.fanout" is the same as a
|
||||
broadcast operation (i.e. the data is forwarded to all queues on the fanout
|
||||
exchange independent of the routing_key); and "amq.topic" is similar to
|
||||
"amq.direct", but allows for more complex pattern matching (refer to the RabbitMQ
|
||||
documentation).
|
||||
|
||||
@item routing_key
|
||||
Sets the routing key. The default value is "amqp". The routing key is used on
|
||||
the "amq.direct" and "amq.topic" exchanges to decide whether packets are written
|
||||
to the queue of a subscriber.
|
||||
|
||||
@item pkt_size
|
||||
Maximum size of each packet sent to or received from the broker. Default is 131072.
Minimum is 4096 and max is any large value (representable by an int). When
receiving packets, this sets an internal buffer size in FFmpeg. It should be
equal to or greater than the size of the packets published to the broker;
otherwise the received message may be truncated, causing decoding errors.
|
||||
|
||||
@item connection_timeout
|
||||
The timeout in seconds during the initial connection to the broker. The
|
||||
default value is rw_timeout, or 5 seconds if rw_timeout is not set.
|
||||
|
||||
@item delivery_mode @var{mode}
|
||||
Sets the delivery mode of each message sent to broker.
|
||||
The following values are accepted:
|
||||
@table @samp
|
||||
@item persistent
|
||||
Delivery mode set to "persistent" (2). This is the default value.
|
||||
Messages may be written to the broker's disk depending on its setup.
|
||||
|
||||
@item non-persistent
|
||||
Delivery mode set to "non-persistent" (1).
|
||||
Messages will stay in broker's memory unless the broker is under memory
|
||||
pressure.
|
||||
|
||||
@end table
|
||||
|
||||
@end table
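As an illustration, a sender and a matching receiver using the default
"amq.direct" exchange and an explicit routing key could be run as follows
(host, credentials and key are placeholders):

@example
ffmpeg -re -i input.mp4 -f mpegts -exchange amq.direct -routing_key live1 amqp://guest:guest@@localhost:5672/
ffplay -exchange amq.direct -routing_key live1 amqp://guest:guest@@localhost:5672/
@end example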
|
||||
|
||||
@section async
|
||||
|
||||
Asynchronous data filling wrapper for input stream.
|
||||
@@ -175,16 +99,6 @@ Caching wrapper for input stream.
|
||||
|
||||
Cache the input stream to temporary file. It brings seeking capability to live streams.
|
||||
|
||||
The accepted options are:
|
||||
@table @option
|
||||
|
||||
@item read_ahead_limit
|
||||
Amount in bytes that may be read ahead when seeking isn't supported. Range is -1 to INT_MAX.
|
||||
-1 for unlimited. Default is 65536.
|
||||
|
||||
@end table
|
||||
|
||||
URL Syntax is
|
||||
@example
|
||||
cache:@var{URL}
|
||||
@end example
|
||||
@@ -215,38 +129,6 @@ ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
|
||||
Note that you may need to escape the character "|" which is special for
|
||||
many shells.
|
||||
|
||||
@section concatf
|
||||
|
||||
Physical concatenation protocol using a line break delimited list of
|
||||
resources.
|
||||
|
||||
Read and seek from many resources in sequence as if they were
|
||||
a unique resource.
|
||||
|
||||
A URL accepted by this protocol has the syntax:
|
||||
@example
|
||||
concatf:@var{URL}
|
||||
@end example
|
||||
|
||||
where @var{URL} is the url containing a line break delimited list of
|
||||
resources to be concatenated, each one possibly specifying a distinct
|
||||
protocol. Special characters must be escaped with backslash or single
|
||||
quotes. See @ref{quoting_and_escaping,,the "Quoting and escaping"
|
||||
section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
For example to read a sequence of files @file{split1.mpeg},
|
||||
@file{split2.mpeg}, @file{split3.mpeg} listed in separate lines within
|
||||
a file @file{split.txt} with @command{ffplay} use the command:
|
||||
@example
|
||||
ffplay concatf:split.txt
|
||||
@end example
|
||||
Where @file{split.txt} contains the lines:
|
||||
@example
|
||||
split1.mpeg
|
||||
split2.mpeg
|
||||
split3.mpeg
|
||||
@end example
|
||||
|
||||
@section crypto
|
||||
|
||||
AES-encrypted stream reading protocol.
|
||||
@@ -275,33 +157,6 @@ For example, to convert a GIF file given inline with @command{ffmpeg}:
|
||||
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
|
||||
@end example
|
||||
|
||||
@section fd
|
||||
|
||||
File descriptor access protocol.
|
||||
|
||||
The accepted syntax is:
|
||||
@example
|
||||
fd: -fd @var{file_descriptor}
|
||||
@end example
|
||||
|
||||
If @option{fd} is not specified, by default the stdout file descriptor will be
used for writing, stdin for reading. Unlike the pipe protocol, the fd protocol
has seek support if the descriptor corresponds to a regular file. For security
reasons, the fd protocol does not support passing a file descriptor via the URL.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item blocksize
|
||||
Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable if data transmission is slow.
|
||||
|
||||
@item fd
|
||||
Set file descriptor.
|
||||
@end table
|
||||
|
||||
@section file
|
||||
|
||||
File access protocol.
|
||||
@@ -338,20 +193,6 @@ Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable for files on slow medium.
|
||||
|
||||
@item follow
|
||||
If set to 1, the protocol will retry reading at the end of the file, allowing
|
||||
reading files that are still being written. In order for this to terminate,
|
||||
you either need to use the rw_timeout option, or use the interrupt callback
|
||||
(for API users).
|
||||
|
||||
@item seekable
|
||||
Controls if seekability is advertised on the file. 0 means non-seekable, -1
|
||||
means auto (seekable for normal files, non-seekable for named pipes).
|
||||
|
||||
Many demuxers handle seekable and non-seekable resources differently,
|
||||
overriding this might speed up opening certain files at the cost of losing some
|
||||
features (e.g. accurate seeking).
|
||||
@end table
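For instance, @option{follow} can be combined with the generic @option{rw_timeout}
option (in microseconds) to read a file that is still being written; the
filenames below are placeholders:

@example
ffmpeg -follow 1 -rw_timeout 10000000 -i file:growing.ts -c copy out.mkv
@end example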
|
||||
|
||||
@section ftp
|
||||
@@ -373,14 +214,6 @@ Set timeout in microseconds of socket I/O operations used by the underlying low
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@item ftp-user
|
||||
Set a user to be used for authenticating to the FTP server. This is overridden by the
|
||||
user in the FTP URL.
|
||||
|
||||
@item ftp-password
|
||||
Set a password to be used for authenticating to the FTP server. This is overridden by
|
||||
the password in the FTP URL, or by @option{ftp-anonymous-password} if no user is set.
|
||||
|
||||
@item ftp-anonymous-password
|
||||
Password used when login as anonymous user. Typically an e-mail address
|
||||
should be used.
|
||||
@@ -396,16 +229,21 @@ it, unless special care is taken (tests, customized server configuration
|
||||
etc.). Different FTP servers behave in different way during seek
|
||||
operation. ff* tools may produce incomplete content due to server limitations.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item follow
|
||||
If set to 1, the protocol will retry reading at the end of the file, allowing
|
||||
reading files that are still being written. In order for this to terminate,
|
||||
you either need to use the rw_timeout option, or use the interrupt callback
|
||||
(for API users).
|
||||
|
||||
@end table
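For example, a hypothetical anonymous download supplying
@option{ftp-anonymous-password} might be invoked as:

@example
ffplay -ftp-anonymous-password user@@example.com ftp://ftp.example.com/pub/sample.avi
@end example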
|
||||
|
||||
@section gopher
|
||||
|
||||
Gopher protocol.
|
||||
|
||||
@section gophers
|
||||
|
||||
Gophers protocol.
|
||||
|
||||
The Gopher protocol with TLS encapsulation.
|
||||
|
||||
@section hls
|
||||
|
||||
Read Apple HTTP Live Streaming compliant segmented stream as
|
||||
@@ -465,6 +303,14 @@ Set the Referer header. Include 'Referer: URL' header in HTTP request.
|
||||
Override the User-Agent header. If not specified the protocol will use a
|
||||
string describing the libavformat build. ("Lavf/<version>")
|
||||
|
||||
@item user-agent
|
||||
This is a deprecated option; you can use user_agent instead.
|
||||
|
||||
@item timeout
|
||||
Set timeout in microseconds of socket I/O operations used by the underlying low level
|
||||
operation. By default it is set to -1, which means that the timeout is
|
||||
not specified.
|
||||
|
||||
@item reconnect_at_eof
|
||||
If set then eof is treated like an error and causes reconnection, this is useful
|
||||
for live / endless streams.
|
||||
@@ -472,13 +318,6 @@ for live / endless streams.
|
||||
@item reconnect_streamed
|
||||
If set then even streamed/non-seekable streams will be reconnected on errors.
|
||||
|
||||
@item reconnect_on_network_error
|
||||
Reconnect automatically in case of TCP/TLS errors during connect.
|
||||
|
||||
@item reconnect_on_http_error
|
||||
A comma separated list of HTTP status codes to reconnect on. The list can
|
||||
include specific status codes (e.g. '503') or the strings '4xx' / '5xx'.
|
||||
|
||||
@item reconnect_delay_max
|
||||
Sets the maximum delay in seconds after which to give up reconnecting
|
||||
|
||||
@@ -551,33 +390,6 @@ ffmpeg -i somefile.ogg -chunked_post 0 -c copy -f ogg http://@var{server}:@var{p
|
||||
wget --post-file=somefile.ogg http://@var{server}:@var{port}
|
||||
@end example
|
||||
|
||||
@item send_expect_100
|
||||
Send an Expect: 100-continue header for POST. If set to 1 it will send, if set
|
||||
to 0 it won't, if set to -1 it will try to send if it is applicable. Default
|
||||
value is -1.
|
||||
|
||||
@item auth_type
|
||||
|
||||
Set HTTP authentication type. No option for Digest, since this method requires
|
||||
getting nonce parameters from the server first and can't be used straight away like
|
||||
Basic.
|
||||
|
||||
@table @option
|
||||
@item none
|
||||
Choose the HTTP authentication type automatically. This is the default.
|
||||
@item basic
|
||||
|
||||
Choose the HTTP basic authentication.
|
||||
|
||||
Basic authentication sends a Base64-encoded string that contains a user name and password
|
||||
for the client. Base64 is not a form of encryption and should be considered the same as
|
||||
sending the user name and password in clear text (Base64 is a reversible encoding).
|
||||
If a resource needs to be protected, strongly consider using an authentication scheme
|
||||
other than basic authentication. HTTPS/TLS should be used with basic authentication.
|
||||
Without these additional security enhancements, basic authentication should not be used
|
||||
to protect sensitive or valuable information.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@subsection HTTP Cookies
|
||||
@@ -632,44 +444,12 @@ audio/mpeg.
|
||||
This enables support for Icecast versions < 2.4.0, that do not support the
|
||||
HTTP PUT method but the SOURCE method.
|
||||
|
||||
@item tls
|
||||
Establish a TLS (HTTPS) connection to Icecast.
|
||||
|
||||
@end table
|
||||
|
||||
@example
|
||||
icecast://[@var{username}[:@var{password}]@@]@var{server}:@var{port}/@var{mountpoint}
|
||||
@end example
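A hypothetical streaming command following that syntax, with placeholder
credentials and mount point, could be:

@example
ffmpeg -re -i input.wav -c:a libvorbis -content_type audio/ogg -f ogg icecast://source:hackme@@localhost:8000/stream.ogg
@end example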
|
||||
|
||||
@section ipfs
|
||||
|
||||
InterPlanetary File System (IPFS) protocol support. One can access files stored
|
||||
on the IPFS network through so-called gateways. These are http(s) endpoints.
|
||||
This protocol wraps the IPFS native protocols (ipfs:// and ipns://) to be sent
|
||||
to such a gateway. Users can (and should) host their own node which means this
|
||||
protocol will use one's local gateway to access files on the IPFS network.
|
||||
|
||||
This protocol accepts the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item gateway
|
||||
Defines the gateway to use. When not set, the protocol will first try
|
||||
locating the local gateway by looking at @code{$IPFS_GATEWAY}, @code{$IPFS_PATH}
|
||||
and @code{$HOME/.ipfs/}, in that order.
|
||||
|
||||
@end table
|
||||
|
||||
One can use this protocol in 2 ways. Using IPFS:
|
||||
@example
|
||||
ffplay ipfs://<hash>
|
||||
@end example
|
||||
|
||||
Or the IPNS protocol (IPNS is mutable IPFS):
|
||||
@example
|
||||
ffplay ipns://<hash>
|
||||
@end example
|
||||
|
||||
@section mmst
|
||||
|
||||
MMS (Microsoft Media Server) protocol over TCP.
|
||||
@@ -714,7 +494,7 @@ The accepted syntax is:
|
||||
pipe:[@var{number}]
|
||||
@end example
|
||||
|
||||
If @option{fd} isn't specified, @var{number} is the number corresponding to the file descriptor of the
|
||||
@var{number} is the number corresponding to the file descriptor of the
|
||||
pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If @var{number}
|
||||
is not specified, by default the stdout file descriptor will be used
|
||||
for writing, stdin for reading.
|
||||
@@ -741,8 +521,6 @@ Set I/O operation maximum block size, in bytes. Default value is
|
||||
@code{INT_MAX}, which results in not limiting the requested block size.
|
||||
Setting this value reasonably low improves user termination request reaction
|
||||
time, which is valuable if data transmission is slow.
|
||||
@item fd
|
||||
Set file descriptor.
|
||||
@end table
|
||||
|
||||
Note that some formats (typically MOV), require the output protocol to
|
||||
@@ -783,50 +561,6 @@ Example usage:
|
||||
-f rtp_mpegts -fec prompeg=l=8:d=4 rtp://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
@section rist
|
||||
|
||||
Reliable Internet Streaming Transport protocol
|
||||
|
||||
The accepted options are:
|
||||
@table @option
|
||||
@item rist_profile
|
||||
Supported values:
|
||||
@table @samp
|
||||
@item simple
|
||||
@item main
|
||||
This is the default.
|
||||
@item advanced
|
||||
@end table
|
||||
|
||||
@item buffer_size
|
||||
Set internal RIST buffer size in milliseconds for retransmission of data.
|
||||
Default value is 0 which means the librist default (1 sec). Maximum value is 30
|
||||
seconds.
|
||||
|
||||
@item fifo_size
|
||||
Size of the librist receiver output fifo in number of packets. This must be a
|
||||
power of 2.
|
||||
Defaults to 8192 (vs the librist default of 1024).
|
||||
|
||||
@item overrun_nonfatal=@var{1|0}
|
||||
Survive in case of librist fifo buffer overrun. Default value is 0.
|
||||
|
||||
@item pkt_size
|
||||
Set maximum packet size for sending data. 1316 by default.
|
||||
|
||||
@item log_level
|
||||
Set loglevel for RIST logging messages. You only need to set this if you
|
||||
explicitly want to enable debug level messages or packet loss simulation,
|
||||
otherwise the regular loglevel is respected.
|
||||
|
||||
@item secret
|
||||
Set override of encryption secret, by default is unset.
|
||||
|
||||
@item encryption
|
||||
Set encryption type, by default is disabled.
|
||||
Acceptable values are 128 and 256.
|
||||
@end table
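As a sketch (the address and option values are placeholders, and the build is
assumed to be configured with librist), a sender might be run as:

@example
ffmpeg -re -i input.mp4 -c copy -f mpegts -rist_profile main -pkt_size 1316 rist://192.168.1.10:1968
@end example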
|
||||
|
||||
@section rtmp
|
||||
|
||||
Real-Time Messaging Protocol.
|
||||
@@ -896,13 +630,6 @@ be named, by prefixing the type with 'N' and specifying the name before
|
||||
the value (i.e. @code{NB:myFlag:1}). This option may be used multiple
|
||||
times to construct arbitrary AMF sequences.
|
||||
|
||||
@item rtmp_enhanced_codecs
|
||||
Specify the list of codecs the client advertises to support in an
|
||||
enhanced RTMP stream. This option should be set to a comma separated
|
||||
list of fourcc values, like @code{hvc1,av01,vp09} for multiple codecs
|
||||
or @code{hvc1} for only one codec. The specified list will be presented
|
||||
in the "fourCcLive" property of the Connect Command Message.
|
||||
|
||||
@item rtmp_flashver
|
||||
Version of the Flash plugin used to run the SWF player. The default
|
||||
is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
|
||||
@@ -948,11 +675,6 @@ URL to player swf file, compute hash/size automatically.
|
||||
@item rtmp_tcurl
|
||||
URL of the target stream. Defaults to proto://host[:port]/app.
|
||||
|
||||
@item tcp_nodelay=@var{1|0}
|
||||
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.
|
||||
|
||||
@emph{Remark: Writing to the socket is currently not optimized to minimize system calls and reduces the efficiency / effect of TCP_NODELAY.}
|
||||
|
||||
@end table
|
||||
|
||||
For example to read with @command{ffplay} a multimedia resource named
|
||||
@@ -1140,9 +862,6 @@ Set the local RTCP port to @var{n}.
|
||||
@item pkt_size=@var{n}
|
||||
Set max packet size (in bytes) to @var{n}.
|
||||
|
||||
@item buffer_size=@var{size}
|
||||
Set the maximum UDP socket buffer size in bytes.
|
||||
|
||||
@item connect=0|1
|
||||
Do a @code{connect()} on the UDP socket (if set to 1) or not (if set
|
||||
to 0).
|
||||
@@ -1160,13 +879,6 @@ set to 1) or to a default remote address (if set to 0).
|
||||
@item localport=@var{n}
|
||||
Set the local RTP port to @var{n}.
|
||||
|
||||
@item localaddr=@var{addr}
|
||||
Local IP address of a network interface used for sending packets or joining
|
||||
multicast groups.
|
||||
|
||||
@item timeout=@var{n}
|
||||
Set timeout (in microseconds) of socket I/O operations to @var{n}.
|
||||
|
||||
This is a deprecated option. Instead, @option{localrtpport} should be
|
||||
used.
|
||||
|
||||
@@ -1211,59 +923,6 @@ Options can be set on the @command{ffmpeg}/@command{ffplay} command
|
||||
line, or set in code via @code{AVOption}s or in
|
||||
@code{avformat_open_input}.
|
||||
|
||||
@subsection Muxer
|
||||
The following options are supported.
|
||||
|
||||
@table @option
|
||||
@item rtsp_transport
|
||||
Set RTSP transport protocols.
|
||||
|
||||
It accepts the following values:
|
||||
@table @samp
|
||||
@item udp
|
||||
Use UDP as lower transport protocol.
|
||||
|
||||
@item tcp
|
||||
Use TCP (interleaving within the RTSP control channel) as lower
|
||||
transport protocol.
|
||||
@end table
|
||||
|
||||
Default value is @samp{0}.
|
||||
|
||||
@item rtsp_flags
|
||||
Set RTSP flags.
|
||||
|
||||
The following values are accepted:
|
||||
@table @samp
|
||||
@item latm
|
||||
Use MP4A-LATM packetization instead of MPEG4-GENERIC for AAC.
|
||||
@item rfc2190
|
||||
Use RFC 2190 packetization instead of RFC 4629 for H.263.
|
||||
@item skip_rtcp
|
||||
Don't send RTCP sender reports.
|
||||
@item h264_mode0
|
||||
Use mode 0 for H.264 in RTP.
|
||||
@item send_bye
|
||||
Send RTCP BYE packets when finishing.
|
||||
@end table
|
||||
|
||||
Default value is @samp{0}.
|
||||
|
||||
|
||||
@item min_port
|
||||
Set minimum local UDP port. Default value is 5000.
|
||||
|
||||
@item max_port
|
||||
Set maximum local UDP port. Default value is 65000.
|
||||
|
||||
@item buffer_size
|
||||
Set the maximum socket buffer size in bytes.
|
||||
|
||||
@item pkt_size
|
||||
Set max send packet size (in bytes). Default value is 1472.
|
||||
@end table
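For instance, to publish a stream to an RTSP server over TCP (the server URL is
a placeholder):

@example
ffmpeg -re -i input.mp4 -c copy -f rtsp -rtsp_transport tcp rtsp://localhost:8554/live.sdp
@end example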
|
||||
|
||||
@subsection Demuxer
|
||||
The following options are supported.
|
||||
|
||||
@table @option
|
||||
@@ -1289,10 +948,6 @@ Use UDP multicast as lower transport protocol.
|
||||
@item http
|
||||
Use HTTP tunneling as lower transport protocol, which is useful for
|
||||
passing proxies.
|
||||
|
||||
@item https
|
||||
Use HTTPs tunneling as lower transport protocol, which is useful for
|
||||
passing proxies and widely used for security consideration.
|
||||
@end table
|
||||
|
||||
Multiple lower transport protocols may be specified, in that case they are
|
||||
@@ -1310,9 +965,6 @@ Accept packets only from negotiated peer address and port.
|
||||
Act as a server, listening for an incoming connection.
|
||||
@item prefer_tcp
|
||||
Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
|
||||
@item satip_raw
|
||||
Export raw MPEG-TS stream instead of demuxing. The flag will simply write out
|
||||
the raw stream, with the original PAT/PMT/PIDs intact.
|
||||
@end table
|
||||
|
||||
Default value is @samp{none}.
|
||||
@@ -1325,7 +977,6 @@ The following flags are accepted:
|
||||
@item video
|
||||
@item audio
|
||||
@item data
|
||||
@item subtitle
|
||||
@end table
|
||||
|
||||
By default it accepts all media types.
|
||||
@@ -1336,23 +987,21 @@ Set minimum local UDP port. Default value is 5000.
|
||||
@item max_port
|
||||
Set maximum local UDP port. Default value is 65000.
|
||||
|
||||
@item listen_timeout
|
||||
Set maximum timeout (in seconds) to establish an initial connection. Setting
|
||||
@option{listen_timeout} > 0 sets @option{rtsp_flags} to @samp{listen}. Default is -1
|
||||
which means an infinite timeout when @samp{listen} mode is set.
|
||||
@item timeout
|
||||
Set maximum timeout (in seconds) to wait for incoming connections.
|
||||
|
||||
A value of -1 means infinite (default). This option implies the
|
||||
@option{rtsp_flags} set to @samp{listen}.
|
||||
|
||||
@item reorder_queue_size
|
||||
Set number of packets to buffer for handling of reordered packets.
|
||||
|
||||
@item timeout
|
||||
@item stimeout
|
||||
Set socket TCP I/O timeout in microseconds.
|
||||
|
||||
@item user_agent
|
||||
@item user-agent
|
||||
Override User-Agent header. If not specified, it defaults to the
|
||||
libavformat identifier string.
|
||||
|
||||
@item buffer_size
|
||||
Set the maximum socket buffer size in bytes.
|
||||
@end table
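For example, to play a stream while forcing TCP as the lower transport (the URL
is a placeholder):

@example
ffplay -rtsp_transport tcp rtsp://server.example.com:8554/stream
@end example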
|
||||
|
||||
When receiving data over UDP, the demuxer tries to reorder received packets
|
||||
@@ -1530,7 +1179,7 @@ options.
|
||||
This protocol accepts the following options.
|
||||
|
||||
@table @option
|
||||
@item connect_timeout=@var{milliseconds}
|
||||
@item connect_timeout
|
||||
Connection timeout; SRT cannot connect for RTT > 1500 msec
|
||||
(2 handshake exchanges) with the default connect timeout of
|
||||
3 seconds. This option applies to the caller and rendezvous
|
||||
@@ -1561,18 +1210,7 @@ IP Type of Service. Applies to sender only. Default value is 0xB8.
|
||||
@item ipttl=@var{ttl}
|
||||
IP Time To Live. Applies to sender only. Default value is 64.
|
||||
|
||||
@item latency=@var{microseconds}
|
||||
Timestamp-based Packet Delivery Delay.
|
||||
Used to absorb bursts of missed packet retransmissions.
|
||||
This flag sets both @option{rcvlatency} and @option{peerlatency}
|
||||
to the same value. Note that prior to version 1.3.0
|
||||
this is the only flag to set the latency, however
|
||||
this is effectively equivalent to setting @option{peerlatency},
|
||||
when side is sender and @option{rcvlatency}
|
||||
when side is receiver, and the bidirectional stream
|
||||
sending is not supported.
|
||||
|
||||
@item listen_timeout=@var{microseconds}
|
||||
@item listen_timeout
|
||||
Set socket listen timeout.
|
||||
|
||||
@item maxbw=@var{bytes/seconds}
|
||||
@@ -1617,51 +1255,6 @@ only if @option{pbkeylen} is non-zero. It is used on
|
||||
the receiver only if the received data is encrypted.
|
||||
The configured passphrase cannot be recovered (write-only).
|
||||
|
||||
@item enforced_encryption=@var{1|0}
|
||||
If true, both connection parties must have the same password
|
||||
set (including empty, that is, with no encryption). If the
|
||||
password doesn't match or only one side is unencrypted,
|
||||
the connection is rejected. Default is true.
|
||||
|
||||
@item kmrefreshrate=@var{packets}
|
||||
The number of packets to be transmitted after which the
|
||||
encryption key is switched to a new key. Default is -1.
|
||||
-1 means auto (0x1000000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item kmpreannounce=@var{packets}
|
||||
The interval between when a new encryption key is sent and
|
||||
when switchover occurs. This value also applies to the
|
||||
subsequent interval between when switchover occurs and
|
||||
when the old encryption key is decommissioned. Default is -1.
|
||||
-1 means auto (0x1000 in srt library). The range for
|
||||
this option is integers in the 0 - @code{INT_MAX}.
|
||||
|
||||
@item snddropdelay=@var{microseconds}
|
||||
The sender's extra delay before dropping packets. This delay is
|
||||
added to the default drop delay time interval value.
|
||||
|
||||
Special value -1: Do not drop packets on the sender at all.
|
||||
|
||||
@item payload_size=@var{bytes}
|
||||
Sets the maximum declared size of a packet transferred
|
||||
during the single call to the sending function in Live
|
||||
mode. Use 0 if this value isn't used (which is default in
|
||||
file mode).
|
||||
Default is -1 (automatic), which typically means MPEG-TS;
|
||||
if you are going to use SRT
|
||||
to send any different kind of payload, such as, for example,
|
||||
wrapping a live stream in very small frames, then you can
|
||||
use a bigger maximum frame size, though not greater than
|
||||
1456 bytes.
|
||||
|
||||
@item pkt_size=@var{bytes}
|
||||
Alias for @samp{payload_size}.
|
||||
|
||||
@item peerlatency=@var{microseconds}
|
||||
The latency value (as described in @option{rcvlatency}) that is
|
||||
set by the sender side as a minimum value for the receiver.
|
||||
|
||||
@item pbkeylen=@var{bytes}
|
||||
Sender encryption key length, in bytes.
|
||||
Only can be set to 0, 16, 24 and 32.
|
||||
@@ -1670,28 +1263,18 @@ Not required on receiver (set to 0),
|
||||
key size obtained from sender in HaiCrypt handshake.
|
||||
Default value is 0.
|
||||
|
||||
@item rcvlatency=@var{microseconds}
|
||||
The time that should elapse since the moment when the
|
||||
packet was sent and the moment when it's delivered to
|
||||
the receiver application in the receiving function.
|
||||
This time should be a buffer time large enough to cover
|
||||
the time spent for sending, unexpectedly extended RTT
|
||||
time, and the time needed to retransmit the lost UDP
|
||||
packet. The effective latency value will be the maximum
|
||||
of this options' value and the value of @option{peerlatency}
|
||||
set by the peer side. Before version 1.3.0 this option
|
||||
is only available as @option{latency}.
|
||||
|
||||
@item recv_buffer_size=@var{bytes}
|
||||
Set UDP receive buffer size, expressed in bytes.
|
||||
Set receive buffer size, expressed in bytes.
|
||||
|
||||
@item send_buffer_size=@var{bytes}
|
||||
Set UDP send buffer size, expressed in bytes.
|
||||
Set send buffer size, expressed in bytes.
|
||||
|
||||
@item timeout=@var{microseconds}
|
||||
Set raise error timeouts for read, write and connect operations. Note that the
|
||||
SRT library has internal timeouts which can be controlled separately, the
|
||||
value set here is only a cap on those.
|
||||
@item rw_timeout
|
||||
Set raise error timeout for read/write operations.
|
||||
|
||||
This option is only relevant in read mode:
|
||||
if no data arrived in more than this time
|
||||
interval, raise error.
|
||||
|
||||
@item tlpktdrop=@var{1|0}
|
||||
Too-late Packet Drop. When enabled on receiver, it skips
|
||||
@@ -1704,100 +1287,9 @@ have no chance of being delivered in time. It was
|
||||
automatically enabled in the sender if the receiver
|
||||
supports it.
|
||||
|
||||
@item sndbuf=@var{bytes}
|
||||
Set send buffer size, expressed in bytes.
|
||||
|
||||
@item rcvbuf=@var{bytes}
|
||||
Set receive buffer size, expressed in bytes.
|
||||
|
||||
Receive buffer must not be greater than @option{ffs}.
|
||||
|
||||
@item lossmaxttl=@var{packets}
|
||||
The value up to which the Reorder Tolerance may grow. When
|
||||
Reorder Tolerance is > 0, then packet loss report is delayed
|
||||
until that number of packets come in. Reorder Tolerance
|
||||
increases every time a "belated" packet has come, but it
|
||||
wasn't due to retransmission (that is, when UDP packets tend
|
||||
to come out of order), with the difference between the latest
|
||||
sequence and this packet's sequence, and not more than the
|
||||
value of this option. By default it's 0, which means that this
|
||||
mechanism is turned off, and the loss report is always sent
|
||||
immediately upon experiencing a "gap" in sequences.
|
||||
|
||||
@item minversion
|
||||
The minimum SRT version that is required from the peer. A connection
|
||||
to a peer that does not satisfy the minimum version requirement
|
||||
will be rejected.
|
||||
|
||||
The version format in hex is 0xXXYYZZ for x.y.z in human readable
|
||||
form.
|
||||
|
||||
@item streamid=@var{string}
|
||||
A string limited to 512 characters that can be set on the socket prior
|
||||
to connecting. This stream ID will be able to be retrieved by the
|
||||
listener side from the socket that is returned from srt_accept and
|
||||
was connected by a socket with that set stream ID. SRT does not enforce
|
||||
any special interpretation of the contents of this string.
|
||||
This option doesn't make sense in a Rendezvous connection; the result
might be that one side simply overrides the value from the other
side, and it's a matter of luck which one wins.
|
||||
|
||||
@item srt_streamid=@var{string}
|
||||
Alias for @samp{streamid} to avoid conflict with ffmpeg command line option.
|
||||
|

@item smoother=@var{live|file}
The type of Smoother used for the transmission for that socket, which
is responsible for the transmission and congestion control. The Smoother
type must be exactly the same on both connecting parties, otherwise
the connection is rejected.

@item messageapi=@var{1|0}
When set, this socket uses the Message API, otherwise it uses the Buffer
API. Note that in live mode (see @option{transtype}) only the
message API is available. In File mode you can choose between two modes:

Stream API (default, when this option is false). In this mode you may
send as much data as you wish with one sending instruction, or even use
dedicated functions that read directly from a file. The internal facility
will take care of any speed and congestion control. When receiving, you
can also receive as much data as desired; data not extracted will be
waiting for the next call. There is no boundary between data portions in
the Stream mode.

Message API. In this mode a single sending instruction passes exactly
one piece of data that has boundaries (a message). Contrary to Live mode,
this message may span multiple UDP packets and the only size
limitation is that it must fit as a whole in the sending buffer. The
receiver must use a buffer large enough to receive the message,
otherwise the message will not be given up. When the message is not
complete (not all packets received or there was a packet loss) it will
not be given up.

@item transtype=@var{live|file}
Sets the transmission type for the socket; in particular, setting this
option sets multiple other parameters to their default values as required
for a particular transmission type.

live: Set options as for live transmission. In this mode, you should
send with one sending instruction only as much data as fits in one UDP packet,
limited to the value defined first in @option{payload_size} (1316 is the
default in this mode). There is no speed control in this mode, only the
bandwidth control, if configured, in order to not exceed the bandwidth with
the overhead transmission (retransmitted and control packets).

file: Set options as for non-live transmission. See @option{messageapi}
for further explanations.

@item linger=@var{seconds}
The number of seconds that the socket waits for unsent data when closing.
Default is -1. -1 means auto (off with 0 seconds in live mode, on with 180
seconds in file mode). Accepted values are integers in the range
0 - @code{INT_MAX}.

@item tsbpd=@var{1|0}
When true, use Timestamp-based Packet Delivery mode. The default behavior
depends on the transmission type: enabled in live mode, disabled in file
mode.
@item tsbpddelay
Timestamp-based Packet Delivery Delay.
Used to absorb bursts of missed packet retransmissions.

@end table
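
As a minimal sketch of how these options combine on the command line (the
address, port, stream ID and latency value below are arbitrary placeholders,
and the @option{mode} option is assumed to be available for this protocol to
select between caller and listener), a sender and a receiver might look like:

@example
ffmpeg -re -i input.mkv -c copy -f mpegts "srt://192.0.2.10:4200?streamid=mystream&latency=2000000"
ffplay "srt://0.0.0.0:4200?mode=listener"
@end example

Quoting the URL keeps the shell from interpreting the @code{&} that separates
the options.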

@@ -1885,15 +1377,8 @@ tcp://@var{hostname}:@var{port}[?@var{options}]
The list of supported options follows.

@table @option
@item listen=@var{2|1|0}
Listen for an incoming connection. 0 disables listen, 1 enables listen in
single client mode, 2 enables listen in multi-client mode. Default value is 0.

@item local_addr=@var{addr}
Local IP address of a network interface used for tcp socket connect.

@item local_port=@var{port}
Local port used for tcp socket connect.
@item listen=@var{1|0}
Listen for an incoming connection. Default value is 0.

@item timeout=@var{microseconds}
Set raise error timeout, expressed in microseconds.
@@ -1912,11 +1397,6 @@ Set send buffer size, expressed bytes.

@item tcp_nodelay=@var{1|0}
Set TCP_NODELAY to disable Nagle's algorithm. Default value is 0.

@emph{Remark: Writing to the socket is currently not optimized to minimize system calls, which reduces the efficiency / effect of TCP_NODELAY.}

@item tcp_mss=@var{bytes}
Set maximum segment size for outgoing TCP packets, expressed in bytes.
@end table

The following example shows how to set up a listening TCP connection.
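A minimal sketch (not the manual's original example), assuming placeholder
@var{input}, @var{format}, @var{hostname} and @var{port} values and the
@option{listen} option described above:

@example
ffmpeg -i @var{input} -f @var{format} tcp://@var{hostname}:@var{port}?listen
ffplay tcp://@var{hostname}:@var{port}
@end example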

@@ -1971,10 +1451,6 @@ A file containing the private key for the certificate.
If enabled, listen for connections on the provided port, and assume
the server role in the handshake instead of the client role.

@item http_proxy
The HTTP proxy to tunnel through, e.g. @code{http://example.com:1234}.
The proxy must support the CONNECT method.

@end table

Example command lines:
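A minimal sketch (not the manual's original examples), assuming this protocol
also accepts @option{listen}, @option{cert} and @option{key} in the URL
options, with placeholder certificate, key, host and port values:

@example
ffmpeg -i @var{input} -f @var{format} "tls://@var{hostname}:@var{port}?listen&cert=server.crt&key=server.key"
ffplay tls://@var{hostname}:@var{port}
@end example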

@@ -2013,7 +1489,7 @@ The list of supported options follows.
@item buffer_size=@var{size}
Set the UDP maximum socket buffer size in bytes. This is used to set either
the receive or send buffer size, depending on what the socket is used for.
Default is 32 KB for output, 384 KB for input. See also @var{fifo_size}.
Default is 64KB. See also @var{fifo_size}.

@item bitrate=@var{bitrate}
If set to nonzero, the output will have the specified constant bitrate if the
@@ -2027,8 +1503,9 @@ packet bursts.
Override the local UDP port to bind with.

@item localaddr=@var{addr}
Local IP address of a network interface used for sending packets or joining
multicast groups.
Choose the local IP address. This is useful e.g. if sending multicast
and the host has multiple interfaces, where the user can choose
which interface to send on by specifying the IP address of that interface.

@item pkt_size=@var{size}
Set the size in bytes of UDP packets.
@@ -2051,12 +1528,12 @@ For receiving, this gives the benefit of only receiving packets from
the specified peer address/port.

@item sources=@var{address}[,@var{address}]
Only receive packets sent from the specified addresses. In case of multicast,
also subscribe to multicast traffic coming from these addresses only.
Only receive packets sent to the multicast group from one of the
specified sender IP addresses.

@item block=@var{address}[,@var{address}]
Ignore packets sent from the specified addresses. In case of multicast, also
exclude the source addresses in the multicast subscription.
Ignore packets sent to the multicast group from the specified
sender IP addresses.

@item fifo_size=@var{units}
Set the UDP receiving circular buffer size, expressed as a number of
@@ -2122,50 +1599,4 @@ Timeout in ms.
Create the Unix socket in listening mode.
@end table

@section zmq

ZeroMQ asynchronous messaging using the libzmq library.

This library supports unicast streaming to multiple clients without relying on
an external server.

The required syntax for streaming or connecting to a stream is:
@example
zmq:tcp://ip-address:port
@end example

Example:
Create a localhost stream on port 5555:
@example
ffmpeg -re -i input -f mpegts zmq:tcp://127.0.0.1:5555
@end example

Multiple clients may connect to the stream using:
@example
ffplay zmq:tcp://127.0.0.1:5555
@end example

Streaming to multiple clients is implemented using a ZeroMQ Pub-Sub pattern.
The server side binds to a port and publishes data. Clients connect to the
server (via IP address/port) and subscribe to the stream. The order in which
the server and client start generally does not matter.

ffmpeg must be compiled with the --enable-libzmq option to support
this protocol.

Options can be set on the @command{ffmpeg}/@command{ffplay} command
line. The following options are supported:

@table @option

@item pkt_size
Forces the maximum packet size for sending/receiving data. The default value is
131,072 bytes. On the server side, this sets the maximum size of sent packets
via ZeroMQ. On the clients, it sets an internal buffer size for receiving
packets. Note that pkt_size on the clients should be equal to or greater than
pkt_size on the server. Otherwise the received message may be truncated causing
decoding errors.

@end table
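
For example, since these options can be given on the command line as noted
above, both ends could be started with a larger packet size (the value below
is arbitrary and only illustrates that the client value should not be smaller
than the server value):

@example
ffmpeg -re -i input -f mpegts -pkt_size 262144 zmq:tcp://127.0.0.1:5555
ffplay -pkt_size 262144 zmq:tcp://127.0.0.1:5555
@end example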

@c man end PROTOCOLS

@@ -11,8 +11,18 @@ programmatic use.

@table @option

@item uchl, used_chlayout
Set used input channel layout. Default is unset. This option is
@item ich, in_channel_count
Set the number of input channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
@option{in_channel_layout} is set.

@item och, out_channel_count
Set the number of output channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
@option{out_channel_layout} is set.

@item uch, used_channel_count
Set the number of used input channels. Default value is 0. This option is
only used for special remapping.

@item isr, in_sample_rate
@@ -31,8 +41,8 @@ Specify the output sample format. It is set by default to @code{none}.
Set the internal sample format. Default value is @code{none}.
This will automatically be chosen when it is not explicitly set.

@item ichl, in_chlayout
@item ochl, out_chlayout
@item icl, in_channel_layout
@item ocl, out_channel_layout
Set the input/output channel layout.

See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
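
For instance, a rough sketch of requesting a specific output rate and layout
through the @code{aresample} filter (which is assumed here to forward these
swresample options; option names follow the table above):

@example
ffmpeg -i input.wav -af aresample=out_sample_rate=48000:out_channel_layout=stereo output.wav
@end example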