Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2025-12-08 07:49:59 +01:00)

Compare commits: n2.4.1 ... release/0. (330 commits)

b256bd3be3 15d6b44ddc e74795e541 473b016096 831416692b b8129b1a7a
7128f67c3f eac21ee7ba b2f2cbdb1c 9b4507e423 f7170c4832 3714036011
a81f72e482 24a0273cb8 974c2ad87c 90c8fa5221 43aae00455 02ac859dfe
04fb6bb915 b012da4019 617a9eedc6 e7484d5425 31f9e849a8 fde0b7d91c
b5f685211c 588571d41d 81b754b1e4 5f5bf9faf9 c2d3f06882 01c90eea6c
2abf5eeea6 d34cfb33af 13093f9767 fee26d352a a23a3dba25 0360dbefad
deb650c692 deaaacbc3f 0b25c3b67c 10ec2308b0 b9500bf864 4f8f4458a5
9def5c4666 7aeb281aa5 a49599b125 fe4409a396 c3761b6618 2e1474fd99
1f1b2f1806 80f89a9b40 5235db68c0 6731776795 d4e4234147 2ae6bdbb9b
4fac60d568 d1729c3715 c28c631d29 6b97e76dfc 4475a7d88b 0f6d4da8de
5c9d2d8377 ac476bfa9f 272e7f6443 776fb2e10d 0dfcbe5285 6d6373dc64
7296a6b5e9 f695be22d8 9125aa9218 09a278fdd1 9eaec5b8f0 48ef116631
cc511b36f3 85b22a8924 6ec1d3b3ba d843e7dc94 acc665f22c 4c223fe519
5a9588b088 02cd93f4ad f8a31e2113 8ba939fcda 79674d27d9 b061ee9a5d
baba561c0b f6147effd2 b990187f99 ae6c57859c 5629c39101 7867cbaf6c
0bf8e22cdb 7944a87ba8 a55db1fc49 123e925956 96c6b3a11c 5e3cd42b6d
f73106256d 782c3ab777 4b2e02a4c4 79fbcd9f03 468cc41d6d 6c9b404dba
b2ac7e585e 8bb3ba5541 5a92aa378d c4e8c99507 479869c499 ec4979e16e
056c909d9d bde4b66063 0fda37cff9 2e693be7e9 8e101086eb f13de3c653
1eb7872238 15df4428d2 ec0124203c 6b01bcebb9 efd453d82d 7209c2b13f
7ee536e87a 665421f3b1 3eb6983dbc e75056bc54 8755a7890e d0688fdd31
0e05292a6c 23f228a0d0 110aff4b24 4a1c3df592 185abfb218 280590e338
9767ea7aa7 771ceb19f2 7739947671 8abf1d882e 1a53095406 60eebf5c12
30ee6c1995 b769df8ff2 cc2580e6e9 4b0f8aed13 2c8ac66456 4f209fe960
037b1142cd 37cc48861d 0cd61bfa6d 16ea6af381 8bd374858f d39cc3c092
e124c3c298 8acc0546bb 36e4be0a0a c603cf5170 e1a46eff7a 23aaa82b1d
58087a4e64 8d1fa1c97e 2eb5f77bc8 ddbbe500b0 d1a5b53ede 8e6173c76a
1a6f024520 3699a46ed7 62da9203fd 2e1e3c1e41 635256a324 240546a185
07df40db6e b24c2e59fe 25bc1108c2 8ef917c033 1883249be3 648dc68098
10da0edddc d6bf79993f 0f2735e839 9463a28792 1330a8a1cb 8135c35528
38423fe0b7 71132596ae d6f8b65417 d8439f0452 eb6b0ed8be 6108f04d4f
b261ebfd22 03db051b43 cc885682e3 1415ebf031 4e0315b30e a9ded3d272
ef93642aac 9cda3d7915 52b8edc94c f936799f0b f6d3dfe78b 2e17744a90
19431d4d4e ecd6fa11c2 7cb35d4954 457f869b73 70f01f1262 80fb9f2c57
46f9a6203a e9210b4e17 4f07a3aa2c 04888edef3 24cd7c5df7 8210ee22e2
eed5697f99 52ee20f2aa f17b892787 18c5fe919f c5898d7c1d 2c4d6aeabc
31c8dcedb2 2adad90ae7 808f9ce727 8069e2f6fb f7494394ee 04847a06c2
329e816ed7 d6860fb653 e332c41670 11f6eebdd3 9109a58867 44511b17cb
48b086b0ef c9864adf34 7d10059aeb 69e8b43812 2f504d7a90 2dea9a1266
84e6629de3 fc038df32e 021054a196 ee20f19b20 2fcb56dab9 96ca078b22
7fd4cbb519 557e065d5f 461243731d fe95afe1e2 775aa5f38c 578c32814c
c46038f6b7 306eefc49f eade5150e4 6d767afb7c 015a7d7362 922c55a09b
4c83c13bc8 bd7e30ea00 4fb58ecea8 a317cd5722 8e2149d7df 9d9f1ecfaa
53b90bb25e a0244ae347 26f74e832b 9593c80062 48b98cdc67 9d442d2d7d
afc97d4735 e5bea45df7 9e3935dfd8 4f5ee3f87b 736d36b792 dc5cc27d5a
eb70d77e1e b8ec4c49bd 49487dfd78 0d3f25eb48 9ef13f70f4 beb0dc5f36
f3fd428697 8811fe6987 3835603e7e 4b360ee2ca ab546a7463 acac7858bd
c42640b200 ef84190a1a 7db16a8173 e91ba7dc9d 95f90d27d2 1e9ac36f66
7d061cfe17 e21e76a914 b18806f811 478394bab7 de3196da60 49549033fd
15dfbc503d f5c694972e a9785f58c6 7a5e131735 93229681b5 8819b9c600
5d62141092 44b20d1d74 9ad437eafb 4fcef88c4d dd2089dfd8 df0ff1a029
41a4fd7a61 a4d8ebfaa1 0ae7dcae2c d6c23ec06a 3499f0f3e3 7056dd763f
eade41f3ec 2f14399e40 4f3ce00704 d8ef221893 8d003e22ca b0b57fa13b
266f6af570 241c55aabe 030896c76a beb93f987c 8e8813a0a1 c3c2325adc
f8429ed58c df4763a782 58af0caf04 3aafe82485 07679e680c da835cc8a3
124 .gitignore (vendored)
@@ -1,88 +1,46 @@
|
||||
*.a
|
||||
.config
|
||||
.version
|
||||
*.o
|
||||
*.d
|
||||
*.def
|
||||
*.dll
|
||||
*.dylib
|
||||
*.exe
|
||||
*.exp
|
||||
*.gcda
|
||||
*.gcno
|
||||
*.h.c
|
||||
*.ilk
|
||||
*.lib
|
||||
*.pc
|
||||
*.pdb
|
||||
*.so
|
||||
*.so.*
|
||||
*.swp
|
||||
*.ver
|
||||
*.d
|
||||
*.exe
|
||||
*.ho
|
||||
*-example
|
||||
*-test
|
||||
*_g
|
||||
/.config
|
||||
/.version
|
||||
/ffmpeg
|
||||
/ffplay
|
||||
/ffprobe
|
||||
/ffserver
|
||||
/config.*
|
||||
/coverage.info
|
||||
/doc/*.1
|
||||
/doc/*.3
|
||||
/doc/*.html
|
||||
/doc/*.pod
|
||||
/doc/config.texi
|
||||
/doc/avoptions_codec.texi
|
||||
/doc/avoptions_format.texi
|
||||
/doc/doxy/html/
|
||||
/doc/examples/avio_reading
|
||||
/doc/examples/decoding_encoding
|
||||
/doc/examples/demuxing_decoding
|
||||
/doc/examples/extract_mvs
|
||||
/doc/examples/filter_audio
|
||||
/doc/examples/filtering_audio
|
||||
/doc/examples/filtering_video
|
||||
/doc/examples/metadata
|
||||
/doc/examples/muxing
|
||||
/doc/examples/pc-uninstalled
|
||||
/doc/examples/remuxing
|
||||
/doc/examples/resampling_audio
|
||||
/doc/examples/scaling_video
|
||||
/doc/examples/transcode_aac
|
||||
/doc/examples/transcoding
|
||||
/doc/fate.txt
|
||||
/doc/print_options
|
||||
/lcov/
|
||||
/libavcodec/*_tablegen
|
||||
/libavcodec/*_tables.c
|
||||
/libavcodec/*_tables.h
|
||||
/libavutil/avconfig.h
|
||||
/libavutil/ffversion.h
|
||||
/tests/audiogen
|
||||
/tests/base64
|
||||
/tests/data/
|
||||
/tests/pixfmts.mak
|
||||
/tests/rotozoom
|
||||
/tests/tiny_psnr
|
||||
/tests/tiny_ssim
|
||||
/tests/videogen
|
||||
/tests/vsynth1/
|
||||
/tools/aviocat
|
||||
/tools/ffbisect
|
||||
/tools/bisect.need
|
||||
/tools/crypto_bench
|
||||
/tools/cws2fws
|
||||
/tools/fourcc2pixfmt
|
||||
/tools/ffescape
|
||||
/tools/ffeval
|
||||
/tools/ffhash
|
||||
/tools/graph2dot
|
||||
/tools/ismindex
|
||||
/tools/pktdumper
|
||||
/tools/probetest
|
||||
/tools/qt-faststart
|
||||
/tools/trasher
|
||||
/tools/seek_print
|
||||
/tools/uncoded_frame
|
||||
/tools/zmqsend
|
||||
config.*
|
||||
doc/*.1
|
||||
doc/*.html
|
||||
doc/*.pod
|
||||
doxy
|
||||
ffmpeg
|
||||
ffplay
|
||||
ffprobe
|
||||
ffserver
|
||||
libavcodec/libavcodec*
|
||||
libavcore/libavcore*
|
||||
libavdevice/libavdevice*
|
||||
libavfilter/libavfilter*
|
||||
libavformat/libavformat*
|
||||
libavutil/avconfig.h
|
||||
libavutil/libavutil*
|
||||
libpostproc/libpostproc*
|
||||
libswscale/libswscale*
|
||||
tests/audiogen
|
||||
tests/base64
|
||||
tests/data
|
||||
tests/rotozoom
|
||||
tests/seek_test
|
||||
tests/tiny_psnr
|
||||
tests/videogen
|
||||
tests/vsynth1
|
||||
tests/vsynth2
|
||||
tools/cws2fws
|
||||
tools/graph2dot
|
||||
tools/lavfi-showfiltfmts
|
||||
tools/pktdumper
|
||||
tools/probetest
|
||||
tools/qt-faststart
|
||||
tools/trasher
|
||||
tools/trasher*.d
|
||||
version.h
|
||||
|
||||
@@ -500,3 +500,5 @@ necessary. Here is a sample; alter the names:
|
||||
Ty Coon, President of Vice
|
||||
|
||||
That's all there is to it!
|
||||
|
||||
|
||||
|
||||
54 CREDITS
@@ -1,6 +1,50 @@
|
||||
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
|
||||
get the names of people who have contributed to FFmpeg.
|
||||
This file contains the name of the people who have contributed to
|
||||
FFmpeg. The names are sorted alphabetically by last name.
|
||||
|
||||
To check the log, you can type the command "git log" in the FFmpeg
|
||||
source directory, or browse the online repository at
|
||||
http://source.ffmpeg.org.
|
||||
Dénes Balatoni
|
||||
Michel Bardiaux
|
||||
Fabrice Bellard
|
||||
Patrice Bensoussan
|
||||
Alex Beregszaszi
|
||||
BERO
|
||||
Mario Brito
|
||||
Ronald Bultje
|
||||
Maarten Daniels
|
||||
Reimar Doeffinger
|
||||
Tim Ferguson
|
||||
Brian Foley
|
||||
Arpad Gereoffy
|
||||
Philip Gladstone
|
||||
Vladimir Gneushev
|
||||
Roine Gustafsson
|
||||
David Hammerton
|
||||
Wolfgang Hesseler
|
||||
Marc Hoffman
|
||||
Falk Hueffner
|
||||
Aurélien Jacobs
|
||||
Steven Johnson
|
||||
Zdenek Kabelac
|
||||
Robin Kay
|
||||
Todd Kirby
|
||||
Nick Kurshev
|
||||
Benjamin Larsson
|
||||
Loïc Le Loarer
|
||||
Daniel Maas
|
||||
Mike Melanson
|
||||
Loren Merritt
|
||||
Jeff Muizelaar
|
||||
Michael Niedermayer
|
||||
François Revol
|
||||
Peter Ross
|
||||
Måns Rullgård
|
||||
Roman Shaposhnik
|
||||
Oded Shimon
|
||||
Dieter Shirley
|
||||
Konstantin Shishkov
|
||||
Juan J. Sierralta
|
||||
Ewald Snel
|
||||
Sascha Sommer
|
||||
Leon van Stuivenberg
|
||||
Roberto Togni
|
||||
Lionel Ulmer
|
||||
Reynaldo Verdejo
|
||||
|
||||
File diff suppressed because it is too large
11 INSTALL (Normal file)
@@ -0,0 +1,11 @@

1) Type './configure' to create the configuration. A list of configure
options is printed by running 'configure --help'.

'configure' can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching 'configure', e.g. '/ffmpegdir/ffmpeg/configure'.

2) Then type 'make' to build FFmpeg. GNU Make 3.81 or later is required.

3) Type 'make install' to install all binaries and libraries you built.
17 INSTALL.md
@@ -1,17 +0,0 @@
#Installing FFmpeg:

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.

`configure` can be launched from a directory different from the FFmpeg
sources to build the objects out of tree. To do this, use an absolute
path when launching `configure`, e.g. `/ffmpegdir/ffmpeg/configure`.

2. Then type `make` to build FFmpeg. GNU Make 3.81 or later is required.

3. Type `make install` to install all binaries and libraries you built.

NOTICE
------

- Non system dependencies (e.g. libx264, libvpx) are disabled by default.
50 LICENSE (Normal file)
@@ -0,0 +1,50 @@
|
||||
FFmpeg:
|
||||
-------
|
||||
|
||||
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
|
||||
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
|
||||
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
|
||||
FFmpeg.
|
||||
|
||||
Some optional parts of FFmpeg are licensed under the GNU General Public License
|
||||
version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
|
||||
these parts are used by default, you have to explicitly pass --enable-gpl to
|
||||
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
|
||||
|
||||
Specifically, the GPL parts of FFmpeg are
|
||||
|
||||
- libpostproc
|
||||
- some x86 optimizations in libswscale
|
||||
- optional x86 optimizations in the files
|
||||
libavcodec/x86/h264_deblock_sse2.asm
|
||||
libavcodec/x86/h264_idct_sse2.asm
|
||||
libavcodec/x86/idct_mmx.c
|
||||
- the X11 grabber in libavdevice/x11grab.c
|
||||
|
||||
There are a handful of files under other licensing terms, namely:
|
||||
|
||||
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint.c, libavcodec/jrevdct.c
|
||||
are taken from libjpeg, see the top of the files for licensing details.
|
||||
|
||||
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
|
||||
the configure parameter --enable-version3 will activate this licensing option
|
||||
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
|
||||
COPYING.GPLv3 to learn the exact legal terms that apply in this case.
|
||||
|
||||
|
||||
external libraries:
|
||||
-------------------
|
||||
|
||||
Some external libraries, e.g. libx264, are under GPL and can be used in
|
||||
conjunction with FFmpeg. They require --enable-gpl to be passed to configure
|
||||
as well.
|
||||
|
||||
The OpenCORE external libraries are under the Apache License 2.0. That license
|
||||
is incompatible with the LGPL v2.1 and the GPL v2, but not with version 3 of
|
||||
those licenses. So to combine the OpenCORE libraries with FFmpeg, the license
|
||||
version needs to be upgraded by passing --enable-version3 to configure.
|
||||
|
||||
The nonfree external libraries libamrnb, libamrwb and libfaac can be hooked up
|
||||
in FFmpeg. You need to pass --enable-nonfree to configure to enable them. Employ
|
||||
this option with care as FFmpeg then becomes nonfree and unredistributable.
|
||||
Note that libfaac claims to be LGPL, but is not.
|
||||
105 LICENSE.md
@@ -1,105 +0,0 @@
|
||||
#FFmpeg:
|
||||
|
||||
Most files in FFmpeg are under the GNU Lesser General Public License version 2.1
|
||||
or later (LGPL v2.1+). Read the file COPYING.LGPLv2.1 for details. Some other
|
||||
files have MIT/X11/BSD-style licenses. In combination the LGPL v2.1+ applies to
|
||||
FFmpeg.
|
||||
|
||||
Some optional parts of FFmpeg are licensed under the GNU General Public License
|
||||
version 2 or later (GPL v2+). See the file COPYING.GPLv2 for details. None of
|
||||
these parts are used by default, you have to explicitly pass --enable-gpl to
|
||||
configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
|
||||
|
||||
Specifically, the GPL parts of FFmpeg are:
|
||||
|
||||
- libpostproc
|
||||
- libmpcodecs
|
||||
- optional x86 optimizations in the files
|
||||
libavcodec/x86/flac_dsp_gpl.asm
|
||||
libavcodec/x86/idct_mmx.c
|
||||
- libutvideo encoding/decoding wrappers in
|
||||
libavcodec/libutvideo*.cpp
|
||||
- the X11 grabber in libavdevice/x11grab.c
|
||||
- the swresample test app in
|
||||
libswresample/swresample-test.c
|
||||
- the texi2pod.pl tool
|
||||
- the following filters in libavfilter:
|
||||
- f_ebur128.c
|
||||
- vf_blackframe.c
|
||||
- vf_boxblur.c
|
||||
- vf_colormatrix.c
|
||||
- vf_cropdetect.c
|
||||
- vf_decimate.c
|
||||
- vf_delogo.c
|
||||
- vf_geq.c
|
||||
- vf_histeq.c
|
||||
- vf_hqdn3d.c
|
||||
- vf_interlace.c
|
||||
- vf_kerndeint.c
|
||||
- vf_mcdeint.c
|
||||
- vf_mp.c
|
||||
- vf_owdenoise.c
|
||||
- vf_perspective.c
|
||||
- vf_phase.c
|
||||
- vf_pp.c
|
||||
- vf_pullup.c
|
||||
- vf_sab.c
|
||||
- vf_smartblur.c
|
||||
- vf_spp.c
|
||||
- vf_stereo3d.c
|
||||
- vf_super2xsai.c
|
||||
- vf_tinterlace.c
|
||||
- vsrc_mptestsrc.c
|
||||
|
||||
Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
|
||||
the configure parameter --enable-version3 will activate this licensing option
|
||||
for you. Read the file COPYING.LGPLv3 or, if you have enabled GPL parts,
|
||||
COPYING.GPLv3 to learn the exact legal terms that apply in this case.
|
||||
|
||||
There are a handful of files under other licensing terms, namely:
|
||||
|
||||
* The files libavcodec/jfdctfst.c, libavcodec/jfdctint_template.c and
|
||||
libavcodec/jrevdct.c are taken from libjpeg, see the top of the files for
|
||||
licensing details. Specifically note that you must credit the IJG in the
|
||||
documentation accompanying your program if you only distribute executables.
|
||||
You must also indicate any changes including additions and deletions to
|
||||
those three files in the documentation.
|
||||
|
||||
|
||||
external libraries
|
||||
==================
|
||||
|
||||
FFmpeg can be combined with a number of external libraries, which sometimes
|
||||
affect the licensing of binaries resulting from the combination.
|
||||
|
||||
compatible libraries
|
||||
--------------------
|
||||
|
||||
The following libraries are under GPL:
|
||||
- frei0r
|
||||
- libcdio
|
||||
- libutvideo
|
||||
- libvidstab
|
||||
- libx264
|
||||
- libx265
|
||||
- libxavs
|
||||
- libxvid
|
||||
When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
|
||||
passing --enable-gpl to configure.
|
||||
|
||||
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
|
||||
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
|
||||
version 3 of those licenses. So to combine these libraries with FFmpeg, the
|
||||
license version needs to be upgraded by passing --enable-version3 to configure.
|
||||
|
||||
incompatible libraries
|
||||
----------------------
|
||||
|
||||
The Fraunhofer AAC library, FAAC and aacplus are under licenses which
|
||||
are incompatible with the GPLv2 and v3. We do not know for certain if their
|
||||
licenses are compatible with the LGPL.
|
||||
If you wish to enable these libraries, pass --enable-nonfree to configure.
|
||||
But note that if you enable any of these libraries the resulting binary will
|
||||
be under a complex license mix that is more restrictive than the LGPL and that
|
||||
may result in additional obligations. It is possible that these
|
||||
restrictions cause the resulting binary to be unredistributeable.
|
||||
366 MAINTAINERS
@@ -4,17 +4,10 @@ FFmpeg maintainers
|
||||
Below is a list of the people maintaining different parts of the
|
||||
FFmpeg code.
|
||||
|
||||
Please try to keep entries where you are the maintainer up to date!
|
||||
|
||||
Names in () mean that the maintainer currently has no time to maintain the code.
|
||||
A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
|
||||
patches and related discussions.
|
||||
|
||||
|
||||
Project Leader
|
||||
==============
|
||||
|
||||
Michael Niedermayer
|
||||
final design decisions
|
||||
|
||||
|
||||
@@ -24,43 +17,38 @@ Applications
|
||||
ffmpeg:
|
||||
ffmpeg.c Michael Niedermayer
|
||||
|
||||
ffplay:
|
||||
ffplay.c Marton Balint
|
||||
Video Hooks:
|
||||
vhook
|
||||
vhook/watermark.c Marcus Engene
|
||||
vhook/ppm.c
|
||||
vhook/drawtext.c
|
||||
vhook/fish.c
|
||||
vhook/null.c
|
||||
vhook/imlib2.c
|
||||
|
||||
ffprobe:
|
||||
ffprobe.c Stefano Sabatini
|
||||
ffplay:
|
||||
ffplay.c Michael Niedermayer
|
||||
|
||||
ffserver:
|
||||
ffserver.c Reynaldo H. Verdejo Pinochet
|
||||
ffserver.c, ffserver.h Baptiste Coudurier
|
||||
|
||||
Commandline utility code:
|
||||
cmdutils.c, cmdutils.h Michael Niedermayer
|
||||
|
||||
QuickTime faststart:
|
||||
tools/qt-faststart.c Baptiste Coudurier
|
||||
qt-faststart.c Mike Melanson
|
||||
|
||||
|
||||
Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu
|
||||
build system (configure, makefiles) Diego Biurrun, Mans Rullgard
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser
|
||||
documentation Mike Melanson, Diego Biurrun
|
||||
website Robert Swain
|
||||
build system (configure,Makefiles) Diego Biurrun, Mans Rullgard
|
||||
project server Diego Biurrun, Mans Rullgard
|
||||
mailinglists Michael Niedermayer, Baptiste Coudurier
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
|
||||
|
||||
Communication
|
||||
=============
|
||||
|
||||
website Deby Barbara Lepage
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
|
||||
mailing lists Michael Niedermayer, Baptiste Coudurier, Lou Logan
|
||||
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
|
||||
Twitter Lou Logan
|
||||
Launchpad Timothy Gu
|
||||
release management Diego Biurrun, Reinhard Tartler
|
||||
|
||||
|
||||
libavutil
|
||||
@@ -72,24 +60,11 @@ Internal Interfaces:
|
||||
libavutil/common.h Michael Niedermayer
|
||||
|
||||
Other:
|
||||
bprint Nicolas George
|
||||
bswap.h
|
||||
des Reimar Doeffinger
|
||||
dynarray.h Nicolas George
|
||||
eval.c, eval.h Michael Niedermayer
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
lzo Reimar Doeffinger
|
||||
mathematics.c, mathematics.h Michael Niedermayer
|
||||
mem.c, mem.h Michael Niedermayer
|
||||
opencl.c, opencl.h Wei Gao
|
||||
opt.c, opt.h Michael Niedermayer
|
||||
rational.c, rational.h Michael Niedermayer
|
||||
rc4 Reimar Doeffinger
|
||||
ripemd.c, ripemd.h James Almer
|
||||
timecode Clément Bœsch
|
||||
mathematics.c, mathematics.h Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
bswap.h
|
||||
|
||||
|
||||
libavcodec
|
||||
@@ -100,14 +75,16 @@ Generic Parts:
|
||||
avcodec.h Michael Niedermayer
|
||||
utility code:
|
||||
utils.c Michael Niedermayer
|
||||
mem.c Michael Niedermayer
|
||||
opt.c, opt.h Michael Niedermayer
|
||||
arithmetic expression evaluator:
|
||||
eval.c Michael Niedermayer
|
||||
audio and video frame extraction:
|
||||
parser.c Michael Niedermayer
|
||||
bitstream reading:
|
||||
bitstream.c, bitstream.h Michael Niedermayer
|
||||
CABAC:
|
||||
cabac.h, cabac.c Michael Niedermayer
|
||||
codec names:
|
||||
codec_names.sh Nicolas George
|
||||
DSP utilities:
|
||||
dsputils.c, dsputils.h Michael Niedermayer
|
||||
entropy coding:
|
||||
@@ -128,107 +105,68 @@ Generic Parts:
|
||||
simple_idct.c, simple_idct.h Michael Niedermayer
|
||||
postprocessing:
|
||||
libpostproc/* Michael Niedermayer
|
||||
table generation:
|
||||
tableprint.c, tableprint.h Reimar Doeffinger
|
||||
fixed point FFT:
|
||||
fft* Zeljko Lukac
|
||||
Text Subtitles Clément Bœsch
|
||||
vdpau:
|
||||
vdpau* Carl Eugen Hoyos
|
||||
|
||||
Codecs:
|
||||
4xm.c Michael Niedermayer
|
||||
8bps.c Roberto Togni
|
||||
8svx.c Jaikrishnan Menon
|
||||
aasc.c Kostya Shishkov
|
||||
aac.[ch], aactab.[ch], aacdectab.h Robert Swain
|
||||
ac3* Justin Ruggles
|
||||
alacenc.c Jaikrishnan Menon
|
||||
alsdec.c Thilo Borgmann
|
||||
apedec.c Kostya Shishkov
|
||||
ass* Aurelien Jacobs
|
||||
asv* Michael Niedermayer
|
||||
atrac3* Benjamin Larsson
|
||||
atrac3plus* Maxim Poliakovski
|
||||
bgmc.c, bgmc.h Thilo Borgmann
|
||||
bink.c Kostya Shishkov
|
||||
binkaudio.c Peter Ross
|
||||
bmp.c Mans Rullgard, Kostya Shishkov
|
||||
cavs* Stefan Gehrer
|
||||
cdxl.c Paul B Mahol
|
||||
celp_filters.* Vitor Sessak
|
||||
cinepak.c Roberto Togni
|
||||
cinepakenc.c Rl / Aetey G.T. AB
|
||||
cljr Alex Beregszaszi
|
||||
cllc.c Derek Buitenhuis
|
||||
cook.c, cookdata.h Benjamin Larsson
|
||||
cpia.c Stephan Hilb
|
||||
crystalhd.c Philip Langdale
|
||||
cscd.c Reimar Doeffinger
|
||||
dca.c Kostya Shishkov, Benjamin Larsson
|
||||
dnxhd* Baptiste Coudurier
|
||||
dpcm.c Mike Melanson
|
||||
dv.c Roman Shaposhnik
|
||||
dvbsubdec.c Anshul Maheshwari
|
||||
dxa.c Kostya Shishkov
|
||||
dv.c Roman Shaposhnik
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
exif.c, exif.h Thilo Borgmann
|
||||
ffv1* Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
fic.c Derek Buitenhuis
|
||||
flac* Justin Ruggles
|
||||
ffv1.c Michael Niedermayer
|
||||
flacdec.c Alex Beregszaszi, Justin Ruggles
|
||||
flacenc.c Justin Ruggles
|
||||
flashsv* Benjamin Larsson
|
||||
flicvideo.c Mike Melanson
|
||||
g722.c Martin Storsjo
|
||||
g726.c Roman Shaposhnik
|
||||
gifdec.c Baptiste Coudurier
|
||||
h264* Loren Merritt, Michael Niedermayer
|
||||
h261* Michael Niedermayer
|
||||
h263* Michael Niedermayer
|
||||
h264* Loren Merritt, Michael Niedermayer
|
||||
huffyuv* Michael Niedermayer, Christophe Gisquet
|
||||
huffyuv.c Michael Niedermayer
|
||||
idcinvideo.c Mike Melanson
|
||||
imc* Benjamin Larsson
|
||||
indeo2* Kostya Shishkov
|
||||
indeo5* Kostya Shishkov
|
||||
interplayvideo.c Mike Melanson
|
||||
ivi* Kostya Shishkov
|
||||
jacosub* Clément Bœsch
|
||||
jpeg2000* Nicolas Bertrand
|
||||
jpeg_ls.c Kostya Shishkov
|
||||
jvdec.c Peter Ross
|
||||
kmvc.c Kostya Shishkov
|
||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||
libcelt_dec.c Nicolas George
|
||||
libdirac* David Conrad
|
||||
lcl*.c Roberto Togni
|
||||
libgsm.c Michel Bardiaux
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libschroedinger* David Conrad
|
||||
libspeexdec.c Justin Ruggles
|
||||
libtheoraenc.c David Conrad
|
||||
libutvideo* Derek Buitenhuis
|
||||
libvorbis.c David Conrad
|
||||
libvpx* James Zern
|
||||
libx264.c Mans Rullgard, Jason Garrett-Glaser
|
||||
libx265.c Derek Buitenhuis
|
||||
libxavs.c Stefan Gehrer
|
||||
libzvbi-teletextdec.c Marton Balint
|
||||
loco.c Kostya Shishkov
|
||||
lzo.h, lzo.c Reimar Doeffinger
|
||||
mdec.c Michael Niedermayer
|
||||
mimic.c Ramiro Polla
|
||||
mjpeg*.c Michael Niedermayer
|
||||
mlp* Ramiro Polla
|
||||
mjpeg.c Michael Niedermayer
|
||||
mmvideo.c Peter Ross
|
||||
mpc* Kostya Shishkov
|
||||
mpeg12.c, mpeg12data.h Michael Niedermayer
|
||||
mpegvideo.c, mpegvideo.h Michael Niedermayer
|
||||
mqc* Nicolas Bertrand
|
||||
msmpeg4.c, msmpeg4data.h Michael Niedermayer
|
||||
msrle.c Mike Melanson
|
||||
msvideo1.c Mike Melanson
|
||||
nellymoserdec.c Benjamin Larsson
|
||||
nuv.c Reimar Doeffinger
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
pgssubdec.c Reimar Doeffinger
|
||||
ptx.c Ivo van Poorten
|
||||
qcelp* Reynaldo H. Verdejo Pinochet
|
||||
qdm2.c, qdm2data.h Roberto Togni, Benjamin Larsson
|
||||
@@ -242,65 +180,40 @@ Codecs:
|
||||
rtjpeg.c, rtjpeg.h Reimar Doeffinger
|
||||
rv10.c Michael Niedermayer
|
||||
rv3* Kostya Shishkov
|
||||
rv4* Kostya Shishkov, Christophe Gisquet
|
||||
rv4* Kostya Shishkov
|
||||
s3tc* Ivo van Poorten
|
||||
smacker.c Kostya Shishkov
|
||||
smc.c Mike Melanson
|
||||
smvjpegdec.c Ash Hughes
|
||||
snow* Michael Niedermayer, Loren Merritt
|
||||
snow.c Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
srt* Aurelien Jacobs
|
||||
sunrast.c Ivo van Poorten
|
||||
svq3.c Michael Niedermayer
|
||||
tak* Paul B Mahol
|
||||
targa.c Kostya Shishkov
|
||||
tiff.c Kostya Shishkov
|
||||
truemotion1* Mike Melanson
|
||||
truemotion2* Kostya Shishkov
|
||||
truespeech.c Kostya Shishkov
|
||||
tscc.c Kostya Shishkov
|
||||
tta.c Alex Beregszaszi, Jaikrishnan Menon
|
||||
ttaenc.c Paul B Mahol
|
||||
tta.c Alex Beregszaszi
|
||||
txd.c Ivo van Poorten
|
||||
ulti* Kostya Shishkov
|
||||
v410*.c Derek Buitenhuis
|
||||
vb.c Kostya Shishkov
|
||||
vble.c Derek Buitenhuis
|
||||
vc1* Kostya Shishkov, Christophe Gisquet
|
||||
vc1* Kostya Shishkov
|
||||
vcr1.c Michael Niedermayer
|
||||
vda_h264_dec.c Xidorn Quan
|
||||
vima.c Paul B Mahol
|
||||
vmnc.c Kostya Shishkov
|
||||
vorbisdec.c Denes Balatoni, David Conrad
|
||||
vorbisenc.c Oded Shimon
|
||||
vorbis_enc.c Oded Shimon
|
||||
vorbis_dec.c Denes Balatoni
|
||||
vp3* Mike Melanson
|
||||
vp5 Aurelien Jacobs
|
||||
vp6 Aurelien Jacobs
|
||||
vp8 David Conrad, Jason Garrett-Glaser, Ronald Bultje
|
||||
vp9 Ronald Bultje, Clément Bœsch
|
||||
vqavideo.c Mike Melanson
|
||||
wavpack.c Kostya Shishkov
|
||||
wmaprodec.c Sascha Sommer
|
||||
wmavoice.c Ronald S. Bultje
|
||||
wmv2.c Michael Niedermayer
|
||||
wnv1.c Kostya Shishkov
|
||||
xan.c Mike Melanson
|
||||
xbm* Paul B Mahol
|
||||
xface Stefano Sabatini
|
||||
xl.c Kostya Shishkov
|
||||
xvmc.c Ivan Kalvachev
|
||||
xwd* Paul B Mahol
|
||||
zerocodec.c Derek Buitenhuis
|
||||
zmbv* Kostya Shishkov
|
||||
|
||||
Hardware acceleration:
|
||||
crystalhd.c Philip Langdale
|
||||
dxva2* Laurent Aimar
|
||||
libstagefright.cpp Mohamed Naufal
|
||||
vaapi* Gwenole Beauchesne
|
||||
vda* Sebastien Zwickert
|
||||
vdpau* Carl Eugen Hoyos
|
||||
|
||||
|
||||
libavdevice
|
||||
===========
|
||||
@@ -308,65 +221,10 @@ libavdevice
|
||||
libavdevice/avdevice.h
|
||||
|
||||
|
||||
avfoundation.m Thilo Borgmann
|
||||
dshow.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
fbdev_enc.c Lukasz Marek
|
||||
gdigrab.c Roger Pack (CC rogerdpack@gmail.com)
|
||||
iec61883.c Georg Lippitsch
|
||||
lavfi Stefano Sabatini
|
||||
libdc1394.c Roman Shaposhnik
|
||||
opengl_enc.c Lukasz Marek
|
||||
pulse_audio_enc.c Lukasz Marek
|
||||
qtkit.m Thilo Borgmann
|
||||
sdl Stefano Sabatini
|
||||
v4l2.c Giorgio Vazzana
|
||||
v4l2.c Luca Abeni
|
||||
vfwcap.c Ramiro Polla
|
||||
xv.c Lukasz Marek
|
||||
|
||||
libavfilter
|
||||
===========
|
||||
|
||||
Generic parts:
|
||||
graphdump.c Nicolas George
|
||||
|
||||
Filters:
|
||||
af_adelay.c Paul B Mahol
|
||||
af_aecho.c Paul B Mahol
|
||||
af_afade.c Paul B Mahol
|
||||
af_amerge.c Nicolas George
|
||||
af_aphaser.c Paul B Mahol
|
||||
af_aresample.c Michael Niedermayer
|
||||
af_astats.c Paul B Mahol
|
||||
af_astreamsync.c Nicolas George
|
||||
af_atempo.c Pavel Koshevoy
|
||||
af_biquads.c Paul B Mahol
|
||||
af_compand.c Paul B Mahol
|
||||
af_ladspa.c Paul B Mahol
|
||||
af_pan.c Nicolas George
|
||||
af_silenceremove.c Paul B Mahol
|
||||
avf_avectorscope.c Paul B Mahol
|
||||
avf_showcqt.c Muhammad Faiz
|
||||
vf_blend.c Paul B Mahol
|
||||
vf_colorbalance.c Paul B Mahol
|
||||
vf_dejudder.c Nicholas Robbins
|
||||
vf_delogo.c Jean Delvare (CC <khali@linux-fr.org>)
|
||||
vf_drawbox.c/drawgrid Andrey Utkin
|
||||
vf_extractplanes.c Paul B Mahol
|
||||
vf_histogram.c Paul B Mahol
|
||||
vf_hqx.c Clément Bœsch
|
||||
vf_idet.c Pascal Massimino
|
||||
vf_il.c Paul B Mahol
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_psnr.c Paul B Mahol
|
||||
vf_scale.c Michael Niedermayer
|
||||
vf_separatefields.c Paul B Mahol
|
||||
vf_stereo3d.c Paul B Mahol
|
||||
vf_telecine.c Paul B Mahol
|
||||
vf_yadif.c Michael Niedermayer
|
||||
|
||||
Sources:
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
|
||||
libavformat
|
||||
===========
|
||||
@@ -381,200 +239,96 @@ Generic parts:
|
||||
Muxers/Demuxers:
|
||||
4xm.c Mike Melanson
|
||||
adtsenc.c Robert Swain
|
||||
afc.c Paul B Mahol
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiff.c Baptiste Coudurier
|
||||
ape.c Kostya Shishkov
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
avi* Michael Niedermayer
|
||||
avisynth.c AvxSynth Team (avxsynth.testing at gmail dot com)
|
||||
avr.c Paul B Mahol
|
||||
bink.c Peter Ross
|
||||
brstm.c Paul B Mahol
|
||||
caf* Peter Ross
|
||||
cdxl.c Paul B Mahol
|
||||
crc.c Michael Niedermayer
|
||||
daud.c Reimar Doeffinger
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
dxa.c Kostya Shishkov
|
||||
electronicarts.c Peter Ross
|
||||
epafdec.c Paul B Mahol
|
||||
ffm* Baptiste Coudurier
|
||||
flac* Justin Ruggles
|
||||
flic.c Mike Melanson
|
||||
flvdec.c, flvenc.c Michael Niedermayer
|
||||
gxf.c Reimar Doeffinger
|
||||
gxfenc.c Baptiste Coudurier
|
||||
hls.c Anssi Hannula
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
idroq.c Mike Melanson
|
||||
iff.c Jaikrishnan Menon
|
||||
img2*.c Michael Niedermayer
|
||||
ipmovie.c Mike Melanson
|
||||
ircam* Paul B Mahol
|
||||
img2.c Michael Niedermayer
|
||||
iss.c Stefan Gehrer
|
||||
jacosub* Clément Bœsch
|
||||
jvdec.c Peter Ross
|
||||
libmodplug.c Clément Bœsch
|
||||
libnut.c Oded Shimon
|
||||
lmlm4.c Ivo van Poorten
|
||||
lvfdec.c Paul B Mahol
|
||||
lxfdec.c Tomas Härdin
|
||||
matroska.c Aurelien Jacobs
|
||||
matroskadec.c Aurelien Jacobs
|
||||
matroskaenc.c David Conrad
|
||||
matroska subtitles (matroskaenc.c) John Peebles
|
||||
metadata* Aurelien Jacobs
|
||||
mgsts.c Paul B Mahol
|
||||
microdvd* Aurelien Jacobs
|
||||
mm.c Peter Ross
|
||||
mov.c Michael Niedermayer, Baptiste Coudurier
|
||||
movenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
movenc.c Michael Niedermayer, Baptiste Coudurier
|
||||
mpc.c Kostya Shishkov
|
||||
mpeg.c Michael Niedermayer
|
||||
mpegenc.c Michael Niedermayer
|
||||
mpegts.c Marton Balint
|
||||
mpegtsenc.c Baptiste Coudurier
|
||||
mpegts* Mans Rullgard
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier
|
||||
mxfdec.c Tomas Härdin
|
||||
nistspheredec.c Paul B Mahol
|
||||
nsvdec.c Francois Revol
|
||||
nut* Michael Niedermayer
|
||||
nut.c Michael Niedermayer
|
||||
nuv.c Reimar Doeffinger
|
||||
oggdec.c, oggdec.h David Conrad
|
||||
oggdec.c, oggdec.h Mans Rullgard
|
||||
oggenc.c Baptiste Coudurier
|
||||
oggparse*.c David Conrad
|
||||
oma.c Maxim Poliakovski
|
||||
paf.c Paul B Mahol
|
||||
oggparsevorbis.c Mans Rullgard
|
||||
oggparseogm.c Mans Rullgard
|
||||
psxstr.c Mike Melanson
|
||||
pva.c Ivo van Poorten
|
||||
pvfdec.c Paul B Mahol
|
||||
r3d.c Baptiste Coudurier
|
||||
raw.c Michael Niedermayer
|
||||
rdt.c Ronald S. Bultje
|
||||
rl2.c Sascha Sommer
|
||||
rmdec.c, rmenc.c Ronald S. Bultje, Kostya Shishkov
|
||||
rtmp* Kostya Shishkov
|
||||
rtp.c, rtpenc.c Martin Storsjo
|
||||
rtpdec_h261.*, rtpenc_h261.* Thomas Volkert
|
||||
rtpdec_hevc.* Thomas Volkert
|
||||
rtpdec_asf.* Ronald S. Bultje
|
||||
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
|
||||
rm.c Roberto Togni
|
||||
rtp.c, rtpenc.c Luca Abeni
|
||||
rtp_mpv.*, rtp_aac.* Luca Abeni
|
||||
rtsp.c Luca Barbato
|
||||
sbgdec.c Nicolas George
|
||||
sdp.c Martin Storsjo
|
||||
sdp.c Luca Abeni
|
||||
segafilm.c Mike Melanson
|
||||
segment.c Stefano Sabatini
|
||||
siff.c Kostya Shishkov
|
||||
smacker.c Kostya Shishkov
|
||||
smjpeg* Paul B Mahol
|
||||
spdif* Anssi Hannula
|
||||
srtdec.c Aurelien Jacobs
|
||||
swf.c Baptiste Coudurier
|
||||
takdec.c Paul B Mahol
|
||||
tta.c Alex Beregszaszi
|
||||
txd.c Ivo van Poorten
|
||||
voc.c Aurelien Jacobs
|
||||
wav.c Michael Niedermayer
|
||||
wc3movie.c Mike Melanson
|
||||
webm dash (matroskaenc.c) Vignesh Venkatasubramanian
|
||||
webvtt* Matthew J Heaney
|
||||
westwood.c Mike Melanson
|
||||
wtv.c Peter Ross
|
||||
wv.c Kostya Shishkov
|
||||
wvenc.c Paul B Mahol
|
||||
|
||||
Protocols:
|
||||
bluray.c Petri Hintukainen
|
||||
ftp.c Lukasz Marek
|
||||
http.c Ronald S. Bultje
|
||||
libssh.c Lukasz Marek
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
|
||||
|
||||
libswresample
|
||||
=============
|
||||
|
||||
Generic parts:
|
||||
audioconvert.c Michael Niedermayer
|
||||
dither.c Michael Niedermayer
|
||||
rematrix*.c Michael Niedermayer
|
||||
swresample*.c Michael Niedermayer
|
||||
|
||||
Resamplers:
|
||||
resample*.c Michael Niedermayer
|
||||
soxr_resample.c Rob Sykes
|
||||
|
||||
|
||||
Operating systems / CPU architectures
|
||||
=====================================
|
||||
|
||||
Alpha Mans Rullgard, Falk Hueffner
|
||||
ARM Mans Rullgard
|
||||
AVR32 Mans Rullgard
|
||||
MIPS Mans Rullgard, Nedeljko Babic
|
||||
BeOS Francois Revol
|
||||
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC Luca Barbato
|
||||
Windows MinGW Alex Beregszaszi, Ramiro Polla
|
||||
Windows Cygwin Victor Paesa
|
||||
Windows MSVC Matthew Oliver
|
||||
Windows ICL Matthew Oliver
|
||||
ADI/Blackfin DSP Marc Hoffman
|
||||
Sparc Roman Shaposhnik
|
||||
x86 Michael Niedermayer
|
||||
|
||||
|
||||
Releases
|
||||
========
|
||||
GnuPG Fingerprints of maintainers and others who have svn write access
|
||||
======================================================================
|
||||
|
||||
2.4 Michael Niedermayer
|
||||
2.2 Michael Niedermayer
|
||||
1.2 Michael Niedermayer
|
||||
|
||||
If you want to maintain an older release, please contact us
|
||||
|
||||
|
||||
GnuPG Fingerprints of maintainers and contributors
|
||||
==================================================
|
||||
|
||||
Alexander Strasser 1C96 78B7 83CB 8AA7 9AF5 D1EB A7D8 A57B A876 E58F
|
||||
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
|
||||
Anton Khirnov 6D0C 6625 56F8 65D1 E5F5 814B B50A 1241 C067 07AB
|
||||
Ash Hughes 694D 43D2 D180 C7C7 6421 ABD3 A641 D0B7 623D 6029
|
||||
Attila Kinali 11F0 F9A6 A1D2 11F6 C745 D10C 6520 BCDD F2DF E765
|
||||
Baptiste Coudurier 8D77 134D 20CC 9220 201F C5DB 0AC9 325C 5C1A BAAA
|
||||
Ben Littler 3EE3 3723 E560 3214 A8CD 4DEB 2CDB FCE7 768C 8D2C
|
||||
Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
|
||||
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
|
||||
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
Diego Biurrun 8227 1E31 B6D9 4994 7427 E220 9CAE D6CC 4757 FCC5
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Justin Ruggles 3136 ECC0 C10D 6C04 5F43 CA29 FCBE CD2A 3787 1EBF
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
|
||||
Luca Barbato 6677 4209 213C 8843 5B67 29E7 E84C 78C2 84E9 0E34
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
Reimar Döffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
|
||||
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
|
||||
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
|
||||
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
|
||||
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
|
||||
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
|
||||
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
|
||||
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
|
||||
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
|
||||
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
|
||||
|
||||
432 Makefile
@@ -1,86 +1,54 @@
|
||||
MAIN_MAKEFILE=1
|
||||
include config.mak
|
||||
|
||||
vpath %.c $(SRC_PATH)
|
||||
vpath %.cpp $(SRC_PATH)
|
||||
vpath %.h $(SRC_PATH)
|
||||
vpath %.m $(SRC_PATH)
|
||||
vpath %.S $(SRC_PATH)
|
||||
vpath %.asm $(SRC_PATH)
|
||||
vpath %.rc $(SRC_PATH)
|
||||
vpath %.v $(SRC_PATH)
|
||||
vpath %.texi $(SRC_PATH)
|
||||
vpath %/fate_config.sh.template $(SRC_PATH)
|
||||
SRC_DIR = $(SRC_PATH_BARE)
|
||||
|
||||
AVPROGS-$(CONFIG_FFMPEG) += ffmpeg
|
||||
AVPROGS-$(CONFIG_FFPLAY) += ffplay
|
||||
AVPROGS-$(CONFIG_FFPROBE) += ffprobe
|
||||
AVPROGS-$(CONFIG_FFSERVER) += ffserver
|
||||
vpath %.texi $(SRC_PATH_BARE)
|
||||
|
||||
AVPROGS := $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
|
||||
INSTPROGS = $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
|
||||
PROGS += $(AVPROGS)
|
||||
PROGS-$(CONFIG_FFMPEG) += ffmpeg
|
||||
PROGS-$(CONFIG_FFPLAY) += ffplay
|
||||
PROGS-$(CONFIG_FFSERVER) += ffserver
|
||||
|
||||
AVBASENAMES = ffmpeg ffplay ffprobe ffserver
|
||||
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
|
||||
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
||||
PROGS = $(addsuffix $(EXESUF), $(PROGS-yes))
|
||||
PROGS_G = $(addsuffix _g$(EXESUF), $(PROGS-yes))
|
||||
OBJS = $(addsuffix .o, $(PROGS-yes)) cmdutils.o
|
||||
MANPAGES = $(addprefix doc/, $(addsuffix .1, $(PROGS-yes)))
|
||||
|
||||
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog) += cmdutils.o))
|
||||
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_opencl.o))
|
||||
BASENAMES = ffmpeg ffplay ffserver
|
||||
ALLPROGS = $(addsuffix $(EXESUF), $(BASENAMES))
|
||||
ALLPROGS_G = $(addsuffix _g$(EXESUF), $(BASENAMES))
|
||||
ALLMANPAGES = $(addsuffix .1, $(BASENAMES))
|
||||
|
||||
OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
|
||||
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
|
||||
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
|
||||
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_vda.o
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
TOOLS = qt-faststart trasher uncoded_frame
|
||||
TOOLS-$(CONFIG_ZLIB) += cws2fws
|
||||
FFLIBS := avdevice avformat avcodec avutil
|
||||
|
||||
# $(FFLIBS-yes) needs to be in linking order
|
||||
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_AVFORMAT) += avformat
|
||||
FFLIBS-$(CONFIG_AVCODEC) += avcodec
|
||||
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
DATA_FILES := $(wildcard $(SRC_DIR)/ffpresets/*.ffpreset)
|
||||
|
||||
FFLIBS := avutil
|
||||
|
||||
DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
|
||||
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README
|
||||
|
||||
SKIPHEADERS = cmdutils_common_opts.h compat/w32pthreads.h
|
||||
|
||||
include $(SRC_PATH)/common.mak
|
||||
include common.mak
|
||||
|
||||
FF_LDFLAGS := $(FFLDFLAGS)
|
||||
FF_EXTRALIBS := $(FFEXTRALIBS)
|
||||
FF_DEP_LIBS := $(DEP_LIBS)
|
||||
|
||||
all: $(AVPROGS)
|
||||
ALL_TARGETS-$(CONFIG_VHOOK) += videohook
|
||||
ALL_TARGETS-$(BUILD_DOC) += documentation
|
||||
|
||||
$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS)
|
||||
INSTALL_TARGETS-$(CONFIG_VHOOK) += install-vhook
|
||||
ifneq ($(PROGS),)
|
||||
INSTALL_TARGETS-yes += install-progs install-data
|
||||
INSTALL_TARGETS-$(BUILD_DOC) += install-man
|
||||
endif
|
||||
INSTALL_PROGS_TARGETS-$(BUILD_SHARED) = install-libs
|
||||
|
||||
tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
all: $(FF_DEP_LIBS) $(PROGS) $(ALL_TARGETS-yes)
|
||||
|
||||
config.h: .config
|
||||
.config: $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c))
|
||||
@-tput bold 2>/dev/null
|
||||
@-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n'
|
||||
@-tput sgr0 2>/dev/null
|
||||
$(PROGS): %$(EXESUF): %_g$(EXESUF)
|
||||
cp -p $< $@
|
||||
$(STRIP) $@
|
||||
|
||||
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
|
||||
ALTIVEC-OBJS MMX-OBJS YASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSPR1-OBJS MIPS32R2-OBJS \
|
||||
OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
SUBDIR_VARS := OBJS FFLIBS CLEANFILES DIRS TESTS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
@@ -90,109 +58,303 @@ endef
|
||||
define DOSUBDIR
|
||||
$(foreach V,$(SUBDIR_VARS),$(eval $(call RESET,$(V))))
|
||||
SUBDIR := $(1)/
|
||||
include $(SRC_PATH)/$(1)/Makefile
|
||||
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
|
||||
-include $(SRC_PATH)/$(1)/$(INTRINSICS)/Makefile
|
||||
include $(SRC_PATH)/library.mak
|
||||
include $(1)/Makefile
|
||||
endef
|
||||
|
||||
$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
|
||||
|
||||
include $(SRC_PATH)/doc/Makefile
|
||||
ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS)
|
||||
ffserver_g$(EXESUF): FF_LDFLAGS += $(FFSERVERLDFLAGS)
|
||||
|
||||
define DOPROG
|
||||
OBJS-$(1) += $(1).o $(EXEOBJS) $(OBJS-$(1)-yes)
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
|
||||
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
|
||||
-include $$(OBJS-$(1):.o=.d)
|
||||
endef
|
||||
%_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
|
||||
$(CC) $(FF_LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)
|
||||
|
||||
$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))
|
||||
output_example$(EXESUF): output_example.o $(FF_DEP_LIBS)
|
||||
$(CC) $(CFLAGS) $(FF_LDFLAGS) -o $@ $< $(FF_EXTRALIBS)
|
||||
|
||||
ffprobe.o cmdutils.o : libavutil/ffversion.h
|
||||
tools/%$(EXESUF): tools/%.c
|
||||
$(CC) $(CFLAGS) $(FF_LDFLAGS) -o $@ $< $(FF_EXTRALIBS)
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
$(CP) $< $@
|
||||
$(STRIP) $@
|
||||
ffplay.o ffplay.d: CFLAGS += $(SDL_CFLAGS)
|
||||
|
||||
%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
|
||||
cmdutils.o cmdutils.d: version.h
|
||||
|
||||
OBJDIRS += tools
|
||||
alltools: $(addsuffix $(EXESUF),$(addprefix tools/, cws2fws pktdumper qt-faststart trasher))
|
||||
|
||||
-include $(wildcard tools/*.d)
|
||||
VHOOKCFLAGS += $(filter-out -mdynamic-no-pic,$(CFLAGS))
|
||||
|
||||
VERSION_SH = $(SRC_PATH)/version.sh
|
||||
GIT_LOG = $(SRC_PATH)/.git/logs/HEAD
|
||||
BASEHOOKS = fish null watermark
|
||||
ALLHOOKS = $(BASEHOOKS) drawtext imlib2 ppm
|
||||
ALLHOOKS_SRCS = $(addprefix vhook/, $(addsuffix .c, $(ALLHOOKS)))
|
||||
|
||||
.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) config.mak
|
||||
.version: M=@
|
||||
HOOKS-$(HAVE_FORK) += ppm
|
||||
HOOKS-$(HAVE_IMLIB2) += imlib2
|
||||
HOOKS-$(HAVE_FREETYPE2) += drawtext
|
||||
|
||||
libavutil/ffversion.h .version:
|
||||
$(M)$(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
|
||||
$(Q)touch .version
|
||||
HOOKS = $(addprefix vhook/, $(addsuffix $(SLIBSUF), $(BASEHOOKS) $(HOOKS-yes)))
|
||||
|
||||
# force version.sh to run whenever version might have changed
|
||||
-include .version
|
||||
VHOOKCFLAGS-$(HAVE_IMLIB2) += `imlib2-config --cflags`
|
||||
LIBS_imlib2$(SLIBSUF) = `imlib2-config --libs`
|
||||
|
||||
ifdef AVPROGS
|
||||
install: install-progs install-data
|
||||
endif
|
||||
VHOOKCFLAGS-$(HAVE_FREETYPE2) += `freetype-config --cflags`
|
||||
LIBS_drawtext$(SLIBSUF) = `freetype-config --libs`
|
||||
|
||||
install: install-libs install-headers
|
||||
VHOOKCFLAGS += $(VHOOKCFLAGS-yes)
|
||||
|
||||
install-libs: install-libs-yes
|
||||
vhook/%.o vhook/%.d: CFLAGS:=$(VHOOKCFLAGS)
|
||||
|
||||
install-progs-yes:
|
||||
install-progs-$(CONFIG_SHARED): install-libs
|
||||
# vhooks compile fine without libav*, but need them nonetheless.
|
||||
videohook: $(FF_DEP_LIBS) $(HOOKS)
|
||||
|
||||
install-progs: install-progs-yes $(AVPROGS)
|
||||
$(Q)mkdir -p "$(BINDIR)"
|
||||
$(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"
|
||||
$(eval VHOOKSHFLAGS=$(VHOOKSHFLAGS))
|
||||
vhook/%$(SLIBSUF): vhook/%.o
|
||||
$(CC) $(LDFLAGS) -o $@ $(VHOOKSHFLAGS) $< $(VHOOKLIBS) $(LIBS_$(@F))
|
||||
|
||||
install-data: $(DATA_FILES) $(EXAMPLES_FILES)
|
||||
$(Q)mkdir -p "$(DATADIR)/examples"
|
||||
$(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
|
||||
$(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"
|
||||
VHOOK_DEPS = $(HOOKS:$(SLIBSUF)=.d)
|
||||
depend dep: $(VHOOK_DEPS)
|
||||
|
||||
uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data
|
||||
documentation: $(addprefix doc/, ffmpeg-doc.html faq.html ffserver-doc.html \
|
||||
ffplay-doc.html general.html hooks.html \
|
||||
$(ALLMANPAGES))
|
||||
|
||||
doc/%.html: doc/%.texi
|
||||
texi2html -monolithic -number $<
|
||||
mv $(@F) $@
|
||||
|
||||
doc/%.pod: doc/%-doc.texi
|
||||
doc/texi2pod.pl $< $@
|
||||
|
||||
doc/%.1: doc/%.pod
|
||||
pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
|
||||
install: $(INSTALL_TARGETS-yes)
|
||||
|
||||
install-progs: $(PROGS) $(INSTALL_PROGS_TARGETS-yes)
|
||||
install -d "$(BINDIR)"
|
||||
install -c -m 755 $(PROGS) "$(BINDIR)"
|
||||
|
||||
install-data: $(DATA_FILES)
|
||||
install -d "$(DATADIR)"
|
||||
install -m 644 $(DATA_FILES) "$(DATADIR)"
|
||||
|
||||
install-man: $(MANPAGES)
|
||||
install -d "$(MANDIR)/man1"
|
||||
install -m 644 $(MANPAGES) "$(MANDIR)/man1"
|
||||
|
||||
install-vhook: videohook
|
||||
install -d "$(SHLIBDIR)/vhook"
|
||||
install -m 755 $(HOOKS) "$(SHLIBDIR)/vhook"
|
||||
|
||||
uninstall: uninstall-progs uninstall-data uninstall-man uninstall-vhook
|
||||
|
||||
uninstall-progs:
|
||||
$(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))
|
||||
rm -f $(addprefix "$(BINDIR)/", $(ALLPROGS))
|
||||
|
||||
uninstall-data:
|
||||
$(RM) -r "$(DATADIR)"
|
||||
rm -rf "$(DATADIR)"
|
||||
|
||||
clean::
|
||||
$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G)
|
||||
$(RM) $(CLEANSUFFIXES)
|
||||
$(RM) $(CLEANSUFFIXES:%=tools/%)
|
||||
$(RM) -r coverage-html
|
||||
$(RM) -rf coverage.info lcov
|
||||
uninstall-man:
|
||||
rm -f $(addprefix "$(MANDIR)/man1/",$(ALLMANPAGES))
|
||||
|
||||
uninstall-vhook:
|
||||
rm -f $(addprefix "$(SHLIBDIR)/",$(ALLHOOKS_SRCS:.c=$(SLIBSUF)))
|
||||
-rmdir "$(SHLIBDIR)/vhook/"
|
||||
|
||||
testclean:
|
||||
rm -rf tests/vsynth1 tests/vsynth2 tests/data tests/asynth1.sw tests/*~
|
||||
|
||||
clean:: testclean
|
||||
rm -f $(ALLPROGS) $(ALLPROGS_G) output_example$(EXESUF)
|
||||
rm -f doc/*.html doc/*.pod doc/*.1
|
||||
rm -f $(addprefix tests/,$(addsuffix $(EXESUF),audiogen videogen rotozoom seek_test tiny_psnr))
|
||||
rm -f $(addprefix tools/,$(addsuffix $(EXESUF),cws2fws pktdumper qt-faststart trasher))
|
||||
rm -f vhook/*.o vhook/*~ vhook/*.so vhook/*.dylib vhook/*.dll
|
||||
|
||||
distclean::
|
||||
$(RM) $(DISTCLEANSUFFIXES)
|
||||
$(RM) config.* .config libavutil/avconfig.h .version version.h libavutil/ffversion.h libavcodec/codec_names.h
|
||||
rm -f version.h config.* vhook/*.d
|
||||
|
||||
config:
|
||||
$(SRC_PATH)/configure $(value FFMPEG_CONFIGURATION)
|
||||
# regression tests
|
||||
|
||||
check: all alltools examples testprogs fate
|
||||
check: test checkheaders
|
||||
|
||||
include $(SRC_PATH)/tests/Makefile
|
||||
fulltest test: codectest libavtest seektest
|
||||
|
||||
$(sort $(OBJDIRS)):
|
||||
$(Q)mkdir -p $@
|
||||
FFMPEG_REFFILE = $(SRC_PATH)/tests/ffmpeg.regression.ref
|
||||
FFSERVER_REFFILE = $(SRC_PATH)/tests/ffserver.regression.ref
|
||||
LIBAV_REFFILE = $(SRC_PATH)/tests/libav.regression.ref
|
||||
ROTOZOOM_REFFILE = $(SRC_PATH)/tests/rotozoom.regression.ref
|
||||
SEEK_REFFILE = $(SRC_PATH)/tests/seek.regression.ref
|
||||
|
||||
# Dummy rule to stop make trying to rebuild removed or renamed headers
|
||||
%.h:
|
||||
@:
|
||||
CODEC_TESTS = $(addprefix regtest-, \
|
||||
mpeg \
|
||||
mpeg2 \
|
||||
mpeg2thread \
|
||||
msmpeg4v2 \
|
||||
msmpeg4 \
|
||||
wmv1 \
|
||||
wmv2 \
|
||||
h261 \
|
||||
h263 \
|
||||
h263p \
|
||||
mpeg4 \
|
||||
huffyuv \
|
||||
rc \
|
||||
mpeg4adv \
|
||||
mpeg4thread \
|
||||
error \
|
||||
mpeg4nr \
|
||||
mpeg1b \
|
||||
mjpeg \
|
||||
ljpeg \
|
||||
jpegls \
|
||||
rv10 \
|
||||
rv20 \
|
||||
asv1 \
|
||||
asv2 \
|
||||
flv \
|
||||
ffv1 \
|
||||
snow \
|
||||
snowll \
|
||||
dv \
|
||||
dv50 \
|
||||
svq1 \
|
||||
flashsv \
|
||||
mp2 \
|
||||
ac3 \
|
||||
g726 \
|
||||
adpcm_ima_wav \
|
||||
adpcm_ima_qt \
|
||||
adpcm_ms \
|
||||
adpcm_yam \
|
||||
adpcm_swf \
|
||||
flac \
|
||||
wma \
|
||||
pcm \
|
||||
)
|
||||
|
||||
# Disable suffix rules. Most of the builtin rules are suffix rules,
|
||||
# so this saves some time on slow systems.
|
||||
.SUFFIXES:
|
||||
LAVF_TESTS = $(addprefix regtest-, \
|
||||
avi \
|
||||
asf \
|
||||
rm \
|
||||
mpg \
|
||||
ts \
|
||||
swf \
|
||||
ffm \
|
||||
flv_fmt \
|
||||
mov \
|
||||
dv_fmt \
|
||||
gxf \
|
||||
nut \
|
||||
mkv \
|
||||
pbmpipe \
|
||||
pgmpipe \
|
||||
ppmpipe \
|
||||
gif \
|
||||
yuv4mpeg \
|
||||
pgm \
|
||||
ppm \
|
||||
bmp \
|
||||
tga \
|
||||
tiff \
|
||||
sgi \
|
||||
jpg \
|
||||
wav \
|
||||
alaw \
|
||||
mulaw \
|
||||
au \
|
||||
mmf \
|
||||
aiff \
|
||||
voc \
|
||||
ogg \
|
||||
pixfmt \
|
||||
)
|
||||
|
||||
.PHONY: all all-yes alltools check *clean config install*
|
||||
.PHONY: testprogs uninstall*
|
||||
REGFILES = $(addprefix tests/data/,$(addsuffix .$(1),$(2:regtest-%=%)))
|
||||
|
||||
CODEC_ROTOZOOM = $(call REGFILES,rotozoom.regression,$(CODEC_TESTS))
|
||||
CODEC_VSYNTH = $(call REGFILES,vsynth.regression,$(CODEC_TESTS))
|
||||
|
||||
LAVF_REGFILES = $(call REGFILES,lavf.regression,$(LAVF_TESTS))
|
||||
|
||||
LAVF_REG = tests/data/lavf.regression
|
||||
ROTOZOOM_REG = tests/data/rotozoom.regression
|
||||
VSYNTH_REG = tests/data/vsynth.regression
|
||||
|
||||
ifneq ($(CONFIG_SWSCALE),yes)
|
||||
servertest codectest $(CODEC_TESTS) libavtest: swscale-error
|
||||
swscale-error:
|
||||
@echo
|
||||
@echo "This regression test requires --enable-swscale."
|
||||
@echo
|
||||
@exit 1
|
||||
endif
|
||||
|
||||
ifneq ($(CONFIG_ZLIB),yes)
|
||||
regtest-flashsv codectest: zlib-error
|
||||
endif
|
||||
zlib-error:
|
||||
@echo
|
||||
@echo "This regression test requires zlib."
|
||||
@echo
|
||||
@exit 1
|
||||
|
||||
codectest: $(VSYNTH_REG) $(ROTOZOOM_REG)
|
||||
diff -u -w $(FFMPEG_REFFILE) $(VSYNTH_REG)
|
||||
diff -u -w $(ROTOZOOM_REFFILE) $(ROTOZOOM_REG)
|
||||
|
||||
libavtest: $(LAVF_REG)
|
||||
diff -u -w $(LIBAV_REFFILE) $(LAVF_REG)
|
||||
|
||||
$(VSYNTH_REG) $(ROTOZOOM_REG) $(LAVF_REG):
|
||||
cat $^ > $@
|
||||
|
||||
$(LAVF_REG): $(LAVF_REGFILES)
|
||||
$(ROTOZOOM_REG): $(CODEC_ROTOZOOM)
|
||||
$(VSYNTH_REG): $(CODEC_VSYNTH)
|
||||
|
||||
$(CODEC_VSYNTH) $(CODEC_ROTOZOOM): $(CODEC_TESTS)
|
||||
|
||||
$(LAVF_REGFILES): $(LAVF_TESTS)
|
||||
|
||||
$(CODEC_TESTS) $(LAVF_TESTS): regtest-ref
|
||||
|
||||
regtest-ref: ffmpeg$(EXESUF) tests/vsynth1/00.pgm tests/vsynth2/00.pgm tests/asynth1.sw
|
||||
|
||||
$(CODEC_TESTS) regtest-ref: tests/tiny_psnr$(EXESUF)
|
||||
$(SRC_PATH)/tests/regression.sh $@ vsynth tests/vsynth1 a "$(TARGET_EXEC)" "$(TARGET_PATH)"
|
||||
$(SRC_PATH)/tests/regression.sh $@ rotozoom tests/vsynth2 a "$(TARGET_EXEC)" "$(TARGET_PATH)"
|
||||
|
||||
$(LAVF_TESTS):
|
||||
$(SRC_PATH)/tests/regression.sh $@ lavf tests/vsynth1 b "$(TARGET_EXEC)" "$(TARGET_PATH)"
|
||||
|
||||
seektest: codectest libavtest tests/seek_test$(EXESUF)
|
||||
$(SRC_PATH)/tests/seek_test.sh $(SEEK_REFFILE) "$(TARGET_EXEC)" "$(TARGET_PATH)"
|
||||
|
||||
servertest: ffserver$(EXESUF) tests/vsynth1/00.pgm tests/asynth1.sw
|
||||
@echo
|
||||
@echo "Unfortunately ffserver is broken and therefore its regression"
|
||||
@echo "test fails randomly. Treat the results accordingly."
|
||||
@echo
|
||||
$(SRC_PATH)/tests/server-regression.sh $(FFSERVER_REFFILE) $(SRC_PATH)/tests/test.conf
|
||||
|
||||
tests/vsynth1/00.pgm: tests/videogen$(EXESUF)
|
||||
mkdir -p tests/vsynth1
|
||||
$(BUILD_ROOT)/$< 'tests/vsynth1/'
|
||||
|
||||
tests/vsynth2/00.pgm: tests/rotozoom$(EXESUF)
|
||||
mkdir -p tests/vsynth2
|
||||
$(BUILD_ROOT)/$< 'tests/vsynth2/' $(SRC_PATH)/tests/lena.pnm
|
||||
|
||||
tests/asynth1.sw: tests/audiogen$(EXESUF)
|
||||
$(BUILD_ROOT)/$< $@
|
||||
|
||||
tests/%$(EXESUF): tests/%.c
|
||||
$(HOSTCC) $(HOSTCFLAGS) $(HOSTLDFLAGS) -o $@ $< $(HOSTLIBS)
|
||||
|
||||
tests/seek_test$(EXESUF): tests/seek_test.c $(FF_DEP_LIBS)
|
||||
$(CC) $(FF_LDFLAGS) $(CFLAGS) -o $@ $< $(FF_EXTRALIBS)
|
||||
|
||||
|
||||
.PHONY: lib videohook documentation *test regtest-* swscale-error zlib-error alltools check
|
||||
|
||||
-include $(VHOOK_DEPS)
|
||||
|
||||
README (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
FFmpeg README
|
||||
-------------
|
||||
|
||||
1) Documentation
|
||||
----------------
|
||||
|
||||
* Read the documentation in the doc/ directory.
|
||||
|
||||
2) Licensing
|
||||
------------
|
||||
|
||||
* See the LICENSE file.
|
||||
README.md (deleted, 40 lines)
@@ -1,40 +0,0 @@
|
||||
FFmpeg README
|
||||
=============
|
||||
|
||||
FFmpeg is a collection of libraries and tools to process multimedia content
|
||||
such as audio, video, subtitles and related metadata.
|
||||
|
||||
## Libraries
|
||||
|
||||
* `libavcodec` provides implementations of a wide range of codecs.
|
||||
* `libavformat` implements streaming protocols, container formats and basic I/O access.
|
||||
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
|
||||
* `libavfilter` provides a means to alter decoded audio and video through a chain of filters.
|
||||
* `libavdevice` provides an abstraction to access capture and playback devices.
|
||||
* `libswresample` implements audio mixing and resampling routines.
|
||||
* `libswscale` implements color conversion and scaling routines.
|
||||
|
||||
## Tools
|
||||
|
||||
* [ffmpeg](http://ffmpeg.org/ffmpeg.html) is a command line toolbox to
|
||||
manipulate, convert and stream multimedia content.
|
||||
* [ffplay](http://ffmpeg.org/ffplay.html) is a minimalistic multimedia player.
|
||||
* [ffprobe](http://ffmpeg.org/ffprobe.html) is a simple analysis tool to inspect
|
||||
multimedia content.
|
||||
* Additional small tools such as `aviocat`, `ismindex` and `qt-faststart`.
|
||||
|
||||
## Documentation
|
||||
|
||||
The offline documentation is available in the **doc/** directory.
|
||||
|
||||
The online documentation is available in the main [website](http://ffmpeg.org)
|
||||
and in the [wiki](http://trac.ffmpeg.org).
|
||||
|
||||
### Examples
|
||||
|
||||
Coding examples are available in the **doc/example** directory.
|
||||
|
||||
## License
|
||||
|
||||
The FFmpeg codebase is mainly LGPL-licensed, with optional components licensed
under the GPL. Please refer to the LICENSE file for detailed information.
|
||||
RELEASE (modified, 255 changed lines)
@@ -1 +1,254 @@
|
||||
2.4.1
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 0.5 "Bike Shed World Domination" March 3, 2009
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
It has been so long since the last release that this should be considered the
|
||||
first FFmpeg release of recent times. Because of the way things have unfolded to
|
||||
date, the notes for this version cannot be entirely conventional.
|
||||
|
||||
See the Changelog file for a list of significant changes.
|
||||
|
||||
Please note that our policy on bug reports has not changed. We still only accept
|
||||
bug reports against HEAD of the FFmpeg trunk repository. If you are experiencing
|
||||
any issues with any formally released version of FFmpeg, please try a current
|
||||
version of the development code to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
||||
|
||||
API notes
|
||||
---------
|
||||
|
||||
We intend to remove a number of deprecated APIs in the next release. We
|
||||
decided to put out a release that includes said APIs for the benefit of third
|
||||
party software.
|
||||
|
||||
As such, this release:
|
||||
- provides a sync point for said APIs
|
||||
- increases awareness of API changes
|
||||
- allows the next release to detail how to transition from the old to the new
|
||||
|
||||
The deprecated APIs to be removed are:
|
||||
- imgconvert (to be replaced by libswscale)
|
||||
- vhook (to be replaced by libavfilter)
|
||||
|
||||
If at all possible, do not use the deprecated APIs. All notes on API changes
|
||||
should appear in doc/APIchanges.
|
||||
|
||||
|
||||
|
||||
* 0.5.1 March 2, 2010
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This point release includes some minor updates to make the 0.5 release series
|
||||
usable for users that need to retain the existing behavior as closely as
|
||||
possible. The changes follow below:
|
||||
|
||||
Security fixes
|
||||
--------------
|
||||
|
||||
Various programming errors in container and codec implementations
|
||||
may lead to denial of service or the execution of arbitrary code
|
||||
if the user is tricked into opening a malformed media file or stream.
|
||||
|
||||
The implementations of the following codecs and container formats
have been affected and updated:
|
||||
|
||||
- the Vorbis audio codec
|
||||
- the FF Video 1 codec
|
||||
- the MPEG audio codec
|
||||
- the H264 video codec
|
||||
- the MLP codec
|
||||
- the HuffYUV codec
|
||||
- the ASF demuxer
|
||||
- the Ogg container implementation
|
||||
- the MOV container implementation
|
||||
|
||||
Symbol Versioning enabled
|
||||
-------------------------
|
||||
|
||||
The backported symbol versioning change is enabled on platforms that support
|
||||
it. This allows users to upgrade from 0.5.1 to the upcoming 0.6 release
|
||||
without having to recompile their applications. Please note that distributors
|
||||
have to recompile applications against 0.5.1 before upgrading to 0.6.
|
||||
|
||||
libx264.c backport
|
||||
------------------
|
||||
|
||||
This release includes a backport to the libx264 wrapper that allows FFmpeg to
|
||||
be compiled against newer versions of libx264 up to API version 85.
|
||||
|
||||
Licensing changes
|
||||
-----------------
|
||||
|
||||
Previously both libswscale and our AC-3 decoder had GPLed parts. These have
|
||||
been replaced by fresh LGPL code. x86 optimizations for libswscale remain GPL,
|
||||
but the C code is fully functional. Optimizations for other architectures have
|
||||
been relicensed to LGPL.
|
||||
|
||||
AMR-NB decoding/encoding and AMR-WB decoding is now possible through the free
|
||||
software OpenCORE libraries as an alternative to the non-free libamr libraries.
|
||||
|
||||
We found out that libfaac contains non-free parts and is not LGPL as previously
|
||||
claimed. We have changed configure to reflect this. You now have to pass the
|
||||
--enable-nonfree option if you wish to compile with libfaac support enabled.
|
||||
|
||||
Furthermore, the non-free bits in libavcodec/fdctref.c have been rewritten. Note
|
||||
well that they were only used in a test program and never compiled into any
|
||||
FFmpeg library.
|
||||
|
||||
|
||||
|
||||
* 0.5.2 May 25, 2010
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This is a maintenance-only release that addresses a small number of security
|
||||
and portability issues. Distributors and system integrators are encouraged
|
||||
to update and share their patches against this branch.
|
||||
|
||||
|
||||
|
||||
* 0.5.3 Oct 18, 2010
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This is again a maintenance-only release; it provides a fix
for seekable HTTP and for an exploitable bug in the FLIC decoder
|
||||
(cf. CVE-2010-3429 for details). Distributors and system integrators are
|
||||
encouraged to update and share their patches against this branch.
|
||||
|
||||
|
||||
|
||||
* 0.5.4 Mar 17, 2011
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This is the first release that we cut after git migration. It is another
|
||||
maintenance-only release that addresses several security issues that were
|
||||
brought to our attention. In detail, fixes for RV30/40, WMV, Vorbis and
|
||||
VC-1 have been backported from trunk. Distributors and system integrators
|
||||
are encouraged to update and share their patches against this branch.
|
||||
|
||||
|
||||
|
||||
* 0.5.5 Nov 6, 2011
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This maintenance-only release addresses several security issues that
|
||||
were brought to our attention. In detail, fixes for the MJPEG decoder,
|
||||
the CAVS decoder (CVE-2011-3362, CVE-2011-3973, CVE-2011-3974), and the
|
||||
Matroska decoder (MSVR11-011/CVE-2011-3504) and many others have been
|
||||
corrected. Additionally, this release contains fixes for compilation with
|
||||
gcc-4.6. Distributors and system integrators are encouraged to update
|
||||
and share their patches against this branch.
|
||||
|
||||
|
||||
|
||||
* 0.5.6 Nov 21, 2011
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This maintenance-only release addresses several security issues that
|
||||
were brought to our attention.
|
||||
|
||||
|
||||
|
||||
* 0.5.7 Dec 25, 2011
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This maintenance-only release addresses several security issues that
|
||||
were brought to our attention. In detail, it features fixes for the
QDM2 decoder (CVE-2011-4351), DoS in the VP5/VP6 decoders
(CVE-2011-4353), a buffer overflow in the Sierra VMD decoder
(CVE-2011-4364), and a safety fix in the SVQ1 decoder (CVE-2011-4579).
|
||||
CVE-2011-4352, a bug in the VP3 decoder, is not known to affect this
|
||||
release.
|
||||
|
||||
Distributors and system integrators are encouraged to update and share
|
||||
their patches against this branch.
|
||||
|
||||
|
||||
|
||||
* 0.5.8 Jan 12, 2012
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This mostly maintenance-only release addresses a number of bugs, such as
security and compilation issues, that have been brought to
|
||||
our attention. Among other (rather minor) fixes, this release features
|
||||
fixes for the VP3 decoder (CVE-2011-3892), vorbis decoder, and matroska
|
||||
demuxer (CVE-2011-3893 and CVE-2011-3895).
|
||||
|
||||
Distributors and system integrators are encouraged
|
||||
to update and share their patches against this branch. For a full list
|
||||
of changes please see the Changelog file.
|
||||
|
||||
|
||||
|
||||
* 0.5.9 May 11, 2012
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This maintenance-only release addresses a number of
security issues that have been brought to our attention. Among other
|
||||
(rather minor) fixes, this release features fixes for the DV decoder
|
||||
(CVE-2011-3929 and CVE-2011-3936), nsvdec (CVE-2011-3940), Atrac3
|
||||
(CVE-2012-0853), mjpegdec (CVE-2011-3947) and the VQA video decoder
|
||||
(CVE-2012-0947).
|
||||
|
||||
Distributors and system integrators are encouraged
|
||||
to update and share their patches against this branch. For a full list
|
||||
of changes please see the Changelog file.
|
||||
|
||||
|
||||
|
||||
* 0.5.10 Jun 09, 2012
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This mostly maintenance-only release addresses a number of bugs such as
|
||||
security and compilation issues that have been brought to our
|
||||
attention. Among other fixes, this release includes security updates for
|
||||
the DPCM codecs (CVE-2011-3951), H.264 (CVE-2012-0851), ADPCM
|
||||
(CVE-2012-0852), and the KMVC decoder (CVE-2011-3952).
|
||||
|
||||
Distributors and system integrators are encouraged to update and share
|
||||
their patches against this branch. For a full list of changes please see
|
||||
the Changelog file or the Git commit history.
|
||||
|
||||
|
||||
|
||||
* 0.5.11 Feb 17, 2013
|
||||
|
||||
General notes
|
||||
-------------
|
||||
|
||||
This maintenance-only release addresses a number of bugs such as
|
||||
security and compilation issues that have been brought to our
|
||||
attention. Among other fixes, this release includes security updates for
|
||||
the mpeg12 codecs (CVE-2012-2803), H.264, VP5/VP6 (CVE-2012-2783,
|
||||
CVE-2012-2783), shorten (CVE-2012-0858), CAVS (CVE-2012-2777 and
|
||||
CVE-2012-2784), AVS (CVE-2012-2801) and a number of additional safe but
|
||||
important bugs in other decoders. Additionally, reported bugs in the
|
||||
yuv4mpeg (Bug 373) and BMP decoder (Bug 367) have been addressed.
|
||||
|
||||
Distributors and system integrators are encouraged to update and share
|
||||
their patches against this branch. For a full list of changes please
|
||||
see the Changelog file or the Git commit history.
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
┌────────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 2.4 "Fresnel" │
|
||||
└────────────────────────────────────────┘
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 2.4 "Fresnel", just 2 months
|
||||
after the release of 2.3. Since this wasn't a long time ago, the Changelog
|
||||
is a bit short this time.
|
||||
|
||||
The most important thing in this release is the major version bump of the
|
||||
libraries. This means that this release is neither ABI-compatible nor
|
||||
fully API-compatible. But on the other hand it is aligned with the Libav
|
||||
11 release series, and will as a result probably end up being maintained for
|
||||
a long time.
|
||||
|
||||
As usual, if you have any question on this release or any FFmpeg related
|
||||
topic, feel free to join us on the #ffmpeg IRC channel (on
|
||||
irc.freenode.net).
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ 🔨 API Information │
|
||||
└────────────────────────────┘
|
||||
|
||||
FFmpeg 2.4 includes the following library versions:
|
||||
|
||||
• libavutil 54.7.100
|
||||
• libavcodec 56.1.100
|
||||
• libavformat 56.4.101
|
||||
• libavdevice 56.0.100
|
||||
• libavfilter 5.1.100
|
||||
• libswscale 3.0.100
|
||||
• libswresample 1.1.100
|
||||
• libpostproc 53.0.100
|
||||
|
||||
Important API changes since 2.3:
|
||||
|
||||
• The new field mime_type was added to AVProbeData, which can
|
||||
cause crashes if it is not initialized.
|
||||
• Some deprecated functions were removed.
|
||||
• The avfilter_graph_parse function was made compatible with Libav.
|
||||
• The Matroska demuxer now outputs verbatim ASS packets.
|
||||
|
||||
Please refer to the doc/APIchanges file for more information.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ★ List of New Features │
|
||||
└────────────────────────────┘
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavformat │
|
||||
└────────────────────────────┘
|
||||
|
||||
• Icecast protocol.
|
||||
• API for live metadata updates through event flags.
|
||||
• UTF-16 support in text subtitles formats.
|
||||
• The ASS muxer now reorders the Dialogue events properly.
|
||||
• Support for the H.261 RTP payload format (RFC 4587).
• HEVC/H.265 RTP payload format (draft v6) depacketizer.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavfilter │
|
||||
└────────────────────────────┘
|
||||
|
||||
• Ported lenscorrection filter from frei0r filter.
|
||||
• Large optimizations in dctdnoiz to make it usable.
|
||||
• Added codecview filter to visualize information exported by some codecs.
|
||||
• Added silenceremove filter.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ libavutil │
|
||||
└────────────────────────────┘
|
||||
|
||||
• Added clip() function in eval.
|
||||
|
||||
┌────────────────────────────┐
|
||||
│ ⚠ Behaviour changes │
|
||||
└────────────────────────────┘
|
||||
|
||||
• dctdnoiz filter now uses a block size of 8x8 instead of 16x16 by default
|
||||
• -vismv option is deprecated in favor of the codecview filter
|
||||
• libmodplug is now detected through pkg-config
|
||||
• HTML documentation generation through texi2html is deprecated in
|
||||
favor of makeinfo/texi2any
|
||||
• ICY metadata are now requested by default with the HTTP protocol
|
||||
arch.mak (deleted, 15 lines)
@@ -1,15 +0,0 @@
|
||||
OBJS-$(HAVE_ARMV5TE) += $(ARMV5TE-OBJS) $(ARMV5TE-OBJS-yes)
|
||||
OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
|
||||
OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes)
|
||||
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
|
||||
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPS32R2) += $(MIPS32R2-OBJS) $(MIPS32R2-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSPR1) += $(MIPSDSPR1-OBJS) $(MIPSDSPR1-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSPR2) += $(MIPSDSPR2-OBJS) $(MIPSDSPR2-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
|
||||
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)
|
||||
cmdutils.c (modified, 2138 changed lines): file diff suppressed because it is too large.
cmdutils.h (modified, 517 changed lines)
@@ -22,18 +22,11 @@
|
||||
#ifndef FFMPEG_CMDUTILS_H
|
||||
#define FFMPEG_CMDUTILS_H
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "config.h"
|
||||
#include <inttypes.h>
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libswscale/swscale.h"
|
||||
|
||||
#ifdef _WIN32
|
||||
#undef main /* We don't want SDL to override our main() */
|
||||
#endif
|
||||
|
||||
/**
|
||||
* program name, defined by the program for show_version().
|
||||
*/
|
||||
@@ -44,118 +37,51 @@ extern const char program_name[];
|
||||
*/
|
||||
extern const int program_birth_year;
|
||||
|
||||
extern AVCodecContext *avcodec_opts[AVMEDIA_TYPE_NB];
|
||||
extern const int this_year;
|
||||
|
||||
extern const char **opt_names;
|
||||
extern AVCodecContext *avctx_opts[CODEC_TYPE_NB];
|
||||
extern AVFormatContext *avformat_opts;
|
||||
extern struct SwsContext *sws_opts;
|
||||
extern AVDictionary *swr_opts;
|
||||
extern AVDictionary *format_opts, *codec_opts, *resample_opts;
|
||||
extern int hide_banner;
|
||||
|
||||
/**
|
||||
* Register a program-specific cleanup routine.
|
||||
*/
|
||||
void register_exit(void (*cb)(int ret));
|
||||
|
||||
/**
|
||||
* Wraps exit with a program-specific cleanup routine.
|
||||
*/
|
||||
void exit_program(int ret) av_noreturn;
|
||||
|
||||
/**
|
||||
* Initialize the cmdutils option system, in particular
|
||||
* allocate the *_opts contexts.
|
||||
*/
|
||||
void init_opts(void);
|
||||
/**
|
||||
* Uninitialize the cmdutils option system, in particular
|
||||
* free the *_opts contexts and their contents.
|
||||
*/
|
||||
void uninit_opts(void);
|
||||
|
||||
/**
|
||||
* Trivial log callback.
|
||||
* Only suitable for opt_help and similar since it lacks prefix handling.
|
||||
*/
|
||||
void log_callback_help(void* ptr, int level, const char* fmt, va_list vl);
|
||||
|
||||
/**
|
||||
* Override the cpuflags.
|
||||
*/
|
||||
int opt_cpuflags(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Fallback for options that are not explicitly handled, these will be
|
||||
* parsed through AVOptions.
|
||||
*/
|
||||
int opt_default(void *optctx, const char *opt, const char *arg);
|
||||
int opt_default(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Set the libav* libraries log level.
|
||||
*/
|
||||
int opt_loglevel(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_report(const char *opt);
|
||||
|
||||
int opt_max_alloc(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_codec_debug(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
#if CONFIG_OPENCL
|
||||
int opt_opencl(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
int opt_opencl_bench(void *optctx, const char *opt, const char *arg);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* Limit the execution time.
|
||||
*/
|
||||
int opt_timelimit(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Parse a string and return its corresponding value as a double.
|
||||
* Exit from the application if the string cannot be correctly
|
||||
* Parses a string and returns its corresponding value as a double.
|
||||
* Exits from the application if the string cannot be correctly
|
||||
* parsed or the corresponding value is invalid.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param numstr the string to be parsed
|
||||
* @param type the type (OPT_INT64 or OPT_FLOAT) as which the
|
||||
* string should be parsed
|
||||
* @param min the minimum valid accepted value
|
||||
* @param max the maximum valid accepted value
|
||||
*/
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type,
|
||||
double min, double max);
|
||||
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max);
|
||||
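/*
 * Usage sketch (illustrative, not from the original header; the option name
 * "qscale" and the 0..63 range are invented, and OPT_FLOAT is defined further
 * down in this file):
 *
 *     double q = parse_number_or_die("qscale", arg, OPT_FLOAT, 0, 63);
 *
 * The helper exits with a diagnostic on malformed or out-of-range input, so
 * the call site needs no error handling of its own.
 */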
|
||||
/**
|
||||
* Parse a string specifying a time and return its corresponding
|
||||
* value as a number of microseconds. Exit from the application if
|
||||
* Parses a string specifying a time and returns its corresponding
|
||||
* value as a number of microseconds. Exits from the application if
|
||||
* the string cannot be correctly parsed.
|
||||
*
|
||||
* @param context the context of the value to be set (e.g. the
|
||||
* corresponding command line option name)
|
||||
* corresponding commandline option name)
|
||||
* @param timestr the string to be parsed
|
||||
* @param is_duration a flag which tells how to interpret timestr, if
|
||||
* not zero timestr is interpreted as a duration, otherwise as a
|
||||
* @param is_duration a flag which tells how to interpret \p timestr, if
|
||||
* not zero \p timestr is interpreted as a duration, otherwise as a
|
||||
* date
|
||||
*
|
||||
* @see av_parse_time()
|
||||
* @see parse_date()
|
||||
*/
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr,
|
||||
int is_duration);
|
||||
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration);
|
||||
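/*
 * Usage sketch (illustrative; the context string "t" is invented):
 *
 *     int64_t duration = parse_time_or_die("t", "00:01:30.5", 1);
 *
 * parses the duration string into a number of microseconds; with the last
 * argument set to 0 the same string would be interpreted as a date instead.
 */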
|
||||
typedef struct SpecifierOpt {
|
||||
char *specifier; /**< stream/chapter/program/... specifier */
|
||||
union {
|
||||
uint8_t *str;
|
||||
int i;
|
||||
int64_t i64;
|
||||
float f;
|
||||
double dbl;
|
||||
} u;
|
||||
} SpecifierOpt;
|
||||
|
||||
typedef struct OptionDef {
|
||||
typedef struct {
|
||||
const char *name;
|
||||
int flags;
|
||||
#define HAS_ARG 0x0001
|
||||
@@ -164,423 +90,66 @@ typedef struct OptionDef {
|
||||
#define OPT_STRING 0x0008
|
||||
#define OPT_VIDEO 0x0010
|
||||
#define OPT_AUDIO 0x0020
|
||||
#define OPT_GRAB 0x0040
|
||||
#define OPT_INT 0x0080
|
||||
#define OPT_FLOAT 0x0100
|
||||
#define OPT_SUBTITLE 0x0200
|
||||
#define OPT_INT64 0x0400
|
||||
#define OPT_EXIT 0x0800
|
||||
#define OPT_DATA 0x1000
|
||||
#define OPT_PERFILE 0x2000 /* the option is per-file (currently ffmpeg-only).
|
||||
implied by OPT_OFFSET or OPT_SPEC */
|
||||
#define OPT_OFFSET 0x4000 /* option is specified as an offset in a passed optctx */
|
||||
#define OPT_SPEC 0x8000 /* option is to be stored in an array of SpecifierOpt.
|
||||
Implies OPT_OFFSET. Next element after the offset is
|
||||
an int containing element count in the array. */
|
||||
#define OPT_TIME 0x10000
|
||||
#define OPT_DOUBLE 0x20000
|
||||
#define OPT_INPUT 0x40000
|
||||
#define OPT_OUTPUT 0x80000
|
||||
#define OPT_FUNC2 0x0400
|
||||
#define OPT_INT64 0x0800
|
||||
#define OPT_EXIT 0x1000
|
||||
union {
|
||||
void *dst_ptr;
|
||||
int (*func_arg)(void *, const char *, const char *);
|
||||
size_t off;
|
||||
void (*func_arg)(const char *); //FIXME passing error code as int return would be nicer than exit() in the func
|
||||
int *int_arg;
|
||||
char **str_arg;
|
||||
float *float_arg;
|
||||
int (*func2_arg)(const char *, const char *);
|
||||
int64_t *int64_arg;
|
||||
} u;
|
||||
const char *help;
|
||||
const char *argname;
|
||||
} OptionDef;
|
||||
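/* Illustrative sketch, not part of the original header: a minimal option table
 * in the style the fftools use. Identifiers prefixed "sketch_" are made up.
 * The string value is written through the dst_ptr member of the union. */
static char *sketch_title;

static const OptionDef sketch_options[] = {
    { "title", HAS_ARG | OPT_STRING, { &sketch_title },
      "set a title string", "title" },
    { NULL },
};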
|
||||
/**
|
||||
* Print help for all options matching specified flags.
|
||||
*
|
||||
* @param options a list of options
|
||||
* @param msg title of this group. Only printed if at least one option matches.
|
||||
* @param req_flags print only options which have all those flags set.
|
||||
* @param rej_flags don't print options which have any of those flags set.
|
||||
* @param alt_flags print only options that have at least one of those flags set
|
||||
*/
|
||||
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
|
||||
int rej_flags, int alt_flags);
|
||||
void show_help_options(const OptionDef *options, const char *msg, int mask, int value);
|
||||
|
||||
/**
|
||||
* Show help for all options with given flags in class and all its
|
||||
* children.
|
||||
*/
|
||||
void show_help_children(const AVClass *class, int flags);
|
||||
|
||||
/**
|
||||
* Per-fftool specific help handler. Implemented in each
|
||||
* fftool, called by show_help().
|
||||
*/
|
||||
void show_help_default(const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Generic -h handler common to all fftools.
|
||||
*/
|
||||
int show_help(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Parse the command line arguments.
|
||||
*
|
||||
* @param optctx an opaque options context
|
||||
* @param argc number of command line arguments
|
||||
* @param argv values of command line arguments
|
||||
* Parses the command line arguments.
|
||||
* @param options Array with the definitions required to interpret every
|
||||
* option of the form: -option_name [argument]
|
||||
* option of the form: -<option_name> [<argument>]
|
||||
* @param parse_arg_function Name of the function called to process every
|
||||
* argument without a leading option name flag. NULL if such arguments do
|
||||
* not have to be processed.
|
||||
*/
|
||||
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options,
|
||||
void (* parse_arg_function)(void *optctx, const char*));
|
||||
void parse_options(int argc, char **argv, const OptionDef *options,
|
||||
void (* parse_arg_function)(const char*));
|
||||
|
||||
/**
|
||||
* Parse one given option.
|
||||
*
|
||||
* @return on success 1 if arg was consumed, 0 otherwise; negative number on error
|
||||
*/
|
||||
int parse_option(void *optctx, const char *opt, const char *arg,
|
||||
const OptionDef *options);
|
||||
void set_context_opts(void *ctx, void *opts_ctx, int flags);
|
||||
|
||||
/**
|
||||
* An option extracted from the commandline.
|
||||
* Cannot use AVDictionary because of options like -map which can be
|
||||
* used multiple times.
|
||||
*/
|
||||
typedef struct Option {
|
||||
const OptionDef *opt;
|
||||
const char *key;
|
||||
const char *val;
|
||||
} Option;
|
||||
|
||||
typedef struct OptionGroupDef {
|
||||
/**< group name */
|
||||
const char *name;
|
||||
/**
|
||||
* Option to be used as group separator. Can be NULL for groups which
|
||||
* are terminated by a non-option argument (e.g. ffmpeg output files)
|
||||
*/
|
||||
const char *sep;
|
||||
/**
|
||||
* Option flags that must be set on each option that is
|
||||
* applied to this group
|
||||
*/
|
||||
int flags;
|
||||
} OptionGroupDef;
|
||||
|
||||
typedef struct OptionGroup {
|
||||
const OptionGroupDef *group_def;
|
||||
const char *arg;
|
||||
|
||||
Option *opts;
|
||||
int nb_opts;
|
||||
|
||||
AVDictionary *codec_opts;
|
||||
AVDictionary *format_opts;
|
||||
AVDictionary *resample_opts;
|
||||
struct SwsContext *sws_opts;
|
||||
AVDictionary *swr_opts;
|
||||
} OptionGroup;
|
||||
|
||||
/**
|
||||
* A list of option groups that all have the same group type
|
||||
* (e.g. input files or output files)
|
||||
*/
|
||||
typedef struct OptionGroupList {
|
||||
const OptionGroupDef *group_def;
|
||||
|
||||
OptionGroup *groups;
|
||||
int nb_groups;
|
||||
} OptionGroupList;
|
||||
|
||||
typedef struct OptionParseContext {
|
||||
OptionGroup global_opts;
|
||||
|
||||
OptionGroupList *groups;
|
||||
int nb_groups;
|
||||
|
||||
/* parsing state */
|
||||
OptionGroup cur_group;
|
||||
} OptionParseContext;
|
||||
|
||||
/**
|
||||
* Parse an options group and write results into optctx.
|
||||
*
|
||||
* @param optctx an app-specific options context. NULL for global options group
|
||||
*/
|
||||
int parse_optgroup(void *optctx, OptionGroup *g);
|
||||
|
||||
/**
|
||||
* Split the commandline into an intermediate form convenient for further
|
||||
* processing.
|
||||
*
|
||||
* The commandline is assumed to be composed of options which either belong to a
|
||||
* group (those with OPT_SPEC, OPT_OFFSET or OPT_PERFILE) or are global
|
||||
* (everything else).
|
||||
*
|
||||
* A group (defined by an OptionGroupDef struct) is a sequence of options
|
||||
* terminated by either a group separator option (e.g. -i) or a parameter that
|
||||
* is not an option (doesn't start with -). A group without a separator option
|
||||
* must always be first in the supplied groups list.
|
||||
*
|
||||
* All options within the same group are stored in one OptionGroup struct in an
|
||||
* OptionGroupList, all groups with the same group definition are stored in one
|
||||
* OptionGroupList in OptionParseContext.groups. The order of group lists is the
|
||||
* same as the order of group definitions.
|
||||
*/
|
||||
int split_commandline(OptionParseContext *octx, int argc, char *argv[],
|
||||
const OptionDef *options,
|
||||
const OptionGroupDef *groups, int nb_groups);
|
||||
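/* Illustrative sketch, not from the original source: group definitions as
 * split_commandline() expects them, using the OPT_INPUT / OPT_OUTPUT bits
 * defined above. Per-input options are closed by "-i <file>", while an output
 * group ends at a bare output filename. */
static const OptionGroupDef sketch_groups[] = {
    { "output file", NULL, OPT_OUTPUT },
    { "input file",  "i",  OPT_INPUT  },
};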
|
||||
/**
|
||||
* Free all allocated memory in an OptionParseContext.
|
||||
*/
|
||||
void uninit_parse_context(OptionParseContext *octx);
|
||||
|
||||
/**
|
||||
* Find the '-loglevel' option in the command line args and apply it.
|
||||
*/
|
||||
void parse_loglevel(int argc, char **argv, const OptionDef *options);
|
||||
|
||||
/**
|
||||
* Return index of option opt in argv or 0 if not found.
|
||||
*/
|
||||
int locate_option(int argc, char **argv, const OptionDef *options,
|
||||
const char *optname);
|
||||
|
||||
/**
|
||||
* Check if the given stream matches a stream specifier.
|
||||
*
|
||||
* @param s Corresponding format context.
|
||||
* @param st Stream from s to be checked.
|
||||
* @param spec A stream specifier of the [v|a|s|d]:[\<stream index\>] form.
|
||||
*
|
||||
* @return 1 if the stream matches, 0 if it doesn't, <0 on error
|
||||
*/
|
||||
int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec);
|
||||
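/* Illustrative sketch: count the streams of an already opened AVFormatContext
 * that match the "a" (audio) specifier. Error returns (<0) are ignored here
 * for brevity. */
static int sketch_count_audio_streams(AVFormatContext *ic)
{
    unsigned i;
    int n = 0;
    for (i = 0; i < ic->nb_streams; i++)
        if (check_stream_specifier(ic, ic->streams[i], "a") > 0)
            n++;
    return n;
}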
|
||||
/**
|
||||
* Filter out options for given codec.
|
||||
*
|
||||
* Create a new options dictionary containing only the options from
|
||||
* opts which apply to the codec with ID codec_id.
|
||||
*
|
||||
* @param opts dictionary to place options in
|
||||
* @param codec_id ID of the codec that should be filtered for
|
||||
* @param s Corresponding format context.
|
||||
* @param st A stream from s for which the options should be filtered.
|
||||
* @param codec The particular codec for which the options should be filtered.
|
||||
* If null, the default one is looked up according to the codec id.
|
||||
* @return a pointer to the created dictionary
|
||||
*/
|
||||
AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
|
||||
AVFormatContext *s, AVStream *st, AVCodec *codec);
|
||||
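/* Illustrative sketch: build the per-stream dictionary holding only the
 * entries of the global codec_opts that apply to this stream's decoder.
 * "ic" and "st" stand for an already opened AVFormatContext and one of its
 * streams; passing NULL lets the codec be looked up from the codec id. */
static AVDictionary *sketch_stream_opts(AVFormatContext *ic, AVStream *st)
{
    return filter_codec_opts(codec_opts, st->codec->codec_id, ic, st, NULL);
}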
|
||||
/**
|
||||
* Setup AVCodecContext options for avformat_find_stream_info().
|
||||
*
|
||||
* Create an array of dictionaries, one dictionary for each stream
|
||||
* contained in s.
|
||||
* Each dictionary will contain the options from codec_opts which can
|
||||
* be applied to the corresponding stream codec context.
|
||||
*
|
||||
* @return pointer to the created array of dictionaries, NULL if it
|
||||
* cannot be created
|
||||
*/
|
||||
AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
|
||||
AVDictionary *codec_opts);
|
||||
|
||||
/**
|
||||
* Print an error message to stderr, indicating filename and a human
|
||||
* readable description of the error code err.
|
||||
*
|
||||
* If strerror_r() is not available the use of this function in a
|
||||
* multithreaded application may be unsafe.
|
||||
*
|
||||
* @see av_strerror()
|
||||
*/
|
||||
void print_error(const char *filename, int err);
|
||||
|
||||
/**
|
||||
* Print the program banner to stderr. The banner contents depend on the
|
||||
* Prints the program banner to stderr. The banner contents depend on the
|
||||
* current version of the repository and of the libav* libraries used by
|
||||
* the program.
|
||||
*/
|
||||
void show_banner(int argc, char **argv, const OptionDef *options);
|
||||
void show_banner(void);
|
||||
|
||||
/**
|
||||
* Print the version of the program to stdout. The version message
|
||||
* Prints the version of the program to stdout. The version message
|
||||
* depends on the current versions of the repository and of the libav*
|
||||
* libraries.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_version(void *optctx, const char *opt, const char *arg);
|
||||
void show_version(void);
|
||||
|
||||
/**
|
||||
* Print the build configuration of the program to stdout. The contents
|
||||
* depend on the definition of FFMPEG_CONFIGURATION.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_buildconf(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print the license of the program to stdout. The license depends on
|
||||
* Prints the license of the program to stdout. The license depends on
|
||||
* the license of the libraries compiled into the program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_license(void *optctx, const char *opt, const char *arg);
|
||||
void show_license(void);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the formats supported by the
|
||||
* program (including devices).
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_formats(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the devices supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_devices(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the codecs supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_codecs(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the decoders supported by the
|
||||
* Prints a listing containing all the formats supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_decoders(void *optctx, const char *opt, const char *arg);
|
||||
void show_formats(void);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the encoders supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_encoders(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the filters supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_filters(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the bit stream filters supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_bsfs(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the protocols supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_protocols(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the pixel formats supported by the
|
||||
* program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_pix_fmts(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the standard channel layouts supported by
|
||||
* the program.
|
||||
* This option processing function does not utilize the arguments.
|
||||
*/
|
||||
int show_layouts(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the sample formats supported by the
|
||||
* program.
|
||||
*/
|
||||
int show_sample_fmts(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Print a listing containing all the color names and values recognized
|
||||
* by the program.
|
||||
*/
|
||||
int show_colors(void *optctx, const char *opt, const char *arg);
|
||||
|
||||
/**
|
||||
* Return a positive value if a line read from standard input
|
||||
* starts with [yY], otherwise return 0.
|
||||
*/
|
||||
int read_yesno(void);
|
||||
|
||||
/**
|
||||
* Read the file with name filename, and put its content in a newly
|
||||
* allocated 0-terminated buffer.
|
||||
*
|
||||
* @param filename file to read from
|
||||
* @param bufptr location where pointer to buffer is returned
|
||||
* @param size location where size of buffer is returned
|
||||
* @return >= 0 in case of success, a negative value corresponding to an
|
||||
* AVERROR error code in case of failure.
|
||||
*/
|
||||
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size);
|
||||
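/* Illustrative sketch (the file name is invented): slurp a whole file into a
 * 0-terminated buffer and release it again. The buffer is assumed to come
 * from av_malloc() and is therefore freed with av_free(). */
static void sketch_load_file(void)
{
    char  *buf;
    size_t size;

    if (cmdutils_read_file("preset.txt", &buf, &size) >= 0) {
        /* buf[0..size) now holds the file contents */
        av_free(buf);
    }
}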
|
||||
/**
|
||||
* Get a file corresponding to a preset file.
|
||||
*
|
||||
* If is_path is non-zero, look for the file in the path preset_name.
|
||||
* Otherwise search for a file named arg.ffpreset in the directories
|
||||
* $FFMPEG_DATADIR (if set), $HOME/.ffmpeg, and in the datadir defined
|
||||
* at configuration time or in a "ffpresets" folder along the executable
|
||||
* on win32, in that order. If no such file is found and
|
||||
* codec_name is defined, then search for a file named
|
||||
* codec_name-preset_name.avpreset in the above-mentioned directories.
|
||||
*
|
||||
* @param filename buffer where the name of the found filename is written
|
||||
* @param filename_size size in bytes of the filename buffer
|
||||
* @param preset_name name of the preset to search
|
||||
* @param is_path tell if preset_name is a filename path
|
||||
* @param codec_name name of the codec for which to look for the
|
||||
* preset, may be NULL
|
||||
*/
|
||||
FILE *get_preset_file(char *filename, size_t filename_size,
|
||||
const char *preset_name, int is_path, const char *codec_name);
|
||||
|
||||
/**
|
||||
* Realloc array to hold new_size elements of elem_size.
|
||||
* Calls exit() on failure.
|
||||
*
|
||||
* @param array array to reallocate
|
||||
* @param elem_size size in bytes of each element
|
||||
* @param size new element count will be written here
|
||||
* @param new_size number of elements to place in reallocated array
|
||||
* @return reallocated array
|
||||
*/
|
||||
void *grow_array(void *array, int elem_size, int *size, int new_size);
|
||||
|
||||
#define media_type_string av_get_media_type_string
|
||||
|
||||
#define GROW_ARRAY(array, nb_elems)\
|
||||
array = grow_array(array, sizeof(*array), &nb_elems, nb_elems + 1)
|
||||
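/* Illustrative sketch (identifiers invented): append one element to a growing
 * array. grow_array() updates the element count and exits on allocation
 * failure, so no error check is needed at the call site. */
static int *sketch_values;
static int  nb_sketch_values;

static void sketch_add_value(int v)
{
    GROW_ARRAY(sketch_values, nb_sketch_values);
    sketch_values[nb_sketch_values - 1] = v;
}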
|
||||
#define GET_PIX_FMT_NAME(pix_fmt)\
|
||||
const char *name = av_get_pix_fmt_name(pix_fmt);
|
||||
|
||||
#define GET_SAMPLE_FMT_NAME(sample_fmt)\
|
||||
const char *name = av_get_sample_fmt_name(sample_fmt)
|
||||
|
||||
#define GET_SAMPLE_RATE_NAME(rate)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "%d", rate);
|
||||
|
||||
#define GET_CH_LAYOUT_NAME(ch_layout)\
|
||||
char name[16];\
|
||||
snprintf(name, sizeof(name), "0x%"PRIx64, ch_layout);
|
||||
|
||||
#define GET_CH_LAYOUT_DESC(ch_layout)\
|
||||
char name[128];\
|
||||
av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
|
||||
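/* Illustrative sketch: the GET_*_NAME/DESC helpers above declare a local
 * "name" variable, so they are intended to be used at the top of a small
 * scope, for example: */
static const char *sketch_sample_fmt_name(enum AVSampleFormat fmt)
{
    GET_SAMPLE_FMT_NAME(fmt);
    return name;
}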
|
||||
#endif /* CMDUTILS_H */
|
||||
#endif /* FFMPEG_CMDUTILS_H */
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
{ "L" , OPT_EXIT, {.func_arg = show_license}, "show license" },
|
||||
{ "h" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "?" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
|
||||
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
|
||||
{ "buildconf" , OPT_EXIT, {.func_arg = show_buildconf}, "show build configuration" },
|
||||
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
|
||||
{ "devices" , OPT_EXIT, {.func_arg = show_devices }, "show available devices" },
|
||||
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
|
||||
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
|
||||
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },
|
||||
{ "bsfs" , OPT_EXIT, {.func_arg = show_bsfs }, "show available bit stream filters" },
|
||||
{ "protocols" , OPT_EXIT, {.func_arg = show_protocols}, "show available protocols" },
|
||||
{ "filters" , OPT_EXIT, {.func_arg = show_filters }, "show available filters" },
|
||||
{ "pix_fmts" , OPT_EXIT, {.func_arg = show_pix_fmts }, "show available pixel formats" },
|
||||
{ "layouts" , OPT_EXIT, {.func_arg = show_layouts }, "show standard channel layouts" },
|
||||
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
|
||||
{ "colors" , OPT_EXIT, {.func_arg = show_colors }, "show available color names" },
|
||||
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
|
||||
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
|
||||
{ "report" , 0, {(void*)opt_report}, "generate a report" },
|
||||
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
|
||||
{ "cpuflags" , HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
|
||||
{ "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
|
||||
#if CONFIG_OPENCL
|
||||
{ "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
|
||||
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
|
||||
#endif
|
||||
@@ -1,274 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2013 Lenny Wang
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/log.h"
|
||||
#include "libavutil/opencl.h"
|
||||
#include "cmdutils.h"
|
||||
|
||||
typedef struct {
|
||||
int platform_idx;
|
||||
int device_idx;
|
||||
char device_name[64];
|
||||
int64_t runtime;
|
||||
} OpenCLDeviceBenchmark;
|
||||
|
||||
const char *ocl_bench_source = AV_OPENCL_KERNEL(
|
||||
inline unsigned char clip_uint8(int a)
|
||||
{
|
||||
if (a & (~0xFF))
|
||||
return (-a)>>31;
|
||||
else
|
||||
return a;
|
||||
}
|
||||
|
||||
kernel void unsharp_bench(
|
||||
global unsigned char *src,
|
||||
global unsigned char *dst,
|
||||
global int *mask,
|
||||
int width,
|
||||
int height)
|
||||
{
|
||||
int i, j, local_idx, lc_idx, sum = 0;
|
||||
int2 thread_idx, block_idx, global_idx, lm_idx;
|
||||
thread_idx.x = get_local_id(0);
|
||||
thread_idx.y = get_local_id(1);
|
||||
block_idx.x = get_group_id(0);
|
||||
block_idx.y = get_group_id(1);
|
||||
global_idx.x = get_global_id(0);
|
||||
global_idx.y = get_global_id(1);
|
||||
local uchar data[32][32];
|
||||
local int lc[128];
|
||||
|
||||
for (i = 0; i <= 1; i++) {
|
||||
lm_idx.y = -8 + (block_idx.y + i) * 16 + thread_idx.y;
|
||||
lm_idx.y = lm_idx.y < 0 ? 0 : lm_idx.y;
|
||||
lm_idx.y = lm_idx.y >= height ? height - 1: lm_idx.y;
|
||||
for (j = 0; j <= 1; j++) {
|
||||
lm_idx.x = -8 + (block_idx.x + j) * 16 + thread_idx.x;
|
||||
lm_idx.x = lm_idx.x < 0 ? 0 : lm_idx.x;
|
||||
lm_idx.x = lm_idx.x >= width ? width - 1: lm_idx.x;
|
||||
data[i*16 + thread_idx.y][j*16 + thread_idx.x] = src[lm_idx.y*width + lm_idx.x];
|
||||
}
|
||||
}
|
||||
local_idx = thread_idx.y*16 + thread_idx.x;
|
||||
if (local_idx < 128)
|
||||
lc[local_idx] = mask[local_idx];
|
||||
barrier(CLK_LOCAL_MEM_FENCE);
|
||||
|
||||
\n#pragma unroll\n
|
||||
for (i = -4; i <= 4; i++) {
|
||||
lm_idx.y = 8 + i + thread_idx.y;
|
||||
\n#pragma unroll\n
|
||||
for (j = -4; j <= 4; j++) {
|
||||
lm_idx.x = 8 + j + thread_idx.x;
|
||||
lc_idx = (i + 4)*8 + j + 4;
|
||||
sum += (int)data[lm_idx.y][lm_idx.x] * lc[lc_idx];
|
||||
}
|
||||
}
|
||||
int temp = (int)data[thread_idx.y + 8][thread_idx.x + 8];
|
||||
int res = temp + (((temp - (int)((sum + 1<<15) >> 16))) >> 16);
|
||||
if (global_idx.x < width && global_idx.y < height)
|
||||
dst[global_idx.x + global_idx.y*width] = clip_uint8(res);
|
||||
}
|
||||
);
|
||||
|
||||
#define OCLCHECK(method, ... ) \
|
||||
do { \
|
||||
status = method(__VA_ARGS__); \
|
||||
if (status != CL_SUCCESS) { \
|
||||
av_log(NULL, AV_LOG_ERROR, # method " error '%s'\n", \
|
||||
av_opencl_errstr(status)); \
|
||||
ret = AVERROR_EXTERNAL; \
|
||||
goto end; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define CREATEBUF(out, flags, size) \
|
||||
do { \
|
||||
out = clCreateBuffer(ext_opencl_env->context, flags, size, NULL, &status); \
|
||||
if (status != CL_SUCCESS) { \
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create OpenCL buffer\n"); \
|
||||
ret = AVERROR_EXTERNAL; \
|
||||
goto end; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
static void fill_rand_int(int *data, int n)
|
||||
{
|
||||
int i;
|
||||
srand(av_gettime());
|
||||
for (i = 0; i < n; i++)
|
||||
data[i] = rand();
|
||||
}
|
||||
|
||||
#define OPENCL_NB_ITER 5
|
||||
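/* Builds the unsharp_bench kernel from the source string above, uploads random
 * input data, runs the kernel OPENCL_NB_ITER times over a 1920x1088 buffer and
 * returns the average time per iteration in microseconds, or a negative
 * AVERROR code on failure. */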
static int64_t run_opencl_bench(AVOpenCLExternalEnv *ext_opencl_env)
|
||||
{
|
||||
int i, arg = 0, width = 1920, height = 1088;
|
||||
int64_t start, ret = 0;
|
||||
cl_int status;
|
||||
size_t kernel_len;
|
||||
char *inbuf;
|
||||
int *mask;
|
||||
int buf_size = width * height * sizeof(char);
|
||||
int mask_size = sizeof(uint32_t) * 128;
|
||||
|
||||
cl_mem cl_mask, cl_inbuf, cl_outbuf;
|
||||
cl_kernel kernel = NULL;
|
||||
cl_program program = NULL;
|
||||
size_t local_work_size_2d[2] = {16, 16};
|
||||
size_t global_work_size_2d[2] = {(size_t)width, (size_t)height};
|
||||
|
||||
if (!(inbuf = av_malloc(buf_size)) || !(mask = av_malloc(mask_size))) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Out of memory\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fill_rand_int((int*)inbuf, buf_size/4);
|
||||
fill_rand_int(mask, mask_size/4);
|
||||
|
||||
CREATEBUF(cl_mask, CL_MEM_READ_ONLY, mask_size);
|
||||
CREATEBUF(cl_inbuf, CL_MEM_READ_ONLY, buf_size);
|
||||
CREATEBUF(cl_outbuf, CL_MEM_READ_WRITE, buf_size);
|
||||
|
||||
kernel_len = strlen(ocl_bench_source);
|
||||
program = clCreateProgramWithSource(ext_opencl_env->context, 1, &ocl_bench_source,
|
||||
&kernel_len, &status);
|
||||
if (status != CL_SUCCESS || !program) {
|
||||
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to create benchmark program\n");
|
||||
ret = AVERROR_EXTERNAL;
|
||||
goto end;
|
||||
}
|
||||
status = clBuildProgram(program, 1, &(ext_opencl_env->device_id), NULL, NULL, NULL);
|
||||
if (status != CL_SUCCESS) {
|
||||
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to build benchmark program\n");
|
||||
ret = AVERROR_EXTERNAL;
|
||||
goto end;
|
||||
}
|
||||
kernel = clCreateKernel(program, "unsharp_bench", &status);
|
||||
if (status != CL_SUCCESS) {
|
||||
av_log(NULL, AV_LOG_ERROR, "OpenCL unable to create benchmark kernel\n");
|
||||
ret = AVERROR_EXTERNAL;
|
||||
goto end;
|
||||
}
|
||||
|
||||
OCLCHECK(clEnqueueWriteBuffer, ext_opencl_env->command_queue, cl_inbuf, CL_TRUE, 0,
|
||||
buf_size, inbuf, 0, NULL, NULL);
|
||||
OCLCHECK(clEnqueueWriteBuffer, ext_opencl_env->command_queue, cl_mask, CL_TRUE, 0,
|
||||
mask_size, mask, 0, NULL, NULL);
|
||||
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_inbuf);
|
||||
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_outbuf);
|
||||
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_mem), &cl_mask);
|
||||
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &width);
|
||||
OCLCHECK(clSetKernelArg, kernel, arg++, sizeof(cl_int), &height);
|
||||
|
||||
start = av_gettime_relative();
|
||||
for (i = 0; i < OPENCL_NB_ITER; i++)
|
||||
OCLCHECK(clEnqueueNDRangeKernel, ext_opencl_env->command_queue, kernel, 2, NULL,
|
||||
global_work_size_2d, local_work_size_2d, 0, NULL, NULL);
|
||||
clFinish(ext_opencl_env->command_queue);
|
||||
ret = (av_gettime_relative() - start)/OPENCL_NB_ITER;
|
||||
end:
|
||||
if (kernel)
|
||||
clReleaseKernel(kernel);
|
||||
if (program)
|
||||
clReleaseProgram(program);
|
||||
if (cl_inbuf)
|
||||
clReleaseMemObject(cl_inbuf);
|
||||
if (cl_outbuf)
|
||||
clReleaseMemObject(cl_outbuf);
|
||||
if (cl_mask)
|
||||
clReleaseMemObject(cl_mask);
|
||||
av_free(inbuf);
|
||||
av_free(mask);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int compare_ocl_device_desc(const void *a, const void *b)
|
||||
{
|
||||
return ((OpenCLDeviceBenchmark*)a)->runtime - ((OpenCLDeviceBenchmark*)b)->runtime;
|
||||
}
|
||||
|
||||
int opt_opencl_bench(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
int i, j, nb_devices = 0, count = 0;
|
||||
int64_t score = 0;
|
||||
AVOpenCLDeviceList *device_list;
|
||||
AVOpenCLDeviceNode *device_node = NULL;
|
||||
OpenCLDeviceBenchmark *devices = NULL;
|
||||
cl_platform_id platform;
|
||||
|
||||
av_opencl_get_device_list(&device_list);
|
||||
for (i = 0; i < device_list->platform_num; i++)
|
||||
nb_devices += device_list->platform_node[i]->device_num;
|
||||
if (!nb_devices) {
|
||||
av_log(NULL, AV_LOG_ERROR, "No OpenCL device detected!\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
if (!(devices = av_malloc_array(nb_devices, sizeof(OpenCLDeviceBenchmark)))) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not allocate buffer\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
for (i = 0; i < device_list->platform_num; i++) {
|
||||
for (j = 0; j < device_list->platform_node[i]->device_num; j++) {
|
||||
device_node = device_list->platform_node[i]->device_node[j];
|
||||
platform = device_list->platform_node[i]->platform_id;
|
||||
score = av_opencl_benchmark(device_node, platform, run_opencl_bench);
|
||||
if (score > 0) {
|
||||
devices[count].platform_idx = i;
|
||||
devices[count].device_idx = j;
|
||||
devices[count].runtime = score;
|
||||
strcpy(devices[count].device_name, device_node->device_name);
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
qsort(devices, count, sizeof(OpenCLDeviceBenchmark), compare_ocl_device_desc);
|
||||
fprintf(stderr, "platform_idx\tdevice_idx\tdevice_name\truntime\n");
|
||||
for (i = 0; i < count; i++)
|
||||
fprintf(stdout, "%d\t%d\t%s\t%"PRId64"\n",
|
||||
devices[i].platform_idx, devices[i].device_idx,
|
||||
devices[i].device_name, devices[i].runtime);
|
||||
|
||||
av_opencl_free_device_list(&device_list);
|
||||
av_free(devices);
|
||||
return 0;
|
||||
}
|
||||
|
||||
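/* Parses the -opencl_options argument as a ':'-separated list of key=value
 * pairs and hands each pair to av_opencl_set_option(). */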
int opt_opencl(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
char *key, *value;
|
||||
const char *opts = arg;
|
||||
int ret = 0;
|
||||
while (*opts) {
|
||||
ret = av_opt_get_key_value(&opts, "=", ":", 0, &key, &value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = av_opencl_set_option(key, value);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (*opts)
|
||||
opts++;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
common.mak (modified, 195 changed lines)
@@ -2,153 +2,116 @@
|
||||
# common bits used by all libraries
|
||||
#
|
||||
|
||||
# first so "all" becomes default target
|
||||
all: all-yes
|
||||
all: # make "all" default target
|
||||
|
||||
ifndef SUBDIR
|
||||
vpath %.c $(SRC_DIR)
|
||||
vpath %.h $(SRC_DIR)
|
||||
vpath %.S $(SRC_DIR)
|
||||
vpath %.asm $(SRC_DIR)
|
||||
vpath %.v $(SRC_DIR)
|
||||
|
||||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX HOSTCC HOSTLD AS YASM AR LD STRIP CP WINDRES
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM
|
||||
|
||||
MSG = $@
|
||||
M = @$(call ECHO,$(TAG),$@);
|
||||
$(foreach VAR,$(BRIEF), \
|
||||
$(eval override $(VAR) = @$$(call ECHO,$(VAR),$$(MSG)); $($(VAR))))
|
||||
$(foreach VAR,$(SILENT),$(eval override $(VAR) = @$($(VAR))))
|
||||
$(eval INSTALL = @$(call ECHO,INSTALL,$$(^:$(SRC_DIR)/%=%)); $(INSTALL))
|
||||
ifeq ($(SRC_DIR),$(SRC_PATH_BARE))
|
||||
BUILD_ROOT_REL = .
|
||||
else
|
||||
BUILD_ROOT_REL = ..
|
||||
endif
|
||||
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avresample avutil postproc swscale swresample
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale
|
||||
|
||||
# NASM requires -I path terminated with /
|
||||
IFLAGS := -I. -I$(SRC_PATH)/
|
||||
CPPFLAGS := $(IFLAGS) $(CPPFLAGS)
|
||||
CFLAGS += $(ECFLAGS)
|
||||
CCFLAGS = $(CPPFLAGS) $(CFLAGS)
|
||||
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
|
||||
CXXFLAGS += $(CPPFLAGS) $(CFLAGS)
|
||||
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm
|
||||
|
||||
HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
|
||||
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
|
||||
|
||||
define COMPILE
|
||||
$(call $(1)DEP,$(1))
|
||||
$($(1)) $($(1)FLAGS) $($(1)_DEPFLAGS) $($(1)_C) $($(1)_O) $<
|
||||
endef
|
||||
|
||||
COMPILE_C = $(call COMPILE,CC)
|
||||
COMPILE_CXX = $(call COMPILE,CXX)
|
||||
COMPILE_S = $(call COMPILE,AS)
|
||||
COMPILE_HOSTC = $(call COMPILE,HOSTCC)
|
||||
CFLAGS := -DHAVE_AV_CONFIG_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE \
|
||||
-I$(BUILD_ROOT_REL) -I$(SRC_PATH) $(OPTFLAGS)
|
||||
|
||||
%.o: %.c
|
||||
$(COMPILE_C)
|
||||
|
||||
%.o: %.cpp
|
||||
$(COMPILE_CXX)
|
||||
|
||||
%.o: %.m
|
||||
$(COMPILE_C)
|
||||
|
||||
%.s: %.c
|
||||
$(CC) $(CPPFLAGS) $(CFLAGS) -S -o $@ $<
|
||||
$(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
|
||||
|
||||
%.o: %.S
|
||||
$(COMPILE_S)
|
||||
$(CC) $(CFLAGS) $(LIBOBJFLAGS) -c -o $@ $<
|
||||
|
||||
%_host.o: %.c
|
||||
$(COMPILE_HOSTC)
|
||||
%.ho: %.h
|
||||
$(CC) $(CFLAGS) $(LIBOBJFLAGS) -Wno-unused -c -o $@ -x c $<
|
||||
|
||||
%.o: %.rc
|
||||
$(WINDRES) $(IFLAGS) --preprocessor "$(DEPWINDRES) -E -xc-header -DRC_INVOKED $(CC_DEPFLAGS)" -o $@ $<
|
||||
%.d: %.c
|
||||
$(DEPEND_CMD) > $@
|
||||
|
||||
%.i: %.c
|
||||
$(CC) $(CCFLAGS) $(CC_E) $<
|
||||
%.d: %.S
|
||||
$(DEPEND_CMD) > $@
|
||||
|
||||
%.h.c:
|
||||
$(Q)echo '#include "$*.h"' >$@
%.d: %.cpp
$(DEPEND_CMD) > $@

%.o: %.d

%$(EXESUF): %.c

%.ver: %.v
$(Q)sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@
sed 's/$$MAJOR/$($(basename $(@F))_VERSION_MAJOR)/' $^ > $@

%.c %.h: TAG = GEN

# Dummy rule to stop make trying to rebuild removed or renamed headers
%.h:
@:

# Disable suffix rules. Most of the builtin rules are suffix rules,
# so this saves some time on slow systems.
.SUFFIXES:

# Do not delete intermediate files from chains of implicit rules
$(OBJS):
SVN_ENTRIES = $(SRC_PATH_BARE)/.svn/entries
ifeq ($(wildcard $(SVN_ENTRIES)),$(SVN_ENTRIES))
$(BUILD_ROOT_REL)/version.h: $(SVN_ENTRIES)
endif

include $(SRC_PATH)/arch.mak
$(BUILD_ROOT_REL)/version.h: $(SRC_PATH_BARE)/version.sh
$< $(SRC_PATH) $@ $(EXTRA_VERSION)

OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
install: install-libs install-headers

LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(EXTRALIBS)
uninstall: uninstall-libs uninstall-headers

OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)%) $(TESTPROGS:%=$(SUBDIR)%-test.o)
TESTPROGS := $(TESTPROGS:%=$(SUBDIR)%-test$(EXESUF))
HOSTOBJS := $(HOSTPROGS:%=$(SUBDIR)%.o)
HOSTPROGS := $(HOSTPROGS:%=$(SUBDIR)%$(HOSTEXESUF))
TOOLS += $(TOOLS-yes)
TOOLOBJS := $(TOOLS:%=tools/%.o)
TOOLS := $(TOOLS:%=tools/%$(EXESUF))
HEADERS += $(HEADERS-yes)
.PHONY: all depend dep clean distclean install* uninstall* tests
endif

PATH_LIBNAME = $(foreach NAME,$(1),lib$(NAME)/$($(CONFIG_SHARED:yes=S)LIBNAME))
DEP_LIBS := $(foreach lib,$(FFLIBS),$(call PATH_LIBNAME,$(lib)))
CFLAGS += $(CFLAGS-yes)
OBJS += $(OBJS-yes)
FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTS += $(TESTS-yes)

FFEXTRALIBS := $(addprefix -l,$(addsuffix $(BUILDSUF),$(FFLIBS))) $(EXTRALIBS)
FFLDFLAGS := $(addprefix -L$(BUILD_ROOT)/lib,$(FFLIBS)) $(LDFLAGS)

OBJS := $(addprefix $(SUBDIR),$(OBJS))
TESTS := $(addprefix $(SUBDIR),$(TESTS))

DEP_LIBS:=$(foreach NAME,$(FFLIBS),lib$(NAME)/$($(BUILD_SHARED:yes=S)LIBNAME))

SRC_DIR := $(SRC_PATH)/lib$(NAME)
ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)/$(ARCH)/*.h))
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
checkheaders: $(HOBJS)
.SECONDARY: $(HOBJS:.o=.c)
checkheaders: $(filter-out %_template.ho,$(ALLHEADERS:.h=.ho))

alltools: $(TOOLS)
DEPS := $(OBJS:.o=.d)
depend dep: $(DEPS)

$(HOSTOBJS): %.o: %.c
$(COMPILE_HOSTC)

$(HOSTPROGS): %$(HOSTEXESUF): %.o
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTLIBS)

$(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(TOOLOBJS): | tools

OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))

CLEANSUFFIXES = *.d *.o *~ *.h.c *.map *.ver *.ho *.gcno *.gcda
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
CLEANSUFFIXES = *.o *~ *.ho *.ver
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a *.exp *.map
DISTCLEANSUFFIXES = *.d *.pc

define RULES
$(SUBDIR)%$(EXESUF): $(SUBDIR)%.o
$(CC) $(FFLDFLAGS) -o $$@ $$^ $(SUBDIR)$(LIBNAME) $(FFEXTRALIBS)

$(SUBDIR)%-test.o: $(SUBDIR)%.c
$(CC) $(CFLAGS) -DTEST -c -o $$@ $$^

$(SUBDIR)%-test.o: $(SUBDIR)%-test.c
$(CC) $(CFLAGS) -DTEST -c -o $$@ $$^

$(SUBDIR)x86/%.o: $(SUBDIR)x86/%.asm
$(YASM) $(YASMFLAGS) -I $$(<D)/ -o $$@ $$<

$(SUBDIR)x86/%.d: $(SUBDIR)x86/%.asm
$(YASM) $(YASMFLAGS) -I $$(<D)/ -M -o $$(@:%.d=%.o) $$< > $$@

clean::
$(RM) $(OBJS) $(OBJS:.o=.d)
$(RM) $(HOSTPROGS)
$(RM) $(TOOLS)
rm -f $(TESTS) $(addprefix $(SUBDIR),$(CLEANFILES) $(CLEANSUFFIXES) $(LIBSUFFIXES)) \
$(addprefix $(SUBDIR), $(foreach suffix,$(CLEANSUFFIXES),$(addsuffix /$(suffix),$(DIRS))))

distclean:: clean
rm -f $(addprefix $(SUBDIR),$(DISTCLEANSUFFIXES)) \
$(addprefix $(SUBDIR), $(foreach suffix,$(DISTCLEANSUFFIXES),$(addsuffix /$(suffix),$(DIRS))))
endef

$(eval $(RULES))

-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d))
tests: $(TESTS)

-include $(DEPS)

@@ -1,31 +0,0 @@
/*
 * Work around the class() function in AIX math.h clashing with
 * identifiers named "class".
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef FFMPEG_COMPAT_AIX_MATH_H
#define FFMPEG_COMPAT_AIX_MATH_H

#define class class_in_math_h_causes_problems

#include_next <math.h>

#undef class

#endif /* FFMPEG_COMPAT_AIX_MATH_H */
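/* Not part of the original header: a minimal sketch of the clash this shim
 * works around. AIX's math.h declares a function named class(), so plain C
 * code that uses "class" as an identifier breaks unless the wrapper above is
 * found first on the include path. The struct and field names below are
 * purely illustrative. */
#include <math.h>

struct log_ctx {
    const void *class;   /* valid C; collides with AIX math.h's class() */
};

static double scaled(const struct log_ctx *ctx, double x)
{
    (void)ctx->class;
    return sqrt(x);      /* math.h itself keeps working as usual */
}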
@@ -1,880 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
// NOTE: this is a partial update of the Avisynth C interface to recognize
|
||||
// new color spaces added in Avisynth 2.60. By no means is this document
|
||||
// completely Avisynth 2.60 compliant.
|
||||
|
||||
#ifndef __AVISYNTH_C__
|
||||
#define __AVISYNTH_C__
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
typedef unsigned char BYTE;
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVISYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 4 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_A=1<<4,
|
||||
AVS_PLANAR_R=1<<5,
|
||||
AVS_PLANAR_G=1<<6,
|
||||
AVS_PLANAR_B=1<<7,
|
||||
AVS_PLANAR_A_ALIGNED=AVS_PLANAR_A|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_R_ALIGNED=AVS_PLANAR_R|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_G_ALIGNED=AVS_PLANAR_G|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_B_ALIGNED=AVS_PLANAR_B|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31,
|
||||
|
||||
AVS_CS_SHIFT_SUB_WIDTH = 0,
|
||||
AVS_CS_SHIFT_SUB_HEIGHT = 1 << 3,
|
||||
AVS_CS_SHIFT_SAMPLE_BITS = 1 << 4,
|
||||
|
||||
AVS_CS_SUB_WIDTH_MASK = 7 << AVS_CS_SHIFT_SUB_WIDTH,
|
||||
AVS_CS_SUB_WIDTH_1 = 3 << AVS_CS_SHIFT_SUB_WIDTH, // YV24
|
||||
AVS_CS_SUB_WIDTH_2 = 0 << AVS_CS_SHIFT_SUB_WIDTH, // YV12, I420, YV16
|
||||
AVS_CS_SUB_WIDTH_4 = 1 << AVS_CS_SHIFT_SUB_WIDTH, // YUV9, YV411
|
||||
|
||||
AVS_CS_VPLANEFIRST = 1 << 3, // YV12, YV16, YV24, YV411, YUV9
|
||||
AVS_CS_UPLANEFIRST = 1 << 4, // I420
|
||||
|
||||
AVS_CS_SUB_HEIGHT_MASK = 7 << AVS_CS_SHIFT_SUB_HEIGHT,
|
||||
AVS_CS_SUB_HEIGHT_1 = 3 << AVS_CS_SHIFT_SUB_HEIGHT, // YV16, YV24, YV411
|
||||
AVS_CS_SUB_HEIGHT_2 = 0 << AVS_CS_SHIFT_SUB_HEIGHT, // YV12, I420
|
||||
AVS_CS_SUB_HEIGHT_4 = 1 << AVS_CS_SHIFT_SUB_HEIGHT, // YUV9
|
||||
|
||||
AVS_CS_SAMPLE_BITS_MASK = 7 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_8 = 0 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_16 = 1 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
AVS_CS_SAMPLE_BITS_32 = 2 << AVS_CS_SHIFT_SAMPLE_BITS,
|
||||
|
||||
AVS_CS_PLANAR_MASK = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_BGR | AVS_CS_SAMPLE_BITS_MASK | AVS_CS_SUB_HEIGHT_MASK | AVS_CS_SUB_WIDTH_MASK,
|
||||
AVS_CS_PLANAR_FILTER = ~( AVS_CS_VPLANEFIRST | AVS_CS_UPLANEFIRST )};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
// AVS_CS_YV12 = 1<<3 Reserved
|
||||
// AVS_CS_I420 = 1<<4 Reserved
|
||||
AVS_CS_RAW32 = 1<<5 | AVS_CS_INTERLEAVED,
|
||||
|
||||
AVS_CS_YV24 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_1, // YVU 4:4:4 planar
|
||||
AVS_CS_YV16 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:2 planar
|
||||
AVS_CS_YV12 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YVU 4:2:0 planar
|
||||
AVS_CS_I420 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_UPLANEFIRST | AVS_CS_SUB_HEIGHT_2 | AVS_CS_SUB_WIDTH_2, // YUV 4:2:0 planar
|
||||
AVS_CS_IYUV = AVS_CS_I420,
|
||||
AVS_CS_YV411 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_1 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:1 planar
|
||||
AVS_CS_YUV9 = AVS_CS_PLANAR | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 | AVS_CS_VPLANEFIRST | AVS_CS_SUB_HEIGHT_4 | AVS_CS_SUB_WIDTH_4, // YVU 4:1:0 planar
|
||||
AVS_CS_Y8 = AVS_CS_PLANAR | AVS_CS_INTERLEAVED | AVS_CS_YUV | AVS_CS_SAMPLE_BITS_8 // Y 4:0:0 planar
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV24 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv16(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV16 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV12 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_yv411(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_YV411 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_y8(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_PLANAR_MASK) == (AVS_CS_Y8 & AVS_CS_PLANAR_FILTER); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return avs_is_planar(p) ? ((p->pixel_type & AVS_CS_PLANAR_MASK) == (c_space & AVS_CS_PLANAR_FILTER)) : ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
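/* Not part of the original header: a minimal sketch showing the intended use
 * of the AVS_VideoInfo helpers above instead of reading pixel_type directly.
 * The function name and the use of stdio are illustrative only. */
#include <stdio.h>

static void print_clip_info(const AVS_VideoInfo *vi)
{
    if (avs_has_video(vi))
        printf("video: %dx%d, %u/%u fps, %d bits/pixel, %s\n",
               vi->width, vi->height,
               vi->fps_numerator, vi->fps_denominator,
               avs_bits_per_pixel(vi),
               avs_is_yv12(vi) ? "YV12" :
               avs_is_rgb(vi)  ? "RGB"  : "other");
    if (avs_has_audio(vi))
        printf("audio: %d Hz, %d channel(s)\n",
               avs_samples_per_second(vi), avs_audio_channels(vi));
}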
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
BYTE * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
volatile long sequence_number;
|
||||
|
||||
volatile long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
volatile long refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
int row_sizeUV, heightUV;
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_sizeUV;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = (p->row_sizeUV+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_sizeUV;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->heightUV;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const BYTE* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE BYTE* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
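/* Not part of the original header: a minimal sketch of per-plane access.
 * Rows are pitch bytes apart, so the plane is walked row by row rather than
 * treated as one contiguous block. The function name is illustrative; a real
 * filter would first obtain a writable frame (see avs_make_writable below). */
#include <string.h>

static void blank_luma(AVS_VideoFrame *frame)
{
    BYTE *dst    = avs_get_write_ptr_p(frame, AVS_PLANAR_Y);
    int   pitch  = avs_get_pitch_p(frame, AVS_PLANAR_Y);
    int   rowlen = avs_get_row_size_p(frame, AVS_PLANAR_Y);
    int   height = avs_get_height_p(frame, AVS_PLANAR_Y);
    int   y;

    if (!dst)   /* avs_get_write_ptr_p() returns 0 for a shared frame */
        return;
    for (y = 0; y < height; y++)
        memset(dst + y * pitch, 0, rowlen);
}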
|
||||
|
||||
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is, use avs_copy_value
// and avs_release_value appropriately, as you would if AVS_Value were
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API don't use the AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is laid out identically to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// for some function e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initialized with avs_void.
// It should also be set to avs_void after the value is released
// with avs_copy_value. Consider it the equivalent of setting
// a pointer to NULL.
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on an AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
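/* Not part of the original header: a minimal sketch of the constructors and
 * accessors above. As the inline definitions show, the constructors only fill
 * in the tagged union, so locally built non-clip values need no release; the
 * release rule applies to values handed back by Avisynth (avs_invoke,
 * avs_get_var, ...). Names and the literal string are illustrative. */
static void value_example(void)
{
    AVS_Value elems[2];
    AVS_Value args;

    elems[0] = avs_new_value_string("input.avs");
    elems[1] = avs_new_value_bool(1);
    args     = avs_new_value_array(elems, 2);

    if (avs_is_array(args) && avs_array_size(args) == 2) {
        const char *name = avs_as_string(avs_array_elt(args, 0));
        int         flag = avs_as_bool(avs_array_elt(args, 1));
        (void)name;
        (void)flag;
    }
}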
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, int frame_range);
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set when ever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter.
// fi is set to point to the AVS_FilterInfo so that you can
// modify it once it is initialized.
// store_child should generally be set to true. If it is not
// set, then ALL methods (the function pointers) must be defined.
// If it is set, then you do not need to worry about freeing the child
// clip.
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
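/* Not part of the original header: a condensed sketch of how a C plugin
 * typically wires up avs_new_c_filter from inside its AVS_ApplyFunc. All
 * names are hypothetical and error handling is omitted; with store_child
 * nonzero, callbacks left unset fall through to the child clip.
 * avs_new_value_clip() requires AVSC_NO_DECLSPEC to be undefined. */
static AVS_VideoFrame * AVSC_CC pass_frame(AVS_FilterInfo *fi, int n)
{
    /* fetch (and here simply forward) the corresponding child frame */
    return avs_get_frame(fi->child, n);
}

static AVS_Value AVSC_CC create_pass_filter(AVS_ScriptEnvironment *env,
                                            AVS_Value args, void *user_data)
{
    AVS_FilterInfo *fi;
    AVS_Clip *clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    AVS_Value ret;

    fi->get_frame = pass_frame;   /* other callbacks delegate to the child */
    ret = avs_new_value_clip(clip);
    avs_release_clip(clip);       /* ret now holds the remaining reference */
    (void)user_data;
    return ret;
}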
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
AVS_CPUF_SSE3 = 0x100, // PIV+, K8 Venice
|
||||
AVS_CPUF_SSSE3 = 0x200, // Core 2
|
||||
AVS_CPUF_SSE4 = 0x400, // Penryn, Wolfdale, Yorkfield
|
||||
AVS_CPUF_SSE4_1 = 0x400,
|
||||
AVS_CPUF_SSE4_2 = 0x800, // Nehalem
|
||||
};
|
||||
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, void* val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value

AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
// The returned value must be released with avs_release_value
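/* Not part of the original header: a minimal sketch of invoking a built-in
 * function and honoring the release rule stated above. The wrapper name is
 * illustrative; the caller owns the returned clip and must release it with
 * avs_release_clip(). */
static AVS_Clip *open_script(AVS_ScriptEnvironment *env, const char *path)
{
    AVS_Value arg  = avs_new_value_string(path);
    AVS_Value res  = avs_invoke(env, "Import", arg, NULL);
    AVS_Clip *clip = NULL;

    if (!avs_is_error(res) && avs_is_clip(res))
        clip = avs_take_clip(res, env);   /* take our own reference */
    avs_release_value(res);               /* done with the fat pointer */
    return clip;
}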
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height);
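/* Not part of the original header: a minimal sketch of avs_bit_blt(), which
 * copies row_size bytes per line while honoring the (possibly different)
 * source and destination pitches. The function name is illustrative and dst
 * is assumed to be writable. */
static void copy_luma(AVS_ScriptEnvironment *env,
                      AVS_VideoFrame *dst, const AVS_VideoFrame *src)
{
    avs_bit_blt(env,
                avs_get_write_ptr_p(dst, AVS_PLANAR_Y),
                avs_get_pitch_p(dst, AVS_PLANAR_Y),
                avs_get_read_ptr_p(src, AVS_PLANAR_Y),
                avs_get_pitch_p(src, AVS_PLANAR_Y),
                avs_get_row_size_p(src, AVS_PLANAR_Y),
                avs_get_height_p(src, AVS_PLANAR_Y));
}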
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
|
||||
|
||||
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
|
||||
#ifdef AVSC_NO_DECLSPEC
|
||||
// use LoadLibrary and related functions to dynamically load Avisynth instead of declspec(dllimport)
|
||||
/*
|
||||
The following functions need to have been declared, probably from windows.h
|
||||
|
||||
void* malloc(size_t)
|
||||
void free(void*);
|
||||
|
||||
HMODULE LoadLibrary(const char*);
|
||||
void* GetProcAddress(HMODULE, const char*);
|
||||
FreeLibrary(HMODULE);
|
||||
*/
|
||||
|
||||
|
||||
typedef struct AVS_Library AVS_Library;
|
||||
|
||||
#define AVSC_DECLARE_FUNC(name) name##_func name
|
||||
|
||||
struct AVS_Library {
|
||||
HMODULE handle;
|
||||
|
||||
AVSC_DECLARE_FUNC(avs_add_function);
|
||||
AVSC_DECLARE_FUNC(avs_at_exit);
|
||||
AVSC_DECLARE_FUNC(avs_bit_blt);
|
||||
AVSC_DECLARE_FUNC(avs_check_version);
|
||||
AVSC_DECLARE_FUNC(avs_clip_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_copy_clip);
|
||||
AVSC_DECLARE_FUNC(avs_copy_value);
|
||||
AVSC_DECLARE_FUNC(avs_copy_video_frame);
|
||||
AVSC_DECLARE_FUNC(avs_create_script_environment);
|
||||
AVSC_DECLARE_FUNC(avs_delete_script_environment);
|
||||
AVSC_DECLARE_FUNC(avs_function_exists);
|
||||
AVSC_DECLARE_FUNC(avs_get_audio);
|
||||
AVSC_DECLARE_FUNC(avs_get_cpu_flags);
|
||||
AVSC_DECLARE_FUNC(avs_get_error);
|
||||
AVSC_DECLARE_FUNC(avs_get_frame);
|
||||
AVSC_DECLARE_FUNC(avs_get_parity);
|
||||
AVSC_DECLARE_FUNC(avs_get_var);
|
||||
AVSC_DECLARE_FUNC(avs_get_version);
|
||||
AVSC_DECLARE_FUNC(avs_get_video_info);
|
||||
AVSC_DECLARE_FUNC(avs_invoke);
|
||||
AVSC_DECLARE_FUNC(avs_make_writable);
|
||||
AVSC_DECLARE_FUNC(avs_new_c_filter);
|
||||
AVSC_DECLARE_FUNC(avs_new_video_frame_a);
|
||||
AVSC_DECLARE_FUNC(avs_release_clip);
|
||||
AVSC_DECLARE_FUNC(avs_release_value);
|
||||
AVSC_DECLARE_FUNC(avs_release_video_frame);
|
||||
AVSC_DECLARE_FUNC(avs_save_string);
|
||||
AVSC_DECLARE_FUNC(avs_set_cache_hints);
|
||||
AVSC_DECLARE_FUNC(avs_set_global_var);
|
||||
AVSC_DECLARE_FUNC(avs_set_memory_max);
|
||||
AVSC_DECLARE_FUNC(avs_set_to_clip);
|
||||
AVSC_DECLARE_FUNC(avs_set_var);
|
||||
AVSC_DECLARE_FUNC(avs_set_working_dir);
|
||||
AVSC_DECLARE_FUNC(avs_sprintf);
|
||||
AVSC_DECLARE_FUNC(avs_subframe);
|
||||
AVSC_DECLARE_FUNC(avs_subframe_planar);
|
||||
AVSC_DECLARE_FUNC(avs_take_clip);
|
||||
AVSC_DECLARE_FUNC(avs_vsprintf);
|
||||
};
|
||||
|
||||
#undef AVSC_DECLARE_FUNC
|
||||
|
||||
|
||||
AVSC_INLINE AVS_Library * avs_load_library() {
|
||||
AVS_Library *library = (AVS_Library *)malloc(sizeof(AVS_Library));
|
||||
if (!library)
|
||||
return NULL;
|
||||
library->handle = LoadLibrary("avisynth");
|
||||
if (library->handle == NULL)
|
||||
goto fail;
|
||||
|
||||
#define __AVSC_STRINGIFY(x) #x
|
||||
#define AVSC_STRINGIFY(x) __AVSC_STRINGIFY(x)
|
||||
#define AVSC_LOAD_FUNC(name) {\
|
||||
library->name = (name##_func) GetProcAddress(library->handle, AVSC_STRINGIFY(name));\
|
||||
if (library->name == NULL)\
|
||||
goto fail;\
|
||||
}
|
||||
|
||||
AVSC_LOAD_FUNC(avs_add_function);
|
||||
AVSC_LOAD_FUNC(avs_at_exit);
|
||||
AVSC_LOAD_FUNC(avs_bit_blt);
|
||||
AVSC_LOAD_FUNC(avs_check_version);
|
||||
AVSC_LOAD_FUNC(avs_clip_get_error);
|
||||
AVSC_LOAD_FUNC(avs_copy_clip);
|
||||
AVSC_LOAD_FUNC(avs_copy_value);
|
||||
AVSC_LOAD_FUNC(avs_copy_video_frame);
|
||||
AVSC_LOAD_FUNC(avs_create_script_environment);
|
||||
AVSC_LOAD_FUNC(avs_delete_script_environment);
|
||||
AVSC_LOAD_FUNC(avs_function_exists);
|
||||
AVSC_LOAD_FUNC(avs_get_audio);
|
||||
AVSC_LOAD_FUNC(avs_get_cpu_flags);
|
||||
AVSC_LOAD_FUNC(avs_get_error);
|
||||
AVSC_LOAD_FUNC(avs_get_frame);
|
||||
AVSC_LOAD_FUNC(avs_get_parity);
|
||||
AVSC_LOAD_FUNC(avs_get_var);
|
||||
AVSC_LOAD_FUNC(avs_get_version);
|
||||
AVSC_LOAD_FUNC(avs_get_video_info);
|
||||
AVSC_LOAD_FUNC(avs_invoke);
|
||||
AVSC_LOAD_FUNC(avs_make_writable);
|
||||
AVSC_LOAD_FUNC(avs_new_c_filter);
|
||||
AVSC_LOAD_FUNC(avs_new_video_frame_a);
|
||||
AVSC_LOAD_FUNC(avs_release_clip);
|
||||
AVSC_LOAD_FUNC(avs_release_value);
|
||||
AVSC_LOAD_FUNC(avs_release_video_frame);
|
||||
AVSC_LOAD_FUNC(avs_save_string);
|
||||
AVSC_LOAD_FUNC(avs_set_cache_hints);
|
||||
AVSC_LOAD_FUNC(avs_set_global_var);
|
||||
AVSC_LOAD_FUNC(avs_set_memory_max);
|
||||
AVSC_LOAD_FUNC(avs_set_to_clip);
|
||||
AVSC_LOAD_FUNC(avs_set_var);
|
||||
AVSC_LOAD_FUNC(avs_set_working_dir);
|
||||
AVSC_LOAD_FUNC(avs_sprintf);
|
||||
AVSC_LOAD_FUNC(avs_subframe);
|
||||
AVSC_LOAD_FUNC(avs_subframe_planar);
|
||||
AVSC_LOAD_FUNC(avs_take_clip);
|
||||
AVSC_LOAD_FUNC(avs_vsprintf);
|
||||
|
||||
#undef __AVSC_STRINGIFY
|
||||
#undef AVSC_STRINGIFY
|
||||
#undef AVSC_LOAD_FUNC
|
||||
|
||||
return library;
|
||||
|
||||
fail:
|
||||
free(library);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
AVSC_INLINE void avs_free_library(AVS_Library *library) {
|
||||
if (!library)
|
||||
return;
|
||||
FreeLibrary(library->handle);
|
||||
free(library);
|
||||
}
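/* Not part of the original header: a minimal sketch of the AVSC_NO_DECLSPEC
 * path, where every entry point is reached through the AVS_Library
 * function-pointer table instead of import-library linkage. The function
 * name is illustrative and error handling is reduced to the essentials. */
static int probe_avisynth(void)
{
    AVS_Library *lib = avs_load_library();
    AVS_ScriptEnvironment *env;
    int ret = -1;

    if (!lib)
        return -1;
    env = lib->avs_create_script_environment(AVISYNTH_INTERFACE_VERSION);
    if (env && !lib->avs_get_error(env)) {
        /* ... lib->avs_invoke(env, ...), lib->avs_get_frame(...), etc. ... */
        ret = 0;
    }
    if (env)
        lib->avs_delete_script_environment(env);
    avs_free_library(lib);
    return ret;
}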
|
||||
#endif
|
||||
|
||||
#endif
|
||||
@@ -1,68 +0,0 @@
|
||||
// Copyright (c) 2011 FFmpegSource Project
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
// in the Software without restriction, including without limitation the rights
|
||||
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
// copies of the Software, and to permit persons to whom the Software is
|
||||
// furnished to do so, subject to the following conditions:
|
||||
//
|
||||
// The above copyright notice and this permission notice shall be included in
|
||||
// all copies or substantial portions of the Software.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
// THE SOFTWARE.
|
||||
|
||||
/* these are defines/functions that are used and were changed in the switch to 2.6
 * and are needed to maintain full compatibility with 2.5 */
|
||||
|
||||
enum {
|
||||
AVS_CS_YV12_25 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420_25 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
};
|
||||
|
||||
AVSC_INLINE int avs_get_height_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p_25(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane)
|
||||
{
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV)
|
||||
return p->row_size>>1;
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV)
|
||||
{
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
}
|
||||
else
|
||||
return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_yv12_25(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12_25) == AVS_CS_YV12_25)||((p->pixel_type & AVS_CS_I420_25) == AVS_CS_I420_25); }
|
||||
@@ -1,728 +0,0 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is laid out identically to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is laid out identically to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
unsigned char * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is laid out identically to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
int refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is, use avs_copy_value
// and avs_release_value appropriately, as you would if AVS_Value were
// a pointer.

// To maintain source code compatibility with future versions of the
// avisynth_c API, don't use the AVS_Value struct directly. Use the helper
// functions below.
|
||||
|
||||
// AVS_Value is laid out identically to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type;  // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, 'l'ong,
             // or, for some functions, 'e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
INT64 integer64; // match addition of __int64 to avxplugin.h
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// An AVS_Value should be initialized with avs_void.
// It should also be set back to avs_void after the value is released
// with avs_release_value. Consider it the equivalent of setting
// a pointer to NULL.
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// Only use these functions on an AVS_Value that does not already have
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
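
// A minimal usage sketch of the fat-pointer discipline described above.
// This is illustrative only; the function and variable names are made up.
static void avs_value_usage_sketch(void)
{
    AVS_Value s    = avs_new_value_string("hello"); // build a value on the stack
    AVS_Value copy = avs_void;                      // start from avs_void, like a NULL pointer

    avs_copy_value(&copy, s);                       // take a copy, as you would copy a pointer

    if (avs_is_string(copy) && avs_as_string(copy)) {
        // use the string here
    }

    avs_release_value(copy);                        // release every copy you made...
    copy = avs_void;                                // ...and reset it to avs_void afterwards
    avs_release_value(s);
}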
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
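
// A minimal sketch of pulling one frame from a clip with the functions
// declared above; the clip and frame number are assumed to come from
// elsewhere, and the function name is illustrative only.
static void avs_read_frame_sketch(AVS_Clip * clip, int n)
{
    AVS_VideoFrame * frame = avs_get_frame(clip, n);
    const unsigned char * src;
    int pitch, width, height, x, y;

    if (!frame)
        return;

    src    = avs_get_read_ptr_p(frame, AVS_PLANAR_Y);
    pitch  = avs_get_pitch_p(frame, AVS_PLANAR_Y);
    width  = avs_get_row_size_p(frame, AVS_PLANAR_Y); // bytes per row, not the pitch
    height = avs_get_height_p(frame, AVS_PLANAR_Y);

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            unsigned char luma = src[x]; // read-only access to the luma plane
            (void)luma;
        }
        src += pitch;                    // advance by the pitch, never by the row size
    }

    avs_release_video_frame(frame);      // every returned frame must be released
}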
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set whenever there is an error to report.
// It is cleared before any of the above methods are called.
|
||||
const char * error;
|
||||
// this is for storing whatever you want; it may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter.
// fi is set to point to the AVS_FilterInfo so that you can
// modify it once it is initialized.
// store_child should generally be set to true. If it is not
// set, then ALL methods (the function pointers) must be defined.
// If it is set, then you do not need to worry about freeing the child
// clip. (A usage sketch follows the declaration below.)
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
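
// The sketch referenced above: a trivial pass-through filter wired up with
// avs_new_c_filter. The names are illustrative only; a real filter would set
// fi->error on failure and adjust fi->vi as needed.
static AVS_VideoFrame * AVSC_CC sketch_get_frame(AVS_FilterInfo * fi, int n)
{
    return avs_get_frame(fi->child, n);       // simply forward the request to the child clip
}

static AVS_Value AVSC_CC sketch_apply(AVS_ScriptEnvironment * env,
                                      AVS_Value args, void * user_data)
{
    AVS_FilterInfo * fi;
    AVS_Clip * clip = avs_new_c_filter(env, &fi, avs_array_elt(args, 0), 1);
    AVS_Value ret;

    fi->get_frame = sketch_get_frame;         // override only what you need; store_child == 1
                                              // means the child clip is freed for you

    ret = avs_new_value_clip(clip);
    avs_release_clip(clip);                   // the value now holds its own reference
    return ret;
}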
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
// writing an AVS script and without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
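
// A minimal sketch of the copy-on-write pattern using avs_make_writable and
// avs_bit_blt; env, the frame and the source buffer are assumed to come from
// elsewhere, and the function name is illustrative only.
static void avs_fill_luma_sketch(AVS_ScriptEnvironment * env,
                                 AVS_VideoFrame * * pframe,
                                 const unsigned char * src, int src_pitch)
{
    unsigned char * dst;

    avs_make_writable(env, pframe);                      // may replace *pframe with a writable copy
    dst = avs_get_write_ptr_p(*pframe, AVS_PLANAR_Y);
    if (!dst)
        return;                                          // frame could not be made writable

    avs_bit_blt(env, dst,
                avs_get_pitch_p(*pframe, AVS_PLANAR_Y),  // destination pointer and pitch
                src, src_pitch,                          // source pointer and pitch
                avs_get_row_size_p(*pframe, AVS_PLANAR_Y),
                avs_get_height_p(*pframe, AVS_PLANAR_Y));
}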
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
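
// A sketch of a matching entry point. The registered name "SketchFilter" and
// its parameter string are made up for the example; "c" declares a single
// clip argument, and sketch_apply is the illustrative callback shown earlier.
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment * env)
{
    avs_add_function(env, "SketchFilter", "c", sketch_apply, 0);
    return "SketchFilter sample plugin";
}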
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
||||
@@ -1,85 +0,0 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
||||
@@ -1,77 +0,0 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) ((a << 24) | (b << 16) | (c << 8) | (d))
|
||||
#define MAKEWORD(a,b) ((a << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
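// e.g. InterlockedCompareExchange(&v, newval, oldval) becomes
//      __sync_val_compare_and_swap(&v, oldval, newval)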
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
||||
@@ -1,35 +0,0 @@
|
||||
/*
|
||||
* Work around broken floating point limits on some systems.
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include_next <float.h>
|
||||
|
||||
#ifdef FLT_MAX
|
||||
#undef FLT_MAX
|
||||
#define FLT_MAX 3.40282346638528859812e+38F
|
||||
|
||||
#undef FLT_MIN
|
||||
#define FLT_MIN 1.17549435082228750797e-38F
|
||||
|
||||
#undef DBL_MAX
|
||||
#define DBL_MAX ((double)1.79769313486231570815e+308L)
|
||||
|
||||
#undef DBL_MIN
|
||||
#define DBL_MIN ((double)2.22507385850720138309e-308L)
|
||||
#endif
|
||||
@@ -1,84 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file was copied from the following newsgroup posting:
|
||||
*
|
||||
* Newsgroups: mod.std.unix
|
||||
* Subject: public domain AT&T getopt source
|
||||
* Date: 3 Nov 85 19:34:15 GMT
|
||||
*
|
||||
* Here's something you've all been waiting for: the AT&T public domain
|
||||
* source for getopt(3). It is the code which was given out at the 1985
|
||||
* UNIFORUM conference in Dallas. I obtained it by electronic mail
|
||||
* directly from AT&T. The people there assure me that it is indeed
|
||||
* in the public domain.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
|
||||
static int opterr = 1;
|
||||
static int optind = 1;
|
||||
static int optopt;
|
||||
static char *optarg;
|
||||
|
||||
static int getopt(int argc, char *argv[], char *opts)
|
||||
{
|
||||
static int sp = 1;
|
||||
int c;
|
||||
char *cp;
|
||||
|
||||
if (sp == 1) {
|
||||
if (optind >= argc ||
|
||||
argv[optind][0] != '-' || argv[optind][1] == '\0')
|
||||
return EOF;
|
||||
else if (!strcmp(argv[optind], "--")) {
|
||||
optind++;
|
||||
return EOF;
|
||||
}
|
||||
}
|
||||
optopt = c = argv[optind][sp];
|
||||
if (c == ':' || !(cp = strchr(opts, c))) {
|
||||
fprintf(stderr, ": illegal option -- %c\n", c);
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
optind++;
|
||||
sp = 1;
|
||||
}
|
||||
return '?';
|
||||
}
|
||||
if (*++cp == ':') {
|
||||
if (argv[optind][sp+1] != '\0')
|
||||
optarg = &argv[optind++][sp+1];
|
||||
else if(++optind >= argc) {
|
||||
fprintf(stderr, ": option requires an argument -- %c\n", c);
|
||||
sp = 1;
|
||||
return '?';
|
||||
} else
|
||||
optarg = argv[optind++];
|
||||
sp = 1;
|
||||
} else {
|
||||
if (argv[optind][++sp] == '\0') {
|
||||
sp = 1;
|
||||
optind++;
|
||||
}
|
||||
optarg = NULL;
|
||||
}
|
||||
|
||||
return c;
|
||||
}
|
||||
@@ -1,71 +0,0 @@
|
||||
/*
|
||||
* C99-compatible snprintf() and vsnprintf() implementations
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <stdarg.h>
|
||||
#include <limits.h>
|
||||
#include <string.h>
|
||||
|
||||
#include "compat/va_copy.h"
|
||||
#include "libavutil/error.h"
|
||||
|
||||
#if defined(__MINGW32__)
|
||||
#define EOVERFLOW EFBIG
|
||||
#endif
|
||||
|
||||
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...)
|
||||
{
|
||||
va_list ap;
|
||||
int ret;
|
||||
|
||||
va_start(ap, fmt);
|
||||
ret = avpriv_vsnprintf(s, n, fmt, ap);
|
||||
va_end(ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
|
||||
va_list ap)
|
||||
{
|
||||
int ret;
|
||||
va_list ap_copy;
|
||||
|
||||
if (n == 0)
|
||||
return _vscprintf(fmt, ap);
|
||||
else if (n > INT_MAX)
|
||||
return AVERROR(EOVERFLOW);
|
||||
|
||||
/* we use n - 1 here because if the buffer is not big enough, the MS
|
||||
* runtime libraries don't add a terminating zero at the end. MSDN
|
||||
* recommends to provide _snprintf/_vsnprintf() a buffer size that
|
||||
* is one less than the actual buffer, and zero it before calling
|
||||
* _snprintf/_vsnprintf() to workaround this problem.
|
||||
* See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
|
||||
memset(s, 0, n);
|
||||
va_copy(ap_copy, ap);
|
||||
ret = _vsnprintf(s, n - 1, fmt, ap_copy);
|
||||
va_end(ap_copy);
|
||||
if (ret == -1)
|
||||
ret = _vscprintf(fmt, ap);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
* C99-compatible snprintf() and vsnprintf() implementations
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_SNPRINTF_H
|
||||
#define COMPAT_SNPRINTF_H
|
||||
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
|
||||
int avpriv_snprintf(char *s, size_t n, const char *fmt, ...);
|
||||
int avpriv_vsnprintf(char *s, size_t n, const char *fmt, va_list ap);
|
||||
|
||||
#undef snprintf
|
||||
#undef _snprintf
|
||||
#undef vsnprintf
|
||||
#define snprintf avpriv_snprintf
|
||||
#define _snprintf avpriv_snprintf
|
||||
#define vsnprintf avpriv_vsnprintf
|
||||
|
||||
#endif /* COMPAT_SNPRINTF_H */
|
||||
@@ -1,164 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2011 KO Myung-Hun <komh@chollian.net>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* os2threads to pthreads wrapper
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_OS2PTHREADS_H
|
||||
#define AVCODEC_OS2PTHREADS_H
|
||||
|
||||
#define INCL_DOS
|
||||
#include <os2.h>
|
||||
|
||||
#undef __STRICT_ANSI__ /* for _beginthread() */
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef TID pthread_t;
|
||||
typedef void pthread_attr_t;
|
||||
|
||||
typedef HMTX pthread_mutex_t;
|
||||
typedef void pthread_mutexattr_t;
|
||||
|
||||
typedef struct {
|
||||
HEV event_sem;
|
||||
int wait_count;
|
||||
} pthread_cond_t;
|
||||
|
||||
typedef void pthread_condattr_t;
|
||||
|
||||
struct thread_arg {
|
||||
void *(*start_routine)(void *);
|
||||
void *arg;
|
||||
};
|
||||
|
||||
static void thread_entry(void *arg)
|
||||
{
|
||||
struct thread_arg *thread_arg = arg;
|
||||
|
||||
thread_arg->start_routine(thread_arg->arg);
|
||||
|
||||
av_free(thread_arg);
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void*), void *arg)
|
||||
{
|
||||
struct thread_arg *thread_arg;
|
||||
|
||||
thread_arg = av_mallocz(sizeof(struct thread_arg));
|
||||
|
||||
thread_arg->start_routine = start_routine;
|
||||
thread_arg->arg = arg;
|
||||
|
||||
*thread = _beginthread(thread_entry, NULL, 256 * 1024, thread_arg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DosWaitThread((PTID)&thread, DCWW_WAIT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
|
||||
{
|
||||
DosCreateMutexSem(NULL, (PHMTX)mutex, 0, FALSE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_mutex_destroy(pthread_mutex_t *mutex)
|
||||
{
|
||||
DosCloseMutexSem(*(PHMTX)mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_mutex_lock(pthread_mutex_t *mutex)
|
||||
{
|
||||
DosRequestMutexSem(*(PHMTX)mutex, SEM_INDEFINITE_WAIT);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_mutex_unlock(pthread_mutex_t *mutex)
|
||||
{
|
||||
DosReleaseMutexSem(*(PHMTX)mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
|
||||
{
|
||||
DosCreateEventSem(NULL, &cond->event_sem, DCE_POSTONE, FALSE);
|
||||
|
||||
cond->wait_count = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
DosCloseEventSem(cond->event_sem);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
if (cond->wait_count > 0) {
|
||||
DosPostEventSem(cond->event_sem);
|
||||
|
||||
cond->wait_count--;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
while (cond->wait_count > 0) {
|
||||
DosPostEventSem(cond->event_sem);
|
||||
|
||||
cond->wait_count--;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
cond->wait_count++;
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
DosWaitEventSem(cond->event_sem, SEM_INDEFINITE_WAIT);
|
||||
|
||||
pthread_mutex_lock(mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* AVCODEC_OS2PTHREADS_H */
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/bin/sh
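# Minimal head(1) replacement: print the first n lines (10 by default)
# of the given input using sed.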
|
||||
|
||||
n=10
|
||||
|
||||
case "$1" in
|
||||
-n) n=$2; shift 2 ;;
|
||||
-n*) n=${1#-n}; shift ;;
|
||||
esac
|
||||
|
||||
exec sed ${n}q "$@"
|
||||
@@ -1,34 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
int plan9_main(int argc, char **argv);
|
||||
|
||||
#undef main
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
/* The setfcr() function in lib9 is broken, must use asm. */
|
||||
#ifdef __i386
|
||||
short fcr;
|
||||
__asm__ volatile ("fstcw %0 \n"
|
||||
"or $63, %0 \n"
|
||||
"fldcw %0 \n"
|
||||
: "=m"(fcr));
|
||||
#endif
|
||||
|
||||
return plan9_main(argc, argv);
|
||||
}
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/sh
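# Minimal printf(1) replacement: apply the format string in $1 to each
# remaining argument via awk.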
|
||||
exec awk "BEGIN { for (i = 2; i < ARGC; i++) printf \"$1\", ARGV[i] }" "$@"
|
||||
@@ -1,93 +0,0 @@
|
||||
/*
|
||||
* C99-compatible strtod() implementation
|
||||
* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <limits.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/mathematics.h"
|
||||
|
||||
static char *check_nan_suffix(char *s)
|
||||
{
|
||||
char *start = s;
|
||||
|
||||
if (*s++ != '(')
|
||||
return start;
|
||||
|
||||
while ((*s >= 'a' && *s <= 'z') || (*s >= 'A' && *s <= 'Z') ||
|
||||
(*s >= '0' && *s <= '9') || *s == '_')
|
||||
s++;
|
||||
|
||||
return *s == ')' ? s + 1 : start;
|
||||
}
|
||||
|
||||
#undef strtod
|
||||
double strtod(const char *, char **);
|
||||
|
||||
double avpriv_strtod(const char *nptr, char **endptr)
|
||||
{
|
||||
char *end;
|
||||
double res;
|
||||
|
||||
/* Skip leading spaces */
|
||||
while (av_isspace(*nptr))
|
||||
nptr++;
|
||||
|
||||
if (!av_strncasecmp(nptr, "infinity", 8)) {
|
||||
end = nptr + 8;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "inf", 3)) {
|
||||
end = nptr + 3;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "+inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-infinity", 9)) {
|
||||
end = nptr + 9;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "-inf", 4)) {
|
||||
end = nptr + 4;
|
||||
res = -INFINITY;
|
||||
} else if (!av_strncasecmp(nptr, "nan", 3)) {
|
||||
end = check_nan_suffix(nptr + 3);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "+nan", 4) ||
|
||||
!av_strncasecmp(nptr, "-nan", 4)) {
|
||||
end = check_nan_suffix(nptr + 4);
|
||||
res = NAN;
|
||||
} else if (!av_strncasecmp(nptr, "0x", 2) ||
|
||||
!av_strncasecmp(nptr, "-0x", 3) ||
|
||||
!av_strncasecmp(nptr, "+0x", 3)) {
|
||||
/* FIXME this doesn't handle exponents, non-integers (float/double)
|
||||
* and numbers too large for long long */
|
||||
res = strtoll(nptr, &end, 16);
|
||||
} else {
|
||||
res = strtod(nptr, &end);
|
||||
}
|
||||
|
||||
if (endptr)
|
||||
*endptr = end;
|
||||
|
||||
return res;
|
||||
}
|
||||
@@ -1,30 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef FFMPEG_COMPAT_TMS470_MATH_H
|
||||
#define FFMPEG_COMPAT_TMS470_MATH_H
|
||||
|
||||
#include_next <math.h>
|
||||
|
||||
#undef INFINITY
|
||||
#undef NAN
|
||||
|
||||
#define INFINITY (*(const float*)((const unsigned []){ 0x7f800000 }))
|
||||
#define NAN (*(const float*)((const unsigned []){ 0x7fc00000 }))
|
||||
|
||||
#endif /* FFMPEG_COMPAT_TMS470_MATH_H */
|
||||
@@ -1,296 +0,0 @@
|
||||
/*
|
||||
* Copyright (C) 2010-2011 x264 project
|
||||
*
|
||||
* Authors: Steven Walters <kemuri9@gmail.com>
|
||||
* Pegasys Inc. <http://www.pegasys-inc.com>
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* w32threads to pthreads wrapper
|
||||
*/
|
||||
|
||||
#ifndef FFMPEG_COMPAT_W32PTHREADS_H
|
||||
#define FFMPEG_COMPAT_W32PTHREADS_H
|
||||
|
||||
/* Build up a pthread-like API using underlying Windows API. Have only static
|
||||
* methods so as to not conflict with a potentially linked in pthread-win32
|
||||
* library.
|
||||
* As most functions here are used without checking return values,
|
||||
* only implement return values as necessary. */
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
void *(*func)(void* arg);
|
||||
void *arg;
|
||||
void *ret;
|
||||
} pthread_t;
|
||||
|
||||
/* the conditional variable api for windows 6.0+ uses critical sections and
|
||||
* not mutexes */
|
||||
typedef CRITICAL_SECTION pthread_mutex_t;
|
||||
|
||||
/* This is the CONDITIONAL_VARIABLE typedef for using Window's native
|
||||
* conditional variables on kernels 6.0+.
|
||||
* MinGW does not currently have this typedef. */
|
||||
typedef struct pthread_cond_t {
|
||||
void *ptr;
|
||||
} pthread_cond_t;
|
||||
|
||||
/* function pointers to conditional variable API on windows 6.0+ kernels */
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
static void (WINAPI *cond_broadcast)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_init)(pthread_cond_t *cond);
|
||||
static void (WINAPI *cond_signal)(pthread_cond_t *cond);
|
||||
static BOOL (WINAPI *cond_wait)(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
DWORD milliseconds);
|
||||
#else
|
||||
#define cond_init InitializeConditionVariable
|
||||
#define cond_broadcast WakeAllConditionVariable
|
||||
#define cond_signal WakeConditionVariable
|
||||
#define cond_wait SleepConditionVariableCS
|
||||
|
||||
#define CreateEvent(a, reset, init, name) \
|
||||
CreateEventEx(a, name, \
|
||||
(reset ? CREATE_EVENT_MANUAL_RESET : 0) | \
|
||||
(init ? CREATE_EVENT_INITIAL_SET : 0), \
|
||||
EVENT_ALL_ACCESS)
|
||||
// CreateSemaphoreExA seems to be desktop-only, but as long as we don't
|
||||
// use named semaphores, it doesn't matter if we use the W version.
|
||||
#define CreateSemaphore(a, b, c, d) \
|
||||
CreateSemaphoreExW(a, b, c, d, 0, SEMAPHORE_ALL_ACCESS)
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
#endif
|
||||
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = arg;
|
||||
h->ret = h->func(h->arg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
{
|
||||
thread->func = start_routine;
|
||||
thread->arg = arg;
|
||||
thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
|
||||
0, NULL);
|
||||
return !thread->handle;
|
||||
}
|
||||
|
||||
static av_unused void pthread_join(pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
|
||||
if (ret != WAIT_OBJECT_0)
|
||||
return;
|
||||
if (value_ptr)
|
||||
*value_ptr = thread.ret;
|
||||
CloseHandle(thread.handle);
|
||||
}
|
||||
|
||||
static inline int pthread_mutex_init(pthread_mutex_t *m, void* attr)
|
||||
{
|
||||
InitializeCriticalSection(m);
|
||||
return 0;
|
||||
}
|
||||
static inline int pthread_mutex_destroy(pthread_mutex_t *m)
|
||||
{
|
||||
DeleteCriticalSection(m);
|
||||
return 0;
|
||||
}
|
||||
static inline int pthread_mutex_lock(pthread_mutex_t *m)
|
||||
{
|
||||
EnterCriticalSection(m);
|
||||
return 0;
|
||||
}
|
||||
static inline int pthread_mutex_unlock(pthread_mutex_t *m)
|
||||
{
|
||||
LeaveCriticalSection(m);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* for pre-Windows 6.0 platforms we need to define and use our own condition
|
||||
* variable and api */
|
||||
typedef struct win32_cond_t {
|
||||
pthread_mutex_t mtx_broadcast;
|
||||
pthread_mutex_t mtx_waiter_count;
|
||||
volatile int waiter_count;
|
||||
HANDLE semaphore;
|
||||
HANDLE waiters_done;
|
||||
volatile int is_broadcast;
|
||||
} win32_cond_t;
|
||||
|
||||
static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_attr)
|
||||
{
|
||||
win32_cond_t *win32_cond = NULL;
|
||||
if (cond_init) {
|
||||
cond_init(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* non native condition variables */
|
||||
win32_cond = av_mallocz(sizeof(win32_cond_t));
|
||||
if (!win32_cond)
|
||||
return ENOMEM;
|
||||
cond->ptr = win32_cond;
|
||||
win32_cond->semaphore = CreateSemaphore(NULL, 0, 0x7fffffff, NULL);
|
||||
if (!win32_cond->semaphore)
|
||||
return ENOMEM;
|
||||
win32_cond->waiters_done = CreateEvent(NULL, TRUE, FALSE, NULL);
|
||||
if (!win32_cond->waiters_done)
|
||||
return ENOMEM;
|
||||
|
||||
pthread_mutex_init(&win32_cond->mtx_waiter_count, NULL);
|
||||
pthread_mutex_init(&win32_cond->mtx_broadcast, NULL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_destroy(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
/* native condition variables do not destroy */
|
||||
if (cond_init)
|
||||
return;
|
||||
|
||||
/* non native condition variables */
|
||||
CloseHandle(win32_cond->semaphore);
|
||||
CloseHandle(win32_cond->waiters_done);
|
||||
pthread_mutex_destroy(&win32_cond->mtx_waiter_count);
|
||||
pthread_mutex_destroy(&win32_cond->mtx_broadcast);
|
||||
av_freep(&win32_cond);
|
||||
cond->ptr = NULL;
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
|
||||
if (cond_broadcast) {
|
||||
cond_broadcast(cond);
|
||||
return;
|
||||
}
|
||||
|
||||
/* non native condition variables */
|
||||
pthread_mutex_lock(&win32_cond->mtx_broadcast);
|
||||
pthread_mutex_lock(&win32_cond->mtx_waiter_count);
|
||||
have_waiter = 0;
|
||||
|
||||
if (win32_cond->waiter_count) {
|
||||
win32_cond->is_broadcast = 1;
|
||||
have_waiter = 1;
|
||||
}
|
||||
|
||||
if (have_waiter) {
|
||||
ReleaseSemaphore(win32_cond->semaphore, win32_cond->waiter_count, NULL);
|
||||
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
|
||||
WaitForSingleObject(win32_cond->waiters_done, INFINITE);
|
||||
ResetEvent(win32_cond->waiters_done);
|
||||
win32_cond->is_broadcast = 0;
|
||||
} else
|
||||
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
|
||||
static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int last_waiter;
|
||||
if (cond_wait) {
|
||||
cond_wait(cond, mutex, INFINITE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* non native condition variables */
|
||||
pthread_mutex_lock(&win32_cond->mtx_broadcast);
|
||||
pthread_mutex_lock(&win32_cond->mtx_waiter_count);
|
||||
win32_cond->waiter_count++;
|
||||
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
|
||||
// unlock the external mutex
|
||||
pthread_mutex_unlock(mutex);
|
||||
WaitForSingleObject(win32_cond->semaphore, INFINITE);
|
||||
|
||||
pthread_mutex_lock(&win32_cond->mtx_waiter_count);
|
||||
win32_cond->waiter_count--;
|
||||
last_waiter = !win32_cond->waiter_count || !win32_cond->is_broadcast;
|
||||
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
|
||||
|
||||
if (last_waiter)
|
||||
SetEvent(win32_cond->waiters_done);
|
||||
|
||||
// lock the external mutex
|
||||
return pthread_mutex_lock(mutex);
|
||||
}
|
||||
|
||||
static av_unused void pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
win32_cond_t *win32_cond = cond->ptr;
|
||||
int have_waiter;
|
||||
if (cond_signal) {
|
||||
cond_signal(cond);
|
||||
return;
|
||||
}
|
||||
|
||||
pthread_mutex_lock(&win32_cond->mtx_broadcast);
|
||||
|
||||
/* non-native condition variables */
|
||||
pthread_mutex_lock(&win32_cond->mtx_waiter_count);
|
||||
have_waiter = win32_cond->waiter_count;
|
||||
pthread_mutex_unlock(&win32_cond->mtx_waiter_count);
|
||||
|
||||
if (have_waiter) {
|
||||
ReleaseSemaphore(win32_cond->semaphore, 1, NULL);
|
||||
WaitForSingleObject(win32_cond->waiters_done, INFINITE);
|
||||
ResetEvent(win32_cond->waiters_done);
|
||||
}
|
||||
|
||||
pthread_mutex_unlock(&win32_cond->mtx_broadcast);
|
||||
}
|
||||
|
||||
static av_unused void w32thread_init(void)
|
||||
{
|
||||
#if _WIN32_WINNT < 0x0600
|
||||
HANDLE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
|
||||
/* if one is available, then they should all be available */
|
||||
cond_init =
|
||||
(void*)GetProcAddress(kernel_dll, "InitializeConditionVariable");
|
||||
cond_broadcast =
|
||||
(void*)GetProcAddress(kernel_dll, "WakeAllConditionVariable");
|
||||
cond_signal =
|
||||
(void*)GetProcAddress(kernel_dll, "WakeConditionVariable");
|
||||
cond_wait =
|
||||
(void*)GetProcAddress(kernel_dll, "SleepConditionVariableCS");
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
#endif /* FFMPEG_COMPAT_W32PTHREADS_H */
|
||||
@@ -1,132 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright (c) 2013, Derek Buitenhuis
|
||||
#
|
||||
# Permission to use, copy, modify, and/or distribute this software for any
|
||||
# purpose with or without fee is hereby granted, provided that the above
|
||||
# copyright notice and this permission notice appear in all copies.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
# mktemp isn't POSIX, so supply an implementation
|
||||
mktemp() {
|
||||
echo "${2%%XXX*}.${HOSTNAME}.${UID}.$$"
|
||||
}
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: makedef <version_script> <objects>" >&2
|
||||
exit 0
|
||||
fi
|
||||
|
||||
vscript=$1
|
||||
shift
|
||||
|
||||
if [ ! -f "$vscript" ]; then
|
||||
echo "Version script does not exist" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for object in "$@"; do
|
||||
if [ ! -f "$object" ]; then
|
||||
echo "Object does not exist: ${object}" >&2
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# Create a lib temporarily to dump symbols from.
|
||||
# It's just much easier to do it this way
|
||||
libname=$(mktemp -u "library").lib
|
||||
|
||||
trap 'rm -f -- $libname' EXIT
|
||||
|
||||
lib -out:${libname} $@ >/dev/null
|
||||
if [ $? != 0 ]; then
|
||||
echo "Could not create temporary library." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IFS='
|
||||
'
|
||||
|
||||
# Determine if we're building for x86 or x86_64 and
|
||||
# set the symbol prefix accordingly.
|
||||
prefix=""
|
||||
arch=$(dumpbin -headers ${libname} |
|
||||
tr '\t' ' ' |
|
||||
grep '^ \+.\+machine \+(.\+)' |
|
||||
head -1 |
|
||||
sed -e 's/^ \{1,\}.\{1,\} \{1,\}machine \{1,\}(\(...\)).*/\1/')
|
||||
|
||||
if [ "${arch}" = "x86" ]; then
|
||||
prefix="_"
|
||||
else
|
||||
if [ "${arch}" != "ARM" ] && [ "${arch}" != "x64" ]; then
|
||||
echo "Unknown machine type." >&2
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
started=0
|
||||
regex="none"
|
||||
|
||||
for line in $(cat ${vscript} | tr '\t' ' '); do
|
||||
# We only care about global symbols
|
||||
echo "${line}" | grep -q '^ \+global:'
|
||||
if [ $? = 0 ]; then
|
||||
started=1
|
||||
line=$(echo "${line}" | sed -e 's/^ \{1,\}global: *//')
|
||||
else
|
||||
echo "${line}" | grep -q '^ \+local:'
|
||||
if [ $? = 0 ]; then
|
||||
started=0
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ${started} = 0 ]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
# Handle multiple symbols on one line
|
||||
IFS=';'
|
||||
|
||||
# Work around stupid expansion to filenames
|
||||
line=$(echo "${line}" | sed -e 's/\*/.\\+/g')
|
||||
for exp in ${line}; do
|
||||
# Remove leading and trailing whitespace
|
||||
exp=$(echo "${exp}" | sed -e 's/^ *//' -e 's/ *$//')
|
||||
|
||||
if [ "${regex}" = "none" ]; then
|
||||
regex="${exp}"
|
||||
else
|
||||
regex="${regex};${exp}"
|
||||
fi
|
||||
done
|
||||
|
||||
IFS='
|
||||
'
|
||||
done
|
||||
|
||||
dump=$(dumpbin -linkermember:1 ${libname})
|
||||
|
||||
rm ${libname}
|
||||
|
||||
IFS=';'
|
||||
list=""
|
||||
for exp in ${regex}; do
|
||||
list="${list}"'
|
||||
'$(echo "${dump}" |
|
||||
sed -e '/public symbols/,$!d' -e '/^ \{1,\}Summary/,$d' -e "s/ \{1,\}${prefix}/ /" -e 's/ \{1,\}/ /g' |
|
||||
tail -n +2 |
|
||||
cut -d' ' -f3 |
|
||||
grep "^${exp}" |
|
||||
sed -e 's/^/ /')
|
||||
done
|
||||
|
||||
echo "EXPORTS"
|
||||
echo "${list}" | sort | uniq | tail -n +2
|
||||
2202  doc/APIchanges (diff suppressed: file too large)
182   doc/Makefile
@@ -1,182 +0,0 @@
|
||||
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
|
||||
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
|
||||
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
|
||||
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
|
||||
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
|
||||
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
|
||||
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
|
||||
|
||||
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
|
||||
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
|
||||
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
|
||||
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
|
||||
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
|
||||
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
|
||||
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
|
||||
|
||||
MANPAGES1 = $(AVPROGS-yes:%=doc/%.1) $(AVPROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
|
||||
MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
|
||||
MANPAGES = $(MANPAGES1) $(MANPAGES3)
|
||||
PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
doc/general.html \
|
||||
doc/git-howto.html \
|
||||
doc/nut.html \
|
||||
doc/platform.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
|
||||
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
|
||||
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
|
||||
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
|
||||
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
|
||||
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
|
||||
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
|
||||
DOC_EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
|
||||
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)
|
||||
|
||||
DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
ALL_DOC_EXAMPLES := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
ALL_DOC_EXAMPLES_G := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
|
||||
PROGS += $(DOC_EXAMPLES)
|
||||
|
||||
all-$(CONFIG_DOC): doc
|
||||
|
||||
doc: documentation
|
||||
|
||||
apidoc: doc/doxy/html
|
||||
documentation: $(DOCS)
|
||||
|
||||
examples: $(DOC_EXAMPLES)
|
||||
|
||||
TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d)
|
||||
|
||||
doc/%.txt: TAG = TXT
|
||||
doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
|
||||
GENTEXI = format codec
|
||||
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
|
||||
|
||||
$(GENTEXI): TAG = GENTEXI
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%-all.html: TAG = HTML
|
||||
|
||||
ifdef HAVE_MAKEINFO_HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
|
||||
else
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
endif
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@
|
||||
|
||||
doc/%-all.pod: TAG = POD
|
||||
doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
|
||||
OBJDIRS += doc/examples
|
||||
|
||||
DOXY_INPUT = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))
|
||||
|
||||
doc/doxy/html: TAG = DOXY
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXYGEN) $(DOXY_INPUT)
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
install-html:
|
||||
|
||||
install-man:
|
||||
|
||||
ifdef CONFIG_HTMLPAGES
|
||||
install-progs-$(CONFIG_DOC): install-html
|
||||
|
||||
install-html: $(HTMLPAGES)
|
||||
$(Q)mkdir -p "$(DOCDIR)"
|
||||
$(INSTALL) -m 644 $(HTMLPAGES) "$(DOCDIR)"
|
||||
endif
|
||||
|
||||
ifdef CONFIG_MANPAGES
|
||||
install-progs-$(CONFIG_DOC): install-man
|
||||
|
||||
install-man: $(MANPAGES)
|
||||
$(Q)mkdir -p "$(MANDIR)/man1"
|
||||
$(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1"
|
||||
$(Q)mkdir -p "$(MANDIR)/man3"
|
||||
$(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3"
|
||||
endif
|
||||
|
||||
uninstall: uninstall-doc
|
||||
|
||||
uninstall-doc: uninstall-html uninstall-man
|
||||
|
||||
uninstall-html:
|
||||
$(RM) -r "$(DOCDIR)"
|
||||
|
||||
uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(AVPROGS-yes:%=%.1) $(AVPROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
|
||||
$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
|
||||
|
||||
clean:: docclean
|
||||
|
||||
distclean:: docclean
|
||||
$(RM) doc/config.texi
|
||||
|
||||
examplesclean:
|
||||
$(RM) $(ALL_DOC_EXAMPLES) $(ALL_DOC_EXAMPLES_G)
|
||||
$(RM) $(CLEANSUFFIXES:%=doc/examples/%)
|
||||
|
||||
docclean: examplesclean
|
||||
$(RM) $(CLEANSUFFIXES:%=doc/%)
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi
|
||||
$(RM) -r doc/doxy/html
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
.PHONY: apidoc doc documentation
|
||||
doc/TODO | 90 (new file)
@@ -0,0 +1,90 @@
|
||||
ffmpeg TODO list:
|
||||
----------------
|
||||
|
||||
Fabrice's TODO list: (unordered)
|
||||
-------------------
|
||||
Short term:
|
||||
|
||||
- use AVFMTCTX_DISCARD_PKT in ffplay so that DV has a chance to work
|
||||
- add RTSP regression test (both client and server)
|
||||
- make ffserver allocate AVFormatContext
|
||||
- clean up (incompatible change, for 0.5.0):
|
||||
* AVStream -> AVComponent
|
||||
* AVFormatContext -> AVInputStream/AVOutputStream
|
||||
* suppress rate_emu from AVCodecContext
|
||||
- add new float/integer audio filtering and conversion: suppress
|
||||
CODEC_ID_PCM_xxc and use CODEC_ID_RAWAUDIO.
|
||||
- fix telecine and frame rate conversion
|
||||
|
||||
Long term (ask me if you want to help):
|
||||
|
||||
- commit new imgconvert API and new PIX_FMT_xxx alpha formats
|
||||
- commit new LGPL'ed float and integer-only AC3 decoder
|
||||
- add WMA integer-only decoder
|
||||
- add new MPEG4-AAC audio decoder (both integer-only and float version)
|
||||
|
||||
Michael's TODO list: (unordered) (if anyone wants to help with something, just ask)
|
||||
-------------------
|
||||
- optimize H264 CABAC
|
||||
- more optimizations
|
||||
- simpler rate control
|
||||
|
||||
Francois' TODO list: (unordered, without any timeframe)
|
||||
-------------------
|
||||
- test MACE decoder against the openquicktime one as suggested by A'rpi
|
||||
- BeOS audio input grabbing backend
|
||||
- BeOS video input grabbing backend
|
||||
- publish my BeOS libposix on BeBits so I can officially support ffserver :)
|
||||
- check the whole code for thread-safety (global and init stuff)
|
||||
|
||||
Philip's TODO list: (alphabetically ordered) (please help)
|
||||
------------------
|
||||
- Add a multi-ffm filetype so that feeds can be recorded into multiple files rather
|
||||
than one big file.
|
||||
- Authenticated users support -- where the authentication is in the URL
|
||||
- Change ASF files so that the embedded timestamp in the frames is right rather
|
||||
than being an offset from the start of the stream
|
||||
- Make ffm files more resilient to changes in the codec structures so that you
|
||||
can play old ffm files.
|
||||
|
||||
Baptiste's TODO list:
|
||||
-----------------
|
||||
- mov edit list support (AVEditList)
|
||||
- YUV 10 bit per component support "2vuy"
|
||||
- mxf muxer
|
||||
- mpeg2 non linear quantizer
|
||||
|
||||
unassigned TODO: (unordered)
|
||||
---------------
|
||||
- use AVFrame for audio codecs too
|
||||
- rework aviobuf.c buffering strategy and fix url_fskip
|
||||
- generate optimal huffman tables for mjpeg encoding
|
||||
- fix ffserver regression tests
|
||||
- support xvid's motion estimation
- support x264's motion estimation
- support x264's rate control
|
||||
- SNOW: non translational motion compensation
|
||||
- SNOW: more optimal quantization
|
||||
- SNOW: 4x4 block support
|
||||
- SNOW: 1/8 pel motion compensation support
|
||||
- SNOW: iterative motion estimation based on subsampled images
|
||||
- SNOW: try B frames and MCTF and see how their PSNR/bitrate/complexity behaves
|
||||
- SNOW: try to use the wavelet transformed MC-ed reference frame as context for the entropy coder
|
||||
- SNOW: think about/analyze how to make snow use multiple cpus/threads
|
||||
- SNOW: finish spec
|
||||
- FLAC: lossy encoding (viterbi and naive scalar quantization)
|
||||
- libavfilter
|
||||
- JPEG2000 decoder & encoder
|
||||
- MPEG4 GMC encoding support
|
||||
- macroblock-based pixel format (better cache locality, somewhat complex, one paper claimed it is faster for high res)
|
||||
- regression tests for codecs which do not have an encoder (I+P-frame bitstream in svn)
|
||||
- add support for using mplayers video filters to ffmpeg
|
||||
- H264 encoder
|
||||
- per MB ratecontrol (so VCD and such do work better)
|
||||
- write a script which iteratively changes all functions between always_inline and noinline and benchmarks the result to find the best set of inlined functions
|
||||
- convert all the non SIMD asm into small asm vs. C testcases and submit them to the gcc devels so they can improve gcc
|
||||
- generic audio mixing API
|
||||
- extract PES packetizer from PS muxer and use it for new TS muxer
|
||||
- implement automatic AVBitstreamFilter activation
|
||||
- make cabac encoder use bytestream (see http://trac.videolan.org/x264/changeset/?format=diff&new=651)
|
||||
- merge imdct and windowing, the current code does considerable amounts of redundant work
|
||||
@@ -1,11 +0,0 @@
|
||||
@chapter Authors
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
@command{git log} in the FFmpeg source directory, or browsing the
|
||||
online repository at @url{http://source.ffmpeg.org}.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
@file{MAINTAINERS} in the source code tree.
|
||||
doc/avutil.txt | 37 (new file)
@@ -0,0 +1,37 @@
|
||||
AVUtil
|
||||
======
|
||||
libavutil is a small lightweight library of generally useful functions.
|
||||
It is not a library for code needed by both libavcodec and libavformat.
|
||||
|
||||
|
||||
Overview:
|
||||
=========
|
||||
adler32.c adler32 checksum
|
||||
aes.c AES encryption and decryption
|
||||
fifo.c resizeable first in first out buffer
|
||||
intfloat_readwrite.c portable reading and writing of floating point values
|
||||
log.c "printf" with context and level
|
||||
md5.c MD5 Message-Digest Algorithm
|
||||
rational.c code to perform exact calculations with rational numbers
|
||||
tree.c generic AVL tree
|
||||
crc.c generic CRC checksumming code
|
||||
integer.c 128bit integer math
|
||||
lls.c
|
||||
mathematics.c greatest common divisor, integer sqrt, integer log2, ...
|
||||
mem.c memory allocation routines with guaranteed alignment
|
||||
softfloat.c
|
||||
|
||||
Headers:
|
||||
bswap.h big/little/native-endian conversion code
|
||||
x86_cpu.h a few useful macros for unifying x86-64 and x86-32 code
|
||||
avutil.h
|
||||
common.h
|
||||
intreadwrite.h reading and writing of unaligned big/little/native-endian integers
|
||||
|
||||
|
||||
Goals:
|
||||
======
|
||||
* Modular (few interdependencies and the possibility of disabling individual parts during ./configure)
|
||||
* Small (source and object)
|
||||
* Efficient (low CPU and memory usage)
|
||||
* Useful (avoid useless features almost no one needs)
|
||||
@@ -1,144 +0,0 @@
|
||||
@chapter Bitstream Filters
|
||||
@c man begin BITSTREAM FILTERS
|
||||
|
||||
When you configure your FFmpeg build, all the supported bitstream
|
||||
filters are enabled by default. You can list all available ones using
|
||||
the configure option @code{--list-bsfs}.
|
||||
|
||||
You can disable all the bitstream filters using the configure option
|
||||
@code{--disable-bsfs}, and selectively enable any bitstream filter using
|
||||
the option @code{--enable-bsf=BSF}, or you can disable a particular
|
||||
bitstream filter using the option @code{--disable-bsf=BSF}.
|
||||
|
||||
The option @code{-bsfs} of the ff* tools will display the list of
|
||||
all the supported bitstream filters included in your build.
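
For example, to print that list:
@example
ffmpeg -bsfs
@end example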
|
||||
|
||||
Below is a description of the currently available bitstream filters.
|
||||
|
||||
@section aac_adtstoasc
|
||||
|
||||
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
|
||||
bitstream filter.
|
||||
|
||||
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
|
||||
ADTS header and removes the ADTS header.
|
||||
|
||||
This is required for example when copying an AAC stream from a raw
|
||||
ADTS AAC container to a FLV or a MOV/MP4 file.
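
As an illustration (the file names are placeholders), remuxing an ADTS
AAC file into MP4 with this filter could look like:
@example
ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.mp4
@end example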
|
||||
|
||||
@section chomp
|
||||
|
||||
Remove zero padding at the end of a packet.
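
A possible invocation while stream-copying (illustrative file names):
@example
ffmpeg -i INPUT -c copy -bsf chomp OUTPUT.mkv
@end example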
|
||||
|
||||
@section dump_extra
|
||||
|
||||
Add extradata to the beginning of the filtered packets.
|
||||
|
||||
The additional argument specifies which packets should be filtered.
|
||||
It accepts the values:
|
||||
@table @samp
|
||||
@item a
|
||||
add extradata to all key packets, but only if @var{local_header} is
|
||||
set in the @option{flags2} codec context field
|
||||
|
||||
@item k
|
||||
add extradata to all key packets
|
||||
|
||||
@item e
|
||||
add extradata to all packets
|
||||
@end table
|
||||
|
||||
If not specified it is assumed @samp{k}.
|
||||
|
||||
For example the following @command{ffmpeg} command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
generated by the @code{libx264} encoder, but corrects them by adding
|
||||
the header stored in extradata to the key packets:
|
||||
@example
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
@end example
|
||||
|
||||
@section h264_mp4toannexb
|
||||
|
||||
Convert an H.264 bitstream from length prefixed mode to start code
|
||||
prefixed mode (as defined in the Annex B of the ITU-T H.264
|
||||
specification).
|
||||
|
||||
This is required by some streaming formats, typically the MPEG-2
|
||||
transport stream format ("mpegts").
|
||||
|
||||
For example to remux an MP4 file containing an H.264 stream to mpegts
|
||||
format with @command{ffmpeg}, you can use the command:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
@end example
|
||||
|
||||
@section imxdump
|
||||
|
||||
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
|
||||
Pro decoder. This filter only applies to the mpeg2video codec, and is
|
||||
likely not needed for Final Cut Pro 7 and newer with the appropriate
|
||||
@option{-tag:v}.
|
||||
|
||||
For example, to remux 30 MB/sec NTSC IMX to MOV:
|
||||
|
||||
@example
|
||||
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
|
||||
@end example
|
||||
|
||||
@section mjpeg2jpeg
|
||||
|
||||
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
|
||||
|
||||
MJPEG is a video codec wherein each video frame is essentially a
|
||||
JPEG image. The individual frames can be extracted without loss,
|
||||
e.g. by
|
||||
|
||||
@example
|
||||
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
|
||||
@end example
|
||||
|
||||
Unfortunately, these chunks are incomplete JPEG images, because
|
||||
they lack the DHT segment required for decoding. Quoting from
|
||||
@url{http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml}:
|
||||
|
||||
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
|
||||
commented that "MJPEG, or at least the MJPEG in AVIs having the
|
||||
MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* --
|
||||
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
|
||||
and it must use basic Huffman encoding, not arithmetic or
|
||||
progressive. . . . You can indeed extract the MJPEG frames and
|
||||
decode them with a regular JPEG decoder, but you have to prepend
|
||||
the DHT segment to them, or else the decoder won't have any idea
|
||||
how to decompress the data. The exact table necessary is given in
|
||||
the OpenDML spec."
|
||||
|
||||
This bitstream filter patches the header of frames extracted from an MJPEG
|
||||
stream (carrying the AVI1 header ID and lacking a DHT segment) to
|
||||
produce fully qualified JPEG images.
|
||||
|
||||
@example
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
|
||||
exiftran -i -9 frame*.jpg
|
||||
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
@end example
|
||||
|
||||
@section mjpega_dump_header
|
||||
|
||||
@section movsub
|
||||
|
||||
@section mp3_header_decompress
|
||||
|
||||
@section noise
|
||||
|
||||
Damages the contents of packets without damaging the container. Can be
|
||||
used for fuzzing or testing error resilience/concealment.
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise output.mkv
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
@c man end BITSTREAM FILTERS
|
||||
doc/bootstrap.min.css (vendored) | 5 (diff suppressed because one or more lines are too long)

@@ -1,56 +0,0 @@
|
||||
FFmpeg currently uses a custom build system; this text attempts to document
|
||||
some of its obscure features and options.
|
||||
|
||||
Makefile variables:
|
||||
|
||||
V
|
||||
Disable the default terse mode; the full command issued by make and its
|
||||
output will be shown on the screen.
|
||||
|
||||
DESTDIR
|
||||
Destination directory for the install targets, useful to prepare packages
|
||||
or install FFmpeg in cross-environments.
|
||||
|
||||
Makefile targets:
|
||||
|
||||
all
|
||||
Default target, builds all the libraries and the executables.
|
||||
|
||||
fate
|
||||
Run the fate test suite; note that you must have installed it
|
||||
|
||||
fate-list
|
||||
List all fate/regression test targets
|
||||
|
||||
install
|
||||
Install headers, libraries and programs.
|
||||
|
||||
examples
|
||||
Build all examples located in doc/examples.
|
||||
|
||||
libavformat/output-example
|
||||
Build the libavformat basic example.
|
||||
|
||||
libavcodec/api-example
|
||||
Build the libavcodec basic example.
|
||||
|
||||
libswscale/swscale-test
|
||||
Build the swscale self-test (useful also as example).
|
||||
|
||||
config
|
||||
Reconfigure the project with current configuration.
|
||||
|
||||
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
Touch all files that otherwise would be built; this is useful to reduce
|
||||
unneeded rebuilding when changing headers, but note you must force rebuilds
|
||||
of files that actually need it by hand then.
|
||||
|
||||
make -j<num>
|
||||
Rebuild with multiple jobs at the same time. This is faster on multiprocessor systems.
|
||||
|
||||
make -k
|
||||
Continue building in case of errors; this is useful for the regression
tests sometimes, but note it will still not run all regression tests.
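
For instance, a typical combination of the commands above might be
(illustrative):

make -j4
make -k fate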
|
||||
|
||||
doc/codecs.texi | 1126 (diff suppressed because it is too large)

@@ -1,236 +0,0 @@
|
||||
@chapter Decoders
|
||||
@c man begin DECODERS
|
||||
|
||||
Decoders are configured elements in FFmpeg which allow the decoding of
|
||||
multimedia streams.
|
||||
|
||||
When you configure your FFmpeg build, all the supported native decoders
|
||||
are enabled by default. Decoders requiring an external library must be enabled
|
||||
manually via the corresponding @code{--enable-lib} option. You can list all
|
||||
available decoders using the configure option @code{--list-decoders}.
|
||||
|
||||
You can disable all the decoders with the configure option
|
||||
@code{--disable-decoders} and selectively enable / disable single decoders
|
||||
with the options @code{--enable-decoder=@var{DECODER}} /
|
||||
@code{--disable-decoder=@var{DECODER}}.
|
||||
|
||||
The option @code{-decoders} of the ff* tools will display the list of
|
||||
enabled decoders.
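
For instance:
@example
ffmpeg -decoders
@end example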
|
||||
|
||||
@c man end DECODERS
|
||||
|
||||
@chapter Video Decoders
|
||||
@c man begin VIDEO DECODERS
|
||||
|
||||
A description of some of the currently available video decoders
|
||||
follows.
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video decoder.
|
||||
|
||||
This decoder decodes rawvideo streams.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item top @var{top_field_first}
|
||||
Specify the assumed field type of the input video.
|
||||
@table @option
|
||||
@item -1
|
||||
the video is assumed to be progressive (default)
|
||||
@item 0
|
||||
bottom-field-first is assumed
|
||||
@item 1
|
||||
top-field-first is assumed
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@c man begin AUDIO DECODERS
|
||||
|
||||
A description of some of the currently available audio decoders
|
||||
follows.
|
||||
|
||||
@section ac3
|
||||
|
||||
AC-3 audio decoder.
|
||||
|
||||
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
|
||||
the undocumented RealAudio 3 (a.k.a. dnet).
|
||||
|
||||
@subsection AC-3 Decoder Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -drc_scale @var{value}
|
||||
Dynamic Range Scale Factor. The factor to apply to dynamic range values
|
||||
from the AC-3 stream. This factor is applied exponentially.
|
||||
There are 3 notable scale factor ranges:
|
||||
@table @option
|
||||
@item drc_scale == 0
|
||||
DRC disabled. Produces full range audio.
|
||||
@item 0 < drc_scale <= 1
|
||||
DRC enabled. Applies a fraction of the stream DRC value.
|
||||
Audio reproduction is between full range and full compression.
|
||||
@item drc_scale > 1
|
||||
DRC enabled. Applies drc_scale asymmetrically.
|
||||
Loud sounds are fully compressed. Soft sounds are enhanced.
|
||||
@end table
|
||||
|
||||
@end table
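
As an illustration (file names are placeholders), decoding an AC-3 track
with dynamic range compression disabled could look like:
@example
ffmpeg -drc_scale 0 -i input.ac3 output.wav
@end example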
|
||||
|
||||
@section ffwavesynth
|
||||
|
||||
Internal wave synthesizer.
|
||||
|
||||
This decoder generates wave patterns according to predefined sequences. Its
|
||||
use is purely internal and the format of the data it accepts is not publicly
|
||||
documented.
|
||||
|
||||
@section libcelt
|
||||
|
||||
libcelt decoder wrapper.
|
||||
|
||||
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
|
||||
Requires the presence of the libcelt headers and library during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libcelt}.
|
||||
|
||||
@section libgsm
|
||||
|
||||
libgsm decoder wrapper.
|
||||
|
||||
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
|
||||
the presence of the libgsm headers and library during configuration. You need
|
||||
to explicitly configure the build with @code{--enable-libgsm}.
|
||||
|
||||
This decoder supports both the ordinary GSM and the Microsoft variant.
|
||||
|
||||
@section libilbc
|
||||
|
||||
libilbc decoder wrapper.
|
||||
|
||||
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
|
||||
audio codec. Requires the presence of the libilbc headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libilbc}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following option is supported by the libilbc wrapper.
|
||||
|
||||
@table @option
|
||||
@item enhance
|
||||
|
||||
Enable the enhancement of the decoded audio when set to 1. The default
|
||||
value is 0 (disabled).
|
||||
|
||||
@end table
|
||||
|
||||
@section libopencore-amrnb
|
||||
|
||||
libopencore-amrnb decoder wrapper.
|
||||
|
||||
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Narrowband audio codec. Using it requires the presence of the
|
||||
libopencore-amrnb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrnb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
|
||||
without this library.
|
||||
|
||||
@section libopencore-amrwb
|
||||
|
||||
libopencore-amrwb decoder wrapper.
|
||||
|
||||
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
|
||||
Wideband audio codec. Using it requires the presence of the
|
||||
libopencore-amrwb headers and library during configuration. You need to
|
||||
explicitly configure the build with @code{--enable-libopencore-amrwb}.
|
||||
|
||||
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
|
||||
without this library.
|
||||
|
||||
@section libopus
|
||||
|
||||
libopus decoder wrapper.
|
||||
|
||||
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
|
||||
Requires the presence of the libopus headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libopus}.
|
||||
|
||||
An FFmpeg native decoder for Opus exists, so users can decode Opus
|
||||
without this library.
|
||||
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTITLES DECODERS
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
|
||||
also be found in VobSub file pairs and in some Matroska files.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item palette
|
||||
Specify the global palette used by the bitmaps. When stored in VobSub, the
|
||||
palette is normally specified in the index file; in Matroska, the palette is
|
||||
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
|
||||
palette is stored in the IFO file, and therefore not available when reading
|
||||
from dumped VOB files.
|
||||
|
||||
The format for this option is a string containing 16 24-bit hexadecimal
|
||||
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
|
||||
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
|
||||
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
|
||||
@end table
|
||||
|
||||
@section libzvbi-teletext
|
||||
|
||||
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
|
||||
subtitles. Requires the presence of the libzvbi headers and library during
|
||||
configuration. You need to explicitly configure the build with
|
||||
@code{--enable-libzvbi}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item txt_page
|
||||
List of teletext page numbers to decode. You may use the special * string to
|
||||
match all pages. Pages that do not match the specified list are dropped.
|
||||
Default value is *.
|
||||
@item txt_chop_top
|
||||
Discards the top teletext line. Default value is 1.
|
||||
@item txt_format
|
||||
Specifies the format of the decoded subtitles. The teletext decoder is capable
|
||||
of decoding the teletext pages to bitmaps or to simple text; you should use
|
||||
"bitmap" for teletext pages, because certain graphics and colors cannot be
|
||||
expressed in simple text. You might use "text" for teletext based subtitles if
|
||||
your application can handle simple text based subtitles. Default value is
|
||||
bitmap.
|
||||
@item txt_left
|
||||
X offset of generated bitmaps, default is 0.
|
||||
@item txt_top
|
||||
Y offset of generated bitmaps, default is 0.
|
||||
@item txt_chop_spaces
|
||||
Chops leading and trailing spaces and removes empty lines from the generated
|
||||
text. This option is useful for teletext based subtitles where empty spaces may
|
||||
be present at the start or at the end of the lines or empty lines may be
|
||||
present between the subtitle lines because of double-sized teletext characters.
|
||||
Default value is 1.
|
||||
@item txt_duration
|
||||
Sets the display duration of the decoded teletext pages or subtitles in
|
||||
milliseconds. Default value is 30000, which is 30 seconds.
|
||||
@item txt_transparent
|
||||
Force transparent background of the generated teletext bitmaps. Default value
|
||||
is 0 which means an opaque (black) background.
|
||||
@end table
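
A possible invocation (an untested sketch; the input name and page number
are illustrative) extracting one teletext page as text subtitles:
@example
ffmpeg -txt_format text -txt_page 888 -i INPUT.ts output.srt
@end example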
|
||||
|
||||
@c man end SUBTITLES DECODERS
|
||||
doc/default.css | 165
@@ -1,165 +0,0 @@
|
||||
a.summary-letter {
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #2D6198;
|
||||
}
|
||||
|
||||
a:visited {
|
||||
color: #884488;
|
||||
}
|
||||
|
||||
#banner {
|
||||
background-color: white;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#banner img {
|
||||
margin-bottom: 1px;
|
||||
margin-top: 5px;
|
||||
}
|
||||
|
||||
#body {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
body {
|
||||
background-color: #313131;
|
||||
margin: 0;
|
||||
text-align: justify;
|
||||
}
|
||||
|
||||
.center {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#container {
|
||||
background-color: white;
|
||||
color: #202020;
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
#footer {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
h1 a, h2 a, h3 a, h4 a {
|
||||
text-decoration: inherit;
|
||||
color: inherit;
|
||||
}
|
||||
|
||||
h1, h2, h3, h4 {
|
||||
padding-left: 0.4em;
|
||||
border-radius: 4px;
|
||||
padding-bottom: 0.25em;
|
||||
padding-top: 0.25em;
|
||||
border: 1px solid #6A996A;
|
||||
}
|
||||
|
||||
h1 {
|
||||
background-color: #7BB37B;
|
||||
color: #151515;
|
||||
font-size: 1.2em;
|
||||
padding-bottom: 0.3em;
|
||||
padding-top: 0.3em;
|
||||
}
|
||||
|
||||
h2 {
|
||||
color: #313131;
|
||||
font-size: 1.0em;
|
||||
background-color: #ABE3AB;
|
||||
}
|
||||
|
||||
h3 {
|
||||
color: #313131;
|
||||
font-size: 0.9em;
|
||||
margin-bottom: -6px;
|
||||
background-color: #BBF3BB;
|
||||
}
|
||||
|
||||
h4 {
|
||||
color: #313131;
|
||||
font-size: 0.8em;
|
||||
margin-bottom: -8px;
|
||||
background-color: #D1FDD1;
|
||||
}
|
||||
|
||||
img {
|
||||
border: 0;
|
||||
}
|
||||
|
||||
#navbar {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-top: 1px solid #5C665C;
|
||||
margin-top: 12px;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
#navbar a, #navbar_secondary a {
|
||||
color: white;
|
||||
padding: 0.3em;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar a:hover, #navbar_secondary a:hover {
|
||||
background-color: #313131;
|
||||
color: white;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
#navbar_secondary {
|
||||
background-color: #738073;
|
||||
border-bottom: 1px solid #5C665C;
|
||||
border-left: 1px solid #5C665C;
|
||||
border-right: 1px solid #5C665C;
|
||||
padding: 0.3em;
|
||||
position: relative;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
p {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
pre {
|
||||
margin-left: 3em;
|
||||
margin-right: 3em;
|
||||
padding: 0.3em;
|
||||
border: 1px solid #bbb;
|
||||
background-color: #f7f7f7;
|
||||
}
|
||||
|
||||
dl dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
#proj_desc {
|
||||
font-size: 1.2em;
|
||||
}
|
||||
|
||||
#repos {
|
||||
margin-left: 1em;
|
||||
margin-right: 1em;
|
||||
border-collapse: collapse;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos th {
|
||||
background-color: #7BB37B;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
|
||||
#repos td {
|
||||
padding: 0.2em;
|
||||
border: solid 1px #6A996A;
|
||||
}
|
||||
@@ -1,435 +0,0 @@
|
||||
@chapter Demuxers
|
||||
@c man begin DEMUXERS
|
||||
|
||||
Demuxers are configured elements in FFmpeg that can read the
|
||||
multimedia streams from a particular type of file.
|
||||
|
||||
When you configure your FFmpeg build, all the supported demuxers
|
||||
are enabled by default. You can list all available ones using the
|
||||
configure option @code{--list-demuxers}.
|
||||
|
||||
You can disable all the demuxers using the configure option
|
||||
@code{--disable-demuxers}, and selectively enable a single demuxer with
|
||||
the option @code{--enable-demuxer=@var{DEMUXER}}, or disable it
|
||||
with the option @code{--disable-demuxer=@var{DEMUXER}}.
|
||||
|
||||
The option @code{-formats} of the ff* tools will display the list of
|
||||
enabled demuxers.
|
||||
|
||||
The description of some of the currently available demuxers follows.
|
||||
|
||||
@section applehttp
|
||||
|
||||
Apple HTTP Live Streaming demuxer.
|
||||
|
||||
This demuxer presents all AVStreams from all variant streams.
|
||||
The id field is set to the bitrate variant index number. By setting
|
||||
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
|
||||
the caller can decide which variant streams to actually receive.
|
||||
The total bitrate of the variant that the stream belongs to is
|
||||
available in a metadata key named "variant_bitrate".
|
||||
|
||||
@section asf
|
||||
|
||||
Advanced Systems Format demuxer.
|
||||
|
||||
This demuxer is used to demux ASF files and MMS network streams.
|
||||
|
||||
@table @option
|
||||
@item -no_resync_search @var{bool}
|
||||
Do not try to resynchronize by looking for a certain optional start code.
|
||||
@end table
|
||||
|
||||
@anchor{concat}
|
||||
@section concat
|
||||
|
||||
Virtual concatenation script demuxer.
|
||||
|
||||
This demuxer reads a list of files and other directives from a text file and
|
||||
demuxes them one after the other, as if all their packets had been muxed
|
||||
together.
|
||||
|
||||
The timestamps in the files are adjusted so that the first file starts at 0
|
||||
and each next file starts where the previous one finishes. Note that it is
|
||||
done globally and may cause gaps if all streams do not have exactly the same
|
||||
length.
|
||||
|
||||
All files must have the same streams (same codecs, same time base, etc.).
|
||||
|
||||
The duration of each file is used to adjust the timestamps of the next file:
|
||||
if the duration is incorrect (because it was computed using the bit-rate or
|
||||
because the file is truncated, for example), it can cause artifacts. The
|
||||
@code{duration} directive can be used to override the duration stored in
|
||||
each file.
|
||||
|
||||
@subsection Syntax
|
||||
|
||||
The script is a text file in extended-ASCII, with one directive per line.
|
||||
Empty lines, leading spaces and lines starting with '#' are ignored. The
|
||||
following directive is recognized:
|
||||
|
||||
@table @option
|
||||
|
||||
@item @code{file @var{path}}
|
||||
Path to a file to read; special characters and spaces must be escaped with
|
||||
backslash or single quotes.
|
||||
|
||||
All subsequent file-related directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
to 1 if it was set to its default -1.
|
||||
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appear exactly as is (no extra space or byte-order-mark) on the very first
|
||||
line of the script.
|
||||
|
||||
@item @code{duration @var{dur}}
|
||||
Duration of the file. This information can be specified from the file;
|
||||
specifying it here may be more efficient or help if the information from the
|
||||
file is not available or accurate.
|
||||
|
||||
If the duration is set for all files, then it is possible to seek in the
|
||||
whole concatenated video.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
All subsequent stream-related directives apply to the last introduced
|
||||
stream.
|
||||
Some stream properties must be set in order to allow identifying the
|
||||
matching streams in the subfiles.
|
||||
If no streams are defined in the script, the streams from the first file are
|
||||
copied.
|
||||
|
||||
@item @code{exact_stream_id @var{id}}
|
||||
Set the id of the stream.
|
||||
If this directive is given, the stream with the corresponding id in the
|
||||
subfiles will be used.
|
||||
This is especially useful for MPEG-PS (VOB) files, where the order of the
|
||||
streams is not reliable.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following option:
|
||||
|
||||
@table @option
|
||||
|
||||
@item safe
|
||||
If set to 1, reject unsafe file paths. A file path is considered safe if it
|
||||
does not contain a protocol specification and is relative and all components
|
||||
only contain characters from the portable character set (letters, digits,
|
||||
period, underscore and hyphen) and have no period at the beginning of a
|
||||
component.
|
||||
|
||||
If set to 0, any file name is accepted.
|
||||
|
||||
The default is -1; it is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
|
||||
Currently, the only conversion is adding the h264_mp4toannexb bitstream
|
||||
filter to H.264 streams in MP4 format. This is necessary in particular if
|
||||
there are resolution changes.
|
||||
|
||||
@end table
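
As an illustration (file names are placeholders), a script
@file{list.ffconcat} containing:
@example
ffconcat version 1.0
file part1.mp4
file part2.mp4
@end example
could be concatenated without re-encoding with:
@example
ffmpeg -f concat -i list.ffconcat -c copy output.mp4
@end example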
|
||||
|
||||
@section flv
|
||||
|
||||
Adobe Flash Video Format demuxer.
|
||||
|
||||
This demuxer is used to demux FLV files and RTMP network streams.
|
||||
|
||||
@table @option
|
||||
@item -flv_metadata @var{bool}
|
||||
Allocate the streams according to the onMetaData array content.
|
||||
@end table
|
||||
|
||||
@section libgme
|
||||
|
||||
The Game Music Emu library is a collection of video game music file emulators.
|
||||
|
||||
See @url{http://code.google.com/p/game-music-emu/} for more information.
|
||||
|
||||
Some files have multiple tracks. The demuxer will pick the first track by
|
||||
default. The @option{track_index} option can be used to select a different
|
||||
track. Track indexes start at 0. The demuxer exports the number of tracks as
|
||||
@var{tracks} meta data entry.
|
||||
|
||||
For very large files, the @option{max_size} option may have to be adjusted.
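
For instance (illustrative), to decode the third track of a multi-track
file:
@example
ffmpeg -track_index 2 -i INPUT output.wav
@end example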
|
||||
|
||||
@section libquvi
|
||||
|
||||
Play media from Internet services using the quvi project.
|
||||
|
||||
The demuxer accepts a @option{format} option to request a specific quality. It
|
||||
is by default set to @var{best}.
|
||||
|
||||
See @url{http://quvi.sourceforge.net/} for more information.
|
||||
|
||||
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
|
||||
enabled.
|
||||
|
||||
@section gif
|
||||
|
||||
Animated GIF demuxer.
|
||||
|
||||
It accepts the following options:
|
||||
|
||||
@table @option
|
||||
@item min_delay
|
||||
Set the minimum valid delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 2.
|
||||
|
||||
@item default_delay
|
||||
Set the default delay between frames in hundredths of seconds.
|
||||
Range is 0 to 6000. Default value is 10.
|
||||
|
||||
@item ignore_loop
|
||||
GIF files can contain information to loop a certain number of times (or
|
||||
infinitely). If @option{ignore_loop} is set to 1, then the loop setting
|
||||
from the input will be ignored and looping will not occur. If set to 0,
|
||||
then looping will occur and will cycle the number of times according to
|
||||
the GIF. Default value is 1.
|
||||
@end table
|
||||
|
||||
For example, with the overlay filter, place an infinitely looping GIF
|
||||
over another video:
|
||||
@example
|
||||
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
|
||||
@end example
|
||||
|
||||
Note that in the above example the shortest option for overlay filter is
|
||||
used to end the output video at the length of the shortest input file,
|
||||
which in this case is @file{input.mp4} as the GIF in this example loops
|
||||
infinitely.
|
||||
|
||||
@section image2
|
||||
|
||||
Image file demuxer.
|
||||
|
||||
This demuxer reads from a list of image files specified by a pattern.
|
||||
The syntax and meaning of the pattern is specified by the
|
||||
option @var{pattern_type}.
|
||||
|
||||
The pattern may contain a suffix which is used to automatically
|
||||
determine the format of the images contained in the files.
|
||||
|
||||
The size, the pixel format, and the format of each image must be the
|
||||
same for all the files in the sequence.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item framerate
|
||||
Set the frame rate for the video stream. It defaults to 25.
|
||||
@item loop
|
||||
If set to 1, loop over the input. Default value is 0.
|
||||
@item pattern_type
|
||||
Select the pattern type used to interpret the provided filename.
|
||||
|
||||
@var{pattern_type} accepts one of the following values.
|
||||
@table @option
|
||||
@item sequence
|
||||
Select a sequence pattern type, used to specify a sequence of files
|
||||
indexed by sequential numbers.
|
||||
|
||||
A sequence pattern may contain the string "%d" or "%0@var{N}d", which
|
||||
specifies the position of the characters representing a sequential
|
||||
number in each filename matched by the pattern. If the form
|
||||
"%d0@var{N}d" is used, the string representing the number in each
|
||||
filename is 0-padded and @var{N} is the total number of 0-padded
|
||||
digits representing the number. The literal character '%' can be
|
||||
specified in the pattern with the string "%%".
|
||||
|
||||
If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
|
||||
the file list specified by the pattern must contain a number
|
||||
inclusively contained between @var{start_number} and
|
||||
@var{start_number}+@var{start_number_range}-1, and all the following
|
||||
numbers must be sequential.
|
||||
|
||||
For example the pattern "img-%03d.bmp" will match a sequence of
|
||||
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
|
||||
@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a
|
||||
sequence of filenames of the form @file{i%m%g-1.jpg},
|
||||
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
|
||||
|
||||
Note that the pattern must not necessarily contain "%d" or
|
||||
"%0@var{N}d", for example to convert a single image file
|
||||
@file{img.jpeg} you can employ the command:
|
||||
@example
|
||||
ffmpeg -i img.jpeg img.png
|
||||
@end example
|
||||
|
||||
@item glob
|
||||
Select a glob wildcard pattern type.
|
||||
|
||||
The pattern is interpreted like a @code{glob()} pattern. This is only
|
||||
selectable if libavformat was compiled with globbing support.
|
||||
|
||||
@item glob_sequence @emph{(deprecated, will be removed)}
|
||||
Select a mixed glob wildcard/sequence pattern.
|
||||
|
||||
If your version of libavformat was compiled with globbing support, and
|
||||
the provided pattern contains at least one glob meta character among
|
||||
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
|
||||
interpreted like a @code{glob()} pattern, otherwise it is interpreted
|
||||
like a sequence pattern.
|
||||
|
||||
All glob special characters @code{%*?[]@{@}} must be prefixed
|
||||
with "%". To escape a literal "%" you shall use "%%".
|
||||
|
||||
For example the pattern @code{foo-%*.jpeg} will match all the
|
||||
filenames prefixed by "foo-" and terminating with ".jpeg", and
|
||||
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
|
||||
"foo-", followed by a sequence of three characters, and terminating
|
||||
with ".jpeg".
|
||||
|
||||
This pattern type is deprecated in favor of @var{glob} and
|
||||
@var{sequence}.
|
||||
@end table
|
||||
|
||||
Default value is @var{glob_sequence}.
|
||||
@item pixel_format
|
||||
Set the pixel format of the images to read. If not specified the pixel
|
||||
format is guessed from the first image file in the sequence.
|
||||
@item start_number
|
||||
Set the index of the file matched by the image file pattern to start
|
||||
to read from. Default value is 0.
|
||||
@item start_number_range
|
||||
Set the index interval range to check when looking for the first image
|
||||
file in the sequence, starting from @var{start_number}. Default value
|
||||
is 5.
|
||||
@item ts_from_file
|
||||
If set to 1, will set frame timestamp to modification time of image file. Note
|
||||
that monotonicity of timestamps is not provided: images go in the same order as
|
||||
without this option. Default value is 0.
|
||||
If set to 2, will set frame timestamp to the modification time of the image file in
|
||||
nanosecond precision.
|
||||
@item video_size
|
||||
Set the video size of the images to read. If not specified the video
|
||||
size is guessed from the first image file in the sequence.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Use @command{ffmpeg} for creating a video from the images in the file
|
||||
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
|
||||
input frame rate of 10 frames per second:
|
||||
@example
|
||||
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
As above, but start by reading from a file with index 100 in the sequence:
|
||||
@example
|
||||
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read images matching the "*.png" glob pattern, that is all the files
|
||||
terminating with the ".png" suffix:
|
||||
@example
|
||||
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section mpegts
|
||||
|
||||
MPEG-2 transport stream demuxer.
|
||||
|
||||
@table @option
|
||||
|
||||
@item fix_teletext_pts
|
||||
Overrides teletext packet PTS and DTS values with the timestamps calculated
|
||||
from the PCR of the first program which the teletext stream is part of and is
|
||||
not discarded. Default value is 1; set this option to 0 if you want your
|
||||
teletext packet PTS and DTS values untouched.
|
||||
@end table
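
For instance (illustrative file names), to stream-copy while leaving the
teletext timestamps untouched:
@example
ffmpeg -fix_teletext_pts 0 -i INPUT.ts -c copy OUTPUT.ts
@end example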
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video demuxer.
|
||||
|
||||
This demuxer allows one to read raw video data. Since there is no header
|
||||
specifying the assumed video parameters, the user must specify them
|
||||
in order to be able to decode the data correctly.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set input video frame rate. Default value is 25.
|
||||
|
||||
@item pixel_format
|
||||
Set the input video pixel format. Default value is @code{yuv420p}.
|
||||
|
||||
@item video_size
|
||||
Set the input video size. This value must be specified explicitly.
|
||||
@end table
|
||||
|
||||
For example to read a rawvideo file @file{input.raw} with
|
||||
@command{ffplay}, assuming a pixel format of @code{rgb24}, a video
|
||||
size of @code{320x240}, and a frame rate of 10 images per second, use
|
||||
the command:
|
||||
@example
|
||||
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
|
||||
@end example
|
||||
|
||||
@section sbg
|
||||
|
||||
SBaGen script demuxer.
|
||||
|
||||
This demuxer reads the script language used by SBaGen
|
||||
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. A SBG
|
||||
script looks like that:
|
||||
@example
|
||||
-SE
|
||||
a: 300-2.5/3 440+4.5/0
|
||||
b: 300-2.5/0 440+4.5/3
|
||||
off: -
|
||||
NOW == a
|
||||
+0:07:00 == b
|
||||
+0:14:00 == a
|
||||
+0:21:00 == b
|
||||
+0:30:00 off
|
||||
@end example
|
||||
|
||||
A SBG script can mix absolute and relative timestamps. If the script uses
|
||||
either only absolute timestamps (including the script start time) or only
|
||||
relative ones, then its layout is fixed, and the conversion is
|
||||
straightforward. On the other hand, if the script mixes both kind of
|
||||
timestamps, then the @var{NOW} reference for relative timestamps will be
|
||||
taken from the current time of day at the time the script is read, and the
|
||||
script layout will be frozen according to that reference. That means that if
|
||||
the script is directly played, the actual times will match the absolute
|
||||
timestamps up to the sound controller's clock accuracy, but if the user
|
||||
somehow pauses the playback or seeks, all times will be shifted accordingly.
|
||||
|
||||
@section tedcaptions
|
||||
|
||||
JSON captions used for @url{http://www.ted.com/, TED Talks}.
|
||||
|
||||
TED does not provide links to the captions, but they can be guessed from the
|
||||
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
|
||||
contains a bookmarklet to expose them.
|
||||
|
||||
This demuxer accepts the following option:
|
||||
@table @option
|
||||
@item start_time
|
||||
Set the start time of the TED talk, in milliseconds. The default is 15000
|
||||
(15s). It is used to sync the captions with the downloadable videos, because
|
||||
they include a 15s intro.
|
||||
@end table
|
||||
|
||||
Example: convert the captions to a format most players understand:
|
||||
@example
|
||||
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
|
||||
@end example
|
||||
|
||||
@c man end DEMUXERS
|
||||
@@ -1,800 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle Developer Documentation
|
||||
@titlepage
|
||||
@center @titlefont{Developer Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Developers Guide
|
||||
|
||||
@section Notes for external developers
|
||||
|
||||
This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in @file{doc/examples} and in the source code to
|
||||
see how the public API is employed.
|
||||
|
||||
You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to @emph{publish any patch you make}. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
|
||||
For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the @file{LICENSE} file in the source tree and
|
||||
consult @url{http://ffmpeg.org/legal.html}.
|
||||
|
||||
@section Contributing
|
||||
|
||||
There are 3 ways by which code gets into FFmpeg.
|
||||
@itemize @bullet
|
||||
@item Submitting Patches to the main developer mailing list
|
||||
see @ref{Submitting patches} for details.
|
||||
@item Directly committing changes to the main tree.
|
||||
@item Committing changes to a git clone, for example on github.com or
|
||||
gitorious.org, and asking us to merge these changes.
|
||||
@end itemize
|
||||
|
||||
Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed, and they should follow the @ref{Coding Rules}.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
|
||||
@anchor{Coding Rules}
|
||||
@section Coding Rules
|
||||
|
||||
@subsection Code formatting conventions
|
||||
|
||||
The following guidelines apply to indentation in files:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
Indent size is 4.
|
||||
|
||||
@item
|
||||
The TAB character is forbidden outside of Makefiles as is any
|
||||
form of trailing whitespace. Commits containing either will be
|
||||
rejected by the git repository.
|
||||
|
||||
@item
|
||||
You should try to limit your code lines to 80 characters; however, do so if
|
||||
and only if this improves readability.
|
||||
@end itemize
|
||||
The presentation is inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
|
||||
@subsection Comments
|
||||
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
can be generated automatically. All nontrivial functions should have a comment
|
||||
above them explaining what the function does, even if it is just one sentence.
|
||||
All structures and their member variables should be documented, too.
|
||||
|
||||
Avoid Qt-style and similar Doxygen syntax with @code{!} in it, i.e. replace
|
||||
@code{//!} with @code{///} and similar. Also @@ syntax should be employed
|
||||
for markup commands, i.e. use @code{@@param} and not @code{\param}.
|
||||
|
||||
@example
|
||||
/**
|
||||
* @@file
|
||||
* MPEG codec.
|
||||
* @@author ...
|
||||
*/
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
*/
|
||||
typedef struct Foobar @{
|
||||
int var1; /**< var1 description */
|
||||
int var2; ///< var2 description
|
||||
/** var3 description */
|
||||
int var3;
|
||||
@} Foobar;
|
||||
|
||||
/**
|
||||
* Summary sentence.
|
||||
* more text ...
|
||||
* ...
|
||||
* @@param my_parameter description of my_parameter
|
||||
* @@return return value description
|
||||
*/
|
||||
int myfunc(int my_parameter)
|
||||
...
|
||||
@end example
|
||||
|
||||
@subsection C language features
|
||||
|
||||
FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
the @samp{inline} keyword;
|
||||
|
||||
@item
|
||||
@samp{//} comments;
|
||||
|
||||
@item
|
||||
designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
|
||||
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};})
|
||||
@end itemize
|
||||
|
||||
These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
|
||||
All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
mixing statements and declarations;
|
||||
|
||||
@item
|
||||
@samp{long long} (use @samp{int64_t} instead);
|
||||
|
||||
@item
|
||||
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
|
||||
|
||||
@item
|
||||
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@subsection Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exceptions to this are type names,
for example structs and enums; they should always be in CamelCase.
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_aac_parse_header}.
|
||||
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
@end itemize
|
||||
|
||||
Furthermore, name space reserved for the system should not be invaded.
|
||||
Identifiers ending in @code{_t} are reserved by
|
||||
@url{http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02, POSIX}.
|
||||
Also avoid names starting with @code{__} or @code{_} followed by an uppercase
|
||||
letter as they are reserved by the C standard. Names starting with @code{_}
|
||||
are reserved at the file level and may not be used for externally visible
|
||||
symbols. If in doubt, just avoid names starting with @code{_} altogether.
|
||||
|
||||
@subsection Miscellaneous conventions
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec;
please use av_log() instead.
|
||||
|
||||
@item
|
||||
Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don't make the code easier to understand.
|
||||
@end itemize
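
For the logging rule above, a minimal sketch (the helper and its message are
made up; @code{av_log()} and the @code{AV_LOG_*} levels are the real libavutil
API):
@example
#include "libavutil/log.h"

/* Report problems through av_log() instead of fprintf(): the first argument
 * is a logging context (may be NULL), the second a level such as
 * AV_LOG_ERROR or AV_LOG_DEBUG. */
static void report_bad_size(void *log_ctx, int size)
@{
    av_log(log_ctx, AV_LOG_ERROR, "invalid packet size %d\n", size);
@}
@end example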
|
||||
|
||||
@subsection Editor configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@example
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end example
|
||||
|
||||
@section Development Policy
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Contributions should be licensed under the
|
||||
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
|
||||
including an "or any later version" clause, or, if you prefer
|
||||
a gift-style license, the
|
||||
@uref{http://opensource.org/licenses/isc-license.txt, ISC} or
|
||||
@uref{http://mit-license.org/, MIT} license.
|
||||
@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
|
||||
an "or any later version" clause is also acceptable, but LGPL is
|
||||
preferred.
|
||||
If you add a new file, give it a proper license header. Do not copy and
paste it from a random place; use an existing file as a template.
|
||||
|
||||
@item
|
||||
You must not commit code which breaks FFmpeg! This means unfinished but
enabled code which breaks compilation, code which compiles but does not
work, or code which breaks the regression tests.
You can commit unfinished work (for testing etc.), but it must be disabled
(@code{#ifdef} etc.) by default so it does not interfere with other
developers' work.
|
||||
|
||||
@item
|
||||
The commit message should have a short first line in the form of
|
||||
a @samp{topic: short description} as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
@item
|
||||
You do not have to over-test things. If it works for you, and you think it
|
||||
should work for others, then commit. If your code has problems
|
||||
(portability, triggers compiler bugs, unusual environment etc) they will be
|
||||
reported and eventually fixed.
|
||||
|
||||
@item
|
||||
Do not commit unrelated changes together, split them into self-contained
|
||||
pieces. Also do not forget that if part B depends on part A, but A does not
|
||||
depend on B, then A can and should be committed first and separate from B.
|
||||
Keeping changes well split into self-contained parts makes reviewing and
|
||||
understanding them on the commit log mailing list easier. This also helps
|
||||
in case of debugging later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
@item
|
||||
Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove functionality from the code. Just improve!
|
||||
|
||||
Note: Redundant code can be removed.
|
||||
|
||||
@item
|
||||
Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
@item
|
||||
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes; such commits will be rejected and removed. Every
developer has their own indentation style and you should not change it. Of
course, if you (re)write something, you can use your own style, even though
we would prefer if the indentation throughout FFmpeg was consistent (many
projects force a given indentation style - we do not). If you really need to
make indentation changes (try to avoid this), separate them strictly from
real changes.

NOTE: If you had to put an if()@{ .. @} over a large (> 5 lines) chunk of
code, then either do NOT change the indentation of the inner part (do not
move it to the right), or do so in a separate commit.
|
||||
|
||||
@item
|
||||
Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
@example
|
||||
area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
@item
|
||||
Make sure the author of the commit is set correctly. (see git commit --author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
@item
|
||||
When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
@item
|
||||
Do NOT commit to code actively maintained by others without permission.
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
time frame (12 hours for build failures and security fixes, 3 days for small
changes, 1 week for big patches), then commit your patch if you think it is
OK. Also note that the maintainer can simply ask for more time to review!
|
||||
|
||||
@item
|
||||
Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
|
||||
are sent there and reviewed by all the other developers. Bugs and possible
|
||||
improvements or general questions regarding commits are discussed there. We
|
||||
expect you to react if problems with your code are uncovered.
|
||||
|
||||
@item
|
||||
Update the documentation if you change behavior or add features. If you are
|
||||
unsure how best to do this, send a patch to ffmpeg-devel, the documentation
|
||||
maintainer(s) will review and commit your stuff.
|
||||
|
||||
@item
|
||||
Try to keep important discussions and requests (also) on the public
|
||||
developer mailing list, so that all developers can benefit from them.
|
||||
|
||||
@item
|
||||
Never write to unallocated memory, never write over the end of arrays, and
always check values read from untrusted sources before using them as an
array index or for other risky operations.
|
||||
|
||||
@item
|
||||
Remember to check if you need to bump versions for the specific libav*
parts (libavutil, libavcodec, libavformat) you are changing. You need
to change the version integer.
Incrementing the first component means no backward compatibility with
previous versions (e.g. removal of a function from the public API).
Incrementing the second component means a backward compatible change
(e.g. addition of a function to the public API or extension of an
existing data structure).
Incrementing the third component means a noteworthy binary compatible
change (e.g. an encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.
A sketch of the corresponding header is shown after this list.
|
||||
|
||||
@item
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
be disabled, not the code changed.
|
||||
Thus each remaining warning points either to a bug or to correct code.
|
||||
If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
@item
|
||||
Make sure that no parts of the codebase that you maintain are missing from the
@file{MAINTAINERS} file. If something that you want to maintain is missing,
add it with your name after it.
If at some point you no longer want to maintain some code, then please help
find a new maintainer and do not forget to update the @file{MAINTAINERS} file.
|
||||
@end enumerate
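
As referenced in the version-bump item above, here is a minimal sketch of the
pattern used in headers such as @file{libavcodec/version.h} (the numbers are
placeholders, not a real release):
@example
#define LIBAVCODEC_VERSION_MAJOR  56   /* bump on API/ABI breaks               */
#define LIBAVCODEC_VERSION_MINOR  13   /* bump on backward compatible additions */
#define LIBAVCODEC_VERSION_MICRO 100   /* noteworthy fixes; starts at 100 in FFmpeg */
@end example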
|
||||
|
||||
We think our rules are not too hard. If you have comments, contact us.
|
||||
|
||||
@anchor{Submitting patches}
|
||||
@section Submitting patches
|
||||
|
||||
First, read the @ref{Coding Rules} above if you have not done so yet, in
particular the rules regarding patch submission.
|
||||
|
||||
When you submit your patch, please use @code{git format-patch} or
|
||||
@code{git send-email}. We cannot read other diffs :-)
|
||||
|
||||
Also please do not submit a patch which contains several unrelated changes.
|
||||
Split it into separate, self-contained pieces. This does not mean splitting
|
||||
file by file. Instead, make the patch as small as possible while still
|
||||
keeping it as a logical unit that contains an individual change, even
|
||||
if it spans multiple files. This makes reviewing your patches much easier
|
||||
for us and greatly increases your chances of getting your patch applied.
|
||||
|
||||
Use the patcheck tool of FFmpeg to check your patch.
|
||||
The tool is located in the tools directory.
|
||||
|
||||
Run the @ref{Regression tests} before submitting a patch in order to verify
|
||||
it does not cause unexpected problems.
|
||||
|
||||
It also helps quite a bit if you tell us what the patch does (for example
'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
and has no lrint()').
|
||||
|
||||
Also, if you send several patches, please send each patch as a separate mail;
do not attach several unrelated patches to the same mail.
|
||||
|
||||
Patches should be posted to the
|
||||
@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
|
||||
mailing list. Use @code{git send-email} when possible since it will properly
|
||||
send patches without requiring extra care. If you cannot, then send patches
|
||||
as base64-encoded attachments, so your patch is not trashed during
|
||||
transmission.
|
||||
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
several iterations. Once your patch is deemed good enough, some developer
|
||||
will pick it up and commit it to the official FFmpeg tree.
|
||||
|
||||
Give us a few days to react. But if some time passes without reaction,
|
||||
send a reminder by email. Your patch should eventually be dealt with.
|
||||
|
||||
|
||||
@section New codecs or formats checklist
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Did you use av_cold for codec initialization and close functions?
|
||||
|
||||
@item
|
||||
Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
AVInputFormat/AVOutputFormat struct? (A sketch follows this checklist.)
|
||||
|
||||
@item
|
||||
Did you bump the minor version number (and reset the micro version
|
||||
number) in @file{libavcodec/version.h} or @file{libavformat/version.h}?
|
||||
|
||||
@item
|
||||
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
|
||||
|
||||
@item
|
||||
Did you add the AVCodecID to @file{avcodec.h}?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
|
||||
list in @file{libavcodec/codec_desc.c}.
|
||||
|
||||
@item
|
||||
If it has a FourCC, did you add it to @file{libavformat/riff.c},
|
||||
even if it is only a decoder?
|
||||
|
||||
@item
|
||||
Did you add a rule to compile the appropriate files in the Makefile?
|
||||
Remember to do this even if you're just adding a format to a file that is
|
||||
already being compiled by some other rule, like a raw demuxer.
|
||||
|
||||
@item
|
||||
Did you add an entry to the table of supported formats or codecs in
|
||||
@file{doc/general.texi}?
|
||||
|
||||
@item
|
||||
Did you add an entry in the Changelog?
|
||||
|
||||
@item
|
||||
If it depends on a parser or a library, did you add that dependency in
|
||||
configure?
|
||||
|
||||
@item
|
||||
Did you @code{git add} the appropriate files before committing?
|
||||
|
||||
@item
|
||||
Did you make sure it compiles standalone, i.e. with
|
||||
@code{configure --disable-everything --enable-decoder=foo}
|
||||
(or @code{--enable-demuxer} or whatever your component is)?
|
||||
@end enumerate
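
As a hedged sketch tying several of the checklist items above together (the
@samp{foo} codec and its callbacks are invented; only the field names and the
@code{av_cold} / @code{NULL_IF_CONFIG_SMALL} macros, from
@file{libavutil/attributes.h} and @file{libavutil/internal.h}, reflect the
real API):
@example
static av_cold int foo_decode_init(AVCodecContext *avctx);
static av_cold int foo_decode_close(AVCodecContext *avctx);
static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt);

AVCodec ff_foo_decoder = @{
    .name      = "foo",
    .long_name = NULL_IF_CONFIG_SMALL("Foo Video (hypothetical)"),
    .type      = AVMEDIA_TYPE_VIDEO,
    .id        = AV_CODEC_ID_FOO,   /* would be added to avcodec.h */
    .init      = foo_decode_init,   /* init/close marked av_cold   */
    .close     = foo_decode_close,
    .decode    = foo_decode_frame,
@};
@end example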
|
||||
|
||||
|
||||
@section Patch submission checklist
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Does @code{make fate} pass with the patch applied?
|
||||
|
||||
@item
|
||||
Was the patch generated with git format-patch or send-email?
|
||||
|
||||
@item
|
||||
Did you sign off your patch? (git commit -s)
|
||||
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
|
||||
of sign off.
|
||||
|
||||
@item
|
||||
Did you provide a clear git commit log message?
|
||||
|
||||
@item
|
||||
Is the patch against the latest FFmpeg git master branch?
|
||||
|
||||
@item
|
||||
Are you subscribed to ffmpeg-devel?
|
||||
(the list is subscribers only due to spam)
|
||||
|
||||
@item
|
||||
Have you checked that the changes are minimal, so that the same cannot be
|
||||
achieved with a smaller patch and/or simpler final code?
|
||||
|
||||
@item
|
||||
If the change is to speed critical code, did you benchmark it?
|
||||
|
||||
@item
|
||||
If you did any benchmarks, did you provide them in the mail?
|
||||
|
||||
@item
|
||||
Have you checked that the patch does not introduce buffer overflows or
|
||||
other security issues?
|
||||
|
||||
@item
|
||||
Did you test your decoder or demuxer against damaged data? If not, see
|
||||
tools/trasher, the noise bitstream filter, and
|
||||
@uref{http://caca.zoy.org/wiki/zzuf, zzuf}. Your decoder or demuxer
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
|
||||
@item
|
||||
Does the patch not mix functional and cosmetic changes?
|
||||
|
||||
@item
|
||||
Did you add tabs or trailing whitespace to the code? Both are forbidden.
|
||||
|
||||
@item
|
||||
Is the patch attached to the email you send?
|
||||
|
||||
@item
|
||||
Is the mime type of the patch correct? It should be text/x-diff or
|
||||
text/x-patch or at least text/plain and not application/octet-stream.
|
||||
|
||||
@item
|
||||
If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
|
||||
@item
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note: please do not attach samples >100k to mails, but rather provide a
URL instead. You can upload samples to ftp://upload.ffmpeg.org
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary of what the patch changes?
|
||||
|
||||
@item
|
||||
Did you provide a verbose explanation of why it changes things the way it does?
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary of the user visible advantages and
|
||||
disadvantages if the patch is applied?
|
||||
|
||||
@item
|
||||
Did you provide an example so we can verify the new feature added by the
|
||||
patch easily?
|
||||
|
||||
@item
|
||||
If you added a new file, did you insert a license header? It should be
|
||||
taken from FFmpeg, not randomly copied and pasted from somewhere else.
|
||||
|
||||
@item
|
||||
You should maintain alphabetical order in alphabetically ordered lists as
|
||||
long as doing so does not break API/ABI compatibility.
|
||||
|
||||
@item
|
||||
Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
@item
|
||||
Consider adding a regression test for your code.
|
||||
|
||||
@item
|
||||
If you added YASM code, please check that things still work with @code{--disable-yasm}.
|
||||
|
||||
@item
|
||||
Make sure you check the return values of functions and return appropriate
error codes. Especially memory allocation functions like @code{av_malloc()}
are notoriously left unchecked, which is a serious problem. A minimal sketch
follows this checklist.
|
||||
|
||||
@item
|
||||
Test your code with valgrind and/or Address Sanitizer to ensure it is free
of leaks, out-of-array accesses, etc.
|
||||
@end enumerate
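
For the return-value item above, a minimal sketch of the expected pattern (the
helper is invented; @code{av_malloc()} and @code{AVERROR(ENOMEM)} are the real
libavutil facilities):
@example
#include <stddef.h>

#include "libavutil/error.h"
#include "libavutil/mem.h"

/* Check every allocation and propagate a proper AVERROR code. */
static int alloc_table(int **table, size_t nb_entries)
@{
    *table = av_malloc(nb_entries * sizeof(**table));
    if (!*table)
        return AVERROR(ENOMEM);
    return 0;
@}
@end example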
|
||||
|
||||
@section Patch review process
|
||||
|
||||
All patches posted to ffmpeg-devel will be reviewed, unless they contain a
clear note that the patch is not for the git master branch.
Reviews and comments will be posted as replies to the patch on the
mailing list. The patch submitter then has to address every comment,
either by resubmitting a changed patch or by discussion. Resubmitted
patches will themselves be reviewed like any other patch. If at some point
a patch passes review with no comments, it is approved; for simple and small
patches this can happen immediately, while large patches will generally
have to be changed and reviewed many times before they are approved.
After a patch is approved it will be committed to the repository.
|
||||
|
||||
We will review all submitted patches, but sometimes we are quite busy so
|
||||
especially for large patches this can take several weeks.
|
||||
|
||||
If you feel that the review process is too slow and you are willing to try to
take over maintainership of the area of code you change, then just clone
git master and maintain the area of code there. We will merge each area from
where it is best maintained.
|
||||
|
||||
When resubmitting patches, please do not make any significant changes
|
||||
not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
|
||||
@anchor{Regression tests}
|
||||
@section Regression tests
|
||||
|
||||
Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
|
||||
Running 'make fate' accomplishes this; please see @url{fate.html} for details.
|
||||
|
||||
[Of course, some patches may change the results of the regression tests. In
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
accordingly].
|
||||
|
||||
@subsection Adding files to the fate-suite dataset
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
specific test, the media has to be included in the fate-suite.
First, please make sure that the sample file is as small as possible while
still testing the respective decoder or demuxer sufficiently. Large files
increase network bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide, in the commit
message or the introductory message of the patch series that you post to
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
|
||||
|
||||
@subsection Visualizing Test Coverage
|
||||
|
||||
The FFmpeg build system allows visualizing the test coverage in an easy
|
||||
manner with the coverage tools @code{gcov}/@code{lcov}. This involves
|
||||
the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Configure to compile with instrumentation enabled:
|
||||
@code{configure --toolchain=gcov}.
|
||||
|
||||
@item
|
||||
Run your test case, either manually or via FATE. This can be either
|
||||
the full FATE regression suite, or any arbitrary invocation of any
|
||||
front-end tool provided by FFmpeg, in any combination.
|
||||
|
||||
@item
|
||||
Run @code{make lcov} to generate coverage data in HTML format.
|
||||
|
||||
@item
|
||||
View @code{lcov/index.html} in your preferred HTML viewer.
|
||||
@end enumerate
|
||||
|
||||
You can use the command @code{make lcov-reset} to reset the coverage
|
||||
measurements. You will need to rerun @code{make lcov} after running a
|
||||
new test.
|
||||
|
||||
@subsection Using Valgrind
|
||||
|
||||
The configure script provides a shortcut for using valgrind to spot bugs
|
||||
related to memory handling. Just add the option
|
||||
@code{--toolchain=valgrind-memcheck} or @code{--toolchain=valgrind-massif}
|
||||
to your configure line, and reasonable defaults will be set for running
|
||||
FATE under the supervision of either the @strong{memcheck} or the
|
||||
@strong{massif} tool of the valgrind suite.
|
||||
|
||||
In case you need finer control over how valgrind is invoked, use the
@code{--target-exec='valgrind <your_custom_valgrind_options>'} option in
your configure line instead.
|
||||
|
||||
@anchor{Release process}
|
||||
@section Release process
|
||||
|
||||
FFmpeg maintains a set of @strong{release branches}, which are the
|
||||
recommended deliverable for system integrators and distributors (such as
|
||||
Linux distributions, etc.). At regular times, a @strong{release
|
||||
manager} prepares, tests and publishes tarballs on the
|
||||
@url{http://ffmpeg.org} website.
|
||||
|
||||
There are two kinds of releases:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
@strong{Major releases} always include the latest and greatest
|
||||
features and functionality.
|
||||
|
||||
@item
|
||||
@strong{Point releases} are cut from @strong{release} branches,
|
||||
which are named @code{release/X}, with @code{X} being the release
|
||||
version number.
|
||||
@end enumerate
|
||||
|
||||
Note that we promise our users that shared libraries from any FFmpeg
release will never break programs that have been @strong{compiled} against
previous versions of @strong{the same release series}, under any
circumstances!
|
||||
|
||||
However, from time to time, we do make API changes that require adaptations
|
||||
in applications. Such changes are only allowed in (new) major releases and
|
||||
require further steps such as bumping library version numbers and/or
|
||||
adjustments to the symbol versioning file. Please discuss such changes
|
||||
on the @strong{ffmpeg-devel} mailing list in time to allow forward planning.
|
||||
|
||||
@anchor{Criteria for Point Releases}
|
||||
@subsection Criteria for Point Releases
|
||||
|
||||
Changes that match the following criteria are valid candidates for
|
||||
inclusion into a point release:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Fixes a security issue, preferably identified by a @strong{CVE
|
||||
number} issued by @url{http://cve.mitre.org/}.
|
||||
|
||||
@item
|
||||
Fixes a documented bug in @url{https://trac.ffmpeg.org}.
|
||||
|
||||
@item
|
||||
Improves the included documentation.
|
||||
|
||||
@item
|
||||
Retains both source code and binary compatibility with previous
|
||||
point releases of the same release branch.
|
||||
@end enumerate
|
||||
|
||||
The order for checking the rules is (1 OR 2 OR 3) AND 4.
|
||||
|
||||
|
||||
@subsection Release Checklist
|
||||
|
||||
The release process involves the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Ensure that the @file{RELEASE} file contains the version number for
|
||||
the upcoming release.
|
||||
|
||||
@item
|
||||
Add the release at @url{https://trac.ffmpeg.org/admin/ticket/versions}.
|
||||
|
||||
@item
|
||||
Announce the intent to do a release to the mailing list.
|
||||
|
||||
@item
|
||||
Make sure all relevant security fixes have been backported. See
|
||||
@url{https://ffmpeg.org/security.html}.
|
||||
|
||||
@item
|
||||
Ensure that the FATE regression suite still passes in the release
|
||||
branch on at least @strong{i386} and @strong{amd64}
|
||||
(cf. @ref{Regression tests}).
|
||||
|
||||
@item
|
||||
Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
supplementary files that contain @code{gpg} signatures.
|
||||
|
||||
@item
|
||||
Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
|
||||
push an annotated tag in the form @code{nX}, with @code{X}
|
||||
containing the version number.
|
||||
|
||||
@item
|
||||
Propose and send a patch to the @strong{ffmpeg-devel} mailing list
|
||||
with a news entry for the website.
|
||||
|
||||
@item
|
||||
Publish the news entry.
|
||||
|
||||
@item
|
||||
Send announcement to the mailing list.
|
||||
@end enumerate
|
||||
|
||||
@bye
|
||||
@@ -1,25 +0,0 @@
|
||||
@chapter Device Options
|
||||
@c man begin DEVICE OPTIONS
|
||||
|
||||
The libavdevice library provides the same interface as
|
||||
libavformat. Namely, an input device is considered like a demuxer, and
|
||||
an output device like a muxer, and the interface and generic device
|
||||
options are the same provided by libavformat (see the ffmpeg-formats
|
||||
manual).
|
||||
|
||||
In addition each input or output device may support so-called private
|
||||
options, which are specific for that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, or by setting the value explicitly in the device
|
||||
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
|
||||
for programmatic use.
|
||||
|
||||
@c man end DEVICE OPTIONS
|
||||
|
||||
@ifclear config-writeonly
|
||||
@include indevs.texi
|
||||
@end ifclear
|
||||
@ifclear config-readonly
|
||||
@include outdevs.texi
|
||||
@end ifclear
|
||||
@@ -1,13 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
SRC_PATH="${1}"
|
||||
DOXYFILE="${2}"
|
||||
DOXYGEN="${3}"
|
||||
|
||||
shift 3
|
||||
|
||||
$DOXYGEN - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
|
||||
EOF
|
||||
2176
doc/encoders.texi
File diff suppressed because it is too large
174
doc/errno.txt
@@ -1,174 +0,0 @@
|
||||
The following table lists most error codes found in various operating
|
||||
systems supported by FFmpeg.
|
||||
|
||||
OS
|
||||
Code Std F LBMWwb Text (YMMV)
|
||||
|
||||
E2BIG POSIX ++++++ Argument list too long
|
||||
EACCES POSIX ++++++ Permission denied
|
||||
EADDRINUSE POSIX +++..+ Address in use
|
||||
EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
|
||||
EADV +..... Advertise error
|
||||
EAFNOSUPPORT POSIX +++..+ Address family not supported
|
||||
EAGAIN POSIX + ++++++ Resource temporarily unavailable
|
||||
EALREADY POSIX +++..+ Operation already in progress
|
||||
EAUTH .++... Authentication error
|
||||
EBADARCH ..+... Bad CPU type in executable
|
||||
EBADE +..... Invalid exchange
|
||||
EBADEXEC ..+... Bad executable
|
||||
EBADF POSIX ++++++ Bad file descriptor
|
||||
EBADFD +..... File descriptor in bad state
|
||||
EBADMACHO ..+... Malformed Macho file
|
||||
EBADMSG POSIX ++4... Bad message
|
||||
EBADR +..... Invalid request descriptor
|
||||
EBADRPC .++... RPC struct is bad
|
||||
EBADRQC +..... Invalid request code
|
||||
EBADSLT +..... Invalid slot
|
||||
EBFONT +..... Bad font file format
|
||||
EBUSY POSIX - ++++++ Device or resource busy
|
||||
ECANCELED POSIX +++... Operation canceled
|
||||
ECHILD POSIX ++++++ No child processes
|
||||
ECHRNG +..... Channel number out of range
|
||||
ECOMM +..... Communication error on send
|
||||
ECONNABORTED POSIX +++..+ Software caused connection abort
|
||||
ECONNREFUSED POSIX - +++ss+ Connection refused
|
||||
ECONNRESET POSIX +++..+ Connection reset
|
||||
EDEADLK POSIX ++++++ Resource deadlock avoided
|
||||
EDEADLOCK +..++. File locking deadlock error
|
||||
EDESTADDRREQ POSIX +++... Destination address required
|
||||
EDEVERR ..+... Device error
|
||||
EDOM C89 - ++++++ Numerical argument out of domain
|
||||
EDOOFUS .F.... Programming error
|
||||
EDOTDOT +..... RFS specific error
|
||||
EDQUOT POSIX +++... Disc quota exceeded
|
||||
EEXIST POSIX ++++++ File exists
|
||||
EFAULT POSIX - ++++++ Bad address
|
||||
EFBIG POSIX - ++++++ File too large
|
||||
EFTYPE .++... Inappropriate file type or format
|
||||
EHOSTDOWN +++... Host is down
|
||||
EHOSTUNREACH POSIX +++..+ No route to host
|
||||
EHWPOISON +..... Memory page has hardware error
|
||||
EIDRM POSIX +++... Identifier removed
|
||||
EILSEQ C99 ++++++ Illegal byte sequence
|
||||
EINPROGRESS POSIX - +++ss+ Operation in progress
|
||||
EINTR POSIX - ++++++ Interrupted system call
|
||||
EINVAL POSIX + ++++++ Invalid argument
|
||||
EIO POSIX + ++++++ I/O error
|
||||
EISCONN POSIX +++..+ Socket is already connected
|
||||
EISDIR POSIX ++++++ Is a directory
|
||||
EISNAM +..... Is a named type file
|
||||
EKEYEXPIRED +..... Key has expired
|
||||
EKEYREJECTED +..... Key was rejected by service
|
||||
EKEYREVOKED +..... Key has been revoked
|
||||
EL2HLT +..... Level 2 halted
|
||||
EL2NSYNC +..... Level 2 not synchronized
|
||||
EL3HLT +..... Level 3 halted
|
||||
EL3RST +..... Level 3 reset
|
||||
ELIBACC +..... Can not access a needed shared library
|
||||
ELIBBAD +..... Accessing a corrupted shared library
|
||||
ELIBEXEC +..... Cannot exec a shared library directly
|
||||
ELIBMAX +..... Too many shared libraries
|
||||
ELIBSCN +..... .lib section in a.out corrupted
|
||||
ELNRNG +..... Link number out of range
|
||||
ELOOP POSIX +++..+ Too many levels of symbolic links
|
||||
EMEDIUMTYPE +..... Wrong medium type
|
||||
EMFILE POSIX ++++++ Too many open files
|
||||
EMLINK POSIX ++++++ Too many links
|
||||
EMSGSIZE POSIX +++..+ Message too long
|
||||
EMULTIHOP POSIX ++4... Multihop attempted
|
||||
ENAMETOOLONG POSIX - ++++++ File name too long
|
||||
ENAVAIL +..... No XENIX semaphores available
|
||||
ENEEDAUTH .++... Need authenticator
|
||||
ENETDOWN POSIX +++..+ Network is down
|
||||
ENETRESET SUSv3 +++..+ Network dropped connection on reset
|
||||
ENETUNREACH POSIX +++..+ Network unreachable
|
||||
ENFILE POSIX ++++++ Too many open files in system
|
||||
ENOANO +..... No anode
|
||||
ENOATTR .++... Attribute not found
|
||||
ENOBUFS POSIX - +++..+ No buffer space available
|
||||
ENOCSI +..... No CSI structure available
|
||||
ENODATA XSR +N4... No message available
|
||||
ENODEV POSIX - ++++++ No such device
|
||||
ENOENT POSIX - ++++++ No such file or directory
|
||||
ENOEXEC POSIX ++++++ Exec format error
|
||||
ENOFILE ...++. No such file or directory
|
||||
ENOKEY +..... Required key not available
|
||||
ENOLCK POSIX ++++++ No locks available
|
||||
ENOLINK POSIX ++4... Link has been severed
|
||||
ENOMEDIUM +..... No medium found
|
||||
ENOMEM POSIX ++++++ Not enough space
|
||||
ENOMSG POSIX +++..+ No message of desired type
|
||||
ENONET +..... Machine is not on the network
|
||||
ENOPKG +..... Package not installed
|
||||
ENOPROTOOPT POSIX +++..+ Protocol not available
|
||||
ENOSPC POSIX ++++++ No space left on device
|
||||
ENOSR XSR +N4... No STREAM resources
|
||||
ENOSTR XSR +N4... Not a STREAM
|
||||
ENOSYS POSIX + ++++++ Function not implemented
|
||||
ENOTBLK +++... Block device required
|
||||
ENOTCONN POSIX +++..+ Socket is not connected
|
||||
ENOTDIR POSIX ++++++ Not a directory
|
||||
ENOTEMPTY POSIX ++++++ Directory not empty
|
||||
ENOTNAM +..... Not a XENIX named type file
|
||||
ENOTRECOVERABLE SUSv4 - +..... State not recoverable
|
||||
ENOTSOCK POSIX +++..+ Socket operation on non-socket
|
||||
ENOTSUP POSIX +++... Operation not supported
|
||||
ENOTTY POSIX ++++++ Inappropriate I/O control operation
|
||||
ENOTUNIQ +..... Name not unique on network
|
||||
ENXIO POSIX ++++++ No such device or address
|
||||
EOPNOTSUPP POSIX +++..+ Operation not supported (on socket)
|
||||
EOVERFLOW POSIX +++..+ Value too large to be stored in data type
|
||||
EOWNERDEAD SUSv4 +..... Owner died
|
||||
EPERM POSIX - ++++++ Operation not permitted
|
||||
EPFNOSUPPORT +++..+ Protocol family not supported
|
||||
EPIPE POSIX - ++++++ Broken pipe
|
||||
EPROCLIM .++... Too many processes
|
||||
EPROCUNAVAIL .++... Bad procedure for program
|
||||
EPROGMISMATCH .++... Program version wrong
|
||||
EPROGUNAVAIL .++... RPC prog. not avail
|
||||
EPROTO POSIX ++4... Protocol error
|
||||
EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported
|
||||
EPROTOTYPE POSIX +++..+ Protocol wrong type for socket
|
||||
EPWROFF ..+... Device power is off
|
||||
ERANGE C89 - ++++++ Result too large
|
||||
EREMCHG +..... Remote address changed
|
||||
EREMOTE +++... Object is remote
|
||||
EREMOTEIO +..... Remote I/O error
|
||||
ERESTART +..... Interrupted system call should be restarted
|
||||
ERFKILL +..... Operation not possible due to RF-kill
|
||||
EROFS POSIX ++++++ Read-only file system
|
||||
ERPCMISMATCH .++... RPC version wrong
|
||||
ESHLIBVERS ..+... Shared library version mismatch
|
||||
ESHUTDOWN +++..+ Cannot send after socket shutdown
|
||||
ESOCKTNOSUPPORT +++... Socket type not supported
|
||||
ESPIPE POSIX ++++++ Illegal seek
|
||||
ESRCH POSIX ++++++ No such process
|
||||
ESRMNT +..... Srmount error
|
||||
ESTALE POSIX +++..+ Stale NFS file handle
|
||||
ESTRPIPE +..... Streams pipe error
|
||||
ETIME XSR +N4... Stream ioctl timeout
|
||||
ETIMEDOUT POSIX - +++ss+ Connection timed out
|
||||
ETOOMANYREFS +++... Too many references: cannot splice
|
||||
ETXTBSY POSIX +++... Text file busy
|
||||
EUCLEAN +..... Structure needs cleaning
|
||||
EUNATCH +..... Protocol driver not attached
|
||||
EUSERS +++... Too many users
|
||||
EWOULDBLOCK POSIX +++..+ Operation would block
|
||||
EXDEV POSIX ++++++ Cross-device link
|
||||
EXFULL +..... Exchange full
|
||||
|
||||
Notations:
|
||||
|
||||
F: used in FFmpeg (-: a few times, +: a lot)
|
||||
|
||||
SUSv3: Single Unix Specification, version 3
|
||||
SUSv4: Single Unix Specification, version 4
|
||||
XSR: XSI STREAMS (obsolete)
|
||||
|
||||
OS: availability on some supported operating systems
|
||||
L: GNU/Linux
|
||||
B: BSD (F: FreeBSD, N: NetBSD)
|
||||
M: MacOS X
|
||||
W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h)
|
||||
w: Mingw32 (3.17) and Mingw64 (2.0.1)
|
||||
b: BeOS
|
||||
@@ -1,43 +0,0 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
|
||||
CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_reading \
|
||||
decoding_encoding \
|
||||
demuxing_decoding \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
||||
@@ -1,23 +0,0 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both of the following use cases rely on pkg-config and make, so make sure
that you have them installed and working on your system.
|
||||
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make"; it will link
to the libraries on your system, assuming that PKG_CONFIG_PATH is
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
|
||||
Assuming you are in the source FFmpeg checkout directory, you need to build
|
||||
FFmpeg (no need to make install in any prefix). Then just run "make examples".
|
||||
This will build the examples using the FFmpeg build system. You can clean those
|
||||
examples using "make examplesclean".
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
||||
@@ -1,134 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/file.h>
|
||||
|
||||
struct buffer_data {
|
||||
uint8_t *ptr;
|
||||
size_t size; ///< size left in the buffer
|
||||
};
|
||||
|
||||
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct buffer_data *bd = (struct buffer_data *)opaque;
|
||||
buf_size = FFMIN(buf_size, bd->size);
|
||||
|
||||
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
|
||||
|
||||
/* copy internal buffer data to buf */
|
||||
memcpy(buf, bd->ptr, buf_size);
|
||||
bd->ptr += buf_size;
|
||||
bd->size -= buf_size;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVIOContext *avio_ctx = NULL;
|
||||
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
|
||||
size_t buffer_size, avio_ctx_buffer_size = 4096;
|
||||
char *input_filename = NULL;
|
||||
int ret = 0;
|
||||
struct buffer_data bd = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "usage: %s input_file\n"
|
||||
"API example program to show how to read from a custom buffer "
|
||||
"accessed through AVIOContext.\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
input_filename = argv[1];
|
||||
|
||||
/* register codecs and formats and other lavf/lavc components*/
|
||||
av_register_all();
|
||||
|
||||
/* slurp file content into buffer */
|
||||
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
/* fill opaque structure used by the AVIOContext read callback */
|
||||
bd.ptr = buffer;
|
||||
bd.size = buffer_size;
|
||||
|
||||
if (!(fmt_ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
|
||||
if (!avio_ctx_buffer) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fmt_ctx->pb = avio_ctx;
|
||||
|
||||
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open input\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avformat_find_stream_info(fmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, input_filename, 0);
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx) {
|
||||
av_freep(&avio_ctx->buffer);
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,664 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* @example decoding_encoding.c
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
float t, tincr;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
if (buffer_size < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
exit(1);
|
||||
}
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode a single tone sound */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for (i = 0; i < 200; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
fclose(f);
|
||||
|
||||
av_freep(&samples);
|
||||
av_frame_free(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio decoding.
|
||||
*/
|
||||
static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int len;
|
||||
FILE *f, *outfile;
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* decode until eof */
|
||||
avpkt.data = inbuf;
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_samples_get_buffer_size(NULL, c->channels,
|
||||
decoded_frame->nb_samples,
|
||||
c->sample_fmt, 1);
|
||||
if (data_size < 0) {
|
||||
/* This should not occur, checking just for paranoia */
|
||||
fprintf(stderr, "Failed to calculate data size\n");
|
||||
exit(1);
|
||||
}
|
||||
fwrite(decoded_frame->data[0], 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
avpkt.dts =
|
||||
avpkt.pts = AV_NOPTS_VALUE;
|
||||
if (avpkt.size < AUDIO_REFILL_THRESH) {
|
||||
/* Refill the input buffer, to avoid trying to decode
|
||||
* incomplete frames. Instead of this, one could also use
|
||||
* a parser, or use a proper container format through
|
||||
* libavformat. */
|
||||
memmove(inbuf, avpkt.data, avpkt.size);
|
||||
avpkt.data = inbuf;
|
||||
len = fread(avpkt.data + avpkt.size, 1,
|
||||
AUDIO_INBUF_SIZE - avpkt.size, f);
|
||||
if (len > 0)
|
||||
avpkt.size += len;
|
||||
}
|
||||
}
|
||||
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
* Video encoding example
|
||||
*/
|
||||
static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* frames per second */
|
||||
c->time_base = (AVRational){1,25};
|
||||
/* emit one intra frame every ten frames
|
||||
* check frame pict_type before passing frame
|
||||
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
|
||||
* then gop_size is ignored and the output of encoder
|
||||
* will always be I frame irrespective to gop_size
|
||||
*/
|
||||
c->gop_size = 10;
|
||||
c->max_b_frames = 1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if (codec_id == AV_CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode 1 second of video */
|
||||
for (i = 0; i < 25; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < c->height/2; y++) {
|
||||
for (x = 0; x < c->width/2; x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Video decoding example
|
||||
*/
|
||||
|
||||
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
char *filename)
|
||||
{
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
|
||||
/* For some codecs, such as msmpeg4 and mpeg4, width and height
|
||||
MUST be initialized there because this information is not
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
for (;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
break;
|
||||
|
||||
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
|
||||
and this is the only method to use them because you cannot
|
||||
know the compressed data size before analysing it.
|
||||
|
||||
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
|
||||
based, so you must call them with all the data for one
|
||||
frame exactly. You must also initialize 'width' and
|
||||
'height' before initializing them. */
|
||||
|
||||
/* NOTE2: some codecs allow the raw parameters (frame size,
|
||||
sample rate) to be changed at any frame. We handle this, so
|
||||
you should also take care of it */
|
||||
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
||||
latency of one frame. You must do the following to have a
|
||||
chance to get the last frame of the video */
|
||||
avpkt.data = NULL;
|
||||
avpkt.size = 0;
|
||||
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
|
||||
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *output_type;
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_type\n"
|
||||
"API example program to decode/encode a media stream with libavcodec.\n"
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
||||
output_type = argv[1];
|
||||
|
||||
if (!strcmp(output_type, "h264")) {
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.sw", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
} else {
|
||||
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
|
||||
output_type);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,386 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
/* The different ways of decoding and managing data memory. You are not
|
||||
* supposed to support all the modes in your application but pick the one most
|
||||
* appropriate to your needs. Look for the use of api_mode in this example to
|
||||
* see what are the differences of API usage between them */
|
||||
enum {
|
||||
API_MODE_OLD = 0, /* old method, deprecated */
|
||||
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
|
||||
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
|
||||
};
|
||||
|
||||
static int api_mode = API_MODE_OLD;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we use the new API with reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the decoders, with or without reference counting */
|
||||
if (api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_dict_set(&opts, "refcounted_frames", "1", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
|
||||
"input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call. If unset, it's using\n"
|
||||
"the classic old method.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5) {
|
||||
const char *mode = argv[1] + strlen("-refcount=");
|
||||
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
|
||||
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
|
||||
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
|
||||
else {
|
||||
fprintf(stderr, "unknow mode '%s'\n", mode);
|
||||
exit(1);
|
||||
}
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* register all formats and codecs */
|
||||
av_register_all();
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* When using the new API, you need to use the libavutil/frame.h API, while
|
||||
* the classic frame management is available in libavcodec */
|
||||
if (api_mode == API_MODE_OLD)
|
||||
frame = avcodec_alloc_frame();
|
||||
else
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avcodec_close(audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
if (api_mode == API_MODE_OLD)
|
||||
avcodec_free_frame(&frame);
|
||||
else
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
||||
@@ -1,185 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
* Copyright (c) 2014 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL;
|
||||
static AVStream *video_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
|
||||
static int video_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
int i;
|
||||
AVFrameSideData *sd;
|
||||
|
||||
video_frame_count++;
|
||||
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
|
||||
if (sd) {
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!video_stream) {
|
||||
fprintf(stderr, "Could not find video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
return ret < 0;
|
||||
}
|
||||
@@ -1,365 +0,0 @@
|
||||
/*
|
||||
* copyright (c) 2013 Andrew Kelley
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavfilter API usage example.
|
||||
*
|
||||
* @example filter_audio.c
|
||||
* This example will generate a sine wave audio,
|
||||
* pass it through a simple filter chain, and then compute the MD5 checksum of
|
||||
* the output data.
|
||||
*
|
||||
* The filter chain it uses is:
|
||||
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
|
||||
*
|
||||
* abuffer: This provides the endpoint where you can feed the decoded samples.
|
||||
* volume: In this example we hardcode it to 0.90.
|
||||
* aformat: This converts the samples to the samplefreq, channel layout,
|
||||
* and sample format required by the audio device.
|
||||
* abuffersink: This provides the endpoint where you can read the samples after
|
||||
* they have passed through the filter chain.
|
||||
*/
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/md5.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
#include "libavfilter/buffersrc.h"
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
AVFilterContext **sink)
|
||||
{
|
||||
AVFilterGraph *filter_graph;
|
||||
AVFilterContext *abuffer_ctx;
|
||||
AVFilter *abuffer;
|
||||
AVFilterContext *volume_ctx;
|
||||
AVFilter *volume;
|
||||
AVFilterContext *aformat_ctx;
|
||||
AVFilter *aformat;
|
||||
AVFilterContext *abuffersink_ctx;
|
||||
AVFilter *abuffersink;
|
||||
|
||||
AVDictionary *options_dict = NULL;
|
||||
uint8_t options_str[1024];
|
||||
uint8_t ch_layout[64];
|
||||
|
||||
int err;
|
||||
|
||||
/* Create a new filtergraph, which will contain all the filters. */
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!filter_graph) {
|
||||
fprintf(stderr, "Unable to create filter graph.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Create the abuffer filter;
|
||||
* it will be used for feeding the data into the graph. */
|
||||
abuffer = avfilter_get_by_name("abuffer");
|
||||
if (!abuffer) {
|
||||
fprintf(stderr, "Could not find the abuffer filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
|
||||
if (!abuffer_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffer instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
/* Now initialize the filter; we pass NULL options, since we have already
|
||||
* set all the options above. */
|
||||
err = avfilter_init_str(abuffer_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffer filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create volume filter. */
|
||||
volume = avfilter_get_by_name("volume");
|
||||
if (!volume) {
|
||||
fprintf(stderr, "Could not find the volume filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
|
||||
if (!volume_ctx) {
|
||||
fprintf(stderr, "Could not allocate the volume instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A different way of passing the options is as key/value pairs in a
|
||||
* dictionary. */
|
||||
av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
|
||||
err = avfilter_init_dict(volume_ctx, &options_dict);
|
||||
av_dict_free(&options_dict);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the volume filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create the aformat filter;
|
||||
* it ensures that the output is of the format we want. */
|
||||
aformat = avfilter_get_by_name("aformat");
|
||||
if (!aformat) {
|
||||
fprintf(stderr, "Could not find the aformat filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
|
||||
if (!aformat_ctx) {
|
||||
fprintf(stderr, "Could not allocate the aformat instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Finally create the abuffersink filter;
|
||||
* it will be used to get the filtered data out of the graph. */
|
||||
abuffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!abuffersink) {
|
||||
fprintf(stderr, "Could not find the abuffersink filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
|
||||
if (!abuffersink_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* This filter takes no options. */
|
||||
err = avfilter_init_str(abuffersink_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Connect the filters;
|
||||
* in this simple case the filters just form a linear chain. */
|
||||
err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error connecting filters\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Configure the graph. */
|
||||
err = avfilter_graph_config(filter_graph, NULL);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
*graph = filter_graph;
|
||||
*src = abuffer_ctx;
|
||||
*sink = abuffersink_ctx;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Do something useful with the filtered data: this simple
|
||||
* example just prints the MD5 checksum of each plane to stdout. */
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < planes; i++) {
|
||||
uint8_t checksum[16];
|
||||
|
||||
av_md5_init(md5);
|
||||
av_md5_sum(checksum, frame->extended_data[i], plane_size);
|
||||
|
||||
fprintf(stdout, "plane %d: 0x", i);
|
||||
for (j = 0; j < sizeof(checksum); j++)
|
||||
fprintf(stdout, "%02X", checksum[j]);
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Construct a frame of audio data to be filtered;
|
||||
* this simple example just synthesizes a sine wave. */
|
||||
static int get_input(AVFrame *frame, int frame_num)
|
||||
{
|
||||
int err, i, j;
|
||||
|
||||
#define FRAME_SIZE 1024
|
||||
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
err = av_frame_get_buffer(frame, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* Fill the data for each channel. */
|
||||
for (i = 0; i < 5; i++) {
|
||||
float *data = (float*)frame->extended_data[i];
|
||||
|
||||
for (j = 0; j < frame->nb_samples; j++)
|
||||
data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
struct AVMD5 *md5;
|
||||
AVFilterGraph *graph;
|
||||
AVFilterContext *src, *sink;
|
||||
AVFrame *frame;
|
||||
uint8_t errstr[1024];
|
||||
float duration;
|
||||
int err, nb_frames, i;
|
||||
|
||||
if (argc < 2) {
|
||||
fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
duration = atof(argv[1]);
|
||||
nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
|
||||
if (nb_frames <= 0) {
|
||||
fprintf(stderr, "Invalid duration: %s\n", argv[1]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
avfilter_register_all();
|
||||
|
||||
/* Allocate the frame we will be using to store the data. */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating the frame\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
md5 = av_md5_alloc();
|
||||
if (!md5) {
|
||||
fprintf(stderr, "Error allocating the MD5 context\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Set up the filtergraph. */
|
||||
err = init_filter_graph(&graph, &src, &sink);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Unable to init filter graph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* the main filtering loop */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
/* get an input frame to be filtered */
|
||||
err = get_input(frame, i);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error generating input frame:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Send the frame to the input of the filtergraph. */
|
||||
err = av_buffersrc_add_frame(src, frame);
|
||||
if (err < 0) {
|
||||
av_frame_unref(frame);
|
||||
fprintf(stderr, "Error submitting the frame to the filtergraph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Get all the filtered output that is available. */
|
||||
while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
|
||||
/* now do something with our filtered frame */
|
||||
err = process_output(md5, frame);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error processing the filtered frame:");
|
||||
goto fail;
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
|
||||
if (err == AVERROR(EAGAIN)) {
|
||||
/* Need to feed more frames in. */
|
||||
continue;
|
||||
} else if (err == AVERROR_EOF) {
|
||||
/* Nothing more to do, finish. */
|
||||
break;
|
||||
} else if (err < 0) {
|
||||
/* An error occurred. */
|
||||
fprintf(stderr, "Error filtering the data:");
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
avfilter_graph_free(&graph);
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
av_strerror(err, errstr, sizeof(errstr));
|
||||
fprintf(stderr, "%s\n", errstr);
|
||||
return 1;
|
||||
}
|
||||
@@ -1,280 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet0, packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
packet0.data = NULL;
|
||||
packet.data = NULL;
|
||||
while (1) {
|
||||
if (!packet0.data) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
packet0 = packet;
|
||||
}
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
packet.size -= ret;
|
||||
packet.data += ret;
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
|
||||
if (packet.size <= 0)
|
||||
av_free_packet(&packet0);
|
||||
} else {
|
||||
/* discard non-wanted packets */
|
||||
av_free_packet(&packet0);
|
||||
}
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
@@ -1,261 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             dec_ctx->time_base.num, dec_ctx->time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static void display_frame(const AVFrame *frame, AVRational time_base)
{
    int x, y;
    uint8_t *p0, *p;
    int64_t delay;

    if (frame->pts != AV_NOPTS_VALUE) {
        if (last_pts != AV_NOPTS_VALUE) {
            /* sleep roughly the right amount of time;
             * usleep is in microseconds, just like AV_TIME_BASE. */
            delay = av_rescale_q(frame->pts - last_pts,
                                 time_base, AV_TIME_BASE_Q);
            if (delay > 0 && delay < 1000000)
                usleep(delay);
        }
        last_pts = frame->pts;
    }

    /* Trivial ASCII grayscale display. */
    p0 = frame->data[0];
    puts("\033c");
    for (y = 0; y < frame->height; y++) {
        p = p0;
        for (x = 0; x < frame->width; x++)
            putchar(" .-+#"[*(p++) / 52]);
        putchar('\n');
        p0 += frame->linesize[0];
    }
    fflush(stdout);
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_free_packet(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
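/* Not part of the original example: a minimal sketch of building the same
 * kind of chain without avfilter_graph_parse_ptr(), by creating and linking
 * a filter instance explicitly. The "scale" filter and the "78:24" size are
 * arbitrary illustration values; buffersrc_ctx and buffersink_ctx are assumed
 * to have been created as above. */
static int link_scale_filter(AVFilterGraph *graph,
                             AVFilterContext *src, AVFilterContext *sink)
{
    AVFilterContext *scale_ctx;
    int ret;

    /* create an intermediate scale filter instance inside the graph */
    ret = avfilter_graph_create_filter(&scale_ctx, avfilter_get_by_name("scale"),
                                       "scale", "78:24", NULL, graph);
    if (ret < 0)
        return ret;
    /* wire buffersrc -> scale -> buffersink, then validate the graph */
    if ((ret = avfilter_link(src, 0, scale_ctx, 0)) < 0 ||
        (ret = avfilter_link(scale_ctx, 0, sink, 0)) < 0)
        return ret;
    return avfilter_graph_config(graph, NULL);
}
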
@@ -1,56 +0,0 @@
/*
 * Copyright (c) 2011 Reinhard Tartler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example metadata.c
 */

#include <stdio.h>

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main (int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;
    int ret;

    if (argc != 2) {
        printf("usage: %s <input_file>\n"
               "example program to demonstrate the use of the libavformat metadata API.\n"
               "\n", argv[0]);
        return 1;
    }

    av_register_all();
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
        return ret;

    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    return 0;
}
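/* Not part of the original example: the same dictionary API also works in the
 * other direction when muxing. A minimal sketch, assuming 'oc' is an output
 * AVFormatContext whose header has not been written yet: */
static void set_output_metadata(AVFormatContext *oc)
{
    /* entries set here are written by avformat_write_header() if the
     * container format supports them */
    av_dict_set(&oc->metadata, "title",  "Example title", 0);
    av_dict_set(&oc->metadata, "artist", "Example artist", 0);
}
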
@@ -1,670 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, *codec);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = ost->st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
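/* Not part of the original example: with the 1/STREAM_FRAME_RATE time base
 * chosen above, a video pts value is simply a frame index. A minimal sketch
 * of turning such a pts back into seconds with av_q2d(): */
static double pts_to_seconds(int64_t pts, AVRational time_base)
{
    /* e.g. pts 25 with time_base {1, 25} -> 1.0 second */
    return pts * av_q2d(time_base);
}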
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
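/* Not part of the original example: the per-option av_opt_set_* calls above
 * can be collapsed into a single swr_alloc_set_opts() call (the helper the
 * transcode_aac example later in this diff uses). A minimal sketch, assuming
 * 'c' is the opened audio encoder context: */
static struct SwrContext *alloc_swr_for_encoder(AVCodecContext *c)
{
    /* S16 input from the tone generator, encoder-native output */
    return swr_alloc_set_opts(NULL,
                              c->channel_layout, c->sample_fmt,     c->sample_rate,
                              c->channel_layout, AV_SAMPLE_FMT_S16, c->sample_rate,
                              0, NULL);
}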
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j < frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
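/* Not part of the original example: the av_assert0() above only holds because
 * input and output sample rates are identical here. A minimal sketch of the
 * general output-size computation when they differ (same formula as in the
 * resampling_audio example further down): */
static int needed_dst_samples(struct SwrContext *swr,
                              int in_rate, int out_rate, int in_nb_samples)
{
    /* account for samples still buffered inside the resampler */
    return (int)av_rescale_rnd(swr_get_delay(swr, in_rate) + in_nb_samples,
                               out_rate, in_rate, AV_ROUND_UP);
}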
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i, ret;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(pict);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx,
|
||||
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
|
||||
0, c->height, ost->frame->data, ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* a hack to avoid data copy with some raw video muxers */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!frame)
|
||||
return 1;
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = ost->st->index;
|
||||
pkt.data = (uint8_t *)frame;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
pkt.pts = pkt.dts = frame->pts;
|
||||
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_close(ost->st->codec);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
|
||||
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_close(oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
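/* Not part of the original example: a minimal sketch of the stream-selection
 * rule used in the main loop above, factored into a helper. It returns
 * nonzero when the video stream is not ahead of the audio stream in
 * presentation time, i.e. when the next video frame should be encoded. */
static int video_is_due(const OutputStream *video_st, const OutputStream *audio_st)
{
    return av_compare_ts(video_st->next_pts, video_st->st->codec->time_base,
                         audio_st->next_pts, audio_st->st->codec->time_base) <= 0;
}
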
@@ -1,165 +0,0 @@
/*
 * Copyright (c) 2013 Stefano Sabatini
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * libavformat/libavcodec demuxing and muxing API example.
 *
 * Remux streams from one container format to another.
 * @example remuxing.c
 */

#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}

int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }

    in_filename  = argv[1];
    out_filename = argv[2];

    av_register_all();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        log_packet(ifmt_ctx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }

    av_write_trailer(ofmt_ctx);
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
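/* Not part of the original example: the three manual rescaling lines in the
 * copy loop above can also be expressed with av_packet_rescale_ts(), which
 * the muxing example earlier in this diff already uses. A minimal sketch: */
static void rescale_packet(AVPacket *pkt, AVRational in_tb, AVRational out_tb)
{
    /* rescales pts, dts and duration from in_tb to out_tb in one call */
    av_packet_rescale_ts(pkt, in_tb, out_tb);
    pkt->pos = -1; /* the byte position is meaningless in the new container */
}
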
@@ -1,214 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate a 440 Hz sine tone, duplicated across all channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_freep(&dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
if (dst_bufsize < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
goto end;
|
||||
}
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
|
||||
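/* Not part of the original example: after the input loop, samples may still
 * be buffered inside the resampler. A minimal sketch of draining it by
 * passing a NULL input to swr_convert(); dst_data and dst_nb_samples are
 * assumed to be the output buffers allocated above. */
static int flush_resampler(struct SwrContext *swr,
                           uint8_t **dst_data, int dst_nb_samples)
{
    /* returns the number of drained samples per channel, or a negative error */
    return swr_convert(swr, dst_data, dst_nb_samples, NULL, 0);
}
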
@@ -1,140 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
|
||||
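/* Not part of the original example: when the source dimensions or pixel
 * format can change between frames, sws_getCachedContext() can replace the
 * single sws_getContext() call above; it reuses the old context when the
 * parameters still match and reallocates it otherwise. A minimal sketch: */
static struct SwsContext *update_scaler(struct SwsContext *ctx,
                                        int src_w, int src_h, enum AVPixelFormat src_fmt,
                                        int dst_w, int dst_h, enum AVPixelFormat dst_fmt)
{
    return sws_getCachedContext(ctx, src_w, src_h, src_fmt,
                                dst_w, dst_h, dst_fmt,
                                SWS_BILINEAR, NULL, NULL, NULL);
}
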
@@ -1,755 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* simple audio converter
|
||||
*
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/** The output bit rate in kbit/s */
|
||||
#define OUTPUT_BIT_RATE 48000
|
||||
/** The number of output channels */
|
||||
#define OUTPUT_CHANNELS 2
|
||||
/** The audio sample output format */
|
||||
#define OUTPUT_SAMPLE_FORMAT AV_SAMPLE_FMT_S16
|
||||
|
||||
/**
|
||||
* Convert an error code into a text message.
|
||||
* @param error Error code to be converted
|
||||
* @return Corresponding error text (not thread-safe)
|
||||
*/
|
||||
static const char *get_error_text(const int error)
|
||||
{
|
||||
static char error_buffer[255];
|
||||
av_strerror(error, error_buffer, sizeof(error_buffer));
|
||||
return error_buffer;
|
||||
}
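/* Not part of the original example: the static buffer above is the reason for
 * the "not thread-safe" note. libavutil provides the av_err2str() macro (used
 * by the muxing and remuxing examples in this diff), which expands to
 * av_make_error_string() on a stack buffer and avoids the shared state: */
static void log_error(const char *what, int error)
{
    fprintf(stderr, "%s (error '%s')\n", what, av_err2str(error));
}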
|
||||
|
||||
/** Open an input file and the required decoder. */
|
||||
static int open_input_file(const char *filename,
|
||||
AVFormatContext **input_format_context,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/** Open the input file to read from it. */
|
||||
if ((error = avformat_open_input(input_format_context, filename, NULL,
|
||||
NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
*input_format_context = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Get information on the input file (number of streams etc.). */
|
||||
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open find stream info (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Make sure that there is only one stream in the input file. */
|
||||
if ((*input_format_context)->nb_streams != 1) {
|
||||
fprintf(stderr, "Expected one audio input stream, but found %d\n",
|
||||
(*input_format_context)->nb_streams);
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Open the decoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
|
||||
input_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Save the decoder context for easier access later. */
|
||||
*input_codec_context = (*input_format_context)->streams[0]->codec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open an output file and the required encoder.
|
||||
* Also set some basic encoder parameters.
|
||||
* Some of these parameters are based on the input file's parameters.
|
||||
*/
|
||||
static int open_output_file(const char *filename,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVFormatContext **output_format_context,
|
||||
AVCodecContext **output_codec_context)
|
||||
{
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/** Open the output file to write to it. */
|
||||
if ((error = avio_open(&output_io_context, filename,
|
||||
AVIO_FLAG_WRITE)) < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Create a new format context for the output container format. */
|
||||
if (!(*output_format_context = avformat_alloc_context())) {
|
||||
fprintf(stderr, "Could not allocate output format context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/** Associate the output file (pointer) with the container format context. */
|
||||
(*output_format_context)->pb = output_io_context;
|
||||
|
||||
/** Guess the desired container format based on the file extension. */
|
||||
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
|
||||
NULL))) {
|
||||
fprintf(stderr, "Could not find output file format\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
av_strlcpy((*output_format_context)->filename, filename,
|
||||
sizeof((*output_format_context)->filename));
|
||||
|
||||
/** Find the encoder to be used by its name. */
|
||||
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
|
||||
fprintf(stderr, "Could not find an AAC encoder.\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Create a new audio stream in the output file container. */
|
||||
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
|
||||
fprintf(stderr, "Could not create new stream\n");
|
||||
error = AVERROR(ENOMEM);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Save the encoder context for easier access later. */
|
||||
*output_codec_context = stream->codec;
|
||||
|
||||
/**
|
||||
* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion.
|
||||
*/
|
||||
(*output_codec_context)->channels = OUTPUT_CHANNELS;
|
||||
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
|
||||
(*output_codec_context)->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/**
|
||||
* Some container formats (like MP4) require global headers to be present.
|
||||
* Mark the encoder so that it behaves accordingly.
|
||||
*/
|
||||
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
(*output_codec_context)->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
/** Open the encoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open output codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
avio_close((*output_format_context)->pb);
|
||||
avformat_free_context(*output_format_context);
|
||||
*output_format_context = NULL;
|
||||
return error < 0 ? error : AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Initialize one data packet for reading or writing. */
|
||||
static void init_packet(AVPacket *packet)
|
||||
{
|
||||
av_init_packet(packet);
|
||||
/** Set the packet data and size so that it is recognized as being empty. */
|
||||
packet->data = NULL;
|
||||
packet->size = 0;
|
||||
}
|
||||
|
||||
/** Initialize one audio frame for reading from the input file */
|
||||
static int init_input_frame(AVFrame **frame)
|
||||
{
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate input frame\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the audio resampler based on the input and output codec settings.
|
||||
* If the input and output sample formats differ, a conversion is required;
|
||||
* libswresample takes care of this, but requires initialization.
|
||||
*/
|
||||
static int init_resampler(AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext **resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/**
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
* not greater than the number of samples to be converted.
|
||||
* If the sample rates differ, this case has to be handled differently
|
||||
*/
|
||||
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
|
||||
|
||||
/** Open the resampler with the specified parameters. */
|
||||
if ((error = swr_init(*resample_context)) < 0) {
|
||||
fprintf(stderr, "Could not open resample context\n");
|
||||
swr_free(resample_context);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Initialize a FIFO buffer for the audio samples to be encoded. */
|
||||
static int init_fifo(AVAudioFifo **fifo)
|
||||
{
|
||||
/** Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(OUTPUT_SAMPLE_FORMAT, OUTPUT_CHANNELS, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
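/* Not part of the original example: a minimal sketch of how the FIFO created
 * above is typically used, buffering converted samples until a full encoder
 * frame of 'frame_size' samples is available. 'samples' is assumed to point
 * to converted input data in OUTPUT_SAMPLE_FORMAT. */
static int buffer_samples(AVAudioFifo *fifo, uint8_t **samples, int nb_samples,
                          int frame_size)
{
    if (av_audio_fifo_write(fifo, (void **)samples, nb_samples) < nb_samples)
        return AVERROR_EXIT;
    /* enough data for at least one full encoder frame? */
    return av_audio_fifo_size(fifo) >= frame_size;
}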
|
||||
|
||||
/** Write the header of the output file container. */
|
||||
static int write_output_file_header(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not write output file header (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Decode one audio frame from the input file. */
|
||||
static int decode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
int *data_present, int *finished)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket input_packet;
|
||||
int error;
|
||||
init_packet(&input_packet);
|
||||
|
||||
/** Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/** If we are the the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
fprintf(stderr, "Could not read frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode the audio frame stored in the temporary packet.
|
||||
* The input audio stream decoder is used to do this.
|
||||
* If we are at the end of the file, pass an empty packet to the decoder
|
||||
* to flush it.
|
||||
*/
|
||||
if ((error = avcodec_decode_audio4(input_codec_context, frame,
|
||||
data_present, &input_packet)) < 0) {
|
||||
fprintf(stderr, "Could not decode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&input_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the decoder has not been flushed completely, we are not finished,
|
||||
* so that this function has to be called again.
|
||||
*/
|
||||
if (*finished && *data_present)
|
||||
*finished = 0;
|
||||
av_free_packet(&input_packet);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize a temporary storage for the specified number of audio samples.
|
||||
* The conversion requires temporary storage due to the different format.
|
||||
* The number of audio samples to be allocated is specified in frame_size.
|
||||
*/
|
||||
static int init_converted_samples(uint8_t ***converted_input_samples,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Allocate as many pointers as there are audio channels.
|
||||
* Each pointer will later point to the audio samples of the corresponding
|
||||
* channels (although it may be NULL for interleaved formats).
|
||||
*/
|
||||
if (!(*converted_input_samples = calloc(output_codec_context->channels,
|
||||
sizeof(**converted_input_samples)))) {
|
||||
fprintf(stderr, "Could not allocate converted input sample pointers\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate memory for the samples of all channels in one consecutive
|
||||
* block for convenience.
|
||||
*/
|
||||
if ((error = av_samples_alloc(*converted_input_samples, NULL,
|
||||
output_codec_context->channels,
|
||||
frame_size,
|
||||
output_codec_context->sample_fmt, 0)) < 0) {
|
||||
fprintf(stderr,
|
||||
"Could not allocate converted input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_freep(&(*converted_input_samples)[0]);
|
||||
free(*converted_input_samples);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Convert the input audio samples into the output sample format.
|
||||
* The conversion happens on a per-frame basis, the size of which is specified
|
||||
* by frame_size.
|
||||
*/
|
||||
static int convert_samples(const uint8_t **input_data,
|
||||
uint8_t **converted_data, const int frame_size,
|
||||
SwrContext *resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Convert the samples using the resampler. */
|
||||
if ((error = swr_convert(resample_context,
|
||||
converted_data, frame_size,
|
||||
input_data , frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not convert input samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Add converted input audio samples to the FIFO buffer for later processing. */
|
||||
static int add_samples_to_fifo(AVAudioFifo *fifo,
|
||||
uint8_t **converted_input_samples,
|
||||
const int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Make the FIFO as large as it needs to be to hold both,
|
||||
* the old and the new samples.
|
||||
*/
|
||||
if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
|
||||
fprintf(stderr, "Could not reallocate FIFO\n");
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Store the new samples in the FIFO buffer. */
|
||||
if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
|
||||
frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not write data to FIFO\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Read one audio frame from the input file, decodes, converts and stores
|
||||
* it in the FIFO buffer.
|
||||
*/
|
||||
static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext *resampler_context,
|
||||
int *finished)
|
||||
{
|
||||
/** Temporary storage of the input samples of the frame read from the file. */
|
||||
AVFrame *input_frame = NULL;
|
||||
/** Temporary storage for the converted input samples. */
|
||||
uint8_t **converted_input_samples = NULL;
|
||||
int data_present;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
/** Initialize temporary storage for one input frame. */
|
||||
if (init_input_frame(&input_frame))
|
||||
goto cleanup;
|
||||
/** Decode one frame worth of audio samples. */
|
||||
if (decode_audio_frame(input_frame, input_format_context,
|
||||
input_codec_context, &data_present, finished))
|
||||
goto cleanup;
|
||||
/**
|
||||
* If we are at the end of the file and there are no more samples
|
||||
* in the decoder which are delayed, we are actually finished.
|
||||
* This must not be treated as an error.
|
||||
*/
|
||||
if (*finished && !data_present) {
|
||||
ret = 0;
|
||||
goto cleanup;
|
||||
}
|
||||
/** If there is decoded data, convert and store it */
|
||||
if (data_present) {
|
||||
/** Initialize the temporary storage for the converted input samples. */
|
||||
if (init_converted_samples(&converted_input_samples, output_codec_context,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* Convert the input samples to the desired output sample format.
|
||||
* This requires a temporary storage provided by converted_input_samples.
|
||||
*/
|
||||
if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
|
||||
input_frame->nb_samples, resampler_context))
|
||||
goto cleanup;
|
||||
|
||||
/** Add the converted input samples to the FIFO buffer for later processing. */
|
||||
if (add_samples_to_fifo(fifo, converted_input_samples,
|
||||
input_frame->nb_samples))
|
||||
goto cleanup;
|
||||
ret = 0;
|
||||
}
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (converted_input_samples) {
|
||||
av_freep(&converted_input_samples[0]);
|
||||
free(converted_input_samples);
|
||||
}
|
||||
av_frame_free(&input_frame);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize one input frame for writing to the output file.
|
||||
* The frame will be exactly frame_size samples large.
|
||||
*/
|
||||
static int init_output_frame(AVFrame **frame,
|
||||
AVCodecContext *output_codec_context,
|
||||
int frame_size)
|
||||
{
|
||||
int error;
|
||||
|
||||
/** Create a new frame to store the audio samples. */
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate output frame\n");
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the frame's parameters, especially its size and format.
|
||||
* av_frame_get_buffer needs this to allocate memory for the
|
||||
* audio samples of the frame.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity.
|
||||
*/
|
||||
(*frame)->nb_samples = frame_size;
|
||||
(*frame)->channel_layout = output_codec_context->channel_layout;
|
||||
(*frame)->format = output_codec_context->sample_fmt;
|
||||
(*frame)->sample_rate = output_codec_context->sample_rate;
|
||||
|
||||
/**
|
||||
* Allocate the samples of the created frame. This call will make
|
||||
* sure that the audio frame can hold as many samples as specified.
|
||||
*/
|
||||
if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
|
||||
fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_frame_free(frame);
|
||||
return error;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio to the output file. */
|
||||
static int encode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
int *data_present)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket output_packet;
|
||||
int error;
|
||||
init_packet(&output_packet);
|
||||
|
||||
/**
|
||||
* Encode the audio frame and store it in the temporary packet.
|
||||
* The output audio stream encoder is used to do this.
|
||||
*/
|
||||
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
|
||||
frame, data_present)) < 0) {
|
||||
fprintf(stderr, "Could not encode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Write one audio frame from the temporary packet to the output file. */
|
||||
if (*data_present) {
|
||||
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
|
||||
fprintf(stderr, "Could not write frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&output_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
av_free_packet(&output_packet);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Load one audio frame from the FIFO buffer, encode and write it to the
|
||||
* output file.
|
||||
*/
|
||||
static int load_encode_and_write(AVAudioFifo *fifo,
|
||||
AVFormatContext *output_format_context,
|
||||
AVCodecContext *output_codec_context)
|
||||
{
|
||||
/** Temporary storage of the output samples of the frame written to the file. */
|
||||
AVFrame *output_frame;
|
||||
/**
|
||||
* Use the maximum number of possible samples per frame.
|
||||
* If there is less than the maximum possible frame size in the FIFO
|
||||
* buffer use this number. Otherwise, use the maximum possible frame size
|
||||
*/
|
||||
const int frame_size = FFMIN(av_audio_fifo_size(fifo),
|
||||
output_codec_context->frame_size);
|
||||
int data_written;
|
||||
|
||||
/** Initialize temporary storage for one output frame. */
|
||||
if (init_output_frame(&output_frame, output_codec_context, frame_size))
|
||||
return AVERROR_EXIT;
|
||||
|
||||
/**
|
||||
* Read as many samples from the FIFO buffer as required to fill the frame.
|
||||
* The samples are stored in the frame temporarily.
|
||||
*/
|
||||
if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
|
||||
fprintf(stderr, "Could not read data from FIFO\n");
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Encode one frame worth of audio samples. */
|
||||
if (encode_audio_frame(output_frame, output_format_context,
|
||||
output_codec_context, &data_written)) {
|
||||
av_frame_free(&output_frame);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
av_frame_free(&output_frame);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the trailer of the output file container. */
|
||||
static int write_output_file_trailer(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = av_write_trailer(output_format_context)) < 0) {
|
||||
fprintf(stderr, "Could not write output file trailer (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Convert an audio file to an AAC file in an MP4 container. */
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
|
||||
AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
|
||||
SwrContext *resample_context = NULL;
|
||||
AVAudioFifo *fifo = NULL;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/** Register all codecs and formats so that they can be used. */
|
||||
av_register_all();
|
||||
/** Open the input file for reading. */
|
||||
if (open_input_file(argv[1], &input_format_context,
|
||||
&input_codec_context))
|
||||
goto cleanup;
|
||||
/** Open the output file for writing. */
|
||||
if (open_output_file(argv[2], input_codec_context,
|
||||
&output_format_context, &output_codec_context))
|
||||
goto cleanup;
|
||||
/** Initialize the resampler to be able to convert audio sample formats. */
|
||||
if (init_resampler(input_codec_context, output_codec_context,
|
||||
&resample_context))
|
||||
goto cleanup;
|
||||
/** Initialize the FIFO buffer to store audio samples to be encoded. */
|
||||
if (init_fifo(&fifo))
|
||||
goto cleanup;
|
||||
/** Write the header of the output file container. */
|
||||
if (write_output_file_header(output_format_context))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* Loop as long as we have input samples to read or output samples
|
||||
* to write; abort as soon as we have neither.
|
||||
*/
|
||||
while (1) {
|
||||
/** Use the encoder's desired frame size for processing. */
|
||||
const int output_frame_size = output_codec_context->frame_size;
|
||||
int finished = 0;
|
||||
|
||||
/**
|
||||
* Make sure that there is one frame worth of samples in the FIFO
|
||||
* buffer so that the encoder can do its work.
|
||||
* Since the decoder's and the encoder's frame size may differ, we
|
||||
* need to FIFO buffer to store as many frames worth of input samples
|
||||
* that they make up at least one frame worth of output samples.
|
||||
*/
|
||||
while (av_audio_fifo_size(fifo) < output_frame_size) {
|
||||
/**
|
||||
* Decode one frame worth of audio samples, convert it to the
|
||||
* output sample format and put it into the FIFO buffer.
|
||||
*/
|
||||
if (read_decode_convert_and_store(fifo, input_format_context,
|
||||
input_codec_context,
|
||||
output_codec_context,
|
||||
resample_context, &finished))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* If we are at the end of the input file, we continue
|
||||
* encoding the remaining audio samples to the output file.
|
||||
*/
|
||||
if (finished)
|
||||
break;
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have enough samples for the encoder, we encode them.
|
||||
* At the end of the file, we pass the remaining samples to
|
||||
* the encoder.
|
||||
*/
|
||||
while (av_audio_fifo_size(fifo) >= output_frame_size ||
|
||||
(finished && av_audio_fifo_size(fifo) > 0))
|
||||
/**
|
||||
* Take one frame worth of audio samples from the FIFO buffer,
|
||||
* encode it and write it to the output file.
|
||||
*/
|
||||
if (load_encode_and_write(fifo, output_format_context,
|
||||
output_codec_context))
|
||||
goto cleanup;
|
||||
|
||||
/**
|
||||
* If we are at the end of the input file and have encoded
|
||||
* all remaining samples, we can exit this loop and finish.
|
||||
*/
|
||||
if (finished) {
|
||||
int data_written;
|
||||
/** Flush the encoder as it may have delayed frames. */
|
||||
do {
|
||||
if (encode_audio_frame(NULL, output_format_context,
|
||||
output_codec_context, &data_written))
|
||||
goto cleanup;
|
||||
} while (data_written);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/** Write the trailer of the output file container. */
|
||||
if (write_output_file_trailer(output_format_context))
|
||||
goto cleanup;
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (fifo)
|
||||
av_audio_fifo_free(fifo);
|
||||
swr_free(&resample_context);
|
||||
if (output_codec_context)
|
||||
avcodec_close(output_codec_context);
|
||||
if (output_format_context) {
|
||||
avio_close(output_format_context->pb);
|
||||
avformat_free_context(output_format_context);
|
||||
}
|
||||
if (input_codec_context)
|
||||
avcodec_close(input_codec_context);
|
||||
if (input_format_context)
|
||||
avformat_close_input(&input_format_context);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1,597 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2014 Andrey Utkin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for demuxing, decoding, filtering, encoding and muxing
|
||||
* @example transcoding.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx;
|
||||
static AVFormatContext *ofmt_ctx;
|
||||
typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ifmt_ctx = NULL;
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream;
|
||||
AVCodecContext *codec_ctx;
|
||||
stream = ifmt_ctx->streams[i];
|
||||
codec_ctx = stream->codec;
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* Open decoder */
|
||||
ret = avcodec_open2(codec_ctx,
|
||||
avcodec_find_decoder(codec_ctx->codec_id), NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_output_file(const char *filename)
|
||||
{
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream;
|
||||
AVCodecContext *dec_ctx, *enc_ctx;
|
||||
AVCodec *encoder;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ofmt_ctx = NULL;
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
|
||||
if (!ofmt_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
in_stream = ifmt_ctx->streams[i];
|
||||
dec_ctx = in_stream->codec;
|
||||
enc_ctx = out_stream->codec;
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* in this example, we choose transcoding to same codec */
|
||||
encoder = avcodec_find_encoder(dec_ctx->codec_id);
|
||||
|
||||
/* In this example, we transcode to same properties (picture size,
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->pix_fmt = encoder->pix_fmts[0];
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = dec_ctx->time_base;
|
||||
} else {
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
enc_ctx->channel_layout = dec_ctx->channel_layout;
|
||||
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
/* Third parameter can be used to pass settings to encoder */
|
||||
ret = avcodec_open2(enc_ctx, encoder, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
/* if this stream must be remuxed */
|
||||
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
|
||||
ifmt_ctx->streams[i]->codec);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, filename, 1);
|
||||
|
||||
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* init muxer, write output file header */
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
AVCodecContext *enc_ctx, const char *filter_spec)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = NULL;
|
||||
AVFilter *buffersink = NULL;
|
||||
AVFilterContext *buffersrc_ctx = NULL;
|
||||
AVFilterContext *buffersink_ctx = NULL;
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVFilterGraph *filter_graph = avfilter_graph_alloc();
|
||||
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
buffersrc = avfilter_get_by_name("buffer");
|
||||
buffersink = avfilter_get_by_name("buffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num,
|
||||
dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
|
||||
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
buffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout =
|
||||
av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt),
|
||||
dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
|
||||
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
|
||||
(uint8_t*)&enc_ctx->channel_layout,
|
||||
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
|
||||
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if (!outputs->name || !inputs->name) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Fill FilteringContext */
|
||||
fctx->buffersrc_ctx = buffersrc_ctx;
|
||||
fctx->buffersink_ctx = buffersink_ctx;
|
||||
fctx->filter_graph = filter_graph;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_filters(void)
|
||||
{
|
||||
const char *filter_spec;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
|
||||
if (!filter_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
filter_ctx[i].buffersrc_ctx = NULL;
|
||||
filter_ctx[i].buffersink_ctx = NULL;
|
||||
filter_ctx[i].filter_graph = NULL;
|
||||
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|
||||
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
|
||||
continue;
|
||||
|
||||
|
||||
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
filter_spec = "null"; /* passthrough (dummy) filter for video */
|
||||
else
|
||||
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
|
||||
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
|
||||
ofmt_ctx->streams[i]->codec, filter_spec);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
|
||||
int ret;
|
||||
int got_frame_local;
|
||||
AVPacket enc_pkt;
|
||||
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
|
||||
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
|
||||
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
|
||||
|
||||
if (!got_frame)
|
||||
got_frame = &got_frame_local;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
|
||||
/* encode filtered frame */
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
av_init_packet(&enc_pkt);
|
||||
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
|
||||
filt_frame, got_frame);
|
||||
av_frame_free(&filt_frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
if (!(*got_frame))
|
||||
return 0;
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt.stream_index = stream_index;
|
||||
enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
enc_pkt.duration = av_rescale_q(enc_pkt.duration,
|
||||
ofmt_ctx->streams[stream_index]->codec->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!filt_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
|
||||
filt_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
* rewrite retcode to 0 to show it as normal procedure completion
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
av_frame_free(&filt_frame);
|
||||
break;
|
||||
}
|
||||
|
||||
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(filt_frame, stream_index, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
int got_frame;
|
||||
|
||||
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
|
||||
CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
ret = encode_write_frame(NULL, stream_index, &got_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (!got_frame)
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet = { .data = NULL, .size = 0 };
|
||||
AVFrame *frame = NULL;
|
||||
enum AVMediaType type;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
int got_frame;
|
||||
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = open_output_file(argv[2])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
stream_index = packet.stream_index;
|
||||
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
packet.dts = av_rescale_q_rnd(packet.dts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ifmt_ctx->streams[stream_index]->codec->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
packet.pts = av_rescale_q_rnd(packet.pts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ifmt_ctx->streams[stream_index]->codec->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
|
||||
avcodec_decode_audio4;
|
||||
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
|
||||
&got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
ret = filter_encode_write_frame(frame, stream_index);
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
} else {
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
packet.dts = av_rescale_q_rnd(packet.dts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
packet.pts = av_rescale_q_rnd(packet.pts,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base,
|
||||
AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
|
||||
/* flush filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
/* flush filter */
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
ret = flush_encoder(i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_free_packet(&packet);
|
||||
av_frame_free(&frame);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_close(ifmt_ctx->streams[i]->codec);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
|
||||
avcodec_close(ofmt_ctx->streams[i]->codec);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph)
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
|
||||
avio_close(ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
|
||||
|
||||
return ret ? 1 : 0;
|
||||
}
|
||||
472
doc/faq.texi
472
doc/faq.texi
@@ -2,15 +2,29 @@
|
||||
|
||||
@settitle FFmpeg FAQ
|
||||
@titlepage
|
||||
@sp 7
|
||||
@center @titlefont{FFmpeg FAQ}
|
||||
@sp 3
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter General Questions
|
||||
|
||||
@section When will the next FFmpeg version be released? / Why are FFmpeg releases so few and far between?
|
||||
|
||||
Like most open source projects FFmpeg suffers from a certain lack of
|
||||
manpower. For this reason the developers have to prioritize the work
|
||||
they do and putting out releases is not at the top of the list, fixing
|
||||
bugs and reviewing patches takes precedence. Please don't complain or
|
||||
request more timely and/or frequent releases unless you are willing to
|
||||
help out creating them.
|
||||
|
||||
@section I have a problem with an old version of FFmpeg; where should I report it?
|
||||
Nowhere. Upgrade to the latest release or if there is no recent release upgrade
|
||||
to Subversion HEAD. You could also try to report it. Maybe you will get lucky and
|
||||
become the first person in history to get an answer different from "upgrade
|
||||
to Subversion HEAD".
|
||||
|
||||
@section Why doesn't FFmpeg support feature [xyz]?
|
||||
|
||||
Because no one has taken on that task yet. FFmpeg development is
|
||||
@@ -24,6 +38,30 @@ No. Windows DLLs are not portable, bloated and often slow.
|
||||
Moreover FFmpeg strives to support all codecs natively.
|
||||
A DLL loader is not conducive to that goal.
|
||||
|
||||
@section My bug report/mail to ffmpeg-devel/user has not received any replies.
|
||||
|
||||
Likely reasons
|
||||
@itemize
|
||||
@item We are busy and haven't had time yet to read your report or
|
||||
investigate the issue.
|
||||
@item You didn't follow bugreports.html.
|
||||
@item You didn't use Subversion HEAD.
|
||||
@item You reported a segmentation fault without gdb output.
|
||||
@item You describe a problem but not how to reproduce it.
|
||||
@item It's unclear if you use ffmpeg as command line tool or use
|
||||
libav* from another application.
|
||||
@item You speak about a video having problems on playback but
|
||||
not what you use to play it.
|
||||
@item We have no faint clue what you are talking about besides
|
||||
that it is related to FFmpeg.
|
||||
@end itemize
|
||||
|
||||
@section Is there a forum for FFmpeg? I do not like mailing lists.
|
||||
|
||||
You may view our mailing lists with a more forum-alike look here:
|
||||
@url{http://dir.gmane.org/gmane.comp.video.ffmpeg.user},
|
||||
but, if you post, please remember that our mailing list rules still apply there.
|
||||
|
||||
@section I cannot read this file although this format seems to be supported by ffmpeg.
|
||||
|
||||
Even if ffmpeg can read the container format, it may not support all its
|
||||
@@ -47,7 +85,7 @@ The following list of video codecs should work on most Windows systems:
|
||||
.asf only
|
||||
@item mpeg4
|
||||
Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
|
||||
@item mpeg1video
|
||||
@item mpeg1
|
||||
.mpg only
|
||||
@end table
|
||||
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
|
||||
@@ -59,9 +97,9 @@ The following list of audio codecs should work on most Windows systems:
|
||||
@table @option
|
||||
@item adpcm_ima_wav
|
||||
@item adpcm_ms
|
||||
@item pcm_s16le
|
||||
@item pcm
|
||||
always
|
||||
@item libmp3lame
|
||||
@item mp3
|
||||
If some MP3 codec like LAME is installed.
|
||||
@end table
|
||||
|
||||
@@ -79,23 +117,11 @@ not a bug they should fix:
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?
|
||||
|
||||
Distributions usually split libraries in several packages. The main package
|
||||
contains the files necessary to run programs using the library. The
|
||||
development package contains the files necessary to build programs using the
|
||||
library. Sometimes, docs and/or data are in a separate package too.
|
||||
|
||||
To build FFmpeg, you need to install the development package. It is usually
|
||||
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
|
||||
build is finished, but be sure to keep the main package.
|
||||
|
||||
@chapter Usage
|
||||
|
||||
@section ffmpeg does not work; what is wrong?
|
||||
|
||||
Try a @code{make distclean} in the ffmpeg source directory before the build.
|
||||
If this does not help see
|
||||
Try a @code{make distclean} in the ffmpeg source directory before the build. If this does not help see
|
||||
(@url{http://ffmpeg.org/bugreports.html}).
|
||||
|
||||
@section How do I encode single pictures into movies?
|
||||
@@ -105,55 +131,21 @@ For example, img1.jpg, img2.jpg, img3.jpg,...
|
||||
Then you may run:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
|
||||
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
Notice that @samp{%d} is replaced by the image number.
|
||||
|
||||
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.
|
||||
|
||||
Use the @option{-start_number} option to declare a starting number for
|
||||
the sequence. This is useful if your sequence does not start with
|
||||
@file{img001.jpg} but is still in a numerical order. The following
|
||||
example will start with @file{img100.jpg}:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
If you have large number of pictures to rename, you can use the
|
||||
following command to ease the burden. The command, using the bourne
|
||||
shell syntax, symbolically links all files in the current directory
|
||||
that match @code{*jpg} to the @file{/tmp} directory in the sequence of
|
||||
@file{img001.jpg}, @file{img002.jpg} and so on.
|
||||
|
||||
@example
|
||||
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
|
||||
@end example
|
||||
|
||||
If you want to sequence them by oldest modified first, substitute
|
||||
@code{$(ls -r -t *jpg)} in place of @code{*jpg}.
|
||||
|
||||
Then run:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc...
|
||||
|
||||
The same logic is used for any image format that ffmpeg reads.
|
||||
|
||||
You can also use @command{cat} to pipe images to ffmpeg:
|
||||
|
||||
@example
|
||||
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
|
||||
@end example
|
||||
|
||||
@section How do I encode movie to single pictures?
|
||||
|
||||
Use:
|
||||
|
||||
@example
|
||||
ffmpeg -i movie.mpg movie%d.jpg
|
||||
ffmpeg -i movie.mpg movie%d.jpg
|
||||
@end example
|
||||
|
||||
The @file{movie.mpg} used as input will be converted to
|
||||
@@ -161,15 +153,15 @@ The @file{movie.mpg} used as input will be converted to
|
||||
|
||||
Instead of relying on file format self-recognition, you may also use
|
||||
@table @option
|
||||
@item -c:v ppm
|
||||
@item -c:v png
|
||||
@item -c:v mjpeg
|
||||
@item -vcodec ppm
|
||||
@item -vcodec png
|
||||
@item -vcodec mjpeg
|
||||
@end table
|
||||
to force the encoding.
|
||||
|
||||
Applying that to the previous example:
|
||||
@example
|
||||
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
|
||||
ffmpeg -i movie.mpg -f image2 -vcodec mjpeg menu%d.jpg
|
||||
@end example
|
||||
|
||||
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
|
||||
@@ -184,28 +176,87 @@ quite logical that there is a small reduction of quality. This is not a bug.
|
||||
|
||||
Use @file{-} as file name.
|
||||
|
||||
@section Why does FFmpeg not decode audio in VOB files?
|
||||
|
||||
The audio is AC-3 (a.k.a. A/52). AC-3 decoding is an optional component in FFmpeg
|
||||
as the component that handles AC-3 decoding is currently released under the GPL.
|
||||
Enable AC-3 decoding with @code{./configure --enable-gpl}. Take care: By
|
||||
enabling AC-3, you automatically change the license of libavcodec from
|
||||
LGPL to GPL.
|
||||
|
||||
@section Why does the chrominance data seem to be sampled at a different time from the luminance data on bt8x8 captures on Linux?
|
||||
|
||||
This is a well-known bug in the bt8x8 driver. For 2.4.26 there is a patch at
|
||||
(@url{http://svn.ffmpeg.org/michael/trunk/patches/bttv-420-2.4.26.patch?view=co}). This may also
|
||||
apply cleanly to other 2.4-series kernels.
|
||||
|
||||
@section How do I avoid the ugly aliasing artifacts in bt8x8 captures on Linux?
|
||||
|
||||
Pass 'combfilter=1 lumafilter=1' to the bttv driver. Note though that 'combfilter=1'
|
||||
will cause somewhat too strong filtering. A fix is to apply (@url{http://svn.ffmpeg.org/michael/trunk/patches/bttv-comb-2.4.26.patch?view=co})
|
||||
or (@url{http://svn.ffmpeg.org/michael/trunk/patches/bttv-comb-2.6.6.patch?view=co})
|
||||
and pass 'combfilter=2'.
|
||||
|
||||
@section -f jpeg doesn't work.
|
||||
|
||||
Try '-f image2 test%d.jpg'.
|
||||
|
||||
@section Why can I not change the frame rate?
|
||||
@section Why can I not change the framerate?
|
||||
|
||||
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
|
||||
Choose a different codec with the -c:v command line option.
|
||||
Some codecs, like MPEG-1/2, only allow a small number of fixed framerates.
|
||||
Choose a different codec with the -vcodec command line option.
|
||||
|
||||
@section How do I encode Xvid or DivX video with ffmpeg?
|
||||
|
||||
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
|
||||
standard (note that there are many other coding formats that use this
|
||||
same standard). Thus, use '-c:v mpeg4' to encode in these formats. The
|
||||
same standard). Thus, use '-vcodec mpeg4' to encode in these formats. The
|
||||
default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want
|
||||
a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will
|
||||
force the fourcc 'xvid' to be stored as the video fourcc rather than the
|
||||
default.
|
||||
|
||||
@section How do I encode videos which play on the iPod?
|
||||
|
||||
@table @option
|
||||
@item needed stuff
|
||||
-acodec libfaac -vcodec mpeg4 width<=320 height<=240
|
||||
@item working stuff
|
||||
4mv, title
|
||||
@item non-working stuff
|
||||
B-frames
|
||||
@item example command line
|
||||
ffmpeg -i input -acodec libfaac -ab 128kb -vcodec mpeg4 -b 1200kb -mbd 2 -flags +4mv -trellis 2 -aic 2 -cmp 2 -subcmp 2 -s 320x180 -title X output.mp4
|
||||
@end table
|
||||
|
||||
@section How do I encode videos which play on the PSP?
|
||||
|
||||
@table @option
|
||||
@item needed stuff
|
||||
-acodec libfaac -vcodec mpeg4 width*height<=76800 width%16=0 height%16=0 -ar 24000 -r 30000/1001 or 15000/1001 -f psp
|
||||
@item working stuff
|
||||
4mv, title
|
||||
@item non-working stuff
|
||||
B-frames
|
||||
@item example command line
|
||||
ffmpeg -i input -acodec libfaac -ab 128kb -vcodec mpeg4 -b 1200kb -ar 24000 -mbd 2 -flags +4mv -trellis 2 -aic 2 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp output.mp4
|
||||
@item needed stuff for H.264
|
||||
-acodec libfaac -vcodec libx264 width*height<=76800 width%16=0? height%16=0? -ar 48000 -coder 1 -r 30000/1001 or 15000/1001 -f psp
|
||||
@item working stuff for H.264
|
||||
title, loop filter
|
||||
@item non-working stuff for H.264
|
||||
CAVLC
|
||||
@item example command line
|
||||
ffmpeg -i input -acodec libfaac -ab 128kb -vcodec libx264 -b 1200kb -ar 48000 -mbd 2 -coder 1 -cmp 2 -subcmp 2 -s 368x192 -r 30000/1001 -title X -f psp -flags loop -trellis 2 -partitions parti4x4+parti8x8+partp4x4+partp8x8+partb8x8 output.mp4
|
||||
@item higher resolution for newer PSP firmwares, width<=480, height<=272
|
||||
-vcodec libx264 -level 21 -coder 1 -f psp
|
||||
@item example command line
|
||||
ffmpeg -i input -acodec libfaac -ab 128kb -ac 2 -ar 48000 -vcodec libx264 -level 21 -b 640kb -coder 1 -f psp -flags +loop -trellis 2 -partitions +parti4x4+parti8x8+partp4x4+partp8x8+partb8x8 -g 250 -s 480x272 output.mp4
|
||||
@end table
|
||||
|
||||
@section Which are good parameters for encoding high quality MPEG-4?
|
||||
|
||||
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
'-mbd rd -flags +4mv+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
|
||||
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
|
||||
@@ -224,82 +275,24 @@ material, and try '-top 0/1' if the result looks really messed-up.
|
||||
If you have built FFmpeg with @code{./configure --enable-avisynth}
|
||||
(only possible on MinGW/Cygwin platforms),
|
||||
then you may use any file that DirectShow can read as input.
|
||||
(Be aware that this feature has been recently added,
|
||||
so you will need to help yourself in case of problems.)
|
||||
|
||||
Just create an "input.avs" text file with this single line ...
|
||||
@example
|
||||
DirectShowSource("C:\path to your file\yourfile.asf")
|
||||
DirectShowSource("C:\path to your file\yourfile.asf")
|
||||
@end example
|
||||
... and then feed that text file to ffmpeg:
|
||||
... and then feed that text file to FFmpeg:
|
||||
@example
|
||||
ffmpeg -i input.avs
|
||||
ffmpeg -i input.avs
|
||||
@end example
|
||||
|
||||
For ANY other help on AviSynth, please visit the
|
||||
@uref{http://www.avisynth.org/, AviSynth homepage}.
|
||||
For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}.
|
||||
|
||||
@section How can I join video files?
|
||||
|
||||
To "join" video files is quite ambiguous. The following list explains the
|
||||
different kinds of "joining" and points out how those are addressed in
|
||||
FFmpeg. To join video files may mean:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
To put them one after the other: this is called to @emph{concatenate} them
|
||||
(in short: concat) and is addressed
|
||||
@ref{How can I concatenate video files, in this very faq}.
|
||||
|
||||
@item
|
||||
To put them together in the same file, to let the user choose between the
|
||||
different versions (example: different audio languages): this is called to
|
||||
@emph{multiplex} them together (in short: mux), and is done by simply
|
||||
invoking ffmpeg with several @option{-i} options.
|
||||
|
||||
@item
|
||||
For audio, to put all channels together in a single stream (example: two
|
||||
mono streams into one stereo stream): this is sometimes called to
|
||||
@emph{merge} them, and can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
|
||||
|
||||
@item
|
||||
For audio, to play one on top of the other: this is called to @emph{mix}
|
||||
them, and can be done by first merging them into a single stream and then
|
||||
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
|
||||
the channels at will.
|
||||
|
||||
@item
|
||||
For video, to display both together, side by side or one on top of a part of
|
||||
the other; it can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
|
||||
|
||||
@end itemize
|
||||
|
||||
@anchor{How can I concatenate video files}
|
||||
@section How can I concatenate video files?
|
||||
|
||||
There are several solutions, depending on the exact circumstances.
|
||||
|
||||
@subsection Concatenating using the concat @emph{filter}
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
|
||||
@code{concat}} filter designed specifically for that, with examples in the
|
||||
documentation. This operation is recommended if you need to re-encode.
|
||||
|
||||
@subsection Concatenating using the concat @emph{demuxer}
|
||||
|
||||
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
|
||||
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
|
||||
your format doesn't support file level concatenation.
|
||||
|
||||
@subsection Concatenating using the concat @emph{protocol} (file level)
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to concatenate
|
||||
video by merely concatenating the files containing them.
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow to join video files by
|
||||
merely concatenating them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
these privileged formats, then using the humble @code{cat} command (or the
|
||||
@@ -307,47 +300,32 @@ equally humble @code{copy} under Windows), and finally transcoding back to your
|
||||
format of choice.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i input1.avi -sameq intermediate1.mpg
|
||||
ffmpeg -i input2.avi -sameq intermediate2.mpg
|
||||
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
ffmpeg -i intermediate_all.mpg -sameq output.avi
|
||||
@end example
|
||||
|
||||
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
|
||||
@code{copy} which will avoid creation of a potentially huge intermediate file.
|
||||
Notice that you should either use @code{-sameq} or set a reasonably high
|
||||
bitrate for your intermediate and output files, if you want to preserve
|
||||
video quality.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
@end example
|
||||
|
||||
Note that you may need to escape the character "|" which is special for many
|
||||
shells.
|
||||
|
||||
Another option is usage of named pipes, should your platform support it:
|
||||
Also notice that you may avoid the huge intermediate files by taking advantage
|
||||
of named pipes, should your platform support it:
|
||||
|
||||
@example
mkfifo intermediate1.mpg
mkfifo intermediate2.mpg
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
cat intermediate1.mpg intermediate2.mpg |\
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
@end example

@subsection Concatenating using raw audio and video

Similarly, the yuv4mpegpipe format, and the raw video and raw audio codecs,
also allow concatenation, and the transcoding step is almost lossless.
When using multiple yuv4mpegpipe streams, the first line needs to be discarded
from all but the first stream. This can be accomplished by piping through
@code{tail} as seen below. Note that when piping through @code{tail} you
must use command grouping, @code{@{ ;@}}, to background properly.

For example, let's say we want to concatenate two FLV files into an
output.flv file:

@example
mkfifo temp1.a
mkfifo temp1.v
mkfifo temp2.a
mkfifo temp2.v
mkfifo all.a
mkfifo all.v
ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
@{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; @} &
cat temp1.a temp2.a > all.a &
cat temp1.v temp2.v > all.v &
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
       -f yuv4mpegpipe -i all.v \
       -y output.flv
rm temp[12].[av] all.[av]
@end example

@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.

Use @option{-dumpgraph -} to find out exactly where the channel layout is
lost.

Most likely, it is through @code{auto-inserted aresample}. Try to understand
why the converting filter was needed at that place.

Just before the output is a likely place, as @option{-f lavfi} currently
only supports packed S16.

Then insert the correct @code{aformat} explicitly in the filtergraph,
specifying the exact format.

@example
aformat=sample_fmts=s16:channel_layouts=stereo
@end example

@section FFmpeg does not adhere to the -maxrate setting, some frames are bigger than maxrate/fps.

Read the MPEG spec about the video buffer verifier.

@section I want CBR, but no matter what I do frame sizes differ.

You do not understand what CBR is; please read the MPEG spec.
Read about the video buffer verifier and constant bitrate.
The one-sentence summary is that there is a buffer and the input rate is
constant; the output can vary as needed.

@section How do I check if a stream is CBR?

To quote the MPEG-2 spec:
"There is no way to tell that a bitstream is constant bitrate without
examining all of the vbv_delay values and making complicated computations."

@section Why does FFmpeg not see the subtitles in my VOB file?

VOB and a few other formats do not have a global header that describes
everything present in the file. Instead, applications are supposed to scan
the file to see what it contains. Since VOB files are frequently large, only
the beginning is scanned. If the subtitles only appear later in the file,
they will not be detected initially.

Some applications, including the @command{ffmpeg} command-line tool, can only
work with streams that were detected during the initial scan; streams that
are detected later are ignored.

The size of the initial scan is controlled by two options: @code{probesize}
(default ~5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.

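As an illustration (the exact values and output container are arbitrary),
raising both limits so that late streams are found could look like:

@example
ffmpeg -probesize 50000000 -analyzeduration 50000000 -i input.vob -map 0 -c copy output.mkv
@end example
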
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?

The @option{-sameq} option meant "same quantizer", and made sense only in a
very limited set of cases. Unfortunately, a lot of people mistook it for
"same quality" and used it in places where it did not make sense: it had
roughly the expected visible effect, but achieved it in a very inefficient
way.

Each encoder has its own set of options to set the quality-vs-size balance.
Use the options for the encoder you are using to set the quality level to a
point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.

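As a rough sketch (the encoders and values are only illustrative; pick what
your encoder documents):

@example
ffmpeg -i input.avi -qscale:v 2 output.mpg
ffmpeg -i input.avi -c:v libx264 -crf 18 output.mkv
@end example
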
@chapter Development

@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?

Yes. Check the @file{doc/examples} directory in the source
repository, also available online at:
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.

Examples are also installed by default, usually in
@code{$PREFIX/share/ffmpeg/examples}.

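A single example can typically be built against an installed FFmpeg with
@code{pkg-config}; for instance, a sketch using one of the bundled example
sources (adjust the file name and library list as needed):

@example
c99 -o demuxing_decoding doc/examples/demuxing_decoding.c \
    $(pkg-config --cflags --libs libavformat libavcodec libavutil)
@end example
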
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
examine the source code for one of the many open source projects that
already incorporate FFmpeg (see @url{projects.html}).

@section Is Microsoft Visual C++ supported?

Yes. Please see the @uref{platform.html, Microsoft Visual C++}
section in the FFmpeg documentation.

Alternatively, you can link MSVC++ code with the libav* libraries as long as
the latter are compiled with a working C compiler; see the
@emph{Microsoft Visual C++ compatibility} section in the FFmpeg
documentation for more information.

@section Can I use FFmpeg or libavcodec under Windows?

Yes, but the Cygwin or MinGW tools @emph{must} be used to compile FFmpeg.
Read the @emph{Windows} section in the FFmpeg documentation to find more
information.

To get help and instructions for building FFmpeg under Windows, check out
the FFmpeg Windows Help Forum at
@url{http://ffmpeg.arrozcru.org/}.

@section Can you add automake, libtool or autoconf support?

No. These tools are too bloated and they complicate the build.

@section Why not rewrite FFmpeg in object-oriented C++?

FFmpeg is already organized in a highly modular manner and does not need to
be rewritten in a formal object language. Further, many of the developers
favor straight C; it works for them. For more arguments on this matter,
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.

@section Why are the ffmpeg programs devoid of debugging symbols?

The build process creates @command{ffmpeg_g}, @command{ffplay_g}, etc. which
contain full debug information. Those binaries are stripped to create
@command{ffmpeg}, @command{ffplay}, etc. If you need the debug information, use
the *_g versions.

@section I do not like the LGPL, can I contribute code under the GPL instead?

Yes, as long as the code is optional and can easily and cleanly be placed
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
or filter would be OK under GPL while a bug fix to LGPL code would not.

@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.

FFmpeg builds static libraries by default. In static libraries, dependencies
are not handled. That has two consequences. First, you must specify the
libraries in dependency order: @code{-lavdevice} must come before
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
Second, external libraries that are used in FFmpeg have to be specified too.

An easy way to get the full list of required libraries in dependency order
is to use @code{pkg-config}.

@example
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
@end example

See @file{doc/examples/Makefile} and @file{doc/examples/pc-uninstalled} for
more details.

@section I want to compile xyz.c alone but my compiler produced many errors.

Common code is in its own files in libav* and is used by the individual
codecs. They will not work without the common parts; you have to compile
the whole libav*. If you wish, disable some parts with configure switches.
You can also try to hack it and remove more, but if you had problems fixing
the compilation failure then you are probably not qualified for this.

@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.

FFmpeg is a pure C project, so to use the libraries within your C++ application
you need to explicitly state that you are using a C library. You can do this by
encompassing your FFmpeg includes using @code{extern "C"}.

See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3}

@section I'm using libavutil from within my C++ application but the compiler complains about 'UINT64_C' was not declared in this scope

FFmpeg is a pure C project using C99 math features; in order to enable C++
to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.

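For example (the source file name is a placeholder):

@example
g++ -D__STDC_CONSTANT_MACROS $(pkg-config --cflags libavutil) -c mysource.cpp
@end example
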
@section I have a file in memory / an API different from *open/*read/ libc how do I use it with libavformat?

You have to create a custom AVIOContext using @code{avio_alloc_context},
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.

@section I get "No compatible shell script interpreter found." in MSys.
|
||||
|
||||
The standard MSys bash (2.04) is broken. You need to install 2.05 or later.
|
||||
|
||||
@section I get "./configure: line <xxx>: pr: command not found" in MSys.
|
||||
|
||||
The standard MSys install doesn't come with pr. You need to get it from the coreutils package.
|
||||
|
||||
@section I tried to pass RTP packets into a decoder, but it doesn't work.
|
||||
|
||||
RTP is a container format like any other, you must first depacketize the
|
||||
codec frames/samples stored in RTP and then feed to the decoder.
|
||||
|
||||
@section Where can I find libav* headers for Pascal/Delphi?
|
||||
|
||||
see @url{http://www.iversenit.dk/dev/ffmpeg-headers/}
|
||||
|
||||
@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm?

see @url{http://www.ffmpeg.org/~michael/}

@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?

Even if peculiar since it is network oriented, RTP is a container like any
other. You have to @emph{demux} RTP before feeding the payload to libavcodec.
In this specific case please look at RFC 4629 to see how it should be done.

@section AVStream.r_frame_rate is wrong, it is much larger than the frame rate.

@code{r_frame_rate} is NOT the average frame rate, it is the smallest frame rate
that can accurately represent all timestamps. So no, it is not
wrong if it is larger than the average!
For example, if you have mixed 25 and 30 fps content, then @code{r_frame_rate}
will be 150 (it is the least common multiple).
If you are looking for the average frame rate, see @code{AVStream.avg_frame_rate}.

@section Why is @code{make fate} not running all tests?

Make sure you have the fate-suite samples, and that the @code{SAMPLES} Make
variable, the @code{FATE_SAMPLES} environment variable, or the @code{--samples}
@command{configure} option is set to the right path.

@section Why is @code{make fate} not finding the samples?

Do you happen to have a @code{~} character in the samples path to indicate a
home directory? The value is used in ways where the shell cannot expand it,
causing FATE to not find files. Just replace @code{~} by the full path.

@bye


doc/fate.texi

\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Automated Testing Environment
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Automated Testing Environment}
|
||||
@end titlepage
|
||||
|
||||
@node Top
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
|
||||
In any case, you can have a look at the publicly viewable FATE results
by visiting this website:
|
||||
|
||||
@url{http://fate.ffmpeg.org/}
|
||||
|
||||
This is especially recommended for all people contributing source
code to FFmpeg, as it shows whether some test on some platform broke
with their recent contribution. This usually happens on platforms that
the developers could not test on.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
@chapter Using FATE from your FFmpeg source directory
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
|
||||
@example
|
||||
make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
@end example
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
@example
|
||||
./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
@end example
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
@example
|
||||
FATE_SAMPLES=fate-suite/ make fate
|
||||
@end example
|
||||
|
||||
@float NOTE
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
To use a custom wrapper to run the test, pass @option{--target-exec} to
|
||||
@command{configure} or set the @var{TARGET_EXEC} Make variable.
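For example, a sketch (the wrapper command is only an illustration) running
the whole suite under @command{valgrind}:

@example
make fate SAMPLES=fate-suite/ TARGET_EXEC="valgrind --error-exitcode=1"
@end example
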
@chapter Submitting the results to the FFmpeg result aggregation server
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
@example
|
||||
tests/fate.sh /path/to/fate_config
|
||||
@end example
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at @file{doc/fate_config.sh.template}.
|
||||
|
||||
@ifhtml
|
||||
The mentioned configuration template is also available here:
|
||||
@verbatiminclude fate_config.sh.template
|
||||
@end ifhtml
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to setup the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
@itemize
|
||||
@item configure.log
|
||||
@item compile.log
|
||||
@item test.log
|
||||
@item report
|
||||
@item version
|
||||
@end itemize
|
||||
|
||||
When you have everything working properly you can create an SSH key pair
|
||||
and send the public key to the FATE server administrator who can be contacted
|
||||
at the email address @email{fate-admin@@ffmpeg.org}.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
|
||||
@table @option
|
||||
@item RSA
|
||||
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
|
||||
@item ECDSA
|
||||
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
|
||||
@end table
|
||||
|
||||
If you have problems connecting to the FATE server, it may help to try out
|
||||
the @command{ssh} command with one or more @option{-v} options. You should
|
||||
get detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
@chapter FATE makefile targets and variables
|
||||
|
||||
@section Makefile targets
|
||||
|
||||
@table @option
|
||||
@item fate-rsync
|
||||
Download/synchronize sample files to the configured samples directory.
|
||||
|
||||
@item fate-list
|
||||
List all fate/regression test targets. Individual tests or test groups can
be run on their own; see the example after this table.
|
||||
|
||||
@item fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
@end table
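For instance, a sketch (the group name @code{fate-h264} is just an example)
running a single group of tests:

@example
make fate-h264 SAMPLES=fate-suite/
@end example
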
@section Makefile variables
|
||||
|
||||
@table @option
|
||||
@item V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@itemize
|
||||
@item 0: show just the test arguments
|
||||
@item 1: show just the command used in the test
|
||||
@item 2: show everything
|
||||
@end itemize
|
||||
|
||||
@item SAMPLES
|
||||
Specify or override the path to the FATE samples at make time; it has a
meaning only while running the regression tests.
|
||||
|
||||
@item THREADS
|
||||
Specify how many threads to use while running regression tests; it is
quite useful to detect thread-related regressions.
|
||||
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy to test, either @var{slice} or @var{frame};
the default is @var{slice+frame}.
|
||||
|
||||
@item CPUFLAGS
|
||||
Specify CPU flags.
|
||||
|
||||
@item TARGET_EXEC
|
||||
Specify or override the wrapper used to run the tests.
|
||||
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
|
||||
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
|
||||
through @command{ssh}.
|
||||
|
||||
@item GEN
|
||||
Set to @var{1} to generate the missing or mismatched references.
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
@end example
|
||||
doc/fate_config.sh.template

|
||||
slot= # some unique identifier
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
samples= # path to samples directory
|
||||
workdir= # directory in which to do all the work
|
||||
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
|
||||
comment= # optional description
|
||||
build_only= # set to "yes" for a compile-only instance that skips tests
|
||||
|
||||
# the following are optional and map to configure options
|
||||
arch=
|
||||
cpu=
|
||||
cross_prefix=
|
||||
as=
|
||||
cc=
|
||||
ld=
|
||||
target_os=
|
||||
sysroot=
|
||||
target_exec=
|
||||
target_path=
|
||||
target_samples=
|
||||
extra_cflags=
|
||||
extra_ldflags=
|
||||
extra_libs=
|
||||
extra_conf= # extra configure options not covered above
|
||||
|
||||
#make= # name of GNU make if not 'make'
|
||||
makeopts= # extra options passed to 'make'
|
||||
#tar= # command to create a tar archive from its arguments on stdout,
|
||||
# defaults to 'tar c'
|
||||
doc/ffmpeg-bitstream-filters.texi

|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Bitstream Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Bitstream Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
|
||||
A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include bitstream_filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-bitstream-filters
|
||||
@settitle FFmpeg bitstream filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffmpeg-codecs.texi

|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Codecs Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Codecs Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the codecs (decoders and encoders) provided by
|
||||
the libavcodec library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include codecs.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-codecs
|
||||
@settitle FFmpeg codecs
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffmpeg-devices.texi

|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Devices Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Devices Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output devices provided by the
|
||||
libavdevice library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include devices.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-devices
|
||||
@settitle FFmpeg devices
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffmpeg-doc.texi

\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Documentation
|
||||
@titlepage
|
||||
@sp 7
|
||||
@center @titlefont{FFmpeg Documentation}
|
||||
@sp 3
|
||||
@end titlepage
|
||||
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
FFmpeg is a very fast video and audio converter. It can also grab from
|
||||
a live audio/video source.
|
||||
|
||||
The command line interface is designed to be intuitive, in the sense
|
||||
that FFmpeg tries to figure out all parameters that can possibly be
|
||||
derived automatically. You usually only have to specify the target
|
||||
bitrate you want.
|
||||
|
||||
FFmpeg can also convert from any sample rate to any other, and resize
|
||||
video on the fly with a high quality polyphase filter.
|
||||
|
||||
@chapter Quick Start
|
||||
|
||||
@c man begin EXAMPLES
|
||||
@section Video and Audio grabbing
|
||||
|
||||
FFmpeg can grab video and audio from devices given that you specify the input
|
||||
format and device.
|
||||
|
||||
@example
|
||||
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
|
||||
@end example
|
||||
|
||||
Note that you must activate the right video source and channel before
|
||||
launching FFmpeg with any TV viewer such as xawtv
|
||||
(@url{http://linux.bytesex.org/xawtv/}) by Gerd Knorr. You also
|
||||
have to set the audio recording levels correctly with a
|
||||
standard mixer.
|
||||
|
||||
@section X11 grabbing
|
||||
|
||||
FFmpeg can grab the X11 display.
|
||||
|
||||
@example
|
||||
ffmpeg -f x11grab -s cif -i :0.0 /tmp/out.mpg
|
||||
@end example
|
||||
|
||||
0.0 is display.screen number of your X11 server, same as
|
||||
the DISPLAY environment variable.
|
||||
|
||||
@example
|
||||
ffmpeg -f x11grab -s cif -i :0.0+10,20 /tmp/out.mpg
|
||||
@end example
|
||||
|
||||
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
|
||||
variable. 10 is the x-offset and 20 the y-offset for the grabbing.
|
||||
|
||||
@section Video and Audio file format conversion
|
||||
|
||||
* FFmpeg can use any supported file format and protocol as input:
|
||||
|
||||
Examples:
|
||||
|
||||
* You can use YUV files as input:
|
||||
|
||||
@example
|
||||
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
|
||||
@end example
|
||||
|
||||
It will use the files:
|
||||
@example
|
||||
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
|
||||
/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
|
||||
@end example
|
||||
|
||||
The Y files use twice the resolution of the U and V files. They are
|
||||
raw files, without a header. They can be generated by all decent video
|
||||
decoders. You must specify the size of the image with the @option{-s} option
|
||||
if FFmpeg cannot guess it.
|
||||
|
||||
* You can input from a raw YUV420P file:
|
||||
|
||||
@example
|
||||
ffmpeg -i /tmp/test.yuv /tmp/out.avi
|
||||
@end example
|
||||
|
||||
test.yuv is a file containing raw YUV planar data. Each frame is composed
|
||||
of the Y plane followed by the U and V planes at half vertical and
|
||||
horizontal resolution.
|
||||
|
||||
* You can output to a raw YUV420P file:
|
||||
|
||||
@example
|
||||
ffmpeg -i mydivx.avi hugefile.yuv
|
||||
@end example
|
||||
|
||||
* You can set several input files and output files:
|
||||
|
||||
@example
|
||||
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
Converts the audio file a.wav and the raw YUV video file a.yuv
|
||||
to MPEG file a.mpg.
|
||||
|
||||
* You can also do audio and video conversions at the same time:
|
||||
|
||||
@example
|
||||
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
|
||||
@end example
|
||||
|
||||
Converts a.wav to MPEG audio at 22050 Hz sample rate.
|
||||
|
||||
* You can encode to several formats at the same time and define a
|
||||
mapping from input stream to output streams:
|
||||
|
||||
@example
|
||||
ffmpeg -i /tmp/a.wav -ab 64k /tmp/a.mp2 -ab 128k /tmp/b.mp2 -map 0:0 -map 0:0
|
||||
@end example
|
||||
|
||||
Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. '-map
|
||||
file:index' specifies which input stream is used for each output
|
||||
stream, in the order of the definition of output streams.
|
||||
|
||||
* You can transcode decrypted VOBs:
|
||||
|
||||
@example
|
||||
ffmpeg -i snatch_1.vob -f avi -vcodec mpeg4 -b 800k -g 300 -bf 2 -acodec libmp3lame -ab 128k snatch.avi
|
||||
@end example
|
||||
|
||||
This is a typical DVD ripping example; the input is a VOB file, the
|
||||
output an AVI file with MPEG-4 video and MP3 audio. Note that in this
|
||||
command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
|
||||
GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
|
||||
input video. Furthermore, the audio stream is MP3-encoded so you need
|
||||
to enable LAME support by passing @code{--enable-libmp3lame} to configure.
|
||||
The mapping is particularly useful for DVD transcoding
|
||||
to get the desired audio language.
|
||||
|
||||
NOTE: To see the supported input formats, use @code{ffmpeg -formats}.
|
||||
|
||||
* You can extract images from a video, or create a video from many images:
|
||||
|
||||
For extracting images from a video:
|
||||
@example
|
||||
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
|
||||
@end example
|
||||
|
||||
This will extract one video frame per second from the video and will
|
||||
output them in files named @file{foo-001.jpeg}, @file{foo-002.jpeg},
|
||||
etc. Images will be rescaled to fit the new WxH values.
|
||||
|
||||
If you want to extract just a limited number of frames, you can use the
|
||||
above command in combination with the -vframes or -t option, or in
|
||||
combination with -ss to start extracting from a certain point in time.
|
||||
|
||||
For creating a video from many images:
|
||||
@example
|
||||
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
|
||||
@end example
|
||||
|
||||
The syntax @code{foo-%03d.jpeg} specifies to use a decimal number
|
||||
composed of three digits padded with zeroes to express the sequence
|
||||
number. It is the same syntax supported by the C printf function, but
|
||||
only formats accepting a normal integer are suitable.
|
||||
|
||||
* You can put many streams of the same type in the output:
|
||||
|
||||
@example
|
||||
ffmpeg -i test1.avi -i test2.avi -vcodec copy -acodec copy -vcodec copy -acodec copy test12.avi -newvideo -newaudio
|
||||
@end example
|
||||
|
||||
In addition to the first video and audio streams, the resulting
|
||||
output file @file{test12.avi} will contain the second video
|
||||
and the second audio stream found in the input streams list.
|
||||
|
||||
The @code{-newvideo}, @code{-newaudio} and @code{-newsubtitle}
|
||||
options have to be specified immediately after the name of the output
|
||||
file to which you want to add them.
|
||||
@c man end
|
||||
|
||||
@chapter Invocation
|
||||
|
||||
@section Syntax
|
||||
|
||||
The generic syntax is:
|
||||
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffmpeg [[infile options][@option{-i} @var{infile}]]... @{[outfile options] @var{outfile}@}...
|
||||
@c man end
|
||||
@end example
|
||||
@c man begin DESCRIPTION
|
||||
As a general rule, options are applied to the next specified
|
||||
file. Therefore, order is important, and you can have the same
|
||||
option on the command line multiple times. Each occurrence is
|
||||
then applied to the next input or output file.
|
||||
|
||||
* To set the video bitrate of the output file to 64kbit/s:
|
||||
@example
|
||||
ffmpeg -i input.avi -b 64k output.avi
|
||||
@end example
|
||||
|
||||
* To force the frame rate of the output file to 24 fps:
|
||||
@example
|
||||
ffmpeg -i input.avi -r 24 output.avi
|
||||
@end example
|
||||
|
||||
* To force the frame rate of the input file (valid for raw formats only)
|
||||
to 1 fps and the frame rate of the output file to 24 fps:
|
||||
@example
|
||||
ffmpeg -r 1 -i input.m2v -r 24 output.avi
|
||||
@end example
|
||||
|
||||
The format option may be needed for raw input files.
|
||||
|
||||
By default, FFmpeg tries to convert as losslessly as possible: It
|
||||
uses the same audio and video parameters for the outputs as the one
|
||||
specified for the inputs.
|
||||
@c man end
|
||||
|
||||
@c man begin OPTIONS
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
@item -L
|
||||
Show license.
|
||||
|
||||
@item -h
|
||||
Show help.
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
|
||||
@item -formats
|
||||
Show available formats, codecs, bitstream filters, protocols, and frame size and frame rate abbreviations.
|
||||
|
||||
The fields preceding the format and codec names have the following meanings:
|
||||
@table @samp
|
||||
@item D
|
||||
Decoding available
|
||||
@item E
|
||||
Encoding available
|
||||
@item V/A/S
|
||||
Video/audio/subtitle codec
|
||||
@item S
|
||||
Codec supports slices
|
||||
@item D
|
||||
Codec supports direct rendering
|
||||
@item T
|
||||
Codec can handle input truncated at random locations instead of only at frame boundaries
|
||||
@end table
|
||||
|
||||
@item -f @var{fmt}
|
||||
Force format.
|
||||
|
||||
@item -i @var{filename}
|
||||
input file name
|
||||
|
||||
@item -y
|
||||
Overwrite output files.
|
||||
|
||||
@item -t @var{duration}
|
||||
Restrict the transcoded/captured video sequence
|
||||
to the duration specified in seconds.
|
||||
@code{hh:mm:ss[.xxx]} syntax is also supported.
|
||||
|
||||
@item -fs @var{limit_size}
|
||||
Set the file size limit.
|
||||
|
||||
@item -ss @var{position}
|
||||
Seek to given time position in seconds.
|
||||
@code{hh:mm:ss[.xxx]} syntax is also supported.
|
||||
|
||||
@item -itsoffset @var{offset}
|
||||
Set the input time offset in seconds.
|
||||
@code{[-]hh:mm:ss[.xxx]} syntax is also supported.
|
||||
This option affects all the input files that follow it.
|
||||
The offset is added to the timestamps of the input files.
|
||||
Specifying a positive offset means that the corresponding
|
||||
streams are delayed by 'offset' seconds.
|
||||
|
||||
@item -timestamp @var{time}
|
||||
Set the timestamp.
|
||||
|
||||
@item -metadata @var{key}=@var{value}
|
||||
Set a metadata key/value pair.
|
||||
|
||||
For example, for setting the title in the output file:
|
||||
@example
|
||||
ffmpeg -i in.avi -metadata title="my title" out.flv
|
||||
@end example
|
||||
|
||||
@item -v @var{number}
|
||||
Set the logging verbosity level.
|
||||
|
||||
@item -target @var{type}
|
||||
Specify target file type ("vcd", "svcd", "dvd", "dv", "dv50", "pal-vcd",
|
||||
"ntsc-svcd", ... ). All the format options (bitrate, codecs,
|
||||
buffer sizes) are then set automatically. You can just type:
|
||||
|
||||
@example
|
||||
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
|
||||
@end example
|
||||
|
||||
Nevertheless you can specify additional options as long as you know
|
||||
they do not conflict with the standard, as in:
|
||||
|
||||
@example
|
||||
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
|
||||
@end example
|
||||
|
||||
@item -dframes @var{number}
|
||||
Set the number of data frames to record.
|
||||
|
||||
@item -scodec @var{codec}
|
||||
Force subtitle codec ('copy' to copy stream).
|
||||
|
||||
@item -newsubtitle
|
||||
Add a new subtitle stream to the current output stream.
|
||||
|
||||
@item -slang @var{code}
|
||||
Set the ISO 639 language code (3 letters) of the current subtitle stream.
|
||||
|
||||
@end table
|
||||
|
||||
@section Video Options
|
||||
|
||||
@table @option
|
||||
@item -b @var{bitrate}
|
||||
Set the video bitrate in bit/s (default = 200 kb/s).
|
||||
@item -vframes @var{number}
|
||||
Set the number of video frames to record.
|
||||
@item -r @var{fps}
|
||||
Set frame rate (Hz value, fraction or abbreviation), (default = 25).
|
||||
@item -s @var{size}
|
||||
Set frame size. The format is @samp{wxh} (ffserver default = 160x128, ffmpeg default = same as source).
|
||||
The following abbreviations are recognized:
|
||||
@table @samp
|
||||
@item sqcif
|
||||
128x96
|
||||
@item qcif
|
||||
176x144
|
||||
@item cif
|
||||
352x288
|
||||
@item 4cif
|
||||
704x576
|
||||
@item qqvga
|
||||
160x120
|
||||
@item qvga
|
||||
320x240
|
||||
@item vga
|
||||
640x480
|
||||
@item svga
|
||||
800x600
|
||||
@item xga
|
||||
1024x768
|
||||
@item uxga
|
||||
1600x1200
|
||||
@item qxga
|
||||
2048x1536
|
||||
@item sxga
|
||||
1280x1024
|
||||
@item qsxga
|
||||
2560x2048
|
||||
@item hsxga
|
||||
5120x4096
|
||||
@item wvga
|
||||
852x480
|
||||
@item wxga
|
||||
1366x768
|
||||
@item wsxga
|
||||
1600x1024
|
||||
@item wuxga
|
||||
1920x1200
|
||||
@item woxga
|
||||
2560x1600
|
||||
@item wqsxga
|
||||
3200x2048
|
||||
@item wquxga
|
||||
3840x2400
|
||||
@item whsxga
|
||||
6400x4096
|
||||
@item whuxga
|
||||
7680x4800
|
||||
@item cga
|
||||
320x200
|
||||
@item ega
|
||||
640x350
|
||||
@item hd480
|
||||
852x480
|
||||
@item hd720
|
||||
1280x720
|
||||
@item hd1080
|
||||
1920x1080
|
||||
@end table
|
||||
|
||||
@item -aspect @var{aspect}
|
||||
Set aspect ratio (4:3, 16:9 or 1.3333, 1.7777).
|
||||
@item -croptop @var{size}
|
||||
Set top crop band size (in pixels).
|
||||
@item -cropbottom @var{size}
|
||||
Set bottom crop band size (in pixels).
|
||||
@item -cropleft @var{size}
|
||||
Set left crop band size (in pixels).
|
||||
@item -cropright @var{size}
|
||||
Set right crop band size (in pixels).
|
||||
@item -padtop @var{size}
|
||||
Set top pad band size (in pixels).
|
||||
@item -padbottom @var{size}
|
||||
Set bottom pad band size (in pixels).
|
||||
@item -padleft @var{size}
|
||||
Set left pad band size (in pixels).
|
||||
@item -padright @var{size}
|
||||
Set right pad band size (in pixels).
|
||||
@item -padcolor @var{hex_color}
|
||||
Set color of padded bands. The value for padcolor is expressed
|
||||
as a six digit hexadecimal number where the first two digits
|
||||
represent red, the middle two digits green and last two digits
|
||||
blue (default = 000000 (black)).
|
||||
@item -vn
|
||||
Disable video recording.
|
||||
@item -bt @var{tolerance}
|
||||
Set video bitrate tolerance (in bits, default 4000k).
|
||||
Has a minimum value of: (target_bitrate/target_framerate).
|
||||
In 1-pass mode, bitrate tolerance specifies how far ratecontrol is
|
||||
willing to deviate from the target average bitrate value. This is
|
||||
not related to min/max bitrate. Lowering tolerance too much has
|
||||
an adverse effect on quality.
|
||||
@item -maxrate @var{bitrate}
|
||||
Set max video bitrate (in bit/s).
|
||||
Requires -bufsize to be set.
|
||||
@item -minrate @var{bitrate}
|
||||
Set min video bitrate (in bit/s).
|
||||
Most useful in setting up a CBR encode:
|
||||
@example
|
||||
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
|
||||
@end example
|
||||
It is of little use otherwise.
|
||||
@item -bufsize @var{size}
|
||||
Set video buffer verifier buffer size (in bits).
|
||||
@item -vcodec @var{codec}
|
||||
Force video codec to @var{codec}. Use the @code{copy} special value to
|
||||
tell that the raw codec data must be copied as is.
|
||||
@item -sameq
|
||||
Use same video quality as source (implies VBR).
|
||||
|
||||
@item -pass @var{n}
|
||||
Select the pass number (1 or 2). It is used to do two-pass
|
||||
video encoding. The statistics of the video are recorded in the first
|
||||
pass into a log file (see also the option -passlogfile),
|
||||
and in the second pass that log file is used to generate the video
|
||||
at the exact requested bitrate.
|
||||
On pass 1, you may just deactivate audio and set output to null,
|
||||
examples for Windows and Unix:
|
||||
@example
|
||||
ffmpeg -i foo.mov -vcodec libxvid -pass 1 -an -f rawvideo -y NUL
|
||||
ffmpeg -i foo.mov -vcodec libxvid -pass 1 -an -f rawvideo -y /dev/null
|
||||
@end example
|
||||
|
||||
@item -passlogfile @var{prefix}
|
||||
Set two-pass log file name prefix to @var{prefix}, the default file name
|
||||
prefix is ``ffmpeg2pass''. The complete file name will be
|
||||
@file{PREFIX-N.log}, where N is a number specific to the output
|
||||
stream.
|
||||
|
||||
@item -newvideo
|
||||
Add a new video stream to the current output stream.
|
||||
|
||||
@end table
|
||||
|
||||
@section Advanced Video Options
|
||||
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format. Use 'list' as parameter to show all the supported
|
||||
pixel formats.
|
||||
@item -sws_flags @var{flags}
|
||||
Set SwScaler flags (only available when compiled with swscale support).
|
||||
@item -g @var{gop_size}
|
||||
Set the group of pictures size.
|
||||
@item -intra
|
||||
Use only intra frames.
|
||||
@item -vdt @var{n}
|
||||
Discard threshold.
|
||||
@item -qscale @var{q}
|
||||
Use fixed video quantizer scale (VBR).
|
||||
@item -qmin @var{q}
|
||||
minimum video quantizer scale (VBR)
|
||||
@item -qmax @var{q}
|
||||
maximum video quantizer scale (VBR)
|
||||
@item -qdiff @var{q}
|
||||
maximum difference between the quantizer scales (VBR)
|
||||
@item -qblur @var{blur}
|
||||
video quantizer scale blur (VBR) (range 0.0 - 1.0)
|
||||
@item -qcomp @var{compression}
|
||||
video quantizer scale compression (VBR) (default 0.5).
|
||||
Constant of ratecontrol equation. Recommended range for default rc_eq: 0.0-1.0
|
||||
|
||||
@item -lmin @var{lambda}
|
||||
minimum video lagrange factor (VBR)
|
||||
@item -lmax @var{lambda}
|
||||
max video lagrange factor (VBR)
|
||||
@item -mblmin @var{lambda}
|
||||
minimum macroblock quantizer scale (VBR)
|
||||
@item -mblmax @var{lambda}
|
||||
maximum macroblock quantizer scale (VBR)
|
||||
|
||||
These four options (lmin, lmax, mblmin, mblmax) use 'lambda' units,
|
||||
but you may use the QP2LAMBDA constant to easily convert from 'q' units:
|
||||
@example
|
||||
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
|
||||
@end example
|
||||
|
||||
@item -rc_init_cplx @var{complexity}
|
||||
initial complexity for single pass encoding
|
||||
@item -b_qfactor @var{factor}
|
||||
qp factor between P- and B-frames
|
||||
@item -i_qfactor @var{factor}
|
||||
qp factor between P- and I-frames
|
||||
@item -b_qoffset @var{offset}
|
||||
qp offset between P- and B-frames
|
||||
@item -i_qoffset @var{offset}
|
||||
qp offset between P- and I-frames
|
||||
@item -rc_eq @var{equation}
|
||||
Set rate control equation (@pxref{FFmpeg formula
|
||||
evaluator}) (default = @code{tex^qComp}).
|
||||
@item -rc_override @var{override}
|
||||
rate control override for specific intervals
|
||||
@item -me_method @var{method}
|
||||
Set motion estimation method to @var{method}.
|
||||
Available methods are (from lowest to best quality):
|
||||
@table @samp
|
||||
@item zero
|
||||
Try just the (0, 0) vector.
|
||||
@item phods
|
||||
@item log
|
||||
@item x1
|
||||
@item hex
|
||||
@item umh
|
||||
@item epzs
|
||||
(default method)
|
||||
@item full
|
||||
exhaustive search (slow and marginally better than epzs)
|
||||
@end table
|
||||
|
||||
@item -dct_algo @var{algo}
|
||||
Set DCT algorithm to @var{algo}. Available values are:
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_DCT_AUTO (default)
|
||||
@item 1
|
||||
FF_DCT_FASTINT
|
||||
@item 2
|
||||
FF_DCT_INT
|
||||
@item 3
|
||||
FF_DCT_MMX
|
||||
@item 4
|
||||
FF_DCT_MLIB
|
||||
@item 5
|
||||
FF_DCT_ALTIVEC
|
||||
@end table
|
||||
|
||||
@item -idct_algo @var{algo}
|
||||
Set IDCT algorithm to @var{algo}. Available values are:
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_IDCT_AUTO (default)
|
||||
@item 1
|
||||
FF_IDCT_INT
|
||||
@item 2
|
||||
FF_IDCT_SIMPLE
|
||||
@item 3
|
||||
FF_IDCT_SIMPLEMMX
|
||||
@item 4
|
||||
FF_IDCT_LIBMPEG2MMX
|
||||
@item 5
|
||||
FF_IDCT_PS2
|
||||
@item 6
|
||||
FF_IDCT_MLIB
|
||||
@item 7
|
||||
FF_IDCT_ARM
|
||||
@item 8
|
||||
FF_IDCT_ALTIVEC
|
||||
@item 9
|
||||
FF_IDCT_SH4
|
||||
@item 10
|
||||
FF_IDCT_SIMPLEARM
|
||||
@end table
|
||||
|
||||
@item -er @var{n}
|
||||
Set error resilience to @var{n}.
|
||||
@table @samp
|
||||
@item 1
|
||||
FF_ER_CAREFUL (default)
|
||||
@item 2
|
||||
FF_ER_COMPLIANT
|
||||
@item 3
|
||||
FF_ER_AGGRESSIVE
|
||||
@item 4
|
||||
FF_ER_VERY_AGGRESSIVE
|
||||
@end table
|
||||
|
||||
@item -ec @var{bit_mask}
|
||||
Set error concealment to @var{bit_mask}. @var{bit_mask} is a bit mask of
|
||||
the following values:
|
||||
@table @samp
|
||||
@item 1
|
||||
FF_EC_GUESS_MVS (default = enabled)
|
||||
@item 2
|
||||
FF_EC_DEBLOCK (default = enabled)
|
||||
@end table
|
||||
|
||||
@item -bf @var{frames}
|
||||
Use 'frames' B-frames (supported for MPEG-1, MPEG-2 and MPEG-4).
|
||||
@item -mbd @var{mode}
|
||||
macroblock decision
|
||||
@table @samp
|
||||
@item 0
|
||||
FF_MB_DECISION_SIMPLE: Use mb_cmp (cannot change it yet in FFmpeg).
|
||||
@item 1
|
||||
FF_MB_DECISION_BITS: Choose the one which needs the fewest bits.
|
||||
@item 2
|
||||
FF_MB_DECISION_RD: rate distortion
|
||||
@end table
|
||||
|
||||
@item -4mv
|
||||
Use four motion vectors per macroblock (MPEG-4 only).
|
||||
@item -part
|
||||
Use data partitioning (MPEG-4 only).
|
||||
@item -bug @var{param}
|
||||
Work around encoder bugs that are not auto-detected.
|
||||
@item -strict @var{strictness}
|
||||
How strictly to follow the standards.
|
||||
@item -aic
|
||||
Enable Advanced intra coding (h263+).
|
||||
@item -umv
|
||||
Enable Unlimited Motion Vector (h263+)
|
||||
|
||||
@item -deinterlace
|
||||
Deinterlace pictures.
|
||||
@item -ilme
|
||||
Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
|
||||
Use this option if your input file is interlaced and you want
|
||||
to keep the interlaced format for minimum losses.
|
||||
The alternative is to deinterlace the input stream with
|
||||
@option{-deinterlace}, but deinterlacing introduces losses.
|
||||
@item -psnr
|
||||
Calculate PSNR of compressed frames.
|
||||
@item -vstats
|
||||
Dump video coding statistics to @file{vstats_HHMMSS.log}.
|
||||
@item -vstats_file @var{file}
|
||||
Dump video coding statistics to @var{file}.
|
||||
@item -vhook @var{module}
|
||||
Insert video processing @var{module}. @var{module} contains the module
|
||||
name and its parameters separated by spaces.
|
||||
@item -top @var{n}
|
||||
top=1/bottom=0/auto=-1 field first
|
||||
@item -dc @var{precision}
|
||||
Intra_dc_precision.
|
||||
@item -vtag @var{fourcc/tag}
|
||||
Force video tag/fourcc.
|
||||
@item -qphist
|
||||
Show QP histogram.
|
||||
@item -vbsf @var{bitstream_filter}
|
||||
Bitstream filters available are "dump_extra", "remove_extra", "noise", "h264_mp4toannexb", "imxdump", "mjpegadump".
|
||||
@example
|
||||
ffmpeg -i h264.mp4 -vcodec copy -vbsf h264_mp4toannexb -an out.h264
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section Audio Options
|
||||
|
||||
@table @option
|
||||
@item -aframes @var{number}
|
||||
Set the number of audio frames to record.
|
||||
@item -ar @var{freq}
|
||||
Set the audio sampling frequency (default = 44100 Hz).
|
||||
@item -ab @var{bitrate}
|
||||
Set the audio bitrate in bit/s (default = 64k).
|
||||
@item -aq @var{q}
|
||||
Set the audio quality (codec-specific, VBR).
|
||||
@item -ac @var{channels}
|
||||
Set the number of audio channels (default = 1).
|
||||
@item -an
|
||||
Disable audio recording.
|
||||
@item -acodec @var{codec}
|
||||
Force audio codec to @var{codec}. Use the @code{copy} special value to
|
||||
specify that the raw codec data must be copied as is.
|
||||
@item -newaudio
|
||||
Add a new audio track to the output file. If you want to specify parameters,
|
||||
do so before @code{-newaudio} (@code{-acodec}, @code{-ab}, etc..).
|
||||
|
||||
Mapping will be done automatically if the number of output streams is equal to
the number of input streams; otherwise it will pick the first one that matches. You
|
||||
can override the mapping using @code{-map} as usual.
|
||||
|
||||
Example:
|
||||
@example
|
||||
ffmpeg -i file.mpg -vcodec copy -acodec ac3 -ab 384k test.mpg -acodec mp2 -ab 192k -newaudio
|
||||
@end example
|
||||
@item -alang @var{code}
|
||||
Set the ISO 639 language code (3 letters) of the current audio stream.
|
||||
@end table
|
||||
|
||||
@section Advanced Audio options:
|
||||
|
||||
@table @option
|
||||
@item -atag @var{fourcc/tag}
|
||||
Force audio tag/fourcc.
|
||||
@item -absf @var{bitstream_filter}
|
||||
Bitstream filters available are "dump_extra", "remove_extra", "noise", "mp3comp", "mp3decomp".
|
||||
@end table
|
||||
|
||||
@section Subtitle options:
|
||||
|
||||
@table @option
|
||||
@item -scodec @var{codec}
|
||||
Force subtitle codec ('copy' to copy stream).
|
||||
@item -newsubtitle
|
||||
Add a new subtitle stream to the current output stream.
|
||||
@item -slang @var{code}
|
||||
Set the ISO 639 language code (3 letters) of the current subtitle stream.
|
||||
@item -sn
|
||||
Disable subtitle recording.
|
||||
@item -sbsf @var{bitstream_filter}
|
||||
Bitstream filters available are "mov2textsub", "text2movsub".
|
||||
@example
|
||||
ffmpeg -i file.mov -an -vn -sbsf mov2textsub -scodec copy -f rawvideo sub.txt
|
||||
@end example
|
||||
@end table
|
||||
|
||||
@section Audio/Video grab options
|
||||
|
||||
@table @option
|
||||
@item -vc @var{channel}
|
||||
Set video grab channel (DV1394 only).
|
||||
@item -tvstd @var{standard}
|
||||
Set television standard (NTSC, PAL (SECAM)).
|
||||
@item -isync
|
||||
Synchronize read on input.
|
||||
@end table
|
||||
|
||||
@section Advanced options
|
||||
|
||||
@table @option
|
||||
@item -map @var{input_stream_id}[:@var{sync_stream_id}]
|
||||
Set stream mapping from input streams to output streams.
|
||||
Just enumerate the input streams in the order you want them in the output.
|
||||
@var{sync_stream_id} if specified sets the input stream to sync
|
||||
against.
|
||||
@item -map_meta_data @var{outfile}:@var{infile}
|
||||
Set meta data information of @var{outfile} from @var{infile}.
|
||||
@item -debug
|
||||
Print specific debug info.
|
||||
@item -benchmark
|
||||
Add timings for benchmarking.
|
||||
@item -dump
|
||||
Dump each input packet.
|
||||
@item -hex
|
||||
When dumping packets, also dump the payload.
|
||||
@item -bitexact
|
||||
Only use bit exact algorithms (for codec testing).
|
||||
@item -ps @var{size}
|
||||
Set packet size in bits.
|
||||
@item -re
|
||||
Read input at native frame rate. Mainly used to simulate a grab device.
|
||||
@item -loop_input
|
||||
Loop over the input stream. Currently it works only for image
|
||||
streams. This option is used for automatic FFserver testing.
|
||||
@item -loop_output @var{number_of_times}
|
||||
Repeatedly loop output for formats that support looping such as animated GIF
|
||||
(0 will loop the output infinitely).
|
||||
@item -threads @var{count}
|
||||
Thread count.
|
||||
@item -vsync @var{parameter}
|
||||
Video sync method. Video will be stretched/squeezed to match the timestamps;
this is done by duplicating and dropping frames. With -map you can select from
|
||||
which stream the timestamps should be taken. You can leave either video or
|
||||
audio unchanged and sync the remaining stream(s) to the unchanged one.
|
||||
@item -async @var{samples_per_second}
|
||||
Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps;
the parameter is the maximum samples per second by which the audio is changed.
|
||||
-async 1 is a special case where only the start of the audio stream is corrected
|
||||
without any later correction.
|
||||
@item -copyts
|
||||
Copy timestamps from input to output.
|
||||
@item -shortest
|
||||
Finish encoding when the shortest input stream ends.
|
||||
@item -dts_delta_threshold
|
||||
Timestamp discontinuity delta threshold.
|
||||
@item -muxdelay @var{seconds}
|
||||
Set the maximum demux-decode delay.
|
||||
@item -muxpreload @var{seconds}
|
||||
Set the initial demux-decode delay.
|
||||
@end table
|
||||
|
||||
@section Preset files
|
||||
|
||||
A preset file contains a sequence of @var{option}=@var{value} pairs,
|
||||
one for each line, specifying a sequence of options which would be
|
||||
awkward to specify on the command line. Lines starting with the hash
|
||||
('#') character are ignored and are used to provide comments. Check
|
||||
the @file{ffpresets} directory in the FFmpeg source tree for examples.
|
||||
|
||||
Preset files are specified with the @code{vpre}, @code{apre} and
|
||||
@code{spre} options. The options specified in a preset file are
|
||||
applied to the currently selected codec of the same type as the preset
|
||||
option.
|
||||
|
||||
The argument passed to the preset options identifies the preset file
|
||||
to use according to the following rules.
|
||||
|
||||
First ffmpeg searches for a file named @var{arg}.ffpreset in the
|
||||
directories @file{$HOME/.ffmpeg}, and in the datadir defined at
|
||||
configuration time (usually @file{PREFIX/share/ffmpeg}) in that
|
||||
order. For example, if the argument is @code{libx264-max}, it will
|
||||
search for the file @file{libx264-max.ffpreset}.
|
||||
|
||||
If no such file is found, then ffmpeg will search for a file named
|
||||
@var{codec_name}-@var{arg}.ffpreset in the above-mentioned
|
||||
directories, where @var{codec_name} is the name of the codec to which
|
||||
the preset file options will be applied. For example, if you select
|
||||
the video codec with @code{-vcodec libx264} and use @code{-vpre max},
|
||||
then it will search for the file @file{libx264-max.ffpreset}.
|
||||
|
||||
Finally, if the above rules failed and the argument specifies an
|
||||
absolute pathname, ffmpeg will search for that filename. This way you
|
||||
can specify the absolute and complete filename of the preset file, for
|
||||
example @file{./ffpresets/libx264-max.ffpreset}.
|
||||
|
||||
@node FFmpeg formula evaluator
|
||||
@section FFmpeg formula evaluator
|
||||
|
||||
When evaluating a rate control string, FFmpeg uses an internal formula
|
||||
evaluator.
|
||||
|
||||
The following binary operators are available: @code{+}, @code{-},
|
||||
@code{*}, @code{/}, @code{^}.
|
||||
|
||||
The following unary operators are available: @code{+}, @code{-},
|
||||
@code{(...)}.
|
||||
|
||||
The following statements are available: @code{ld}, @code{st},
|
||||
@code{while}.
|
||||
|
||||
The following functions are available:
|
||||
@table @var
|
||||
@item sinh(x)
|
||||
@item cosh(x)
|
||||
@item tanh(x)
|
||||
@item sin(x)
|
||||
@item cos(x)
|
||||
@item tan(x)
|
||||
@item atan(x)
|
||||
@item asin(x)
|
||||
@item acos(x)
|
||||
@item exp(x)
|
||||
@item log(x)
|
||||
@item abs(x)
|
||||
@item squish(x)
|
||||
@item gauss(x)
|
||||
@item mod(x, y)
|
||||
@item max(x, y)
|
||||
@item min(x, y)
|
||||
@item eq(x, y)
|
||||
@item gte(x, y)
|
||||
@item gt(x, y)
|
||||
@item lte(x, y)
|
||||
@item lt(x, y)
|
||||
@item bits2qp(bits)
|
||||
@item qp2bits(qp)
|
||||
@end table
|
||||
|
||||
The following constants are available:
|
||||
@table @var
|
||||
@item PI
|
||||
@item E
|
||||
@item iTex
|
||||
@item pTex
|
||||
@item tex
|
||||
@item mv
|
||||
@item fCode
|
||||
@item iCount
|
||||
@item mcVar
|
||||
@item var
|
||||
@item isI
|
||||
@item isP
|
||||
@item isB
|
||||
@item avgQP
|
||||
@item qComp
|
||||
@item avgIITex
|
||||
@item avgPITex
|
||||
@item avgPPTex
|
||||
@item avgBPTex
|
||||
@item avgTex
|
||||
@end table
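
For instance, the rate control equation passed to the @option{-rc_eq} option
is evaluated with this evaluator; its traditional default simply combines the
texture complexity with the quantizer compression factor. The command below
is only a sketch with placeholder file names and an illustrative bitrate:

@example
ffmpeg -i input.avi -b 1000k -rc_eq 'tex^qComp' output.mpg
@end example

A more elaborate expression could combine the functions above, for example
@code{max(tex^qComp,1)}.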
|
||||
|
||||
@c man end
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg
|
||||
@settitle FFmpeg video converter
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffserver(1), ffplay(1) and the HTML documentation of @file{ffmpeg}.
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHOR
|
||||
Fabrice Bellard
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@section Protocols
|
||||
|
||||
The file name can be @file{-} to read from standard input or to write
|
||||
to standard output.
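
For example, the encoded output can be written to standard output and
redirected or piped to another program (@file{input.avi} is a placeholder
file name):

@example
ffmpeg -i input.avi -f mpeg - > output.mpg
@end example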
|
||||
|
||||
FFmpeg also handles many protocols specified with a URL syntax.
|
||||
|
||||
Use 'ffmpeg -formats' to see a list of the supported protocols.
|
||||
|
||||
The protocol @code{http:} is currently used only to communicate with
|
||||
FFserver (see the FFserver documentation). When FFmpeg becomes a
video player, it will also be used for streaming :-)
|
||||
|
||||
@chapter Tips
|
||||
|
||||
@itemize
|
||||
@item For streaming at very low bitrates, use a low frame rate
|
||||
and a small GOP size. This is especially true for RealVideo where
|
||||
the Linux player does not seem to be very fast, so it can miss
|
||||
frames. An example is:
|
||||
|
||||
@example
|
||||
ffmpeg -g 3 -r 3 -t 10 -b 50k -s qcif -f rv10 /tmp/b.rm
|
||||
@end example
|
||||
|
||||
@item The parameter 'q' which is displayed while encoding is the current
|
||||
quantizer. The value 1 indicates that a very good quality could
|
||||
be achieved. The value 31 indicates the worst quality. If q=31 appears
|
||||
too often, it means that the encoder cannot compress enough to meet
|
||||
your bitrate. You must either increase the bitrate, decrease the
|
||||
frame rate or decrease the frame size.
|
||||
|
||||
@item If your computer is not fast enough, you can speed up the
|
||||
compression at the expense of the compression ratio. You can use
|
||||
'-me zero' to speed up motion estimation, and '-intra' to disable
|
||||
motion estimation completely (you have only I-frames, which means it
|
||||
is about as good as JPEG compression).
|
||||
|
||||
@item To have very low audio bitrates, reduce the sampling frequency
|
||||
(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
|
||||
|
||||
@item To have a constant quality (but a variable bitrate), use the option
'-qscale n' where 'n' is between 1 (excellent quality) and 31 (worst
quality); see the example after this list.
|
||||
|
||||
@item When converting video files, you can use the '-sameq' option which
|
||||
uses the same quality factor in the encoder as in the decoder.
|
||||
It allows almost lossless encoding.
|
||||
|
||||
@end itemize
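
As a concrete illustration of the constant-quality tip above (the file names
are placeholders), the following encodes the video with a fixed quantizer
of 3:

@example
ffmpeg -i input.avi -qscale 3 output.avi
@end example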
|
||||
|
||||
@bye
|
||||
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes filters, sources, and sinks provided by the
|
||||
libavfilter library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavfilter.html,libavfilter}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-filters
|
||||
@settitle FFmpeg filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Formats Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Formats Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the supported formats (muxers and demuxers)
|
||||
provided by the libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include formats.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-formats
|
||||
@settitle FFmpeg formats
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Protocols Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Protocols Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output protocols provided by the
|
||||
libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include protocols.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-protocols
|
||||
@settitle FFmpeg protocols
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,44 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Resampler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Resampler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg resampler provides a high-level interface to the
|
||||
libswresample library audio resampling utilities. In particular it
|
||||
allows one to perform audio resampling, audio channel layout rematrixing,
|
||||
and convert audio format and packing layout.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include resampler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswresample.html,libswresample}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-resampler
|
||||
@settitle FFmpeg Resampler
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,43 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Scaler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Scaler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg rescaler provides a high-level interface to the libswscale
|
||||
library image conversion utilities. In particular it allows one to perform
|
||||
image rescaling and pixel format conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include scaler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswscale.html,libswscale}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-scaler
|
||||
@settitle FFmpeg video scaling and pixel format converter
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Utilities Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Utilities Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes some generic features and utilities provided
|
||||
by the libavutil library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include utils.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-utils
|
||||
@settitle FFmpeg utilities
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffmpeg.texi: 1553 lines (diff suppressed because it is too large)
@@ -1,47 +0,0 @@
|
||||
:
|
||||
ffmpeg.c : libav*
|
||||
======== : ======
|
||||
:
|
||||
:
|
||||
--------------------------------:---> AVStream...
|
||||
InputStream input_streams[] / :
|
||||
/ :
|
||||
InputFile input_files[] +==========================+ / ^ :
|
||||
------> 0 | : st ---:-----------:--/ : :
|
||||
^ +------+-----------+-----+ / +--------------------------+ : :
|
||||
: | :ist_index--:-----:---------/ 1 | : st : | : :
|
||||
: +------+-----------+-----+ +==========================+ : :
|
||||
nb_input_files : | :ist_index--:-----:------------------> 2 | : st : | : :
|
||||
: +------+-----------+-----+ +--------------------------+ : nb_input_streams :
|
||||
: | :ist_index : | 3 | ... | : :
|
||||
v +------+-----------+-----+ +--------------------------+ : :
|
||||
--> 4 | | : :
|
||||
| +--------------------------+ : :
|
||||
| 5 | | : :
|
||||
| +==========================+ v :
|
||||
| :
|
||||
| :
|
||||
| :
|
||||
| :
|
||||
--------- --------------------------------:---> AVStream...
|
||||
\ / :
|
||||
OutputStream output_streams[] / :
|
||||
\ / :
|
||||
+======\======================/======+ ^ :
|
||||
------> 0 | : source_index : st-:--- | : :
|
||||
OutputFile output_files[] / +------------------------------------+ : :
|
||||
/ 1 | : : : | : :
|
||||
^ +------+------------+-----+ / +------------------------------------+ : :
|
||||
: | : ost_index -:-----:------/ 2 | : : : | : :
|
||||
nb_output_files : +------+------------+-----+ +====================================+ : :
|
||||
: | : ost_index -:-----|-----------------> 3 | : : : | : :
|
||||
: +------+------------+-----+ +------------------------------------+ : nb_output_streams :
|
||||
: | : : | 4 | | : :
|
||||
: +------+------------+-----+ +------------------------------------+ : :
|
||||
: | : : | 5 | | : :
|
||||
v +------+------------+-----+ +------------------------------------+ : :
|
||||
6 | | : :
|
||||
+------------------------------------+ : :
|
||||
7 | | : :
|
||||
+====================================+ v :
|
||||
:
|
||||
doc/ffmpeg_powerpc_performance_evaluation_howto.txt: 172 lines (new file)
@@ -0,0 +1,172 @@
|
||||
FFmpeg & evaluating performance on the PowerPC Architecture HOWTO
|
||||
|
||||
(c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
|
||||
|
||||
|
||||
|
||||
I - Introduction
|
||||
|
||||
The PowerPC architecture and its SIMD extension AltiVec offer some
|
||||
interesting tools to evaluate performance and improve the code.
|
||||
This document tries to explain how to use those tools with FFmpeg.
|
||||
|
||||
The architecture itself offers two ways to evaluate the performance of
|
||||
a given piece of code:
|
||||
|
||||
1) The Time Base Registers (TBL)
|
||||
2) The Performance Monitor Counter Registers (PMC)
|
||||
|
||||
The first ones are always available, always active, but they're not very
|
||||
accurate: the registers increment by one every four *bus* cycles. On
|
||||
my 667 MHz tiBook (ppc7450), this means once every twenty *processor*
|
||||
cycles. So we won't use that.
|
||||
|
||||
The PMC are much more useful: not only can they report cycle-accurate
|
||||
timing, but they can also be used to monitor many other parameters,
|
||||
such as the number of AltiVec stalls for every kind of instruction,
|
||||
or instruction cache misses. The downside is that not all processors
|
||||
support the PMC (all G3, all G4 and the 970 do support them), and
|
||||
they're inactive by default - you need to activate them with a
|
||||
dedicated tool. Also, the number of available PMC depends on the
|
||||
processor: the various 604 have 2, the various 75x (aka G3) have 4,
|
||||
and the various 74xx (aka G4) have 6.
|
||||
|
||||
*WARNING*: The PowerPC 970 is not very well documented, and its PMC
|
||||
registers are 64 bits wide. To properly notify the code, you *must*
|
||||
tune for the 970 (using --tune=970), or the code will assume 32 bit
|
||||
registers.
|
||||
|
||||
|
||||
II - Enabling FFmpeg PowerPC performance support
|
||||
|
||||
This needs to be done by hand. First, you need to configure FFmpeg as
|
||||
usual, but add the "--powerpc-perf-enable" option. For instance:
|
||||
|
||||
#####
|
||||
./configure --prefix=/usr/local/ffmpeg-svn --cc=gcc-3.3 --tune=7450 --powerpc-perf-enable
|
||||
#####
|
||||
|
||||
This will configure FFmpeg to install inside /usr/local/ffmpeg-svn,
|
||||
compiling with gcc-3.3 (you should try to use this one or a newer
|
||||
gcc), and tuning for the PowerPC 7450 (i.e. the newer G4; as a rule of
|
||||
thumb, those at 550 MHz and more). It will also enable the PMC.
|
||||
|
||||
You may also edit the file "config.h" to enable the following line:
|
||||
|
||||
#####
|
||||
// #define ALTIVEC_USE_REFERENCE_C_CODE 1
|
||||
#####
|
||||
|
||||
If you enable this line, then the code will not make use of AltiVec,
|
||||
but will use the reference C code instead. This is useful to compare
|
||||
performance between two versions of the code.
|
||||
|
||||
Also, the number of enabled PMC is defined in "libavcodec/ppc/dsputil_ppc.h":
|
||||
|
||||
#####
|
||||
#define POWERPC_NUM_PMC_ENABLED 4
|
||||
#####
|
||||
|
||||
If you have a G4 CPU, you can enable all 6 PMC. DO NOT enable more
|
||||
PMC than available on your CPU!
|
||||
|
||||
Then, simply compile FFmpeg as usual (make && make install).
|
||||
|
||||
|
||||
|
||||
III - Using FFmpeg PowerPC performance support
|
||||
|
||||
This FFmpeg build can be used exactly as usual. But before exiting, FFmpeg
|
||||
will dump a per-function report that looks like this:
|
||||
|
||||
#####
|
||||
PowerPC performance report
|
||||
Values are from the PMC registers, and represent whatever the
|
||||
registers are set to record.
|
||||
Function "gmc1_altivec" (pmc1):
|
||||
min: 231
|
||||
max: 1339867
|
||||
avg: 558.25 (255302)
|
||||
Function "gmc1_altivec" (pmc2):
|
||||
min: 93
|
||||
max: 2164
|
||||
avg: 267.31 (255302)
|
||||
Function "gmc1_altivec" (pmc3):
|
||||
min: 72
|
||||
max: 1987
|
||||
avg: 276.20 (255302)
|
||||
(...)
|
||||
#####
|
||||
|
||||
In this example, PMC1 was set to record CPU cycles, PMC2 was set to
|
||||
record AltiVec Permute Stall Cycles, and PMC3 was set to record AltiVec
|
||||
Issue Stalls.
|
||||
|
||||
The function "gmc1_altivec" was monitored 255302 times, and the
|
||||
minimum execution time was 231 processor cycles. The max and average
|
||||
aren't much use, as it's very likely the OS interrupted execution for
|
||||
reasons of its own :-(
|
||||
|
||||
With the exact same settings and source file, but using the reference C
|
||||
code we get:
|
||||
|
||||
#####
|
||||
PowerPC performance report
|
||||
Values are from the PMC registers, and represent whatever the
|
||||
registers are set to record.
|
||||
Function "gmc1_altivec" (pmc1):
|
||||
min: 592
|
||||
max: 2532235
|
||||
avg: 962.88 (255302)
|
||||
Function "gmc1_altivec" (pmc2):
|
||||
min: 0
|
||||
max: 33
|
||||
avg: 0.00 (255302)
|
||||
Function "gmc1_altivec" (pmc3):
|
||||
min: 0
|
||||
max: 350
|
||||
avg: 0.03 (255302)
|
||||
(...)
|
||||
#####
|
||||
|
||||
592 cycles, so the fastest AltiVec execution is about 2.5x faster than
|
||||
the fastest C execution in this example. It's not perfect but it's not
|
||||
bad (well I wrote this function so I can't say otherwise :-).
|
||||
|
||||
Once you have that kind of report, you can try to improve things by
|
||||
finding what goes wrong and fixing it; in the example above, one
|
||||
should try to diminish the number of AltiVec stalls, as this *may*
|
||||
improve performance.
|
||||
|
||||
|
||||
|
||||
IV) Enabling the PMC in Mac OS X
|
||||
|
||||
This is easy. Use "Monster" and "monster". Those tools come from
|
||||
Apple's CHUD package, and can be found hidden in the developer web
|
||||
site & FTP site. "MONster" is the graphical application, use it to
|
||||
generate a config file specifying what each register should
|
||||
monitor. Then use the command-line application "monster" to use that
|
||||
config file, and enjoy the results.
|
||||
|
||||
Note that "MONster" can be used for many other things, but it's
|
||||
documented by Apple, it's not my subject.
|
||||
|
||||
If you are using CHUD 4.4.2 or later, you'll notice that MONster is
|
||||
no longer available. It has been superseded by Shark, where
|
||||
configuration of PMCs is available as a plugin.
|
||||
|
||||
|
||||
|
||||
V) Enabling the PMC on Linux
|
||||
|
||||
On Linux you may use oprofile from http://oprofile.sf.net. Depending on the
version and the CPU, you may need to apply a patch[1] to access a set of the
possible counters from the userspace application. You can always define them
using the kernel interface /dev/oprofile/* .
|
||||
|
||||
[1] http://dev.gentoo.org/~lu_zero/development/oprofile-g4-20060423.patch
|
||||
|
||||
--
|
||||
Romain Dolbeau <romain@dolbeau.org>
|
||||
Luca Barbato <lu_zero@gentoo.org>
|
||||
doc/ffplay-doc.texi: 159 lines (new file)
@@ -0,0 +1,159 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFplay Documentation
|
||||
@titlepage
|
||||
@sp 7
|
||||
@center @titlefont{FFplay Documentation}
|
||||
@sp 3
|
||||
@end titlepage
|
||||
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
@c man begin DESCRIPTION
|
||||
FFplay is a very simple and portable media player using the FFmpeg
|
||||
libraries and the SDL library. It is mostly used as a testbed for the
|
||||
various FFmpeg APIs.
|
||||
@c man end
|
||||
|
||||
@chapter Invocation
|
||||
|
||||
@section Syntax
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffplay [options] @file{input_file}
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@c man begin OPTIONS
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
@item -h
|
||||
Show help.
|
||||
@item -version
|
||||
Show version.
|
||||
@item -L
|
||||
Show license.
|
||||
@item -formats
|
||||
Show available formats, codecs, protocols, ...
|
||||
@item -x @var{width}
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set the frame size (WxH or abbreviation); this is needed for videos which do
not contain a header with the frame size, such as raw YUV. (See the example
after this table.)
|
||||
@item -an
|
||||
Disable audio.
|
||||
@item -vn
|
||||
Disable video.
|
||||
@item -ss @var{pos}
|
||||
Seek to a given position in seconds.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -f @var{fmt}
|
||||
Force format.
|
||||
@end table
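
For example, a raw YUV file with no header could be played by giving the
frame size and pixel format explicitly (the file name, size and pixel format
below are only an illustration):

@example
ffplay -f rawvideo -pix_fmt yuv420p -s cif input.yuv
@end example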
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
@item -stats
|
||||
Show the stream duration, the codec parameters, the current position in
|
||||
the stream and the audio/video synchronisation drift.
|
||||
@item -debug
|
||||
Print specific debug info.
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -vismv
|
||||
Visualize motion vectors.
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@item -genpts
|
||||
Generate pts.
|
||||
@item -rtp_tcp
|
||||
Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
|
||||
if you are streaming with the RTSP protocol.
|
||||
@item -sync @var{type}
|
||||
Set the master clock to audio (@code{type=audio}), video
|
||||
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
|
||||
master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -threads @var{count}
|
||||
Set the thread count.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected; if it is negative, the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected; if it is negative, the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected; if it is negative, the subtitle rendering is disabled.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
||||
@table @key
|
||||
@item q, ESC
|
||||
Quit.
|
||||
|
||||
@item f
|
||||
Toggle full screen.
|
||||
|
||||
@item p, SPC
|
||||
Pause.
|
||||
|
||||
@item a
|
||||
Cycle audio channel.
|
||||
|
||||
@item v
|
||||
Cycle video channel.
|
||||
|
||||
@item t
|
||||
Cycle subtitle channel.
|
||||
|
||||
@item w
|
||||
Show audio waves.
|
||||
|
||||
@item left/right
|
||||
Seek backward/forward 10 seconds.
|
||||
|
||||
@item down/up
|
||||
Seek backward/forward 1 minute.
|
||||
|
||||
@item mouse click
|
||||
Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffplay
|
||||
@settitle FFplay media player
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffserver(1) and the HTML documentation of @file{ffmpeg}.
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHOR
|
||||
Fabrice Bellard
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffplay.texi: 293 lines
@@ -1,293 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle ffplay Documentation
|
||||
@titlepage
|
||||
@center @titlefont{ffplay Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffplay [@var{options}] [@file{input_file}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
FFplay is a very simple and portable media player using the FFmpeg
|
||||
libraries and the SDL library. It is mostly used as a testbed for the
|
||||
various FFmpeg APIs.
|
||||
@c man end
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@include fftools-common-opts.texi
|
||||
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
@item -x @var{width}
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -an
|
||||
Disable audio.
|
||||
@item -vn
|
||||
Disable video.
|
||||
@item -ss @var{pos}
|
||||
Seek to a given position in seconds.
|
||||
@item -t @var{duration}
|
||||
Play <duration> seconds of audio/video.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -f @var{fmt}
|
||||
Force format.
|
||||
@item -window_title @var{title}
|
||||
Set window title (default is the input filename).
|
||||
@item -loop @var{number}
|
||||
Loop movie playback <number> times. 0 means forever.
|
||||
@item -showmode @var{mode}
|
||||
Set the show mode to use.
|
||||
Available values for @var{mode} are:
|
||||
@table @samp
|
||||
@item 0, video
|
||||
show video
|
||||
@item 1, waves
|
||||
show audio waves
|
||||
@item 2, rdft
|
||||
show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
|
||||
@end table
|
||||
|
||||
Default value is "video", if video is not present or cannot be played
|
||||
"rdft" is automatically selected.
|
||||
|
||||
You can interactively cycle through the available show modes by
|
||||
pressing the key @key{w}.
|
||||
|
||||
@item -vf @var{filtergraph}
|
||||
Create the filtergraph specified by @var{filtergraph} and use it to
|
||||
filter the video stream.
|
||||
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the stream, and must have a single video input and a single video
|
||||
output. In the filtergraph, the input is associated to the label
|
||||
@code{in}, and the output to the label @code{out}. See the
|
||||
ffmpeg-filters manual for more information about the filtergraph
|
||||
syntax.
|
||||
|
||||
You can specify this parameter multiple times and cycle through the specified
filtergraphs along with the show modes by pressing the key @key{w}. See the
example following this table.
|
||||
|
||||
@item -af @var{filtergraph}
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the input audio.
|
||||
Use the option "-filters" to show all the available filters (including
|
||||
sources and sinks).
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
@end table
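
As a minimal illustration of the @option{-vf} option (the input file name is
a placeholder), the following plays a horizontally mirrored version of the
video using the @code{hflip} filter:

@example
ffplay -vf hflip input.avi
@end example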
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@item -genpts
|
||||
Generate pts.
|
||||
@item -rtp_tcp
|
||||
Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
|
||||
if you are streaming with the RTSP protocol.
|
||||
@item -sync @var{type}
|
||||
Set the master clock to audio (@code{type=audio}), video
|
||||
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
|
||||
master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -threads @var{count}
|
||||
Set the thread count.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected; if it is negative, the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected; if it is negative, the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected; if it is negative, the subtitle rendering is disabled.
|
||||
@item -autoexit
|
||||
Exit when video is done playing.
|
||||
@item -exitonkeydown
|
||||
Exit if any key is pressed.
|
||||
@item -exitonmousedown
|
||||
Exit if any mouse button is pressed.
|
||||
|
||||
@item -codec:@var{media_specifier} @var{codec_name}
|
||||
Force a specific decoder implementation for the stream identified by
|
||||
@var{media_specifier}, which can assume the values @code{a} (audio),
|
||||
@code{v} (video), and @code{s} (subtitle).
|
||||
|
||||
@item -acodec @var{codec_name}
|
||||
Force a specific audio decoder.
|
||||
|
||||
@item -vcodec @var{codec_name}
|
||||
Force a specific video decoder.
|
||||
|
||||
@item -scodec @var{codec_name}
|
||||
Force a specific subtitle decoder.
|
||||
|
||||
@item -autorotate
|
||||
Automatically rotate the video according to presentation metadata. Set by
|
||||
default, use -noautorotate to disable.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
||||
@table @key
|
||||
@item q, ESC
|
||||
Quit.
|
||||
|
||||
@item f
|
||||
Toggle full screen.
|
||||
|
||||
@item p, SPC
|
||||
Pause.
|
||||
|
||||
@item a
|
||||
Cycle audio channel in the current program.
|
||||
|
||||
@item v
|
||||
Cycle video channel.
|
||||
|
||||
@item t
|
||||
Cycle subtitle channel in the current program.
|
||||
|
||||
@item c
|
||||
Cycle program.
|
||||
|
||||
@item w
|
||||
Cycle video filters or show modes.
|
||||
|
||||
@item s
|
||||
Step to the next frame.
|
||||
|
||||
Pause if the stream is not already paused, step to the next video
|
||||
frame, and pause.
|
||||
|
||||
@item left/right
|
||||
Seek backward/forward 10 seconds.
|
||||
|
||||
@item down/up
|
||||
Seek backward/forward 1 minute.
|
||||
|
||||
@item page down/page up
|
||||
Seek to the previous/next chapter.
|
||||
or if there are no chapters
|
||||
Seek backward/forward 10 minutes.
|
||||
|
||||
@item mouse click
|
||||
Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@set config-readonly
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffplay.html,ffplay},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffplay-all.html,ffplay-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffplay(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffplay-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffplay
|
||||
@settitle FFplay media player
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffprobe.texi: 675 lines
@@ -1,675 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle ffprobe Documentation
|
||||
@titlepage
|
||||
@center @titlefont{ffprobe Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] [@file{input_file}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
ffprobe gathers information from multimedia streams and prints it in
|
||||
human- and machine-readable fashion.
|
||||
|
||||
For example it can be used to check the format of the container used
|
||||
by a multimedia stream and the format and type of each media stream
|
||||
contained in it.
|
||||
|
||||
If a filename is specified as input, ffprobe will try to open and
|
||||
probe the file content. If the file cannot be opened or recognized as
|
||||
a multimedia file, a positive exit code is returned.
|
||||
|
||||
ffprobe may be employed either as a standalone application or in
|
||||
combination with a textual filter, which may perform more
|
||||
sophisticated processing, e.g. statistical processing or plotting.
|
||||
|
||||
Options are used to list some of the formats supported by ffprobe, to
specify which information to display, and to set how ffprobe will
show it.
|
||||
|
||||
ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and a unique
|
||||
name. See the output of @option{sections}.
|
||||
|
||||
Metadata tags stored in the container or in the streams are recognized
|
||||
and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
|
||||
section.
|
||||
|
||||
@c man end
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@include fftools-common-opts.texi
|
||||
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -f @var{format}
|
||||
Force format to use.
|
||||
|
||||
@item -unit
|
||||
Show the unit of the displayed values.
|
||||
|
||||
@item -prefix
|
||||
Use SI prefixes for the displayed values.
|
||||
Unless the "-byte_binary_prefix" option is used all the prefixes
|
||||
are decimal.
|
||||
|
||||
@item -byte_binary_prefix
|
||||
Force the use of binary prefixes for byte values.
|
||||
|
||||
@item -sexagesimal
|
||||
Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
|
||||
@item -pretty
|
||||
Prettify the format of the displayed values, it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@var{writer_options} specifies the options to be passed to the writer.
|
||||
|
||||
For example for printing the output in JSON format, specify:
|
||||
@example
|
||||
-print_format json
|
||||
@end example
|
||||
|
||||
For more details on the available output printing formats, see the
|
||||
Writers section below.
|
||||
|
||||
@item -sections
|
||||
Print sections structure and section information, and exit. The output
|
||||
is not meant to be parsed by a machine.
|
||||
|
||||
@item -select_streams @var{stream_specifier}
|
||||
Select only the streams specified by @var{stream_specifier}. This
|
||||
option affects only the options related to streams
|
||||
(e.g. @code{show_streams}, @code{show_packets}, etc.).
|
||||
|
||||
For example to show only audio streams, you can use the command:
|
||||
@example
|
||||
ffprobe -show_streams -select_streams a INPUT
|
||||
@end example
|
||||
|
||||
To show only video packets belonging to the video stream with index 1:
|
||||
@example
|
||||
ffprobe -show_packets -select_streams v:1 INPUT
|
||||
@end example
|
||||
|
||||
@item -show_data
|
||||
Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
@option{-show_packets}, it will dump the packets' data. Coupled with
|
||||
@option{-show_streams}, it will dump the codec extradata.
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
|
||||
|
||||
@item -show_data_hash @var{algorithm}
|
||||
Show a hash of payload data, for packets with @option{-show_packets} and for
|
||||
codec extradata with @option{-show_streams}.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
The error information is printed within a section with name "ERROR".
|
||||
|
||||
@item -show_format
|
||||
Show information about the container format of the input multimedia
|
||||
stream.
|
||||
|
||||
All the container format information is printed within a section with
|
||||
name "FORMAT".
|
||||
|
||||
@item -show_format_entry @var{name}
|
||||
Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
than once; in that case all the specified entries will be shown.
|
||||
|
||||
This option is deprecated, use @code{show_entries} instead.
|
||||
|
||||
@item -show_entries @var{section_entries}
|
||||
Set list of entries to show.
|
||||
|
||||
Entries are specified according to the following
|
||||
syntax. @var{section_entries} contains a list of section entries
|
||||
separated by @code{:}. Each section entry is composed by a section
|
||||
name (or unique name), optionally followed by a list of entries local
|
||||
to that section, separated by @code{,}.
|
||||
|
||||
If section name is specified but is followed by no @code{=}, all
|
||||
entries are printed to output, together with all the contained
|
||||
sections. Otherwise only the entries specified in the local section
|
||||
entries list are printed. In particular, if @code{=} is specified but
|
||||
the list of local entries is empty, then no entries will be shown for
|
||||
that section.
|
||||
|
||||
Note that the order of specification of the local section entries is
|
||||
not honored in the output, and the usual display order will be
|
||||
retained.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
|
||||
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
|
||||
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
|
||||
@end example
|
||||
|
||||
For example, to show only the index and type of each stream, and the PTS
|
||||
time, duration time, and stream index of the packets, you can specify
|
||||
the argument:
|
||||
@example
|
||||
packet=pts_time,duration_time,stream_index : stream=index,codec_type
|
||||
@end example
|
||||
|
||||
To show all the entries in the section "format", but only the codec
|
||||
type in the section "stream", specify the argument:
|
||||
@example
|
||||
format : stream=codec_type
|
||||
@end example
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
stream_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
sections:
|
||||
@example
|
||||
stream_tags=title
|
||||
@end example
|
||||
|
||||
@item -show_packets
|
||||
Show information about each packet contained in the input multimedia
|
||||
stream.
|
||||
|
||||
The information for each single packet is printed within a dedicated
|
||||
section with name "PACKET".
|
||||
|
||||
@item -show_frames
|
||||
Show information about each frame and subtitle contained in the input
|
||||
multimedia stream.
|
||||
|
||||
The information for each single frame is printed within a dedicated
|
||||
section with name "FRAME" or "SUBTITLE".
|
||||
|
||||
@item -show_streams
|
||||
Show information about each media stream contained in the input
|
||||
multimedia stream.
|
||||
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM".
|
||||
|
||||
@item -show_programs
|
||||
Show information about programs and their streams contained in the input
|
||||
multimedia stream.
|
||||
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "PROGRAM_STREAM".
|
||||
|
||||
@item -show_chapters
|
||||
Show information about chapters stored in the format.
|
||||
|
||||
Each chapter is printed within a dedicated section with name "CHAPTER".
|
||||
|
||||
@item -count_frames
|
||||
Count the number of frames per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -count_packets
|
||||
Count the number of packets per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -read_intervals @var{read_intervals}
|
||||
|
||||
Read only the specified intervals. @var{read_intervals} must be a
|
||||
sequence of interval specifications separated by ",".
|
||||
@command{ffprobe} will seek to the interval starting point, and will
|
||||
continue reading from that.
|
||||
|
||||
Each interval is specified by two optional parts, separated by "%".
|
||||
|
||||
The first part specifies the interval start position. It is
|
||||
interpreted as an absolute position, or as a relative offset from the
|
||||
current position if it is preceded by the "+" character. If this first
|
||||
part is not specified, no seeking will be performed when reading this
|
||||
interval.
|
||||
|
||||
The second part specifies the interval end position. It is interpreted
|
||||
as an absolute position, or as a relative offset from the current
|
||||
position if it is preceded by the "+" character. If the offset
|
||||
specification starts with "#", it is interpreted as the number of
|
||||
packets to read (not including the flushing packets) from the interval
|
||||
start. If no second part is specified, the program will read until the
|
||||
end of the input.
|
||||
|
||||
Note that seeking is not accurate, thus the actual interval start
|
||||
point may be different from the specified position. Also, when an
|
||||
interval duration is specified, the absolute end time will be computed
|
||||
by adding the duration to the interval start point found by seeking
|
||||
the file, rather than to the specified start value.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{INTERVAL} ::= [@var{START}|+@var{START_OFFSET}][%[@var{END}|+@var{END_OFFSET}]]
|
||||
@var{INTERVALS} ::= @var{INTERVAL}[,@var{INTERVALS}]
|
||||
@end example
|
||||
|
||||
A few examples follow.
|
||||
@itemize
|
||||
@item
|
||||
Seek to time 10, read packets until 20 seconds after the found seek
|
||||
point, then seek to position @code{01:30} (1 minute and thirty
|
||||
seconds) and read packets until position @code{01:45}.
|
||||
@example
|
||||
10%+20,01:30%01:45
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read only 42 packets after seeking to position @code{01:23}:
|
||||
@example
|
||||
01:23%+#42
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read only the first 20 seconds from the start:
|
||||
@example
|
||||
%+20
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read from the start until position @code{02:30}:
|
||||
@example
|
||||
%02:30
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@item -show_private_data, -private
|
||||
Show private data, that is data depending on the format of the
|
||||
particular shown element.
|
||||
This option is enabled by default, but you may need to disable it
|
||||
for specific uses, for example when creating XSD-compliant XML output.
|
||||
|
||||
@item -show_program_version
|
||||
Show information related to program version.
|
||||
|
||||
Version information is printed within a section with name
|
||||
"PROGRAM_VERSION".
|
||||
|
||||
@item -show_library_versions
|
||||
Show information related to library versions.
|
||||
|
||||
Version information for each library is printed within a section with
|
||||
name "LIBRARY_VERSION".
|
||||
|
||||
@item -show_versions
|
||||
Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@chapter Writers
|
||||
@c man begin WRITERS
|
||||
|
||||
A writer defines the output format adopted by @command{ffprobe}, and will be
|
||||
used for printing all the parts of the output.
|
||||
|
||||
A writer may accept one or more arguments, which specify the options
|
||||
to adopt. The options are specified as a list of @var{key}=@var{value}
|
||||
pairs, separated by ":".
|
||||
|
||||
All writers support the following options:
|
||||
|
||||
@table @option
|
||||
@item string_validation, sv
|
||||
Set string validation mode.
|
||||
|
||||
The following values are accepted.
|
||||
@table @samp
|
||||
@item fail
|
||||
The writer will fail immediately in case an invalid string (UTF-8)
|
||||
sequence or code point is found in the input. This is especially
|
||||
useful to validate input metadata.
|
||||
|
||||
@item ignore
|
||||
Any validation error will be ignored. This will result in possibly
|
||||
broken output, especially with the json or xml writer.
|
||||
|
||||
@item replace
|
||||
The writer will substitute invalid UTF-8 sequences or code points with
|
||||
the string specified with the @option{string_validation_replacement}.
|
||||
@end table
|
||||
|
||||
Default value is @samp{replace}.
|
||||
|
||||
@item string_validation_replacement, svr
|
||||
Set replacement string to use in case @option{string_validation} is
|
||||
set to @samp{replace}.
|
||||
|
||||
In case the option is not specified, the writer will assume the empty
|
||||
string, that is, it will remove the invalid sequences from the input
|
||||
strings.
|
||||
@end table
|
||||
|
||||
A description of the currently available writers follows.
|
||||
|
||||
@section default
|
||||
Default format.
|
||||
|
||||
Print each section in the form:
|
||||
@example
|
||||
[SECTION]
|
||||
key1=val1
|
||||
...
|
||||
keyN=valN
|
||||
[/SECTION]
|
||||
@end example
|
||||
|
||||
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
|
||||
PROGRAM_STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1, do not print the key of each field. Default value
is 0.
|
||||
|
||||
@item noprint_wrappers, nw
|
||||
If set to 1, do not print the section header and footer.
|
||||
Default value is 0.
|
||||
@end table
|
||||
|
||||
@section compact, csv
|
||||
Compact and CSV format.
|
||||
|
||||
The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
@example
|
||||
section|key1=val1| ... |keyN=valN
|
||||
@end example
|
||||
|
||||
Metadata tags are printed in the corresponding "format" or "stream"
|
||||
section. A metadata tag key, if printed, is prefixed by the string
|
||||
"tag:".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item item_sep, s
|
||||
Specify the character to use for separating fields in the output line.
|
||||
It must be a single printable character, it is "|" by default ("," for
|
||||
the @code{csv} writer).
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1, do not print the key of each field. Its default
value is 0 (1 for the @code{csv} writer).
|
||||
|
||||
@item escape, e
|
||||
Set the escape mode to use, default to "c" ("csv" for the @code{csv}
|
||||
writer).
|
||||
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped using C-like fashioned
|
||||
escaping, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
|
||||
@item csv
|
||||
Perform CSV-like escaping, as described in RFC4180. Strings
|
||||
containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
('"'), or @var{SEP} are enclosed in double-quotes.
|
||||
|
||||
@item none
|
||||
Perform no escaping.
|
||||
@end table
|
||||
|
||||
@item print_section, p
|
||||
Print the section name at the beginning of each line if the value is
|
||||
@code{1}, disable it with value set to @code{0}. Default value is
|
||||
@code{1}.
|
||||
|
||||
@end table
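
For example, assuming @file{input.mp4} is a placeholder file name, the
container information can be printed as a single line of bare values with:

@example
ffprobe -show_format -of compact=nokey=1:print_section=0 input.mp4
@end example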
|
||||
|
||||
@section flat
|
||||
Flat format.
|
||||
|
||||
A free-form output where each line contains an explicit key=value, such as
|
||||
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
|
||||
directly embedded in sh scripts as long as the separator character is an
|
||||
alphanumeric character or an underscore (see @var{sep_char} option).
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item sep_char, s
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
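
Because the output is shell escaped, it can for example be evaluated directly
by a POSIX shell; here @file{input.mp4} is a placeholder file name, and
@code{s=_} selects the underscore as separator so that the printed keys are
valid shell variable names:

@example
eval "$(ffprobe -v error -of flat=s=_ -show_format input.mp4)"
@end example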
|
||||
|
||||
@section ini
|
||||
INI format output.
|
||||
|
||||
Print output in an INI based format.
|
||||
|
||||
The following conventions are adopted:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
all keys and values are UTF-8
|
||||
@item
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
'\' is the escape character
|
||||
@item
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
|
||||
|
||||
@section json
|
||||
JSON based format.
|
||||
|
||||
Each section is printed using JSON notation.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item compact, c
|
||||
If set to 1 enable compact output, that is each section will be
|
||||
printed on a single line. Default value is 0.
|
||||
@end table
|
||||
|
||||
For more information about JSON, see @url{http://www.json.org/}.
|
||||
|
||||
@section xml
|
||||
XML based format.
|
||||
|
||||
The XML output is described in the XML schema description file
|
||||
@file{ffprobe.xsd} installed in the FFmpeg datadir.
|
||||
|
||||
An updated version of the schema can be retrieved at the url
|
||||
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
|
||||
latest schema committed into the FFmpeg development source code tree.
|
||||
|
||||
Note that the output issued will be compliant to the
|
||||
@file{ffprobe.xsd} schema only when no special global output options
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@option{sexagesimal} etc.) are specified.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item fully_qualified, q
|
||||
If set to 1, the output will be fully qualified. Default
|
||||
value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_compliant, x
|
||||
If set to 1, perform more checks to ensure that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@c man begin TIMECODE
|
||||
|
||||
@command{ffprobe} supports Timecode extraction:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
stream details (@option{-show_streams}, see @var{timecode}).
|
||||
|
||||
@item
|
||||
MOV timecode is extracted from the tmcd track, and is available in the tmcd
stream metadata (@option{-show_streams}, see @var{TAG:timecode}); see the
example after this list.
|
||||
|
||||
@item
|
||||
DV, GXF and AVI timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
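
For example, following the @option{-show_entries} syntax described above, the
MOV timecode tag alone could be printed with (the file name is a placeholder):

@example
ffprobe -show_entries stream_tags=timecode input.mov
@end example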
|
||||
@c man end TIMECODE
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@set config-readonly
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffprobe.html,ffprobe},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffprobe-all.html,ffprobe-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffprobe(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffprobe-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffplay(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffprobe
|
||||
@settitle ffprobe media prober
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
doc/ffprobe.xsd
@@ -1,280 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
||||
doc/ffserver-doc.texi (new file)
@@ -0,0 +1,277 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFserver Documentation
|
||||
@titlepage
|
||||
@sp 7
|
||||
@center @titlefont{FFserver Documentation}
|
||||
@sp 3
|
||||
@end titlepage
|
||||
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
@c man begin DESCRIPTION
|
||||
FFserver is a streaming server for both audio and video. It supports
|
||||
several live feeds, streaming from files and time shifting on live feeds
|
||||
(you can seek to positions in the past on each live feed, provided you
|
||||
specify a big enough feed storage in ffserver.conf).
|
||||
|
||||
FFserver runs in daemon mode by default; that is, it puts itself in
|
||||
the background and detaches from its TTY, unless it is launched in
|
||||
debug mode or a NoDaemon option is specified in the configuration
|
||||
file.
|
||||
|
||||
This documentation covers only the streaming aspects of ffserver /
|
||||
ffmpeg. Questions about ffmpeg parameters, codecs,
|
||||
etc. are not covered here. Read @file{ffmpeg-doc.html} for more
|
||||
information.
|
||||
|
||||
@section How does it work?
|
||||
|
||||
FFserver receives prerecorded files or FFM streams from some ffmpeg
|
||||
instance as input, then streams them over RTP/RTSP/HTTP.
|
||||
|
||||
An ffserver instance will listen on some port as specified in the
|
||||
configuration file. You can launch one or more instances of ffmpeg and
|
||||
send one or more FFM streams to the port where ffserver is expecting
|
||||
to receive them. Alternately, you can make ffserver launch such ffmpeg
|
||||
instances at startup.
|
||||
|
||||
Input streams are called feeds, and each one is specified by a <Feed>
|
||||
section in the configuration file.
|
||||
|
||||
For each feed you can have different output streams in various
|
||||
formats, each one specified by a <Stream> section in the configuration
|
||||
file.
|
||||
|
||||
@section Status stream
|
||||
|
||||
FFserver supports an HTTP interface which exposes the current status
|
||||
of the server.
|
||||
|
||||
Simply point your browser to the address of the special status stream
|
||||
specified in the configuration file.
|
||||
|
||||
For example if you have:
|
||||
@example
|
||||
<Stream status.html>
|
||||
Format status
|
||||
|
||||
# Only allow local people to get the status
|
||||
ACL allow localhost
|
||||
ACL allow 192.168.0.0 192.168.255.255
|
||||
</Stream>
|
||||
@end example
|
||||
|
||||
then the server will post a page with the status information when
|
||||
the special stream @file{status.html} is requested.
|
||||
|
||||
@section What can this do?
|
||||
|
||||
When properly configured and running, you can capture video and audio in real
|
||||
time from a suitable capture card, and stream it out over the Internet to
|
||||
either Windows Media Player or RealAudio player (with some restrictions).
|
||||
|
||||
It can also stream from files, though that is currently broken. Very often, a
|
||||
web server can be used to serve up the files just as well.
|
||||
|
||||
It can stream prerecorded video from .ffm files, though it is somewhat tricky
|
||||
to make it work correctly.
|
||||
|
||||
@section What do I need?
|
||||
|
||||
I use Linux on a 900 MHz Duron with a cheapo Bt848 based TV capture card. I'm
|
||||
using stock Linux 2.4.17 with the stock drivers. [Actually that isn't true,
|
||||
I needed some special drivers for my motherboard-based sound card.]
|
||||
|
||||
I understand that FreeBSD systems work just fine as well.
|
||||
|
||||
@section How do I make it work?
|
||||
|
||||
First, build the kit. It *really* helps to have installed LAME first. Then when
|
||||
you run the ffserver ./configure, make sure that you have the
|
||||
@code{--enable-libmp3lame} flag turned on.
|
||||
|
||||
LAME is important as it allows for streaming audio to Windows Media Player.
|
||||
Don't ask why the other audio types do not work.
|
||||
|
||||
As a simple test, just run the following two command lines where INPUTFILE
|
||||
is some file which you can decode with ffmpeg:
|
||||
|
||||
@example
|
||||
./ffserver -f doc/ffserver.conf &
|
||||
./ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm
|
||||
@end example
|
||||
|
||||
At this point you should be able to go to your Windows machine and fire up
|
||||
Windows Media Player (WMP). Go to Open URL and enter
|
||||
|
||||
@example
|
||||
http://<linuxbox>:8090/test.asf
|
||||
@end example
|
||||
|
||||
You should (after a short delay) see video and hear audio.
|
||||
|
||||
WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
|
||||
transfer the entire file before starting to play.
|
||||
The same is true of AVI files.
|
||||
|
||||
@section What happens next?
|
||||
|
||||
You should edit the ffserver.conf file to suit your needs (in terms of
|
||||
frame rates etc). Then install ffserver and ffmpeg, write a script to start
|
||||
them up, and off you go.
|
||||
|
||||
@section Troubleshooting
|
||||
|
||||
@subsection I don't hear any audio, but video is fine.
|
||||
|
||||
Maybe you didn't install LAME, or got your ./configure statement wrong. Check
|
||||
the ffmpeg output to see if a line referring to MP3 is present. If not, then
|
||||
your configuration was incorrect. If it is, then maybe your wiring is not
|
||||
set up correctly. Maybe the sound card is not getting data from the right
|
||||
input source. Maybe you have a really awful audio interface (like I do)
|
||||
that only captures in stereo and also requires that one channel be flipped.
|
||||
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
|
||||
starting ffmpeg.
|
||||
|
||||
@subsection The audio and video lose sync after a while.
|
||||
|
||||
Yes, they do.
|
||||
|
||||
@subsection After a long while, the video update rate goes way down in WMP.
|
||||
|
||||
Yes, it does. Who knows why?
|
||||
|
||||
@subsection WMP 6.4 behaves differently to WMP 7.
|
||||
|
||||
Yes, it does. Any thoughts on this would be gratefully received. These
|
||||
differences extend to embedding WMP into a web page. [There are two
|
||||
object IDs that you can use: The old one, which does not play well, and
|
||||
the new one, which does (both tested on the same system). However,
|
||||
I suspect that the new one is not available unless you have installed WMP 7].
|
||||
|
||||
@section What else can it do?
|
||||
|
||||
You can replay video from .ffm files that was recorded earlier.
|
||||
However, there are a number of caveats, including the fact that the
|
||||
ffserver parameters must match the original parameters used to record the
|
||||
file. If they do not, then ffserver deletes the file before recording into it.
|
||||
(Now that I write this, it seems broken).
|
||||
|
||||
You can fiddle with many of the codec choices and encoding parameters, and
|
||||
there are a bunch more parameters that you cannot control. Post a message
|
||||
to the mailing list if there are some 'must have' parameters. Look in
|
||||
ffserver.conf for a list of the currently available controls.
|
||||
|
||||
It will automatically generate the ASX or RAM files that are often used
|
||||
in browsers. These files are actually redirections to the underlying ASF
|
||||
or RM file. The reason for this is that the browser often fetches the
|
||||
entire file before starting up the external viewer. The redirection files
|
||||
are very small and can be transferred quickly. [The stream itself is
|
||||
often 'infinite' and thus the browser tries to download it and never
|
||||
finishes.]
|
||||
|
||||
@section Tips
|
||||
|
||||
* When you connect to a live stream, most players (WMP, RA, etc) want to
|
||||
buffer a certain number of seconds of material so that they can display the
|
||||
signal continuously. However, ffserver (by default) starts sending data
|
||||
in realtime. This means that there is a pause of a few seconds while the
|
||||
buffering is being done by the player. The good news is that this can be
|
||||
cured by adding a '?buffer=5' to the end of the URL. This means that the
|
||||
stream should start 5 seconds in the past -- and so the first 5 seconds
|
||||
of the stream are sent as fast as the network will allow. It will then
|
||||
slow down to real time. This noticeably improves the startup experience.
|
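For example, assuming the @file{test.asf} stream and port 8090 used
elsewhere in this document, the player URL would look something like:

@example
http://<linuxbox>:8090/test.asf?buffer=5
@end example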
||||
|
||||
You can also add a 'Preroll 15' statement into the ffserver.conf that will
|
||||
add 15 seconds of prebuffering on all requests that do not otherwise
|
||||
specify a time. In addition, ffserver will skip frames until a key_frame
|
||||
is found. This further reduces the startup delay by not transferring data
|
||||
that will be discarded.
|
||||
|
||||
* You may want to adjust the MaxBandwidth in the ffserver.conf to limit
|
||||
the amount of bandwidth consumed by live streams.
|
||||
|
||||
@section Why does the ?buffer / Preroll stop working after a time?
|
||||
|
||||
It turns out that (on my machine at least) the number of frames successfully
|
||||
grabbed is marginally less than the number that ought to be grabbed. This
|
||||
means that the timestamp in the encoded data stream gets behind realtime.
|
||||
This means that if you say 'Preroll 10', then when the stream gets 10
|
||||
or more seconds behind, there is no Preroll left.
|
||||
|
||||
Fixing this requires a change in the internals of how timestamps are
|
||||
handled.
|
||||
|
||||
@section Does the @code{?date=} stuff work?
|
||||
|
||||
Yes (subject to the limitation outlined above). Also note that whenever you
|
||||
start ffserver, it deletes the ffm file (if any parameters have changed),
|
||||
thus wiping out what you had recorded before.
|
||||
|
||||
The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
|
||||
of the following formats (the 'T' is literal):
|
||||
|
||||
@example
|
||||
* YYYY-MM-DDTHH:MM:SS (localtime)
|
||||
* YYYY-MM-DDTHH:MM:SSZ (UTC)
|
||||
@end example
|
||||
|
||||
You can omit the YYYY-MM-DD, and then it refers to the current day. However
|
||||
note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
|
||||
may be in the future and so is unlikely to be useful.
|
||||
|
||||
You use this by adding the ?date= to the end of the URL for the stream.
|
||||
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
|
||||
@c man end
|
||||
|
||||
@chapter Invocation
|
||||
@section Syntax
|
||||
@example
|
||||
@c man begin SYNOPSIS
|
||||
ffserver [options]
|
||||
@c man end
|
||||
@end example
|
||||
|
||||
@section Options
|
||||
@c man begin OPTIONS
|
||||
@table @option
|
||||
@item -version
|
||||
Show version.
|
||||
@item -L
|
||||
Show license.
|
||||
@item -formats
|
||||
Show available formats, codecs, protocols, ...
|
||||
@item -h
|
||||
Show help.
|
||||
@item -f @var{configfile}
|
||||
Use @file{configfile} instead of @file{/etc/ffserver.conf}.
|
||||
@item -n
|
||||
Enable no-launch mode. This option disables all the Launch directives
|
||||
within the various <Feed> sections. FFserver will not launch any
|
||||
ffmpeg instance, so you will have to launch them manually.
|
||||
@item -d
|
||||
Enable debug mode. This option increases log verbosity, directs log
|
||||
messages to stdout and causes ffserver to run in the foreground
|
||||
rather than as a daemon.
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffserver
|
||||
@settitle FFserver video server
|
||||
|
||||
@c man begin SEEALSO
|
||||
ffmpeg(1), ffplay(1), the @file{ffmpeg/doc/ffserver.conf} example and
|
||||
the HTML documentation of @file{ffmpeg}.
|
||||
@c man end
|
||||
|
||||
@c man begin AUTHOR
|
||||
Fabrice Bellard
|
||||
@c man end
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
||||
@@ -1,11 +1,11 @@
|
||||
# Port on which the server is listening. You must select a different
|
||||
# port from your standard HTTP web server if it is running on the same
|
||||
# computer.
|
||||
HTTPPort 8090
|
||||
Port 8090
|
||||
|
||||
# Address on which the server is bound. Only useful if you have
|
||||
# several network interfaces.
|
||||
HTTPBindAddress 0.0.0.0
|
||||
BindAddress 0.0.0.0
|
||||
|
||||
# Number of simultaneous HTTP connections that can be handled. It has
|
||||
# to be defined *before* the MaxClients parameter, since it defines the
|
||||
@@ -25,6 +25,10 @@ MaxBandwidth 1000
|
||||
# '-' is the standard output.
|
||||
CustomLog -
|
||||
|
||||
# Suppress that if you want to launch ffserver as a daemon.
|
||||
NoDaemon
|
||||
|
||||
|
||||
##################################################################
|
||||
# Definition of the live feeds. Each live feed contains one video
|
||||
# and/or audio sequence coming from an ffmpeg encoder or another
|
||||
@@ -235,7 +239,7 @@ StartSendOnKey
|
||||
|
||||
#<Stream test.ogg>
|
||||
#Feed feed1.ffm
|
||||
#Metadata title "Stream title"
|
||||
#Title "Stream title"
|
||||
#AudioBitRate 64
|
||||
#AudioChannels 2
|
||||
#AudioSampleRate 44100
|
||||
@@ -280,10 +284,10 @@ StartSendOnKey
|
||||
#<Stream file.asf>
|
||||
#File "/usr/local/httpd/htdocs/test.asf"
|
||||
#NoAudio
|
||||
#Metadata author "Me"
|
||||
#Metadata copyright "Super MegaCorp"
|
||||
#Metadata title "Test stream from disk"
|
||||
#Metadata comment "Test comment"
|
||||
#Author "Me"
|
||||
#Copyright "Super MegaCorp"
|
||||
#Title "Test stream from disk"
|
||||
#Comment "Test comment"
|
||||
#</Stream>
|
||||
|
||||
|
||||
@@ -302,27 +306,6 @@ StartSendOnKey
|
||||
#</Stream>
|
||||
|
||||
|
||||
# Transcode an incoming live feed to another live feed,
|
||||
# using libx264 and video presets
|
||||
|
||||
#<Stream live.h264>
|
||||
#Format rtp
|
||||
#Feed feed1.ffm
|
||||
#VideoCodec libx264
|
||||
#VideoFrameRate 24
|
||||
#VideoBitRate 100
|
||||
#VideoSize 480x272
|
||||
#AVPresetVideo default
|
||||
#AVPresetVideo baseline
|
||||
#AVOptionVideo flags +global_header
|
||||
#
|
||||
#AudioCodec libfaac
|
||||
#AudioBitRate 32
|
||||
#AudioChannels 2
|
||||
#AudioSampleRate 22050
|
||||
#AVOptionAudio flags +global_header
|
||||
#</Stream>
|
||||
|
||||
##################################################################
|
||||
# SDP/multicast examples
|
||||
#
|
||||
@@ -369,3 +352,5 @@ ACL allow 192.168.0.0 192.168.255.255
|
||||
<Redirect index.html>
|
||||
URL http://www.ffmpeg.org/
|
||||
</Redirect>
|
||||
|
||||
|
||||
|
||||
@@ -1,907 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle ffserver Documentation
|
||||
@titlepage
|
||||
@center @titlefont{ffserver Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffserver [@var{options}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
@command{ffserver} is a streaming server for both audio and video.
|
||||
It supports several live feeds, streaming from files and time shifting
|
||||
on live feeds. You can seek to positions in the past on each live
|
||||
feed, provided you specify a big enough feed storage.
|
||||
|
||||
@command{ffserver} is configured through a configuration file, which
|
||||
is read at startup. If not explicitly specified, it will read from
|
||||
@file{/etc/ffserver.conf}.
|
||||
|
||||
@command{ffserver} receives prerecorded files or FFM streams from some
|
||||
@command{ffmpeg} instance as input, then streams them over
|
||||
RTP/RTSP/HTTP.
|
||||
|
||||
An @command{ffserver} instance will listen on some port as specified
|
||||
in the configuration file. You can launch one or more instances of
|
||||
@command{ffmpeg} and send one or more FFM streams to the port where
|
||||
ffserver is expecting to receive them. Alternately, you can make
|
||||
@command{ffserver} launch such @command{ffmpeg} instances at startup.
|
||||
|
||||
Input streams are called feeds, and each one is specified by a
|
||||
@code{<Feed>} section in the configuration file.
|
||||
|
||||
For each feed you can have different output streams in various
|
||||
formats, each one specified by a @code{<Stream>} section in the
|
||||
configuration file.
|
||||
|
||||
@chapter Detailed description
|
||||
|
||||
@command{ffserver} works by forwarding streams encoded by
|
||||
@command{ffmpeg}, or pre-recorded streams which are read from disk.
|
||||
|
||||
Precisely, @command{ffserver} acts as an HTTP server, accepting POST
|
||||
requests from @command{ffmpeg} to acquire the stream to publish, and
|
||||
serving RTSP clients or HTTP clients GET requests with the stream
|
||||
media content.
|
||||
|
||||
A feed is an @ref{FFM} stream created by @command{ffmpeg}, and sent to
|
||||
a port where @command{ffserver} is listening.
|
||||
|
||||
Each feed is identified by a unique name, corresponding to the name
|
||||
of the resource published on @command{ffserver}, and is configured by
|
||||
a dedicated @code{Feed} section in the configuration file.
|
||||
|
||||
The feed publish URL is given by:
|
||||
@example
|
||||
http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
|
||||
@end example
|
||||
|
||||
where @var{ffserver_ip_address} is the IP address of the machine where
|
||||
@command{ffserver} is installed, @var{http_port} is the port number of
|
||||
the HTTP server (configured through the @option{HTTPPort} option), and
|
||||
@var{feed_name} is the name of the corresponding feed defined in the
|
||||
configuration file.
|
||||
|
||||
Each feed is associated with a file which is stored on disk. This stored
|
||||
file is used to allow sending pre-recorded data to a player as fast as
|
||||
possible when new content is added in real-time to the stream.
|
||||
|
||||
A "live-stream" or "stream" is a resource published by
|
||||
@command{ffserver}, and made accessible through the HTTP protocol to
|
||||
clients.
|
||||
|
||||
A stream can be connected to a feed, or to a file. In the first case,
|
||||
the published stream is forwarded from the corresponding feed
|
||||
generated by a running instance of @command{ffmpeg}, in the second
|
||||
case the stream is read from a pre-recorded file.
|
||||
|
||||
Each stream is identified by a unique name, corresponding to the name
|
||||
of the resource served by @command{ffserver}, and is configured by
|
||||
a dedicated @code{Stream} section in the configuration file.
|
||||
|
||||
The stream access HTTP URL is given by:
|
||||
@example
|
||||
http://@var{ffserver_ip_address}:@var{http_port}/@var{stream_name}[@var{options}]
|
||||
@end example
|
||||
|
||||
The stream access RTSP URL is given by:
|
||||
@example
|
||||
http://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}]
|
||||
@end example
|
||||
|
||||
@var{stream_name} is the name of the corresponding stream defined in
|
||||
the configuration file. @var{options} is a list of options specified
|
||||
after the URL which affect how the stream is served by
|
||||
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
|
||||
and RTSP ports configured with the options @var{HTTPPort} and
|
||||
@var{RTSPPort} respectively.
|
||||
|
||||
In case the stream is associated with a feed, the encoding parameters
|
||||
must be configured in the stream configuration. They are sent to
|
||||
@command{ffmpeg} when setting up the encoding. This allows
|
||||
@command{ffserver} to define the encoding parameters used by
|
||||
the @command{ffmpeg} encoders.
|
||||
|
||||
The @command{ffmpeg} @option{override_ffserver} commandline option
|
||||
allows one to override the encoding parameters set by the server.
|
||||
|
||||
Multiple streams can be connected to the same feed.
|
||||
|
||||
For example, you can have a situation described by the following
|
||||
graph:
|
||||
@example
|
||||
_________ __________
|
||||
| | | |
|
||||
ffmpeg 1 -----| feed 1 |-----| stream 1 |
|
||||
\ |_________|\ |__________|
|
||||
\ \
|
||||
\ \ __________
|
||||
\ \ | |
|
||||
\ \| stream 2 |
|
||||
\ |__________|
|
||||
\
|
||||
\ _________ __________
|
||||
\ | | | |
|
||||
\| feed 2 |-----| stream 3 |
|
||||
|_________| |__________|
|
||||
|
||||
_________ __________
|
||||
| | | |
|
||||
ffmpeg 2 -----| feed 3 |-----| stream 4 |
|
||||
|_________| |__________|
|
||||
|
||||
_________ __________
|
||||
| | | |
|
||||
| file 1 |-----| stream 5 |
|
||||
|_________| |__________|
|
||||
@end example
|
||||
|
||||
@anchor{FFM}
|
||||
@section FFM, FFM2 formats
|
||||
|
||||
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
|
||||
video and audio streams and encoding options, and can store a moving time segment
|
||||
of an infinite movie or a whole movie.
|
||||
|
||||
FFM is version-specific, and there is limited compatibility between FFM files
|
||||
generated by one version of ffmpeg/ffserver and another version of
|
||||
ffmpeg/ffserver. It may work but it is not guaranteed to work.
|
||||
|
||||
FFM2 is extensible while maintaining compatibility and should work between
|
||||
differing versions of tools. FFM2 is the default.
|
||||
|
||||
@section Status stream
|
||||
|
||||
@command{ffserver} supports an HTTP interface which exposes the
|
||||
current status of the server.
|
||||
|
||||
Simply point your browser to the address of the special status stream
|
||||
specified in the configuration file.
|
||||
|
||||
For example if you have:
|
||||
@example
|
||||
<Stream status.html>
|
||||
Format status
|
||||
|
||||
# Only allow local people to get the status
|
||||
ACL allow localhost
|
||||
ACL allow 192.168.0.0 192.168.255.255
|
||||
</Stream>
|
||||
@end example
|
||||
|
||||
then the server will post a page with the status information when
|
||||
the special stream @file{status.html} is requested.
|
||||
|
||||
@section How do I make it work?
|
||||
|
||||
As a simple test, just run the following two command lines where INPUTFILE
|
||||
is some file which you can decode with ffmpeg:
|
||||
|
||||
@example
|
||||
ffserver -f doc/ffserver.conf &
|
||||
ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm
|
||||
@end example
|
||||
|
||||
At this point you should be able to go to your Windows machine and fire up
|
||||
Windows Media Player (WMP). Go to Open URL and enter
|
||||
|
||||
@example
|
||||
http://<linuxbox>:8090/test.asf
|
||||
@end example
|
||||
|
||||
You should (after a short delay) see video and hear audio.
|
||||
|
||||
WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
|
||||
transfer the entire file before starting to play.
|
||||
The same is true of AVI files.
|
||||
|
||||
You should edit the @file{ffserver.conf} file to suit your needs (in
|
||||
terms of frame rates etc). Then install @command{ffserver} and
|
||||
@command{ffmpeg}, write a script to start them up, and off you go.
|
||||
|
||||
@section What else can it do?
|
||||
|
||||
You can replay video from .ffm files that was recorded earlier.
|
||||
However, there are a number of caveats, including the fact that the
|
||||
ffserver parameters must match the original parameters used to record the
|
||||
file. If they do not, then ffserver deletes the file before recording into it.
|
||||
(Now that I write this, it seems broken).
|
||||
|
||||
You can fiddle with many of the codec choices and encoding parameters, and
|
||||
there are a bunch more parameters that you cannot control. Post a message
|
||||
to the mailing list if there are some 'must have' parameters. Look in
|
||||
ffserver.conf for a list of the currently available controls.
|
||||
|
||||
It will automatically generate the ASX or RAM files that are often used
|
||||
in browsers. These files are actually redirections to the underlying ASF
|
||||
or RM file. The reason for this is that the browser often fetches the
|
||||
entire file before starting up the external viewer. The redirection files
|
||||
are very small and can be transferred quickly. [The stream itself is
|
||||
often 'infinite' and thus the browser tries to download it and never
|
||||
finishes.]
|
||||
|
||||
@section Tips
|
||||
|
||||
* When you connect to a live stream, most players (WMP, RA, etc) want to
|
||||
buffer a certain number of seconds of material so that they can display the
|
||||
signal continuously. However, ffserver (by default) starts sending data
|
||||
in realtime. This means that there is a pause of a few seconds while the
|
||||
buffering is being done by the player. The good news is that this can be
|
||||
cured by adding a '?buffer=5' to the end of the URL. This means that the
|
||||
stream should start 5 seconds in the past -- and so the first 5 seconds
|
||||
of the stream are sent as fast as the network will allow. It will then
|
||||
slow down to real time. This noticeably improves the startup experience.
|
||||
|
||||
You can also add a 'Preroll 15' statement into the ffserver.conf that will
|
||||
add 15 seconds of prebuffering on all requests that do not otherwise
|
||||
specify a time. In addition, ffserver will skip frames until a key_frame
|
||||
is found. This further reduces the startup delay by not transferring data
|
||||
that will be discarded.
|
||||
|
||||
@section Why does the ?buffer / Preroll stop working after a time?
|
||||
|
||||
It turns out that (on my machine at least) the number of frames successfully
|
||||
grabbed is marginally less than the number that ought to be grabbed. This
|
||||
means that the timestamp in the encoded data stream gets behind realtime.
|
||||
This means that if you say 'Preroll 10', then when the stream gets 10
|
||||
or more seconds behind, there is no Preroll left.
|
||||
|
||||
Fixing this requires a change in the internals of how timestamps are
|
||||
handled.
|
||||
|
||||
@section Does the @code{?date=} stuff work?
|
||||
|
||||
Yes (subject to the limitation outlined above). Also note that whenever you
|
||||
start ffserver, it deletes the ffm file (if any parameters have changed),
|
||||
thus wiping out what you had recorded before.
|
||||
|
||||
The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
|
||||
of the following formats (the 'T' is literal):
|
||||
|
||||
@example
|
||||
* YYYY-MM-DDTHH:MM:SS (localtime)
|
||||
* YYYY-MM-DDTHH:MM:SSZ (UTC)
|
||||
@end example
|
||||
|
||||
You can omit the YYYY-MM-DD, and then it refers to the current day. However
|
||||
note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
|
||||
may be in the future and so is unlikely to be useful.
|
||||
|
||||
You use this by adding the ?date= to the end of the URL for the stream.
|
||||
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
|
||||
@c man end
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@include fftools-common-opts.texi
|
||||
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
@item -f @var{configfile}
|
||||
Read the configuration file @file{configfile}. If not specified, it will
|
||||
read by default from @file{/etc/ffserver.conf}.
|
||||
|
||||
@item -n
|
||||
Enable no-launch mode. This option disables all the @code{Launch}
|
||||
directives within the various @code{<Feed>} sections. Since
|
||||
@command{ffserver} will not launch any @command{ffmpeg} instances, you
|
||||
will have to launch them manually.
|
||||
|
||||
@item -d
|
||||
Enable debug mode. This option increases log verbosity, and directs
|
||||
log messages to stdout. When specified, the @option{CustomLog} option
|
||||
is ignored.
|
||||
@end table
|
||||
|
||||
@chapter Configuration file syntax
|
||||
|
||||
@command{ffserver} reads a configuration file containing global
|
||||
options and settings for each stream and feed.
|
||||
|
||||
The configuration file consists of global options and dedicated
|
||||
sections, which must be introduced by "<@var{SECTION_NAME}
|
||||
@var{ARGS}>" on a separate line and must be terminated by a line in
|
||||
the form "</@var{SECTION_NAME}>". @var{ARGS} is optional.
|
||||
|
||||
Currently the following sections are recognized: @samp{Feed},
|
||||
@samp{Stream}, @samp{Redirect}.
|
||||
|
||||
A line starting with @code{#} is ignored and treated as a comment.
|
||||
|
||||
Names of options and sections are case-insensitive.
|
||||
|
||||
@section ACL syntax
|
||||
An ACL (Access Control List) specifies the addresses which are allowed
|
||||
to access a given stream, or to write a given feed.
|
||||
|
||||
It accepts the following forms:
|
||||
@itemize
|
||||
@item
|
||||
Allow/deny access to @var{address}.
|
||||
@example
|
||||
ACL ALLOW <address>
|
||||
ACL DENY <address>
|
||||
@end example
|
||||
|
||||
@item
|
||||
Allow/deny access to ranges of addresses from @var{first_address} to
|
||||
@var{last_address}.
|
||||
@example
|
||||
ACL ALLOW <first_address> <last_address>
|
||||
ACL DENY <first_address> <last_address>
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
You can repeat the ACL allow/deny as often as you like. It is on a per-
|
||||
stream basis. The first match defines the action. If there are no matches,
|
||||
then the default is the inverse of the last ACL statement.
|
||||
|
||||
Thus 'ACL allow localhost' only allows access from localhost.
|
||||
'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
|
||||
allow everybody else.
|
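As a small sketch of the matching rules, the following per-stream ACL
allows only the 192.168.0.0 range; since the default is the inverse of the
last statement, every other address is denied:

@example
ACL allow 192.168.0.0 192.168.255.255
@end example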
||||
|
||||
@section Global options
|
||||
@table @option
|
||||
@item HTTPPort @var{port_number}
|
||||
@item Port @var{port_number}
|
||||
@item RTSPPort @var{port_number}
|
||||
|
||||
@var{HTTPPort} sets the HTTP server listening TCP port number,
|
||||
@var{RTSPPort} sets the RTSP server listening TCP port number.
|
||||
|
||||
@var{Port} is the equivalent of @var{HTTPPort} and is deprecated.
|
||||
|
||||
You must select a different port from your standard HTTP web server if
|
||||
it is running on the same computer.
|
||||
|
||||
If not specified, no corresponding server will be created.
|
||||
|
||||
@item HTTPBindAddress @var{ip_address}
|
||||
@item BindAddress @var{ip_address}
|
||||
@item RTSPBindAddress @var{ip_address}
|
||||
Set address on which the HTTP/RTSP server is bound. Only useful if you
|
||||
have several network interfaces.
|
||||
|
||||
@var{BindAddress} is the equivalent of @var{HTTPBindAddress} and is
|
||||
deprecated.
|
||||
|
||||
@item MaxHTTPConnections @var{n}
|
||||
Set number of simultaneous HTTP connections that can be handled. It
|
||||
has to be defined @emph{before} the @option{MaxClients} parameter,
|
||||
since it defines the @option{MaxClients} maximum limit.
|
||||
|
||||
Default value is 2000.
|
||||
|
||||
@item MaxClients @var{n}
|
||||
Set number of simultaneous requests that can be handled. Since
|
||||
@command{ffserver} is very fast, it is more likely that you will want
|
||||
to leave this high and use @option{MaxBandwidth}.
|
||||
|
||||
Default value is 5.
|
||||
|
||||
@item MaxBandwidth @var{kbps}
|
||||
Set the maximum amount of kbit/sec that you are prepared to consume
|
||||
when streaming to clients.
|
||||
|
||||
Default value is 1000.
|
||||
|
||||
@item CustomLog @var{filename}
|
||||
Set access log file (uses standard Apache log file format). '-' is the
|
||||
standard output.
|
||||
|
||||
If not specified @command{ffserver} will produce no log.
|
||||
|
||||
In case the commandline option @option{-d} is specified this option is
|
||||
ignored, and the log is written to standard output.
|
||||
|
||||
@item NoDaemon
|
||||
Set no-daemon mode. This option is currently ignored since now
|
||||
@command{ffserver} will always work in no-daemon mode, and is
|
||||
deprecated.
|
||||
@end table
|
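A minimal global section might therefore look like the following sketch;
the values shown are the defaults quoted above or taken from the sample
@file{ffserver.conf}, and should be adjusted to taste:

@example
HTTPPort 8090
HTTPBindAddress 0.0.0.0
MaxHTTPConnections 2000
MaxClients 5
MaxBandwidth 1000
CustomLog -
@end example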
||||
|
||||
@section Feed section
|
||||
|
||||
A Feed section defines a feed provided to @command{ffserver}.
|
||||
|
||||
Each live feed contains one video and/or audio sequence coming from an
|
||||
@command{ffmpeg} encoder or another @command{ffserver}. This sequence
|
||||
may be encoded simultaneously with several codecs at several
|
||||
resolutions.
|
||||
|
||||
A feed instance specification is introduced by a line in the form:
|
||||
@example
|
||||
<Feed FEED_FILENAME>
|
||||
@end example
|
||||
|
||||
where @var{FEED_FILENAME} specifies the unique name of the FFM stream.
|
||||
|
||||
The following options are recognized within a Feed section.
|
||||
|
||||
@table @option
|
||||
@item File @var{filename}
|
||||
@item ReadOnlyFile @var{filename}
|
||||
Set the path where the feed file is stored on disk.
|
||||
|
||||
If not specified, @file{/tmp/FEED.ffm} is assumed, where
|
||||
@var{FEED} is the feed name.
|
||||
|
||||
If @option{ReadOnlyFile} is used the file is marked as read-only and
|
||||
it will not be deleted or updated.
|
||||
|
||||
@item Truncate
|
||||
Truncate the feed file, rather than appending to it. By default
|
||||
@command{ffserver} will append data to the file, until the maximum
|
||||
file size value is reached (see @option{FileMaxSize} option).
|
||||
|
||||
@item FileMaxSize @var{size}
|
||||
Set maximum size of the feed file in bytes. 0 means unlimited. The
|
||||
postfixes @code{K} (2^10), @code{M} (2^20), and @code{G} (2^30) are
|
||||
recognized.
|
||||
|
||||
Default value is 5M.
|
||||
|
||||
@item Launch @var{args}
|
||||
Launch an @command{ffmpeg} command when creating @command{ffserver}.
|
||||
|
||||
@var{args} must be a sequence of arguments to be provided to an
|
||||
@command{ffmpeg} instance. The first provided argument is ignored, and
|
||||
it is replaced by a path with the same dirname as the @command{ffserver}
|
||||
instance, followed by the remaining arguments and terminated with a
|
||||
path corresponding to the feed.
|
||||
|
||||
When the launched process exits, @command{ffserver} will launch
|
||||
another program instance.
|
||||
|
||||
In case you need a more complex @command{ffmpeg} configuration,
|
||||
e.g. if you need to generate multiple FFM feeds with a single
|
||||
@command{ffmpeg} instance, you should launch @command{ffmpeg} by hand.
|
||||
|
||||
This option is ignored in case the commandline option @option{-n} is
|
||||
specified.
|
||||
|
||||
@item ACL @var{spec}
|
||||
Specify the list of IP addresses which are allowed or denied to write
|
||||
the feed. Multiple ACL options can be specified.
|
||||
@end table
|
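Putting these options together, a feed definition might look like the
following sketch (the file name, size and ACL are illustrative):

@example
<Feed feed1.ffm>
File /tmp/feed1.ffm
FileMaxSize 5M
ACL allow 127.0.0.1
</Feed>
@end example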
||||
|
||||
@section Stream section
|
||||
|
||||
A Stream section defines a stream provided by @command{ffserver}, and
|
||||
identified by a single name.
|
||||
|
||||
The stream is sent when answering a request containing the stream
|
||||
name.
|
||||
|
||||
A stream section must be introduced by the line:
|
||||
@example
|
||||
<Stream STREAM_NAME>
|
||||
@end example
|
||||
|
||||
where @var{STREAM_NAME} specifies the unique name of the stream.
|
||||
|
||||
The following options are recognized within a Stream section.
|
||||
|
||||
Encoding options are marked with the @emph{encoding} tag; they are
|
||||
used to set the encoding parameters, and are mapped to libavcodec
|
||||
encoding options. Not all encoding options are supported, in
|
||||
particular it is not possible to set encoder private options. In order
|
||||
to override the encoding options specified by @command{ffserver}, you
|
||||
can use the @command{ffmpeg} @option{override_ffserver} commandline
|
||||
option.
|
||||
|
||||
Only one of the @option{Feed} and @option{File} options should be set.
|
||||
|
||||
@table @option
|
||||
@item Feed @var{feed_name}
|
||||
Set the input feed. @var{feed_name} must correspond to an existing
|
||||
feed defined in a @code{Feed} section.
|
||||
|
||||
When this option is set, encoding options are used to set up the
|
||||
encoding operated by the remote @command{ffmpeg} process.
|
||||
|
||||
@item File @var{filename}
|
||||
Set the filename of the pre-recorded input file to stream.
|
||||
|
||||
When this option is set, encoding options are ignored and the input
|
||||
file content is re-streamed as is.
|
||||
|
||||
@item Format @var{format_name}
|
||||
Set the format of the output stream.
|
||||
|
||||
Must be the name of a format recognized by FFmpeg. If set to
|
||||
@samp{status}, it is treated as a status stream.
|
||||
|
||||
@item InputFormat @var{format_name}
|
||||
Set input format. If not specified, it is automatically guessed.
|
||||
|
||||
@item Preroll @var{n}
|
||||
Set this to the number of seconds backwards in time to start. Note that
|
||||
most players will buffer 5-10 seconds of video, and also you need to allow
|
||||
for a keyframe to appear in the data stream.
|
||||
|
||||
Default value is 0.
|
||||
|
||||
@item StartSendOnKey
|
||||
Do not send stream until it gets the first key frame. By default
|
||||
@command{ffserver} will send data immediately.
|
||||
|
||||
@item MaxTime @var{n}
|
||||
Set the number of seconds to run. This value sets the maximum duration
|
||||
of the stream a client will be able to receive.
|
||||
|
||||
A value of 0 means that no limit is set on the stream duration.
|
||||
|
||||
@item ACL @var{spec}
|
||||
Set ACL for the stream.
|
||||
|
||||
@item DynamicACL @var{spec}
|
||||
|
||||
@item RTSPOption @var{option}
|
||||
|
||||
@item MulticastAddress @var{address}
|
||||
|
||||
@item MulticastPort @var{port}
|
||||
|
||||
@item MulticastTTL @var{integer}
|
||||
|
||||
@item NoLoop
|
||||
|
||||
@item FaviconURL @var{url}
|
||||
Set favicon (favourite icon) for the server status page. It is ignored
|
||||
for regular streams.
|
||||
|
||||
@item Author @var{value}
|
||||
@item Comment @var{value}
|
||||
@item Copyright @var{value}
|
||||
@item Title @var{value}
|
||||
Set metadata corresponding to the option. All these options are
|
||||
deprecated in favor of @option{Metadata}.
|
||||
|
||||
@item Metadata @var{key} @var{value}
|
||||
Set metadata value on the output stream.
|
||||
|
||||
@item NoAudio
|
||||
@item NoVideo
|
||||
Suppress audio/video.
|
||||
|
||||
@item AudioCodec @var{codec_name} (@emph{encoding,audio})
|
||||
Set audio codec.
|
||||
|
||||
@item AudioBitRate @var{rate} (@emph{encoding,audio})
|
||||
Set bitrate for the audio stream in kbits per second.
|
||||
|
||||
@item AudioChannels @var{n} (@emph{encoding,audio})
|
||||
Set number of audio channels.
|
||||
|
||||
@item AudioSampleRate @var{n} (@emph{encoding,audio})
|
||||
Set sampling frequency for audio. When using low bitrates, you should
|
||||
lower this frequency to 22050 or 11025. The supported frequencies
|
||||
depend on the selected audio codec.
|
||||
|
||||
@item AVOptionAudio @var{option} @var{value} (@emph{encoding,audio})
|
||||
Set generic option for audio stream.
|
||||
|
||||
@item AVPresetAudio @var{preset} (@emph{encoding,audio})
|
||||
Set preset for audio stream.
|
||||
|
||||
@item VideoCodec @var{codec_name} (@emph{encoding,video})
|
||||
Set video codec.
|
||||
|
||||
@item VideoBitRate @var{n} (@emph{encoding,video})
|
||||
Set bitrate for the video stream in kbits per second.
|
||||
|
||||
@item VideoBitRateRange @var{range} (@emph{encoding,video})
|
||||
Set video bitrate range.
|
||||
|
||||
A range must be specified in the form @var{minrate}-@var{maxrate}, and
|
||||
specifies the @option{minrate} and @option{maxrate} encoding options
|
||||
expressed in kbits per second.
|
||||
|
||||
@item VideoBitRateRangeTolerance @var{n} (@emph{encoding,video})
|
||||
Set video bitrate tolerance in kbits per second.
|
||||
|
||||
@item PixelFormat @var{pixel_format} (@emph{encoding,video})
|
||||
Set video pixel format.
|
||||
|
||||
@item Debug @var{integer} (@emph{encoding,video})
|
||||
Set video @option{debug} encoding option.
|
||||
|
||||
@item Strict @var{integer} (@emph{encoding,video})
|
||||
Set video @option{strict} encoding option.
|
||||
|
||||
@item VideoBufferSize @var{n} (@emph{encoding,video})
|
||||
Set ratecontrol buffer size, expressed in KB.
|
||||
|
||||
@item VideoFrameRate @var{n} (@emph{encoding,video})
|
||||
Set number of video frames per second.
|
||||
|
||||
@item VideoSize (@emph{encoding,video})
|
||||
Set the size of the video frame; it must be an abbreviation or in the form
|
||||
@var{W}x@var{H}. See @ref{video size syntax,,the Video size section
|
||||
in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
|
||||
Default value is @code{160x128}.
|
||||
|
||||
@item VideoIntraOnly (@emph{encoding,video})
|
||||
Transmit only intra frames (useful for low bitrates, but kills frame rate).
|
||||
|
||||
@item VideoGopSize @var{n} (@emph{encoding,video})
|
||||
If non-intra only, an intra frame is transmitted every VideoGopSize
|
||||
frames. Video synchronization can only begin at an intra frame.
|
||||
|
||||
@item VideoTag @var{tag} (@emph{encoding,video})
|
||||
Set video tag.
|
||||
|
||||
@item VideoHighQuality (@emph{encoding,video})
|
||||
@item Video4MotionVector (@emph{encoding,video})
|
||||
|
||||
@item BitExact (@emph{encoding,video})
|
||||
Set bitexact encoding flag.
|
||||
|
||||
@item IdctSimple (@emph{encoding,video})
|
||||
Set simple IDCT algorithm.
|
||||
|
||||
@item Qscale @var{n} (@emph{encoding,video})
|
||||
Enable constant quality encoding, and set video qscale (quantization
|
||||
scale) value, expressed in @var{n} QP units.
|
||||
|
||||
@item VideoQMin @var{n} (@emph{encoding,video})
|
||||
@item VideoQMax @var{n} (@emph{encoding,video})
|
||||
Set video qmin/qmax.
|
||||
|
||||
@item VideoQDiff @var{integer} (@emph{encoding,video})
|
||||
Set video @option{qdiff} encoding option.
|
||||
|
||||
@item LumiMask @var{float} (@emph{encoding,video})
|
||||
@item DarkMask @var{float} (@emph{encoding,video})
|
||||
Set @option{lumi_mask}/@option{dark_mask} encoding options.
|
||||
|
||||
@item AVOptionVideo @var{option} @var{value} (@emph{encoding,video})
|
||||
Set generic option for video stream.
|
||||
|
||||
@item AVPresetVideo @var{preset} (@emph{encoding,video})
|
||||
Set preset for video stream.
|
||||
|
||||
@var{preset} must be the path of a preset file.
|
||||
@end table
|
||||
|
||||
@subsection Server status stream
|
||||
|
||||
A server status stream is a special stream which is used to show
|
||||
statistics about the @command{ffserver} operations.
|
||||
|
||||
It must be specified setting the option @option{Format} to
|
||||
@samp{status}.
|
||||
|
||||
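
For example, the following stream section (the stream name
@file{status.html} is only illustrative; the @file{doc/ffserver.conf}
sample ships a similar definition) makes the statistics page available at
@file{http://@var{server}:@var{port}/status.html}:
@example
<Stream status.html>
Format status
</Stream>
@end example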
@section Redirect section

A redirect section specifies the page to which a requested URL is
redirected.

A redirect section must be introduced by the line:
@example
<Redirect NAME>
@end example

where @var{NAME} is the name of the page which should be redirected.

It only accepts the option @option{URL}, which specifies the redirection
URL.
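
For example, the following section (an illustrative entry in the spirit of
the @file{doc/ffserver.conf} sample) redirects requests for
@file{index.html} to the FFmpeg project page:
@example
<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>
@end example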
@chapter Stream examples

@itemize
@item
Multipart JPEG
@example
<Stream test.mjpg>
Feed feed1.ffm
Format mpjpeg
VideoFrameRate 2
VideoIntraOnly
NoAudio
Strict -1
</Stream>
@end example

@item
Single JPEG
@example
<Stream test.jpg>
Feed feed1.ffm
Format jpeg
VideoFrameRate 2
VideoIntraOnly
VideoSize 352x240
NoAudio
Strict -1
</Stream>
@end example

@item
Flash
@example
<Stream test.swf>
Feed feed1.ffm
Format swf
VideoFrameRate 2
VideoIntraOnly
NoAudio
</Stream>
@end example

@item
ASF compatible
@example
<Stream test.asf>
Feed feed1.ffm
Format asf
VideoFrameRate 15
VideoSize 352x240
VideoBitRate 256
VideoBufferSize 40
VideoGopSize 30
AudioBitRate 64
StartSendOnKey
</Stream>
@end example

@item
MP3 audio
@example
<Stream test.mp3>
Feed feed1.ffm
Format mp2
AudioCodec mp3
AudioBitRate 64
AudioChannels 1
AudioSampleRate 44100
NoVideo
</Stream>
@end example

@item
Ogg Vorbis audio
@example
<Stream test.ogg>
Feed feed1.ffm
Metadata title "Stream title"
AudioBitRate 64
AudioChannels 2
AudioSampleRate 44100
NoVideo
</Stream>
@end example

@item
Real with audio only at 32 kbits
@example
<Stream test.ra>
Feed feed1.ffm
Format rm
AudioBitRate 32
NoVideo
</Stream>
@end example

@item
Real with audio and video at 64 kbits
@example
<Stream test.rm>
Feed feed1.ffm
Format rm
AudioBitRate 32
VideoBitRate 128
VideoFrameRate 25
VideoGopSize 25
</Stream>
@end example

@item
For a stream coming from a file, you only need to set the input filename
and optionally a new format.

@example
<Stream file.rm>
File "/usr/local/httpd/htdocs/tlive.rm"
NoAudio
</Stream>
@end example

@example
<Stream file.asf>
File "/usr/local/httpd/htdocs/test.asf"
NoAudio
Metadata author "Me"
Metadata copyright "Super MegaCorp"
Metadata title "Test stream from disk"
Metadata comment "Test comment"
</Stream>
@end example
@end itemize
@c man end

@include config.texi
@ifset config-all
@ifset config-avutil
@include utils.texi
@end ifset
@ifset config-avcodec
@include codecs.texi
@include bitstream_filters.texi
@end ifset
@ifset config-avformat
@include formats.texi
@include protocols.texi
@end ifset
@ifset config-avdevice
@include devices.texi
@end ifset
@ifset config-swresample
@include resampler.texi
@end ifset
@ifset config-swscale
@include scaler.texi
@end ifset
@ifset config-avfilter
@include filters.texi
@end ifset
@end ifset

@chapter See Also

@ifhtml
@ifset config-all
@url{ffserver.html,ffserver},
@end ifset
@ifset config-not-all
@url{ffserver-all.html,ffserver-all},
@end ifset
the @file{doc/ffserver.conf} example,
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml

@ifnothtml
@ifset config-all
ffserver(1),
@end ifset
@ifset config-not-all
ffserver-all(1),
@end ifset
the @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml

@include authors.texi

@ignore

@setfilename ffserver
@settitle ffserver video server

@end ignore

@bye
@@ -1,312 +0,0 @@
All the numerical options, if not specified otherwise, accept a string
representing a number as input, which may be followed by one of the SI
unit prefixes, for example: 'K', 'M', or 'G'.

If 'i' is appended to the SI unit prefix, the complete prefix will be
interpreted as a unit prefix for binary multiples, which are based on
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
prefix multiplies the value by 8. This allows using, for example:
'KB', 'MiB', 'G' and 'B' as number suffixes.

Options which do not take arguments are boolean options, and set the
corresponding value to true. They can be set to false by prefixing
the option name with "no". For example, using "-nofoo"
will set the boolean option with name "foo" to false.
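
As an illustration of both rules (the options below are just examples of
the general syntax, not options documented in this section):
@example
# pass the value 1000000 (1M) to the video bitrate option, and set the
# boolean option "stats" to false
ffmpeg -i input.avi -b:v 1M -nostats output.mp4
@end example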
@anchor{Stream specifiers}
@section Stream specifiers
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
are used to precisely specify which stream(s) a given option belongs to.

A stream specifier is a string generally appended to the option name and
separated from it by a colon. E.g. @code{-codec:a:1 ac3} contains the
@code{a:1} stream specifier, which matches the second audio stream. Therefore, it
would select the ac3 codec for the second audio stream.

A stream specifier can match several streams, so that the option is applied to all
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
streams.

An empty stream specifier matches all streams. For example, @code{-codec copy}
or @code{-codec: copy} would copy all the streams without reencoding.

Possible forms of stream specifiers are:
@table @option
@item @var{stream_index}
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of the following: 'v' for video, 'a' for audio, 's' for subtitle,
'd' for data, and 't' for attachments. If @var{stream_index} is given, then it matches
stream number @var{stream_index} of this type. Otherwise, it matches all
streams of this type.
@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
program.
@item #@var{stream_id} or i:@var{stream_id}
Match the stream by stream id (e.g. the PID in an MPEG-TS container).
@item m:@var{key}[:@var{value}]
Matches streams with the metadata tag @var{key} having the specified value. If
@var{value} is not given, matches streams that contain the given tag with any
value.

Note that in @command{ffmpeg}, matching by metadata will only work properly for
input files.
@end table
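
For example, the following (illustrative) command copies all streams of
the input file, except for the second audio stream, which is re-encoded
with libvorbis:
@example
ffmpeg -i INPUT -map 0 -c copy -c:a:1 libvorbis OUTPUT
@end example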
@section Generic options

These options are shared amongst the ff* tools.

@table @option

@item -L
Show license.

@item -h, -?, -help, --help [@var{arg}]
Show help. An optional parameter may be specified to print help about a specific
item. If no argument is specified, only basic (non-advanced) tool
options are shown.

Possible values of @var{arg} are:
@table @option
@item long
Print advanced tool options in addition to the basic tool options.

@item full
Print the complete list of options, including shared and private options
for encoders, decoders, demuxers, muxers, filters, etc.

@item decoder=@var{decoder_name}
Print detailed information about the decoder named @var{decoder_name}. Use the
@option{-decoders} option to get a list of all decoders.

@item encoder=@var{encoder_name}
Print detailed information about the encoder named @var{encoder_name}. Use the
@option{-encoders} option to get a list of all encoders.

@item demuxer=@var{demuxer_name}
Print detailed information about the demuxer named @var{demuxer_name}. Use the
@option{-formats} option to get a list of all demuxers and muxers.

@item muxer=@var{muxer_name}
Print detailed information about the muxer named @var{muxer_name}. Use the
@option{-formats} option to get a list of all muxers and demuxers.

@item filter=@var{filter_name}
Print detailed information about the filter named @var{filter_name}. Use the
@option{-filters} option to get a list of all filters.
@end table
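
For example, the following (illustrative) invocations print the options of
the matroska muxer and of the h264 decoder respectively:
@example
ffmpeg -h muxer=matroska
ffmpeg -h decoder=h264
@end example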
@item -version
Show version.

@item -formats
Show available formats.

@item -codecs
Show all codecs known to libavcodec.

Note that the term 'codec' is used throughout this documentation as a shortcut
for what is more correctly called a media bitstream format.

@item -decoders
Show available decoders.

@item -encoders
Show all available encoders.

@item -bsfs
Show available bitstream filters.

@item -protocols
Show available protocols.

@item -filters
Show available libavfilter filters.

@item -pix_fmts
Show available pixel formats.

@item -sample_fmts
Show available sample formats.

@item -layouts
Show channel names and standard channel layouts.

@item -colors
Show recognized color names.

@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
Set the logging level used by the library.
Adding "repeat+" indicates that repeated log output should not be compressed
to the first line and the "Last message repeated n times" line will be
omitted. "repeat" can also be used alone.
If "repeat" is used alone, and with no prior loglevel set, the default
loglevel will be used. If multiple loglevel parameters are given, using
'repeat' will not change the loglevel.
@var{loglevel} is a number or a string containing one of the following values:
@table @samp
@item quiet
Show nothing at all; be silent.
@item panic
Only show fatal errors which could lead the process to crash, such as
an assert failure. This is not currently used for anything.
@item fatal
Only show fatal errors. These are errors after which the process
absolutely cannot continue.
@item error
Show all errors, including ones which can be recovered from.
@item warning
Show all warnings and errors. Any message related to possibly
incorrect or unexpected events will be shown.
@item info
Show informative messages during processing. This is in addition to
warnings and errors. This is the default value.
@item verbose
Same as @code{info}, except more verbose.
@item debug
Show everything, including debugging information.
@end table

By default the program logs to stderr. If coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled by setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced by setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
The use of the environment variable @env{NO_COLOR} is deprecated and
will be dropped in a following FFmpeg version.
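
For example, the following (illustrative) command enables verbose logging
and keeps repeated messages from being compressed:
@example
ffmpeg -loglevel repeat+verbose -i input output
@end example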
@item -report
Dump the full command line and console output to a file named
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
directory.
This file can be useful for bug reports.
It also implies @code{-loglevel verbose}.

Setting the environment variable @code{FFREPORT} to any value has the
same effect. If the value is a ':'-separated key=value sequence, these
options will affect the report; option values must be escaped if they
contain special characters or the options delimiter ':' (see the
``Quoting and escaping'' section in the ffmpeg-utils manual). The
following options are recognized:
@table @option
@item file
set the file name to use for the report; @code{%p} is expanded to the name
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
to a plain @code{%}
@item level
set the log level
@end table

Errors in parsing the environment variable are not fatal, and will not
appear in the report.
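
For example (an illustrative invocation), to write a report to a file
named @file{ffreport.log} using a log level of @code{32} (the numeric
value of the @code{info} log level):
@example
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
@end example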
@item -hide_banner
Suppress printing banner.

All FFmpeg tools will normally show a copyright notice, build options
and library versions. This option can be used to suppress printing
this information.

@item -cpuflags flags (@emph{global})
Allows setting and clearing cpu flags. This option is intended
for testing. Do not use it unless you know what you're doing.
@example
ffmpeg -cpuflags -sse+mmx ...
ffmpeg -cpuflags mmx ...
ffmpeg -cpuflags 0 ...
@end example
Possible flags for this option are:
@table @samp
@item x86
@table @samp
@item mmx
@item mmxext
@item sse
@item sse2
@item sse2slow
@item sse3
@item sse3slow
@item ssse3
@item atom
@item sse4.1
@item sse4.2
@item avx
@item xop
@item fma4
@item 3dnow
@item 3dnowext
@item cmov
@end table
@item ARM
@table @samp
@item armv5te
@item armv6
@item armv6t2
@item vfp
@item vfpv3
@item neon
@end table
@item PowerPC
@table @samp
@item altivec
@end table
@item Specific Processors
@table @samp
@item pentium2
@item pentium3
@item pentium4
@item k6
@item k62
@item athlon
@item athlonxp
@item k8
@end table
@end table

@item -opencl_bench
Benchmark all available OpenCL devices and show the results. This option
is only available when FFmpeg has been compiled with @code{--enable-opencl}.

@item -opencl_options options (@emph{global})
Set OpenCL environment options. This option is only available when
FFmpeg has been compiled with @code{--enable-opencl}.

@var{options} must be a list of @var{key}=@var{value} option pairs
separated by ':'. See the ``OpenCL Options'' section in the
ffmpeg-utils manual for the list of supported options.
@end table
@section AVOptions

These options are provided directly by the libavformat, libavdevice and
libavcodec libraries. To see the list of available AVOptions, use the
@option{-help} option. They are separated into two categories:
@table @option
@item generic
These options can be set for any container, codec or device. Generic options
are listed under AVFormatContext options for containers/devices and under
AVCodecContext options for codecs.
@item private
These options are specific to the given container, device or codec. Private
options are listed under their corresponding containers/devices/codecs.
@end table

For example, to write an ID3v2.3 header instead of the default ID3v2.4 to
an MP3 file, use the @option{id3v2_version} private option of the MP3
muxer:
@example
ffmpeg -i input.flac -id3v2_version 3 out.mp3
@end example

All codec AVOptions are per-stream, and thus a stream specifier
should be attached to them.
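
For example (an illustrative command line), the @option{crf} private
option of the libx264 encoder can be applied to the video stream with a
@code{:v} stream specifier:
@example
ffmpeg -i input.mov -c:v libx264 -crf:v 23 output.mkv
@end example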
Note: the @option{-nooption} syntax cannot be used for boolean
AVOptions; use @option{-option 0}/@option{-option 1} instead.

Note: the old undocumented way of specifying per-stream AVOptions by
prepending v/a/s to the option name is now obsolete and will be
removed soon.
@@ -1,270 +0,0 @@
Filter design
=============

This document explains guidelines that should be observed (or ignored with
good reason) when writing filters for libavfilter.

In this document, the word “frame” indicates either a video frame or a group
of audio samples, as stored in an AVFilterBuffer structure.


Format negotiation
==================

The query_formats method should set, for each input and each output link,
the list of supported formats.

For video links, that means pixel format. For audio links, that means
channel layout, sample format (the sample packing is implied by the sample
format) and sample rate.

The lists are not just lists; they are references to shared objects. When
the negotiation mechanism computes the intersection of the formats
supported at each end of a link, all references to both lists are replaced
with a reference to the intersection. And when a single format is
eventually chosen for a link amongst the remaining list, again, all
references to the list are updated.

That means that if a filter requires that its input and output have the
same format amongst a supported list, all it has to do is use a reference
to the same list of formats.

query_formats can leave some formats unset and return AVERROR(EAGAIN) to
cause the negotiation mechanism to try again later. That can be used by
filters with complex requirements to use the format negotiated on one link
to set the formats supported on another.
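
As a rough sketch (not part of the original guidelines; it assumes the
internal helpers ff_make_format_list() and ff_set_common_formats() from
libavfilter/formats.h), a query_formats callback for a video filter that
accepts the same pixel formats on its input and its output could look
like this:

    static int query_formats(AVFilterContext *ctx)
    {
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE
        };
        /* One shared list is attached to every input and output link, so
         * the format eventually negotiated is the same on both sides. */
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }
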

Buffer references ownership and permissions
===========================================

Principle
---------

Audio and video data are voluminous; the buffer and buffer reference
mechanism is intended to avoid, as much as possible, expensive copies of
that data while still allowing the filters to produce correct results.

The data is stored in buffers represented by AVFilterBuffer structures.
They must not be accessed directly, but through references stored in
AVFilterBufferRef structures. Several references can point to the
same buffer; the buffer is automatically deallocated once all
corresponding references have been destroyed.

The characteristics of the data (resolution, sample rate, etc.) are
stored in the reference; different references for the same buffer can
show different characteristics. In particular, a video reference can
point to only a part of a video buffer.

A reference is usually obtained as input to the start_frame or
filter_frame method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer function. A reference is destroyed
using the avfilter_unref_bufferp function.
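
A minimal sketch of that life cycle (illustrative only, using the
signatures of the AVFilterBuffer API described in this document):

    AVFilterBufferRef *extra = avfilter_ref_buffer(ref, ~0); /* keep the same permissions */
    if (!extra)
        return AVERROR(ENOMEM);
    /* ... use extra ... */
    avfilter_unref_bufferp(&extra); /* destroys the reference and sets extra to NULL */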

Reference ownership
-------------------

At any time, a reference “belongs” to a particular piece of code,
usually a filter. With a few caveats that will be explained below, only
that piece of code is allowed to access it. It is also responsible for
destroying it, although this is sometimes done automatically (see the
section on link reference fields).

Here are the (fairly obvious) rules for reference ownership:

* A reference received by the filter_frame method (or its start_frame
  deprecated version) belongs to the corresponding filter.

  Special exception for video references: the reference may be used
  internally for automatic copying and must not be destroyed before
  end_frame; it can be given away to ff_start_frame.

* A reference passed to ff_filter_frame (or the deprecated
  ff_start_frame) is given away and must no longer be used.

* A reference created with avfilter_ref_buffer belongs to the code that
  created it.

* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
  belongs to the code that requested it.

* A reference given as return value by the get_video_buffer or
  get_audio_buffer method is given away and must no longer be used.

Link reference fields
---------------------

The AVFilterLink structure has a few AVFilterBufferRef fields. The
cur_buf and out_buf were used with the deprecated
start_frame/draw_slice/end_frame API and should no longer be used.
src_buf, cur_buf_copy and partial_buf are used by libavfilter internally
and must not be accessed by filters.

Reference permissions
---------------------

The AVFilterBufferRef structure has a perms field that describes what
the code that owns the reference is allowed to do to the buffer data.
Different references for the same buffer can have different permissions.

For video filters that implement the deprecated
start_frame/draw_slice/end_frame API, the permissions only apply to the
parts of the buffer that have already been covered by the draw_slice
method.

The value is a binary OR of the following constants:

* AV_PERM_READ: the owner can read the buffer data; this is essentially
  always true and is there for self-documentation.

* AV_PERM_WRITE: the owner can modify the buffer data.

* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
  will not be modified by previous filters.

* AV_PERM_REUSE: the owner can output the buffer several times, without
  modifying the data in between.

* AV_PERM_REUSE2: the owner can output the buffer several times and
  modify the data in between (useless without the WRITE permission).

* AV_PERM_ALIGN: the owner can access the data using fast operations
  that require data alignment.

The READ, WRITE and PRESERVE permissions are about sharing the same
buffer between several filters to avoid expensive copies, without them
making conflicting changes to the data.

The REUSE and REUSE2 permissions are about special memory for direct
rendering. For example, a buffer directly allocated in video memory must
not be modified once it is displayed on screen, or it will cause tearing;
it will therefore not have the REUSE2 permission.

The ALIGN permission is about extracting part of the buffer, for
copy-less padding or cropping for example.


References received on input pads are guaranteed to have all the
permissions stated in the min_perms field and none of the permissions
stated in the rej_perms field.

References obtained by ff_get_video_buffer and ff_get_audio_buffer are
guaranteed to have at least all the permissions requested as argument.
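
For instance (an illustrative snippet, not part of the original text), a
video filter producing a new output frame might request a buffer that it
is allowed to write to and to access with aligned operations:

    AVFilterBufferRef *out = ff_get_video_buffer(outlink,
                                                 AV_PERM_WRITE | AV_PERM_ALIGN,
                                                 outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);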

References created by avfilter_ref_buffer have the same permissions as
the original reference minus the ones explicitly masked; the mask is
usually ~0 to keep the same permissions.

Filters should remove permissions on references they give to output
whenever necessary. This can be done automatically by setting the
rej_perms field on the output pad.

Here are a few guidelines corresponding to common situations:

* Filters that modify and forward their frame (like drawtext) need the
  WRITE permission.

* Filters that read their input to produce a new frame on output (like
  scale) need the READ permission on input and must request a buffer
  with the WRITE permission.

* Filters that intend to keep a reference after the filtering process
  is finished (after filter_frame returns) must have the PRESERVE
  permission on it and remove the WRITE permission if they create a new
  reference to give it away.

* Filters that intend to modify a reference they have kept after the end
  of the filtering process need the REUSE2 permission and must remove
  the PRESERVE permission if they create a new reference to give it
  away.


Frame scheduling
================

The purpose of these rules is to ensure that frames flow in the filter
graph without getting stuck and accumulating somewhere.

Simple filters that output one frame for each input frame should not have
to worry about it.

filter_frame
------------

This method is called when a frame is pushed to the filter's input. It
can be called at any time except in a reentrant way.

If the input frame is enough to produce output, then the filter should
push the output frames on the output link immediately.

As an exception to the previous rule, if the input frame is enough to
produce several output frames, then the filter only needs to output at
least one of them per link immediately. The additional frames can be left
buffered in the filter; these buffered frames must be flushed immediately
if a new input produces new output.

(Example: a frame rate-doubling filter: filter_frame must (1) flush the
second copy of the previous frame, if it is still there, (2) push the
first copy of the incoming frame, and (3) keep the second copy for later.)
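
A sketch of that rate-doubling logic, in the same informal pseudocode
style as the request_frame example below (push_frame() and
keep_reference() are placeholders, not real API):

    filter_frame(inlink, frame):
        if (second_copy_pending) {
            push_frame(outlink, second_copy);    /* (1) flush the leftover copy */
            second_copy_pending = 0;
        }
        push_frame(outlink, frame);              /* (2) push the first copy */
        second_copy = keep_reference(frame);     /* (3) keep the second copy */
        second_copy_pending = 1;
        return 0;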

If the input frame is not enough to produce output, the filter must not
call request_frame to get more. It must just process the frame or queue
it. The task of requesting more frames is left to the filter's
request_frame method or the application.

If a filter has several inputs, the filter must be ready for frames
arriving randomly on any input. Therefore, any filter with several inputs
will most likely require some kind of queuing mechanism. It is perfectly
acceptable to have a limited queue and to drop frames when the inputs
are too unbalanced.

request_frame
-------------

This method is called when a frame is wanted on an output.

For an input, it should directly call filter_frame on the corresponding
output.

For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.

Return values:
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.

The typical implementation of request_frame for a filter with several
inputs will look like this:

    if (frames_queued) {
        push_one_frame();
        return 0;
    }
    while (!frame_pushed) {
        input = input_where_a_frame_is_most_needed();
        ret = ff_request_frame(input);
        if (ret == AVERROR_EOF) {
            process_eof_on_input();
        } else if (ret < 0) {
            return ret;
        }
    }
    return 0;

Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them from its inputs, and as a reaction,
the filter_frame method will be called and do the work.


Legacy API
==========

Until libavfilter 3.23, the filter_frame method was split:

- for video filters, it was made of start_frame, draw_slice (that could be
  called several times on distinct parts of the frame) and end_frame;

- for audio filters, it was called filter_samples.
doc/filters.texi: file diff suppressed because it is too large.