Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2025-12-06 23:10:00 +01:00)

Compare commits: 28 commits
| SHA1 |
|---|
| ace829cb45 |
| b9b3ef4f5a |
| b2b7cb0f60 |
| 5cc6370a15 |
| 8b019be79b |
| e36830c695 |
| bc2ceeb3ac |
| 66bdf8f145 |
| bfe61bbd00 |
| 5888679ae3 |
| ecb375684d |
| df56bc18ef |
| ef99025603 |
| 860293a9a2 |
| 9b71114247 |
| 0b6de235b9 |
| a73b464118 |
| d9e9e97e5f |
| d52676da38 |
| ca85c3cd7d |
| de253343c1 |
| 9c787a21ce |
| 7e11a86175 |
| 9ef90ff0a2 |
| 6c95a26c1a |
| b6ec181240 |
| b42e135614 |
| 0564e8ee49 |

.gitattributes (vendored, 1 change)
@@ -1 +1,2 @@
*.pnm -diff -text
tests/ref/fate/sub-scc eol=crlf

.gitignore (vendored, 7 changes)
@@ -19,12 +19,8 @@
*.swp
*.ver
*.version
*.metal.air
*.metallib
*.metallib.c
*.ptx
*.ptx.c
*.ptx.gz
*_g
\#*
.\#*
@@ -35,9 +31,8 @@
/ffprobe
/config.asm
/config.h
/config_components.h
/coverage.info
/avversion.h
/lcov/
/src
/mapfile
/tools/python/__pycache__/

.mailmap (26 changes)
@@ -1,26 +0,0 @@
<jeebjp@gmail.com> <jan.ekstrom@aminocom.com>
<sw@jkqxz.net> <mrt@jkqxz.net>
<u@pkh.me> <cboesch@gopro.com>
<quinkblack@foxmail.com> <wantlamy@gmail.com>
<quinkblack@foxmail.com> <zhilizhao@tencent.com>
<modmaker@google.com> <modmaker-at-google.com@ffmpeg.org>
<stebbins@jetheaddev.com> <jstebbins@jetheaddev.com>
<barryjzhao@tencent.com> <mypopydev@gmail.com>
<barryjzhao@tencent.com> <jun.zhao@intel.com>
<josh@itanimul.li> <joshdk@obe.tv>
<michael@niedermayer.cc> <michaelni@gmx.at>
<linjie.justin.fu@gmail.com> <linjie.fu@intel.com>
<linjie.justin.fu@gmail.com> <fulinjie@zju.edu.cn>
<ceffmpeg@gmail.com> <cehoyos@ag.or.at>
<ceffmpeg@gmail.com> <cehoyos@rainbow.studorg.tuwien.ac.at>
<ffmpeg@gyani.pro> <gyandoshi@gmail.com>
<atomnuker@gmail.com> <rpehlivanov@obe.tv>
<lizhong1008@gmail.com> <zhong.li@intel.com>
<lizhong1008@gmail.com> <zhongli_dev@126.com>
<andreas.rheinhardt@outlook.com> <andreas.rheinhardt@gmail.com>
<andreas.rheinhardt@outlook.com> <andreas.rheinhardt@googlemail.com>
rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
<thilo.borgmann@mail.de> <thilo.borgmann@googlemail.com>
<lq@chinaffmpeg.org> <liuqi05@kuaishou.com>
<ruiling.song83@gmail.com> <ruiling.song@intel.com>
Cosmin Stejerean <cosmin@cosmin.at> Cosmin Stejerean via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>

.travis.yml
@@ -19,7 +19,7 @@ cache:
  directories:
    - ffmpeg-samples
before_install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update --all; fi
install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install nasm; fi
script:

CREDITS (4 changes)
@@ -1,6 +1,6 @@
See the Git history of the project (https://git.ffmpeg.org/ffmpeg) to
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
get the names of people who have contributed to FFmpeg.

To check the log, you can type the command "git log" in the FFmpeg
source directory, or browse the online repository at
https://git.ffmpeg.org/ffmpeg
http://source.ffmpeg.org.

Changelog (545 changes)
@@ -1,550 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest.

version 7.0.1:
lavc/flacdsp: do not assume maximum R-V VL
avformat/flacdec: Reorder allocations to avoid leak on error
avcodec/adts_parser: Don't presume buffer to be padded
avformat/movenc: Check av_malloc()
avcodec/vp8: Return error on error
avformat/mov: store sample_sizes as unsigned ints
avformat/vvc: fix parsing sps_subpic_id
avformat/vvc: initialize some ptl flags
avcodec/mscc & mwsc: Check loop counts before use
avcodec/mpegvideo_enc: Fix potential overflow in RD
avcodec/mpeg4videodec: assert impossible wrap points
avcodec/mpeg12dec: Use 64bit in bit computation
avcodec/vqcdec: Check init_get_bits8() for failure
avcodec/vvc/dec: Check init_get_bits8() for failure
avcodec/vble: Check av_image_get_buffer_size() for failure
avcodec/vp3: Replace check by assert
avcodec/vp8: Forward return of ff_vpx_init_range_decoder()
avcodec/jpeg2000dec: remove ST=3 case
avcodec/qsvdec: Check av_image_get_buffer_size() for failure
avcodec/exr: Fix preview overflow
avcodec/decode: decode_simple_internal() only implements audio and video
avcodec/fmvc: remove dead assignment
avcodec/h2645_sei: Remove dead checks
avcodec/h264_slice: Remove dead sps check
avcodec/lpc: copy levenson coeffs only when they have been computed
avutil/tests/base64: Check with too short output array
libavutil/base64: Try not to write over the array end
avcodec/cbs_av1: Avoid shift overflow
fftools/ffplay: Check return of swr_alloc_set_opts2()
tools/opt_common: Check for malloc failure
doc/examples/demux_decode: Simplify loop
avformat/concatdec: Check file
avcodec/mpegvideo_enc: Fix 1 line and one column images
avcodec/amrwbdec: assert mode to be valid in decode_fixed_vector()
avcodec/wavarc: fix integer overflow in decode_5elp() block type 2
swscale/output: Fix integer overflow in yuv2rgba64_full_1_c_template()
swscale/output: Fix integer overflow in yuv2rgba64_1_c_template
avcodec/av1dec: Change bit_depth to int
avcodec/av1dec: bit_depth cannot be another values than 8,10,12
avcodec/avs3_parser: assert the return value of init_get_bits()
avcodec/avs2_parser: Assert init_get_bits8() success with const size 15
avfilter/avfiltergraph: return value of ff_request_frame() is unused
avformat/mxfdec: Check body_offset
avformat/kvag: Check sample_rate
avcodec/atrac9dec: Check init_get_bits8() for failure
avcodec/ac3_parser: Check init_get_bits8() for failure
avcodec/pngdec: Check last AVFrame before deref
avcodec/hevcdec: Check ref frame
doc/examples/qsv_transcode: Initialize pointer before free
doc/examples/qsv_transcode: Simplify str_to_dict() loop
doc/examples/vaapi_transcode: Simplify loop
doc/examples/qsv_transcode: Simplify loop
avcodec/cbs_h2645: Check NAL space
avfilter/vf_thumbnail_cuda: Set ret before checking it
avfilter/signature_lookup: Dont copy uninitialized stuff around
avfilter/signature_lookup: Fix 2 differences to the refernce SW
avcodec/x86/vp3dsp_init: Set correct function pointer, fix crash
avformat/mp3dec: change bogus error message if read_header encounters EOF
avformat/mp3dec: simplify inner frame size check in mp3_read_header
avformat/mp3dec: only call ffio_ensure_seekback once
avcodec/cbs_h266: read vps_ptl_max_tid before using it
avcodec/cbs_h266: fix sh_collocated_from_l0_flag and sh_collocated_ref_idx infer
avformat/vvc: fix parsing some early VPS bitstream values
avformat/vvc: fix writing general_constraint_info bytes
avutil/ppc/cpu: Also use the machdep.altivec sysctl on NetBSD
lavd/v4l2: Use proper field type for second parameter of ioctl() with BSD's
vulkan_av1: Fix force_integer_mv value
vaapi_av1: Fix force_integer_mv value
av1dec: Add force_integer_mv derived field for decoder use
avutil/iamf: fix offsets for mix_gain options
avformat/iamfdec: check nb_streams in header read
avformat/mov: free the infe allocated item data on failure
avformat/iamf_writer: reject duplicated stream ids in a stream group
avformat/mov: don't read key_size bytes twice in the keys atom
avformat/mov: take into account the first eight bytes in the keys atom
avformat/mov: fix the check for the heif item parsing loop
avutil/iamf: fix mix_gain_class name
av1dec: Fix RefFrameSignBias calculation
avcodec/codec_par: always clear extradata_size in avcodec_parameters_to_context()
avcodec/mediacodecenc: Fix return empty packet when bsf is used
avcodec/hevcdec: Fix precedence, bogus film grain warning
avcodec/hevcdec: fix segfault on invalid film grain metadata
lavc/vvc: Skip enhancement layer NAL units
avformat/mov: ignore old infe box versions
vulkan_av1: add workaround for NVIDIA drivers tested on broken CTS
lavc/vulkan_av1: Use av1dec reference order hint information
lavc/av1: Record reference ordering information for each frame
doc/encoders: add missing libxvid option
doc/encoders: remove non-existent flag
fate/ffmpeg: Avoid dependency on samples
avcodec/wavpack: Remove always-false check
avcodec/wavpack: Fix leak and segfault on reallocation error
avcodec/lossless_videoencdsp: Don't presume alignment in diff_bytes
avcodec/ppc/h264dsp: Fix left shifts of negative numbers

version 7.0:
- DXV DXT1 encoder
- LEAD MCMP decoder
- EVC decoding using external library libxevd
- EVC encoding using external library libxeve
- QOA decoder and demuxer
- aap filter
- demuxing, decoding, filtering, encoding, and muxing in the
  ffmpeg CLI now all run in parallel
- enable gdigrab device to grab a window using the hwnd=HANDLER syntax
- IAMF raw demuxer and muxer
- D3D12VA hardware accelerated H264, HEVC, VP9, AV1, MPEG-2 and VC1 decoding
- tiltandshift filter
- qrencode filter and qrencodesrc source
- quirc filter
- lavu/eval: introduce randomi() function in expressions
- VVC decoder (experimental)
- fsync filter
- Raw Captions with Time (RCWT) closed caption muxer
- ffmpeg CLI -bsf option may now be used for input as well as output
- ffmpeg CLI options may now be used as -/opt <path>, which is equivalent
  to -opt <contents of file <path>>
- showinfo bitstream filter
- a C11-compliant compiler is now required; note that this requirement
  will be bumped to C17 in the near future, so consider updating your
  build environment if it lacks C17 support
- Change the default bitrate control method from VBR to CQP for QSV encoders.
- removed deprecated ffmpeg CLI options -psnr and -map_channel
- DVD-Video demuxer, powered by libdvdnav and libdvdread
- ffprobe -show_stream_groups option
- ffprobe (with -export_side_data film_grain) now prints film grain metadata
- AEA muxer
- ffmpeg CLI loopback decoders
- Support PacketTypeMetadata of PacketType in enhanced flv format
- ffplay with hwaccel decoding support (depends on vulkan renderer via libplacebo)
- dnn filter libtorch backend
- Android content URIs protocol
- AOMedia Film Grain Synthesis 1 (AFGS1)
- RISC-V optimizations for AAC, FLAC, JPEG-2000, LPC, RV4.0, SVQ, VC1, VP8, and more
- Loongarch optimizations for HEVC decoding
- Important AArch64 optimizations for HEVC
- IAMF support inside MP4/ISOBMFF
- Support for HEIF/AVIF still images and tiled still images
- Dolby Vision profile 10 support in AV1
- Support for Ambient Viewing Environment metadata in MP4/ISOBMFF
- HDR10 metadata passthrough when encoding with libx264, libx265, and libsvtav1


version 6.1:
- libaribcaption decoder
- Playdate video decoder and demuxer
- Extend VAAPI support for libva-win32 on Windows
- afireqsrc audio source filter
- arls filter
- ffmpeg CLI new option: -readrate_initial_burst
- zoneplate video source filter
- command support in the setpts and asetpts filters
- Vulkan decode hwaccel, supporting H264, HEVC and AV1
- color_vulkan filter
- bwdif_vulkan filter
- nlmeans_vulkan filter
- RivaTuner video decoder
- xfade_vulkan filter
- vMix video decoder
- Essential Video Coding parser, muxer and demuxer
- Essential Video Coding frame merge bsf
- bwdif_cuda filter
- Microsoft RLE video encoder
- Raw AC-4 muxer and demuxer
- Raw VVC bitstream parser, muxer and demuxer
- Bitstream filter for editing metadata in VVC streams
- Bitstream filter for converting VVC from MP4 to Annex B
- scale_vt filter for videotoolbox
- transpose_vt filter for videotoolbox
- support for the P_SKIP hinting to speed up libx264 encoding
- Support HEVC,VP9,AV1 codec in enhanced flv format
- apsnr and asisdr audio filters
- OSQ demuxer and decoder
- Support HEVC,VP9,AV1 codec fourcclist in enhanced rtmp protocol
- CRI USM demuxer
- ffmpeg CLI '-top' option deprecated in favor of the setfield filter
- VAAPI AV1 encoder
- ffprobe XML output schema changed to account for multiple
  variable-fields elements within the same parent element
- ffprobe -output_format option added as an alias of -of


version 6.0:
- Radiance HDR image support
- ddagrab (Desktop Duplication) video capture filter
- ffmpeg -shortest_buf_duration option
- ffmpeg now requires threading to be built
- ffmpeg now runs every muxer in a separate thread
- Add new mode to cropdetect filter to detect crop-area based on motion vectors and edges
- VAAPI decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
- WBMP (Wireless Application Protocol Bitmap) image format
- a3dscope filter
- bonk decoder and demuxer
- Micronas SC-4 audio decoder
- LAF demuxer
- APAC decoder and demuxer
- Media 100i decoders
- DTS to PTS reorder bsf
- ViewQuest VQC decoder
- backgroundkey filter
- nvenc AV1 encoding support
- MediaCodec decoder via NDKMediaCodec
- MediaCodec encoder
- oneVPL support for QSV
- QSV AV1 encoder
- QSV decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
- showcwt multimedia filter
- corr video filter
- adrc audio filter
- afdelaysrc audio filter
- WADY DPCM decoder and demuxer
- CBD2 DPCM decoder
- ssim360 video filter
- ffmpeg CLI new options: -stats_enc_pre[_fmt], -stats_enc_post[_fmt],
  -stats_mux_pre[_fmt]
- hstack_vaapi, vstack_vaapi and xstack_vaapi filters
- XMD ADPCM decoder and demuxer
- media100 to mjpegb bsf
- ffmpeg CLI new option: -fix_sub_duration_heartbeat
- WavArc decoder and demuxer
- CrystalHD decoders deprecated
- SDNS demuxer
- RKA decoder and demuxer
- filtergraph syntax in ffmpeg CLI now supports passing file contents
  as option values, by prefixing option name with '/'
- hstack_qsv, vstack_qsv and xstack_qsv filters


version 5.1:
- add ipfs/ipns gateway support
- dialogue enhance audio filter
- dropped obsolete XvMC hwaccel
- pcm-bluray encoder
- DFPWM audio encoder/decoder and raw muxer/demuxer
- SITI filter
- Vizrt Binary Image encoder/decoder
- avsynctest source filter
- feedback video filter
- pixelize video filter
- colormap video filter
- colorchart video source filter
- multiply video filter
- PGS subtitle frame merge bitstream filter
- blurdetect filter
- tiltshelf audio filter
- QOI image format support
- ffprobe -o option
- virtualbass audio filter
- VDPAU AV1 hwaccel
- PHM image format support
- remap_opencl filter
- added chromakey_cuda filter
- added bilateral_cuda filter


version 5.0:
- ADPCM IMA Westwood encoder
- Westwood AUD muxer
- ADPCM IMA Acorn Replay decoder
- Argonaut Games CVG demuxer
- Argonaut Games CVG muxer
- Concatf protocol
- afwtdn audio filter
- audio and video segment filters
- Apple Graphics (SMC) encoder
- hsvkey and hsvhold video filters
- adecorrelate audio filter
- atilt audio filter
- grayworld video filter
- AV1 Low overhead bitstream format muxer
- swscale slice threading
- MSN Siren decoder
- scharr video filter
- apsyclip audio filter
- morpho video filter
- amr parser
- (a)latency filters
- GEM Raster image decoder
- asdr audio filter
- speex decoder
- limitdiff video filter
- xcorrelate video filter
- varblur video filter
- huesaturation video filter
- colorspectrum source video filter
- RTP packetizer for uncompressed video (RFC 4175)
- bitpacked encoder
- VideoToolbox VP9 hwaccel
- VideoToolbox ProRes hwaccel
- support loongarch.
- aspectralstats audio filter
- adynamicsmooth audio filter
- libplacebo filter
- vflip_vulkan, hflip_vulkan and flip_vulkan filters
- adynamicequalizer audio filter
- yadif_videotoolbox filter
- VideoToolbox ProRes encoder
- anlmf audio filter
- IMF demuxer (experimental)


version 4.4:
- AudioToolbox output device
- MacCaption demuxer
- PGX decoder
- chromanr video filter
- VDPAU accelerated HEVC 10/12bit decoding
- ADPCM IMA Ubisoft APM encoder
- Rayman 2 APM muxer
- AV1 encoding support SVT-AV1
- Cineform HD encoder
- ADPCM Argonaut Games encoder
- Argonaut Games ASF muxer
- AV1 Low overhead bitstream format demuxer
- RPZA video encoder
- ADPCM IMA MOFLEX decoder
- MobiClip FastAudio decoder
- MobiClip video decoder
- MOFLEX demuxer
- MODS demuxer
- PhotoCD decoder
- MCA demuxer
- AV1 decoder (Hardware acceleration used only)
- SVS demuxer
- Argonaut Games BRP demuxer
- DAT demuxer
- aax demuxer
- IPU decoder, parser and demuxer
- Intel QSV-accelerated AV1 decoding
- Argonaut Games Video decoder
- libwavpack encoder removed
- ACE demuxer
- AVS3 demuxer
- AVS3 video decoder via libuavs3d
- Cintel RAW decoder
- VDPAU accelerated VP9 10/12bit decoding
- afreqshift and aphaseshift filters
- High Voltage Software ADPCM encoder
- LEGO Racers ALP (.tun & .pcm) muxer
- AV1 VAAPI decoder
- adenorm filter
- ADPCM IMA AMV encoder
- AMV muxer
- NVDEC AV1 hwaccel
- DXVA2/D3D11VA hardware accelerated AV1 decoding
- speechnorm filter
- SpeedHQ encoder
- asupercut filter
- asubcut filter
- Microsoft Paint (MSP) version 2 decoder
- Microsoft Paint (MSP) demuxer
- AV1 monochrome encoding support via libaom >= 2.0.1
- asuperpass and asuperstop filter
- shufflepixels filter
- tmidequalizer filter
- estdif filter
- epx filter
- Dolby E parser
- shear filter
- kirsch filter
- colortemperature filter
- colorcontrast filter
- PFM encoder
- colorcorrect filter
- binka demuxer
- XBM parser
- xbm_pipe demuxer
- colorize filter
- CRI parser
- aexciter audio filter
- exposure video filter
- monochrome video filter
- setts bitstream filter
- vif video filter
- OpenEXR image encoder
- Simbiosis IMX decoder
- Simbiosis IMX demuxer
- Digital Pictures SGA demuxer and decoders
- TTML subtitle encoder and muxer
- identity video filter
- msad video filter
- gophers protocol
- RIST protocol via librist


version 4.3:
- v360 filter
- Intel QSV-accelerated MJPEG decoding
- Intel QSV-accelerated VP9 decoding
- Support for TrueHD in mp4
- Support AMD AMF encoder on Linux (via Vulkan)
- IMM5 video decoder
- ZeroMQ protocol
- support Sipro ACELP.KELVIN decoding
- streamhash muxer
- sierpinski video source
- scroll video filter
- photosensitivity filter
- anlms filter
- arnndn filter
- bilateral filter
- maskedmin and maskedmax filters
- VDPAU VP9 hwaccel
- median filter
- QSV-accelerated VP9 encoding
- AV1 encoding support via librav1e
- AV1 frame merge bitstream filter
- AV1 Annex B demuxer
- axcorrelate filter
- mvdv decoder
- mvha decoder
- MPEG-H 3D Audio support in mp4
- thistogram filter
- freezeframes filter
- Argonaut Games ADPCM decoder
- Argonaut Games ASF demuxer
- xfade video filter
- xfade_opencl filter
- afirsrc audio filter source
- pad_opencl filter
- Simon & Schuster Interactive ADPCM decoder
- Real War KVAG demuxer
- CDToons video decoder
- siren audio decoder
- Rayman 2 ADPCM decoder
- Rayman 2 APM demuxer
- cas video filter
- High Voltage Software ADPCM decoder
- LEGO Racers ALP (.tun & .pcm) demuxer
- AMQP 0-9-1 protocol (RabbitMQ)
- Vulkan support
- avgblur_vulkan, overlay_vulkan, scale_vulkan and chromaber_vulkan filters
- ADPCM IMA MTF decoder
- FWSE demuxer
- DERF DPCM decoder
- DERF demuxer
- CRI HCA decoder
- CRI HCA demuxer
- overlay_cuda filter
- switch from AvxSynth to AviSynth+ on Linux
- mv30 decoder
- Expanded styling support for 3GPP Timed Text Subtitles (movtext)
- WebP parser
- tmedian filter
- maskedthreshold filter
- Support for muxing pcm and pgs in m2ts
- Cunning Developments ADPCM decoder
- asubboost filter
- Pro Pinball Series Soundbank demuxer
- pcm_rechunk bitstream filter
- scdet filter
- NotchLC decoder
- gradients source video filter
- MediaFoundation encoder wrapper
- untile filter
- Simon & Schuster Interactive ADPCM encoder
- PFM decoder
- dblur video filter
- Real War KVAG muxer


version 4.2:
- tpad filter
- AV1 decoding support through libdav1d
- dedot filter
- chromashift and rgbashift filters
- freezedetect filter
- truehd_core bitstream filter
- dhav demuxer
- PCM-DVD encoder
- GIF parser
- vividas demuxer
- hymt decoder
- anlmdn filter
- maskfun filter
- hcom demuxer and decoder
- ARBC decoder
- libaribb24 based ARIB STD-B24 caption support (profiles A and C)
- Support decoding of HEVC 4:4:4 content in nvdec and cuviddec
- removed libndi-newtek
- agm decoder
- KUX demuxer
- AV1 frame split bitstream filter
- lscr decoder
- lagfun filter
- asoftclip filter
- Support decoding of HEVC 4:4:4 content in vdpau
- colorhold filter
- xmedian filter
- asr filter
- showspatial multimedia filter
- VP4 video decoder
- IFV demuxer
- derain filter
- deesser filter
- mov muxer writes tracks with unspecified language instead of English by default
- add support for using clang to compile CUDA kernels


version 4.1:
- deblock filter
- tmix filter
- amplify filter
- fftdnoiz filter
- aderivative and aintegral audio filters
- pal75bars and pal100bars video filter sources
- support mbedTLS based TLS
- adeclick filter
- adeclip filter
- libtensorflow backend for DNN based filters like srcnn
- vc1 decoder is now bit-exact
- ATRAC9 decoder
- lensfun wrapper filter
- colorconstancy filter
- AVS2 video decoder via libdavs2
- IMM4 video decoder
- Brooktree ProSumer video decoder
- MatchWare Screen Capture Codec decoder
- WinCam Motion Video decoder
- 1D LUT filter (lut1d)
- RemotelyAnywhere Screen Capture decoder
- cue and acue filters
- support for AV1 in MP4
- transpose_npp filter
- AVS2 video encoder via libxavs2
- amultiply filter
- Block-Matching 3d (bm3d) denoising filter
- acrossover filter
- ilbc decoder
- audio denoiser as afftdn filter
- AV1 parser
- SER demuxer
- sinc audio filter source
- chromahold filter
- setparams filter
- vibrance filter
- decoding S12M timecode in h264
- xstack filter
- pcm vidc decoder and encoder
- (a)graphmonitor filter
- yadif_cuda filter


version 4.0:
- Bitstream filters for editing metadata in H.264, HEVC and MPEG-2 streams
- Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
@@ -598,7 +54,6 @@ version 4.0:
- Haivision SRT protocol via libsrt
- segafilm muxer
- vfrdet filter
- SRCNN filter


version 3.4:

INSTALL.md
@@ -1,4 +1,4 @@
## Installing FFmpeg
#Installing FFmpeg:

1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`.
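
A minimal sketch of how that step is typically carried out; the install prefix and job count below are illustrative choices, not taken from this page:

    ./configure --prefix=/usr/local   # list every option with ./configure --help
    make -j4                          # compile the libraries and command-line tools
    make install                      # install into the chosen prefix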

LICENSE.md (41 changes)
@@ -21,11 +21,10 @@ Specifically, the GPL parts of FFmpeg are:
- `compat/solaris/make_sunver.pl`
- `doc/t2h.pm`
- `doc/texi2pod.pl`
- `libswresample/tests/swresample.c`
- `libswresample/swresample-test.c`
- `tests/checkasm/*`
- `tests/tiny_ssim.c`
- the following filters in libavfilter:
  - `signature_lookup.c`
  - `vf_blackframe.c`
  - `vf_boxblur.c`
  - `vf_colormatrix.c`
@@ -35,13 +34,13 @@ Specifically, the GPL parts of FFmpeg are:
  - `vf_eq.c`
  - `vf_find_rect.c`
  - `vf_fspp.c`
  - `vf_geq.c`
  - `vf_histeq.c`
  - `vf_hqdn3d.c`
  - `vf_interlace.c`
  - `vf_kerndeint.c`
  - `vf_lensfun.c` (GPL version 3 or later)
  - `vf_mcdeint.c`
  - `vf_mpdecimate.c`
  - `vf_nnedi.c`
  - `vf_owdenoise.c`
  - `vf_perspective.c`
  - `vf_phase.c`
@@ -50,14 +49,12 @@ Specifically, the GPL parts of FFmpeg are:
  - `vf_pullup.c`
  - `vf_repeatfields.c`
  - `vf_sab.c`
  - `vf_signature.c`
  - `vf_smartblur.c`
  - `vf_spp.c`
  - `vf_stereo3d.c`
  - `vf_super2xsai.c`
  - `vf_tinterlace.c`
  - `vf_uspp.c`
  - `vf_vaguedenoiser.c`
  - `vsrc_mptestsrc.c`

Should you, for whatever reason, prefer to use version 3 of the (L)GPL, then
@@ -83,47 +80,41 @@ affect the licensing of binaries resulting from the combination.

### Compatible libraries

The following libraries are under GPL version 2:
- avisynth
The following libraries are under GPL:
- frei0r
- libcdio
- libdavs2
- librubberband
- libvidstab
- libx264
- libx265
- libxavs
- libxavs2
- libxvid

When combining them with FFmpeg, FFmpeg needs to be licensed as GPL as well by
passing `--enable-gpl` to configure.

The following libraries are under LGPL version 3:
- gmp
- libaribb24
- liblensfun

When combining them with FFmpeg, use the configure option `--enable-version3` to
upgrade FFmpeg to the LGPL v3.

The VMAF, mbedTLS, RK MPI, OpenCORE and VisualOn libraries are under the Apache License
2.0. That license is incompatible with the LGPL v2.1 and the GPL v2, but not with
The OpenCORE and VisualOn libraries are under the Apache License 2.0. That
license is incompatible with the LGPL v2.1 and the GPL v2, but not with
version 3 of those licenses. So to combine these libraries with FFmpeg, the
license version needs to be upgraded by passing `--enable-version3` to configure.

The smbclient library is under the GPL v3, to combine it with FFmpeg,
the options `--enable-gpl` and `--enable-version3` have to be passed to
configure to upgrade FFmpeg to the GPL v3.

### Incompatible libraries

There are certain libraries you can combine with FFmpeg whose licenses are not
compatible with the GPL and/or the LGPL. If you wish to enable these
libraries, even in circumstances that their license may be incompatible, pass
`--enable-nonfree` to configure. This will cause the resulting binary to be
`--enable-nonfree` to configure. But note that if you enable any of these
libraries the resulting binary will be under a complex license mix that is
more restrictive than the LGPL and that may result in additional obligations.
It is possible that these restrictions cause the resulting binary to be
unredistributable.

The Fraunhofer FDK AAC and OpenSSL libraries are under licenses which are
incompatible with the GPLv2 and v3. To the best of our knowledge, they are
compatible with the LGPL.

The NVENC library, while its header file is licensed under the compatible MIT
license, requires a proprietary binary blob at run time, and is deemed to be
incompatible with the GPL. We are not certain if it is compatible with the
LGPL, but we require `--enable-nonfree` even with LGPL configurations in case
it is not.
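
For illustration, the license upgrades described in LICENSE.md above correspond to plain configure switches; the external libraries paired with them below are examples only, not requirements:

    ./configure --enable-gpl --enable-libx264           # GPL-licensed external library
    ./configure --enable-version3 --enable-libaribb24   # library that needs the (L)GPL v3 upgrade
    ./configure --enable-nonfree --enable-libfdk-aac    # result may be unredistributable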

MAINTAINERS (174 changes)
@@ -11,11 +11,17 @@ A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
patches and related discussions.


Project Leader
==============

final design decisions


Applications
============

ffmpeg:
ffmpeg.c Michael Niedermayer, Anton Khirnov
ffmpeg.c Michael Niedermayer

ffplay:
ffplay.c Marton Balint
@@ -33,9 +39,8 @@ QuickTime faststart:
Miscellaneous Areas
===================

documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
project server day to day operations Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
project server emergencies Árpád Gereöffy, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Lou Logan, Gyan Doshi
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
presets Robert Swain
metadata subsystem Aurelien Jacobs
release management Michael Niedermayer
@@ -47,12 +52,12 @@ Communication

website Deby Barbara Lepage
fate.ffmpeg.org Timothy Gu
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
Patchwork Andriy Gelman
mailing lists Baptiste Coudurier
Twitter Reynaldo H. Verdejo Pinochet
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos, Lou Logan
mailing lists Baptiste Coudurier, Lou Logan
Google+ Paul B Mahol, Michael Niedermayer, Alexander Strasser
Twitter Lou Logan, Reynaldo H. Verdejo Pinochet
Launchpad Timothy Gu
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, rcombs, wm4
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, Rodger Combs, wm4


libavutil
@@ -73,7 +78,6 @@ Other:
float_dsp Loren Merritt
hash Reimar Doeffinger
hwcontext_cuda* Timo Rothenpieler
hwcontext_vulkan* Lynne
intfloat* Michael Niedermayer
integer.c, integer.h Michael Niedermayer
lzo Reimar Doeffinger
@@ -84,7 +88,6 @@ Other:
rational.c, rational.h Michael Niedermayer
rc4 Reimar Doeffinger
ripemd.c, ripemd.h James Almer
tx* Lynne


libavcodec
@@ -110,12 +113,15 @@ Generic Parts:
lzw.* Michael Niedermayer
floating point AAN DCT:
faandct.c, faandct.h Michael Niedermayer
Non-power-of-two MDCT:
mdct15.c, mdct15.h Rostislav Pehlivanov
Golomb coding:
golomb.c, golomb.h Michael Niedermayer
motion estimation:
motion* Michael Niedermayer
rate control:
ratecontrol.c Michael Niedermayer
libxvid_rc.c Michael Niedermayer
simple IDCT:
simple_idct.c, simple_idct.h Michael Niedermayer
postprocessing:
@@ -131,29 +137,27 @@ Codecs:
8bps.c Roberto Togni
8svx.c Jaikrishnan Menon
aacenc*, aaccoder.c Rostislav Pehlivanov
adpcm.c Zane van Iperen
alacenc.c Jaikrishnan Menon
alsdec.c Thilo Borgmann, Umair Khan
amfenc* Dmitrii Ovchinnikov
aptx.c Aurelien Jacobs
ass* Aurelien Jacobs
asv* Michael Niedermayer
atrac3plus* Maxim Poliakovski
audiotoolbox* rcombs
avs2* Huiwen Ren
audiotoolbox* Rodger Combs
bgmc.c, bgmc.h Thilo Borgmann
binkaudio.c Peter Ross
cavs* Stefan Gehrer
cdxl.c Paul B Mahol
celp_filters.* Vitor Sessak
cinepak.c Roberto Togni
cinepakenc.c Rl / Aetey G.T. AB
ccaption_dec.c Anshul Maheshwari, Aman Gupta
cljr Alex Beregszaszi
cpia.c Stephan Hilb
crystalhd.c Philip Langdale
cscd.c Reimar Doeffinger
cuviddec.c Timo Rothenpieler
dca* foo86
dfpwm* Jack Bruienne
dirac* Rostislav Pehlivanov
dnxhd* Baptiste Coudurier
dolby_e* foo86
@@ -162,7 +166,9 @@ Codecs:
dv.c Roman Shaposhnik
dvbsubdec.c Anshul Maheshwari
eacmv*, eaidct*, eat* Peter Ross
evrc* Paul B Mahol
exif.c, exif.h Thilo Borgmann
exr.c Martin Vignali
ffv1* Michael Niedermayer
ffwavesynth.c Nicolas George
fifo.c Jan Sebechlebsky
@@ -179,24 +185,19 @@ Codecs:
interplayvideo.c Mike Melanson
jni*, ffjni* Matthieu Bouron
jpeg2000* Nicolas Bertrand
jpegxl* Leo Izen
jvdec.c Peter Ross
lcl*.c Roberto Togni, Reimar Doeffinger
libcelt_dec.c Nicolas George
libcodec2.c Tomas Härdin
libdirac* David Conrad
libdavs2.c Huiwen Ren
libjxl*.c, libjxl.h Leo Izen
libgsm.c Michel Bardiaux
libkvazaar.c Arttu Ylä-Outinen
libopenh264enc.c Martin Storsjo, Linjie Fu
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libtheoraenc.c David Conrad
libvorbis.c David Conrad
libvpx* James Zern
libxavs.c Stefan Gehrer
libxavs2.c Huiwen Ren
libzvbi-teletextdec.c Marton Balint
lzo.h, lzo.c Reimar Doeffinger
mdec.c Michael Niedermayer
@@ -209,18 +210,17 @@ Codecs:
mqc* Nicolas Bertrand
msmpeg4.c, msmpeg4data.h Michael Niedermayer
msrle.c Mike Melanson
msrleenc.c Tomas Härdin
msvideo1.c Mike Melanson
nuv.c Reimar Doeffinger
nvdec*, nvenc* Timo Rothenpieler
omx.c Martin Storsjo, Aman Gupta
opus* Rostislav Pehlivanov
paf.* Paul B Mahol
pcx.c Ivo van Poorten
pgssubdec.c Reimar Doeffinger
ptx.c Ivo van Poorten
qcelp* Reynaldo H. Verdejo Pinochet
qdm2.c, qdm2data.h Roberto Togni
qsv* Mark Thompson, Zhong Li, Haihao Xiang
qsv* Mark Thompson
qtrle.c Mike Melanson
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
resample2.c Michael Niedermayer
@@ -228,20 +228,25 @@ Codecs:
rpza.c Roberto Togni
rtjpeg.c, rtjpeg.h Reimar Doeffinger
rv10.c Michael Niedermayer
s3tc* Ivo van Poorten
smc.c Mike Melanson
smvjpegdec.c Ash Hughes
snow* Michael Niedermayer, Loren Merritt
sonic.c Alex Beregszaszi
speedhq.c Steinar H. Gunderson
srt* Aurelien Jacobs
sunrast.c Ivo van Poorten
svq3.c Michael Niedermayer
tak* Paul B Mahol
truemotion1* Mike Melanson
tta.c Alex Beregszaszi, Jaikrishnan Menon
ttaenc.c Paul B Mahol
txd.c Ivo van Poorten
v4l2_* Jorge Ramirez-Ortiz
vc2* Rostislav Pehlivanov
vcr1.c Michael Niedermayer
videotoolboxenc.c Rick Kern, Aman Gupta
vima.c Paul B Mahol
vorbisdec.c Denes Balatoni, David Conrad
vorbisenc.c Oded Shimon
vp3* Mike Melanson
@@ -250,21 +255,24 @@ Codecs:
vp8 David Conrad, Ronald Bultje
vp9 Ronald Bultje
vqavideo.c Mike Melanson
vvc Nuo Mi
wmaprodec.c Sascha Sommer
wmavoice.c Ronald S. Bultje
wmv2.c Michael Niedermayer
xan.c Mike Melanson
xbm* Paul B Mahol
xface Stefano Sabatini
xvmc.c Ivan Kalvachev
xwd* Paul B Mahol

Hardware acceleration:
crystalhd.c Philip Langdale
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
d3d11va* Steve Lhomme
mediacodec* Matthieu Bouron, Aman Gupta, Zhao Zhili
vaapi* Haihao Xiang
vaapi_encode* Mark Thompson, Haihao Xiang
mediacodec* Matthieu Bouron, Aman Gupta
vaapi* Gwenole Beauchesne
vaapi_encode* Mark Thompson
vdpau* Philip Langdale, Carl Eugen Hoyos
videotoolbox* Rick Kern, Aman Gupta, Zhao Zhili
videotoolbox* Rick Kern, Aman Gupta


libavdevice
@@ -299,40 +307,65 @@ Generic parts:
motion_estimation.c Davinder Singh

Filters:
f_drawgraph.c Paul B Mahol
af_adelay.c Paul B Mahol
af_aecho.c Paul B Mahol
af_afade.c Paul B Mahol
af_amerge.c Nicolas George
af_aphaser.c Paul B Mahol
af_aresample.c Michael Niedermayer
af_astats.c Paul B Mahol
af_atempo.c Pavel Koshevoy
af_biquads.c Paul B Mahol
af_chorus.c Paul B Mahol
af_compand.c Paul B Mahol
af_firequalizer.c Muhammad Faiz
af_hdcd.c Burt P.
af_ladspa.c Paul B Mahol
af_loudnorm.c Kyle Swanson
af_pan.c Nicolas George
af_sidechaincompress.c Paul B Mahol
af_silenceremove.c Paul B Mahol
avf_aphasemeter.c Paul B Mahol
avf_avectorscope.c Paul B Mahol
avf_showcqt.c Muhammad Faiz
vf_blend.c Paul B Mahol
vf_bwdif Thomas Mundt (CC <thomas.mundt@hr.de>)
vf_chromakey.c Timo Rothenpieler
vf_colorconstancy.c Mina Sami (CC <minas.gorgy@gmail.com>)
vf_colorchannelmixer.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
vf_colorkey.c Timo Rothenpieler
vf_colorlevels.c Paul B Mahol
vf_coreimage.m Thilo Borgmann
vf_deband.c Paul B Mahol
vf_dejudder.c Nicholas Robbins
vf_delogo.c Jean Delvare (CC <jdelvare@suse.com>)
vf_drawbox.c/drawgrid Andrey Utkin
vf_fsync.c Thilo Borgmann
vf_extractplanes.c Paul B Mahol
vf_histogram.c Paul B Mahol
vf_hqx.c Clément Bœsch
vf_idet.c Pascal Massimino
vf_il.c Paul B Mahol
vf_(t)interlace Thomas Mundt (CC <thomas.mundt@hr.de>)
vf_lenscorrection.c Daniel Oberhoff
vf_libplacebo.c Niklas Haas
vf_mergeplanes.c Paul B Mahol
vf_mestimate.c Davinder Singh
vf_minterpolate.c Davinder Singh
vf_neighbor.c Paul B Mahol
vf_psnr.c Paul B Mahol
vf_random.c Paul B Mahol
vf_readvitc.c Tobias Rapp (CC t.rapp at noa-archive dot com)
vf_scale.c Michael Niedermayer
vf_tonemap_opencl.c Ruiling Song
vf_separatefields.c Paul B Mahol
vf_ssim.c Paul B Mahol
vf_stereo3d.c Paul B Mahol
vf_telecine.c Paul B Mahol
vf_yadif.c Michael Niedermayer
vf_zoompan.c Paul B Mahol

Sources:
vsrc_mandelbrot.c Michael Niedermayer

dnn Yejun Guo

libavformat
===========

@@ -348,60 +381,59 @@ Muxers/Demuxers:
4xm.c Mike Melanson
aadec.c Vesselin Bontchev (vesselin.bontchev at yandex dot com)
adtsenc.c Robert Swain
afc.c Paul B Mahol
aiffdec.c Baptiste Coudurier, Matthieu Bouron
aiffenc.c Baptiste Coudurier, Matthieu Bouron
alp.c Zane van Iperen
amvenc.c Zane van Iperen
apm.c Zane van Iperen
apngdec.c Benoit Fouet
argo_asf.c Zane van Iperen
argo_brp.c Zane van Iperen
argo_cvg.c Zane van Iperen
ass* Aurelien Jacobs
astdec.c Paul B Mahol
astenc.c James Almer
avi* Michael Niedermayer
avisynth.c Stephen Hutchinson
avr.c Paul B Mahol
bink.c Peter Ross
boadec.c Michael Niedermayer
brstm.c Paul B Mahol
caf* Peter Ross
cdxl.c Paul B Mahol
codec2.c Tomas Härdin
crc.c Michael Niedermayer
dashdec.c Steven Liu
dashenc.c Karthick Jeyapal
daud.c Reimar Doeffinger
dfpwmdec.c Jack Bruienne
dss.c Oleksij Rempel
dtsdec.c foo86
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
dvdvideodec.c Marth64
electronicarts.c Peter Ross
evc* Samsung (Dawid Kozinski)
epafdec.c Paul B Mahol
ffm* Baptiste Coudurier
flic.c Mike Melanson
flvdec.c Michael Niedermayer
flvenc.c Michael Niedermayer, Steven Liu
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
hlsenc.c Christian Suloway, Steven Liu
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
imf* Pierre-Anthony Lemieux
img2*.c Michael Niedermayer
ipmovie.c Mike Melanson
ircam* Paul B Mahol
iss.c Stefan Gehrer
jpegxl* Leo Izen
jvdec.c Peter Ross
kvag.c Zane van Iperen
libmodplug.c Clément Bœsch
libopenmpt.c Josh de Kock
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
lxfdec.c Tomas Härdin
matroska.c Aurelien Jacobs, Andreas Rheinhardt
matroskadec.c Aurelien Jacobs, Andreas Rheinhardt
matroskaenc.c David Conrad, Andreas Rheinhardt
matroska.c Aurelien Jacobs
matroskadec.c Aurelien Jacobs
matroskaenc.c David Conrad
matroska subtitles (matroskaenc.c) John Peebles
metadata* Aurelien Jacobs
mgsts.c Paul B Mahol
microdvd* Aurelien Jacobs
mm.c Peter Ross
mov.c Baptiste Coudurier
@@ -413,20 +445,22 @@ Muxers/Demuxers:
mpegtsenc.c Baptiste Coudurier
msnwc_tcp.c Ramiro Polla
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier, Tomas Härdin
mxf* Baptiste Coudurier
nistspheredec.c Paul B Mahol
nsvdec.c Francois Revol
nut* Michael Niedermayer
nuv.c Reimar Doeffinger
oggdec.c, oggdec.h David Conrad
oggenc.c Baptiste Coudurier
oggparse*.c David Conrad
oggparsedaala* Rostislav Pehlivanov
oma.c Maxim Poliakovski
pp_bnk.c Zane van Iperen
paf.c Paul B Mahol
psxstr.c Mike Melanson
pva.c Ivo van Poorten
pvfdec.c Paul B Mahol
r3d.c Baptiste Coudurier
raw.c Michael Niedermayer
rcwtenc.c Marth64
rdt.c Ronald S. Bultje
rl2.c Sascha Sommer
rmdec.c, rmenc.c Ronald S. Bultje
@@ -445,9 +479,11 @@ Muxers/Demuxers:
sdp.c Martin Storsjo
segafilm.c Mike Melanson
segment.c Stefano Sabatini
smjpeg* Paul B Mahol
spdif* Anssi Hannula
srtdec.c Aurelien Jacobs
swf.c Baptiste Coudurier
takdec.c Paul B Mahol
tta.c Alex Beregszaszi
txd.c Ivo van Poorten
voc.c Aurelien Jacobs
@@ -457,15 +493,14 @@ Muxers/Demuxers:
webvtt* Matthew J Heaney
westwood.c Mike Melanson
wtv.c Peter Ross
wvenc.c Paul B Mahol

Protocols:
async.c Zhang Rui
bluray.c Petri Hintukainen
ftp.c Lukasz Marek
http.c Ronald S. Bultje
libsrt.c Zhao Zhili
libssh.c Lukasz Marek
libzmq.c Andriy Gelman
mms*.c Ronald S. Bultje
udp.c Luca Abeni
icecast.c Marvin Scholz
@@ -489,15 +524,13 @@ Operating systems / CPU architectures
=====================================

Alpha Falk Hueffner
MIPS Manojkumar Bhosale, Shiyou Yin
LoongArch Shiyou Yin
MIPS Manojkumar Bhosale
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
Amiga / PowerPC Colin Ward
Linux / PowerPC Lauri Kasanen
RISC-V Rémi Denis-Courmont
Windows MinGW Alex Beregszaszi, Ramiro Polla
Windows Cygwin Victor Paesa
Windows MSVC Hendrik Leppkes
Windows MSVC Matthew Oliver, Hendrik Leppkes
Windows ICL Matthew Oliver
ADI/Blackfin DSP Marc Hoffman
Sparc Roman Shaposhnik
OS/2 KO Myung-Hun
@@ -522,7 +555,6 @@ Joakim Plate
Jun Zhao
Kieran Kunhya
Kirill Gavrilov
Limin Wang
Martin Storsjö
Panagiotis Issaris
Pedro Arthur
@@ -543,11 +575,8 @@ Releases
If you want to maintain an older release, please contact us


GnuPG Fingerprints and IRC nicknames of maintainers and contributors
====================================================================

IRC nicknames are in parentheses. These apply
to the IRC channels listed on the website.
GnuPG Fingerprints of maintainers and contributors
==================================================

Alexander Strasser 1C96 78B7 83CB 8AA7 9AF5 D1EB A7D8 A57B A876 E58F
Anssi Hannula 1A92 FF42 2DD9 8D2E 8AF7 65A9 4278 C520 513D F3CB
@@ -561,24 +590,17 @@ Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
Leo Izen (Traneptora) B6FD 3CFC 7ACF 83FC 9137 6945 5A71 C331 FD2F A19A
Leo Izen (Traneptora) 1D83 0A0B CE46 709E 203B 26FC 764E 48EA 4822 1833
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Niklas Haas (haasn) 1DDB 8076 B14D 5B48 32FC 99D9 EB52 DA9C 02BA 6FB4
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
Pierre-Anthony Lemieux (pal) F4B3 9492 E6F2 E4AF AEC8 46CB 698F A1F0 F8D4 EED4
Ramiro Polla 7859 C65B 751B 1179 792E DAE8 8E95 8B2F 9B6C 5700
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
@@ -587,9 +609,7 @@ Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Steinar H. Gunderson C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Thilo Borgmann (thilo) CE1D B7F4 4D20 FC3A DD9F FE5A 257C 5B8F 1D20 B92F
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83
Tomas Härdin (thardin) A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Tomas Härdin A79D 4E3D F38F 763F 91F5 8B33 A01E 8AE0 41BB 2551
Wei Gao 4269 7741 857A 0E60 9EC5 08D2 4744 4EFA 62C1 87B9
Zane van Iperen (zane) 61AE D40F 368B 6F26 9DAE 3892 6861 6B2D 8AC4 DCC5

Makefile (55 changes)
@@ -13,19 +13,17 @@ vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
vpath %.cu $(SRC_PATH)
vpath %.ptx $(SRC_PATH)
vpath %.metal $(SRC_PATH)
vpath %/fate_config.sh.template $(SRC_PATH)

TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options

ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample

# $(FFLIBS-yes) needs to be in linking order
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
FFLIBS-$(CONFIG_SWSCALE) += swscale
@@ -47,50 +45,25 @@ FF_DEP_LIBS := $(DEP_LIBS)
FF_STATIC_DEP_LIBS := $(STATIC_DEP_LIBS)

$(TOOLS): %$(EXESUF): %.o
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(filter-out $(FF_DEP_LIBS), $^) $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)

target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

target_dem_%_fuzzer$(EXESUF): target_dem_%_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/target_sws_fuzzer$(EXESUF): tools/target_sws_fuzzer.o $(FF_DEP_LIBS)
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)


tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
tools/enc_recon_frame_test$(EXESUF): $(FF_DEP_LIBS)
tools/enc_recon_frame_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/scale_slice_test$(EXESUF): $(FF_DEP_LIBS)
tools/scale_slice_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/target_dec_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
tools/target_dem_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)

CONFIGURABLE_COMPONENTS = \
$(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
$(SRC_PATH)/libavcodec/bitstream_filters.c \
$(SRC_PATH)/libavcodec/hwaccels.h \
$(SRC_PATH)/libavcodec/parsers.c \
$(SRC_PATH)/libavformat/protocols.c \

config_components.h: ffbuild/.config
config.h: ffbuild/.config
ffbuild/.config: $(CONFIGURABLE_COMPONENTS)
@-tput bold 2>/dev/null
@-printf '\nWARNING: $(?) newer than config_components.h, rerun configure\n\n'
@-printf '\nWARNING: $(?) newer than config.h, rerun configure\n\n'
@-tput sgr0 2>/dev/null

SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
@@ -98,8 +71,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS \
OBJS SLIBOBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS

define RESET
$(1) :=
@@ -121,13 +93,12 @@ include $(SRC_PATH)/fftools/Makefile
include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/doc/examples/Makefile

$(ALLFFLIBS:%=lib%/version.o): libavutil/ffversion.h
libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h

$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
ifeq ($(STRIPTYPE),direct)
$(STRIP) -o $@ $<
else
$(RM) $@
$(CP) $< $@
$(STRIP) $@
endif
@@ -136,18 +107,13 @@ endif
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)

VERSION_SH = $(SRC_PATH)/ffbuild/version.sh
ifeq ($(VERSION_TRACKING),yes)
GIT_LOG = $(SRC_PATH)/.git/logs/HEAD
endif

.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) ffbuild/config.mak
.version: M=@

ifneq ($(VERSION_TRACKING),yes)
libavutil/ffversion.h .version: REVISION=unknown
endif
libavutil/ffversion.h .version:
$(M)revision=$(REVISION) $(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
$(M)$(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
$(Q)touch .version

# force version.sh to run whenever version might have changed
@@ -168,17 +134,16 @@ uninstall-data:

clean::
$(RM) $(CLEANSUFFIXES)
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES)) $(addprefix compat/*/*/,$(CLEANSUFFIXES))
$(RM) $(addprefix compat/,$(CLEANSUFFIXES)) $(addprefix compat/*/,$(CLEANSUFFIXES))
$(RM) -r coverage-html
$(RM) -rf coverage.info coverage.info.in lcov

distclean:: clean
$(RM) .version config.asm config.h config_components.h mapfile \
$(RM) .version avversion.h config.asm config.h mapfile \
ffbuild/.config ffbuild/config.* libavutil/avconfig.h \
version.h libavutil/ffversion.h libavcodec/codec_names.h \
libavcodec/bsf_list.c libavformat/protocol_list.c \
libavcodec/codec_list.c libavcodec/parser_list.c \
libavfilter/filter_list.c libavdevice/indev_list.c libavdevice/outdev_list.c \
libavformat/muxer_list.c libavformat/demuxer_list.c
ifeq ($(SRC_LINK),src)
$(RM) src
@@ -193,7 +158,7 @@ check: all alltools examples testprogs fate

include $(SRC_PATH)/tests/Makefile

$(sort $(OUTDIRS)):
$(sort $(OBJDIRS)):
$(Q)mkdir -p $@

# Dummy rule to stop make trying to rebuild removed or renamed headers
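
A couple of the targets visible in this Makefile excerpt, as they would typically be invoked from a configured source tree (a sketch, not a complete reference):

    make check        # builds all, alltools, examples and testprogs, then runs the FATE suite
    make distclean    # removes the generated config and version files listed in the rule above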
@@ -9,7 +9,7 @@ such as audio, video, subtitles and related metadata.
* `libavcodec` provides implementation of a wider range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access.
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides means to alter decoded audio and video through a directed graph of connected filters.
* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.

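As a quick illustration of the libraries listed in the README hunk above (illustration only, not part of this changeset), the sketch below links against libavutil, libavcodec and libavformat and prints their runtime versions; it assumes the FFmpeg development headers are installed and that the build command in the comment is adapted to your setup.

/* Minimal sketch: print runtime versions of a few FFmpeg libraries.
 * Assumed build line: gcc versions.c $(pkg-config --cflags --libs libavformat libavcodec libavutil) */
#include <stdio.h>

#include <libavutil/avutil.h>     /* av_version_info(), avutil_version() */
#include <libavcodec/avcodec.h>   /* avcodec_version() */
#include <libavformat/avformat.h> /* avformat_version() */

int main(void)
{
    printf("FFmpeg version string: %s\n", av_version_info());
    printf("libavutil   %u.%u.%u\n", AV_VERSION_MAJOR(avutil_version()),
           AV_VERSION_MINOR(avutil_version()), AV_VERSION_MICRO(avutil_version()));
    printf("libavcodec  %u.%u.%u\n", AV_VERSION_MAJOR(avcodec_version()),
           AV_VERSION_MINOR(avcodec_version()), AV_VERSION_MICRO(avcodec_version()));
    printf("libavformat %u.%u.%u\n", AV_VERSION_MAJOR(avformat_version()),
           AV_VERSION_MINOR(avformat_version()), AV_VERSION_MICRO(avformat_version()));
    return 0;
}
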
@@ -1,15 +1,15 @@

┌─────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 7.0 "Dijkstra" │
└─────────────────────────────────────────┘
┌───────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.0 "Wu" │
└───────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 7.0 "Dijkstra", about 6
months after the release of FFmpeg 6.1.
The FFmpeg Project proudly presents FFmpeg 4.0 "Wu", about 6
months after the release of FFmpeg 3.4.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.

@@ -19,6 +19,7 @@
#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define COMPAT_ATOMICS_WIN32_STDATOMIC_H

#define WIN32_LEAN_AND_MEAN
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
@@ -95,7 +96,7 @@ do { \
atomic_load(object)

#define atomic_exchange(object, desired) \
InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
InterlockedExchangePointer(object, desired);

#define atomic_exchange_explicit(object, desired, order) \
atomic_exchange(object, desired)

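For context, the compat macro in the hunk above maps atomic_exchange onto InterlockedExchangePointer, now with explicit PVOID casts. The hedged sketch below shows the plain C11 call this shim stands in for on toolchains that do provide <stdatomic.h>; it uses only standard C11, nothing FFmpeg-specific.

/* C11 reference for the semantics emulated above: atomic_exchange()
 * stores a new value and returns the previous one atomically. */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int flag = 0;
    int old = atomic_exchange(&flag, 1);
    printf("old=%d new=%d\n", old, atomic_load(&flag)); /* prints old=0 new=1 */
    return 0;
}
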
compat/avisynth/avisynth_c.h | 1064 lines (new file; diff suppressed because it is too large)
compat/avisynth/avs/capi.h | 62 lines (new file)
@@ -0,0 +1,62 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CAPI_H
|
||||
#define AVS_CAPI_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef BUILDING_AVSCORE
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#endif //AVS_CAPI_H
|
||||
compat/avisynth/avs/config.h | 55 lines (new file)
@@ -0,0 +1,55 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_CONFIG_H
|
||||
#define AVS_CONFIG_H
|
||||
|
||||
// Undefine this to get cdecl calling convention
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
// NOTE TO PLUGIN AUTHORS:
|
||||
// Because FRAME_ALIGN can be substantially higher than the alignment
|
||||
// a plugin actually needs, plugins should not use FRAME_ALIGN to check for
|
||||
// alignment. They should always request the exact alignment value they need.
|
||||
// This is to make sure that plugins work over the widest range of AviSynth
|
||||
// builds possible.
|
||||
#define FRAME_ALIGN 32
|
||||
|
||||
#if defined(_M_AMD64) || defined(__x86_64)
|
||||
# define X86_64
|
||||
#elif defined(_M_IX86) || defined(__i386__)
|
||||
# define X86_32
|
||||
#else
|
||||
# error Unsupported CPU architecture.
|
||||
#endif
|
||||
|
||||
#endif //AVS_CONFIG_H
|
||||
compat/avisynth/avs/types.h | 51 lines (new file)
@@ -0,0 +1,51 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef AVS_TYPES_H
|
||||
#define AVS_TYPES_H
|
||||
|
||||
// Define all types necessary for interfacing with avisynth.dll
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
typedef unsigned int Pixel32;
|
||||
typedef unsigned char BYTE;
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
#endif //AVS_TYPES_H
|
||||
compat/avisynth/avxsynth_c.h | 728 lines (new file)
@@ -0,0 +1,728 @@
|
||||
// Avisynth C Interface Version 0.20
|
||||
// Copyright 2003 Kevin Atkinson
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
|
||||
// MA 02110-1301 USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// As a special exception, I give you permission to link to the
|
||||
// Avisynth C interface with independent modules that communicate with
|
||||
// the Avisynth C interface solely through the interfaces defined in
|
||||
// avisynth_c.h, regardless of the license terms of these independent
|
||||
// modules, and to copy and distribute the resulting combined work
|
||||
// under terms of your choice, provided that every copy of the
|
||||
// combined work is accompanied by a complete copy of the source code
|
||||
// of the Avisynth C interface and Avisynth itself (with the version
|
||||
// used to produce the combined work), being distributed under the
|
||||
// terms of the GNU General Public License plus this exception. An
|
||||
// independent module is a module which is not derived from or based
|
||||
// on Avisynth C Interface, such as 3rd-party filters, import and
|
||||
// export plugins, or graphical user interfaces.
|
||||
|
||||
#ifndef __AVXSYNTH_C__
|
||||
#define __AVXSYNTH_C__
|
||||
|
||||
#include "windowsPorts/windows2linux.h"
|
||||
#include <stdarg.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
#else
|
||||
# define EXTERN_C
|
||||
#endif
|
||||
|
||||
#define AVSC_USE_STDCALL 1
|
||||
|
||||
#ifndef AVSC_USE_STDCALL
|
||||
# define AVSC_CC __cdecl
|
||||
#else
|
||||
# define AVSC_CC __stdcall
|
||||
#endif
|
||||
|
||||
#define AVSC_INLINE static __inline
|
||||
|
||||
#ifdef AVISYNTH_C_EXPORTS
|
||||
# define AVSC_EXPORT EXTERN_C
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllexport) ret AVSC_CC name
|
||||
#else
|
||||
# define AVSC_EXPORT EXTERN_C __declspec(dllexport)
|
||||
# ifndef AVSC_NO_DECLSPEC
|
||||
# define AVSC_API(ret, name) EXTERN_C __declspec(dllimport) ret AVSC_CC name
|
||||
# else
|
||||
# define AVSC_API(ret, name) typedef ret (AVSC_CC *name##_func)
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef __GNUC__
|
||||
typedef long long int INT64;
|
||||
#else
|
||||
typedef __int64 INT64;
|
||||
#endif
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// Constants
|
||||
//
|
||||
|
||||
#ifndef __AVXSYNTH_H__
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
#endif
|
||||
|
||||
enum {AVS_SAMPLE_INT8 = 1<<0,
|
||||
AVS_SAMPLE_INT16 = 1<<1,
|
||||
AVS_SAMPLE_INT24 = 1<<2,
|
||||
AVS_SAMPLE_INT32 = 1<<3,
|
||||
AVS_SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {AVS_PLANAR_Y=1<<0,
|
||||
AVS_PLANAR_U=1<<1,
|
||||
AVS_PLANAR_V=1<<2,
|
||||
AVS_PLANAR_ALIGNED=1<<3,
|
||||
AVS_PLANAR_Y_ALIGNED=AVS_PLANAR_Y|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_U_ALIGNED=AVS_PLANAR_U|AVS_PLANAR_ALIGNED,
|
||||
AVS_PLANAR_V_ALIGNED=AVS_PLANAR_V|AVS_PLANAR_ALIGNED};
|
||||
|
||||
// Colorspace properties.
|
||||
enum {AVS_CS_BGR = 1<<28,
|
||||
AVS_CS_YUV = 1<<29,
|
||||
AVS_CS_INTERLEAVED = 1<<30,
|
||||
AVS_CS_PLANAR = 1<<31};
|
||||
|
||||
// Specific colorformats
|
||||
enum {
|
||||
AVS_CS_UNKNOWN = 0,
|
||||
AVS_CS_BGR24 = 1<<0 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_BGR32 = 1<<1 | AVS_CS_BGR | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YUY2 = 1<<2 | AVS_CS_YUV | AVS_CS_INTERLEAVED,
|
||||
AVS_CS_YV12 = 1<<3 | AVS_CS_YUV | AVS_CS_PLANAR, // y-v-u, planar
|
||||
AVS_CS_I420 = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR, // y-u-v, planar
|
||||
AVS_CS_IYUV = 1<<4 | AVS_CS_YUV | AVS_CS_PLANAR // same as above
|
||||
};
|
||||
|
||||
enum {
|
||||
AVS_IT_BFF = 1<<0,
|
||||
AVS_IT_TFF = 1<<1,
|
||||
AVS_IT_FIELDBASED = 1<<2};
|
||||
|
||||
enum {
|
||||
AVS_FILTER_TYPE=1,
|
||||
AVS_FILTER_INPUT_COLORSPACE=2,
|
||||
AVS_FILTER_OUTPUT_TYPE=9,
|
||||
AVS_FILTER_NAME=4,
|
||||
AVS_FILTER_AUTHOR=5,
|
||||
AVS_FILTER_VERSION=6,
|
||||
AVS_FILTER_ARGS=7,
|
||||
AVS_FILTER_ARGS_INFO=8,
|
||||
AVS_FILTER_ARGS_DESCRIPTION=10,
|
||||
AVS_FILTER_DESCRIPTION=11};
|
||||
|
||||
enum { //SUBTYPES
|
||||
AVS_FILTER_TYPE_AUDIO=1,
|
||||
AVS_FILTER_TYPE_VIDEO=2,
|
||||
AVS_FILTER_OUTPUT_TYPE_SAME=3,
|
||||
AVS_FILTER_OUTPUT_TYPE_DIFFERENT=4};
|
||||
|
||||
enum {
|
||||
AVS_CACHE_NOTHING=0,
|
||||
AVS_CACHE_RANGE=1,
|
||||
AVS_CACHE_ALL=2,
|
||||
AVS_CACHE_AUDIO=3,
|
||||
AVS_CACHE_AUDIO_NONE=4,
|
||||
AVS_CACHE_AUDIO_AUTO=5
|
||||
};
|
||||
|
||||
#define AVS_FRAME_ALIGN 16
|
||||
|
||||
typedef struct AVS_Clip AVS_Clip;
|
||||
typedef struct AVS_ScriptEnvironment AVS_ScriptEnvironment;
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoInfo
|
||||
//
|
||||
|
||||
// AVS_VideoInfo is layed out identicly to VideoInfo
|
||||
typedef struct AVS_VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
|
||||
int pixel_type;
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type;
|
||||
INT64 num_audio_samples;
|
||||
int nchannels;
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
} AVS_VideoInfo;
|
||||
|
||||
// useful functions of the above
|
||||
AVSC_INLINE int avs_has_video(const AVS_VideoInfo * p)
|
||||
{ return (p->width!=0); }
|
||||
|
||||
AVSC_INLINE int avs_has_audio(const AVS_VideoInfo * p)
|
||||
{ return (p->audio_samples_per_second!=0); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_BGR); }
|
||||
|
||||
AVSC_INLINE int avs_is_rgb24(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type&AVS_CS_BGR24)==AVS_CS_BGR24; } // Clear out additional properties
|
||||
|
||||
AVSC_INLINE int avs_is_rgb32(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_BGR32) == AVS_CS_BGR32 ; }
|
||||
|
||||
AVSC_INLINE int avs_is_yuv(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type&AVS_CS_YUV ); }
|
||||
|
||||
AVSC_INLINE int avs_is_yuy2(const AVS_VideoInfo * p)
|
||||
{ return (p->pixel_type & AVS_CS_YUY2) == AVS_CS_YUY2; }
|
||||
|
||||
AVSC_INLINE int avs_is_yv12(const AVS_VideoInfo * p)
|
||||
{ return ((p->pixel_type & AVS_CS_YV12) == AVS_CS_YV12)||((p->pixel_type & AVS_CS_I420) == AVS_CS_I420); }
|
||||
|
||||
AVSC_INLINE int avs_is_color_space(const AVS_VideoInfo * p, int c_space)
|
||||
{ return ((p->pixel_type & c_space) == c_space); }
|
||||
|
||||
AVSC_INLINE int avs_is_property(const AVS_VideoInfo * p, int property)
|
||||
{ return ((p->pixel_type & property)==property ); }
|
||||
|
||||
AVSC_INLINE int avs_is_planar(const AVS_VideoInfo * p)
|
||||
{ return !!(p->pixel_type & AVS_CS_PLANAR); }
|
||||
|
||||
AVSC_INLINE int avs_is_field_based(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_FIELDBASED); }
|
||||
|
||||
AVSC_INLINE int avs_is_parity_known(const AVS_VideoInfo * p)
|
||||
{ return ((p->image_type & AVS_IT_FIELDBASED)&&(p->image_type & (AVS_IT_BFF | AVS_IT_TFF))); }
|
||||
|
||||
AVSC_INLINE int avs_is_bff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_BFF); }
|
||||
|
||||
AVSC_INLINE int avs_is_tff(const AVS_VideoInfo * p)
|
||||
{ return !!(p->image_type & AVS_IT_TFF); }
|
||||
|
||||
AVSC_INLINE int avs_bits_per_pixel(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->pixel_type) {
|
||||
case AVS_CS_BGR24: return 24;
|
||||
case AVS_CS_BGR32: return 32;
|
||||
case AVS_CS_YUY2: return 16;
|
||||
case AVS_CS_YV12:
|
||||
case AVS_CS_I420: return 12;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_from_pixels(const AVS_VideoInfo * p, int pixels)
|
||||
{ return pixels * (avs_bits_per_pixel(p)>>3); } // Will work on planar images, but will return only luma planes
|
||||
|
||||
AVSC_INLINE int avs_row_size(const AVS_VideoInfo * p)
|
||||
{ return avs_bytes_from_pixels(p,p->width); } // Also only returns first plane on planar images
|
||||
|
||||
AVSC_INLINE int avs_bmp_size(const AVS_VideoInfo * vi)
|
||||
{ if (avs_is_planar(vi)) {int p = vi->height * ((avs_row_size(vi)+3) & ~3); p+=p>>1; return p; } return vi->height * ((avs_row_size(vi)+3) & ~3); }
|
||||
|
||||
AVSC_INLINE int avs_samples_per_second(const AVS_VideoInfo * p)
|
||||
{ return p->audio_samples_per_second; }
|
||||
|
||||
|
||||
AVSC_INLINE int avs_bytes_per_channel_sample(const AVS_VideoInfo * p)
|
||||
{
|
||||
switch (p->sample_type) {
|
||||
case AVS_SAMPLE_INT8: return sizeof(signed char);
|
||||
case AVS_SAMPLE_INT16: return sizeof(signed short);
|
||||
case AVS_SAMPLE_INT24: return 3;
|
||||
case AVS_SAMPLE_INT32: return sizeof(signed int);
|
||||
case AVS_SAMPLE_FLOAT: return sizeof(float);
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
AVSC_INLINE int avs_bytes_per_audio_sample(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels*avs_bytes_per_channel_sample(p);}
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_frames(const AVS_VideoInfo * p, INT64 frames)
|
||||
{ return ((INT64)(frames) * p->audio_samples_per_second * p->fps_denominator / p->fps_numerator); }
|
||||
|
||||
AVSC_INLINE int avs_frames_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return (int)(samples * (INT64)p->fps_numerator / (INT64)p->fps_denominator / (INT64)p->audio_samples_per_second); }
|
||||
|
||||
AVSC_INLINE INT64 avs_audio_samples_from_bytes(const AVS_VideoInfo * p, INT64 bytes)
|
||||
{ return bytes / avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE INT64 avs_bytes_from_audio_samples(const AVS_VideoInfo * p, INT64 samples)
|
||||
{ return samples * avs_bytes_per_audio_sample(p); }
|
||||
|
||||
AVSC_INLINE int avs_audio_channels(const AVS_VideoInfo * p)
|
||||
{ return p->nchannels; }
|
||||
|
||||
AVSC_INLINE int avs_sample_type(const AVS_VideoInfo * p)
|
||||
{ return p->sample_type;}
|
||||
|
||||
// useful mutator
|
||||
AVSC_INLINE void avs_set_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type|=property; }
|
||||
|
||||
AVSC_INLINE void avs_clear_property(AVS_VideoInfo * p, int property)
|
||||
{ p->image_type&=~property; }
|
||||
|
||||
AVSC_INLINE void avs_set_field_based(AVS_VideoInfo * p, int isfieldbased)
|
||||
{ if (isfieldbased) p->image_type|=AVS_IT_FIELDBASED; else p->image_type&=~AVS_IT_FIELDBASED; }
|
||||
|
||||
AVSC_INLINE void avs_set_fps(AVS_VideoInfo * p, unsigned numerator, unsigned denominator)
|
||||
{
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
p->fps_numerator = numerator/x;
|
||||
p->fps_denominator = denominator/x;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_same_colorspace(AVS_VideoInfo * x, AVS_VideoInfo * y)
|
||||
{
|
||||
return (x->pixel_type == y->pixel_type)
|
||||
|| (avs_is_yv12(x) && avs_is_yv12(y));
|
||||
}
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_VideoFrame
|
||||
//
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
// AVS_VideoFrameBuffer is layed out identicly to VideoFrameBuffer
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrameBuffer {
|
||||
unsigned char * data;
|
||||
int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
long refcount;
|
||||
} AVS_VideoFrameBuffer;
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer.
|
||||
|
||||
// AVS_VideoFrame is layed out identicly to IVideoFrame
|
||||
// DO NOT USE THIS STRUCTURE DIRECTLY
|
||||
typedef struct AVS_VideoFrame {
|
||||
int refcount;
|
||||
AVS_VideoFrameBuffer * vfb;
|
||||
int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
} AVS_VideoFrame;
|
||||
|
||||
// Access functions for AVS_VideoFrame
|
||||
AVSC_INLINE int avs_get_pitch(const AVS_VideoFrame * p) {
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_pitch_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V: return p->pitchUV;}
|
||||
return p->pitch;}
|
||||
|
||||
AVSC_INLINE int avs_get_row_size(const AVS_VideoFrame * p) {
|
||||
return p->row_size; }
|
||||
|
||||
AVSC_INLINE int avs_get_row_size_p(const AVS_VideoFrame * p, int plane) {
|
||||
int r;
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->row_size>>1;
|
||||
else return 0;
|
||||
case AVS_PLANAR_U_ALIGNED: case AVS_PLANAR_V_ALIGNED:
|
||||
if (p->pitchUV) {
|
||||
r = ((p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r < p->pitchUV)
|
||||
return r;
|
||||
return p->row_size>>1;
|
||||
} else return 0;
|
||||
case AVS_PLANAR_Y_ALIGNED:
|
||||
r = (p->row_size+AVS_FRAME_ALIGN-1)&(~(AVS_FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r <= p->pitch)
|
||||
return r;
|
||||
return p->row_size;
|
||||
}
|
||||
return p->row_size;
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_get_height(const AVS_VideoFrame * p) {
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE int avs_get_height_p(const AVS_VideoFrame * p, int plane) {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: case AVS_PLANAR_V:
|
||||
if (p->pitchUV) return p->height>>1;
|
||||
return 0;
|
||||
}
|
||||
return p->height;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr(const AVS_VideoFrame * p) {
|
||||
return p->vfb->data + p->offset;}
|
||||
|
||||
AVSC_INLINE const unsigned char* avs_get_read_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;}
|
||||
}
|
||||
|
||||
AVSC_INLINE int avs_is_writable(const AVS_VideoFrame * p) {
|
||||
return (p->refcount == 1 && p->vfb->refcount == 1);}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr(const AVS_VideoFrame * p)
|
||||
{
|
||||
if (avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else
|
||||
return 0;
|
||||
}
|
||||
|
||||
AVSC_INLINE unsigned char* avs_get_write_ptr_p(const AVS_VideoFrame * p, int plane)
|
||||
{
|
||||
if (plane==AVS_PLANAR_Y && avs_is_writable(p)) {
|
||||
++p->vfb->sequence_number;
|
||||
return p->vfb->data + p->offset;
|
||||
} else if (plane==AVS_PLANAR_Y) {
|
||||
return 0;
|
||||
} else {
|
||||
switch (plane) {
|
||||
case AVS_PLANAR_U: return p->vfb->data + p->offsetU;
|
||||
case AVS_PLANAR_V: return p->vfb->data + p->offsetV;
|
||||
default: return p->vfb->data + p->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_video_frame)(AVS_VideoFrame *);
|
||||
// makes a shallow copy of a video frame
|
||||
AVSC_API(AVS_VideoFrame *, avs_copy_video_frame)(AVS_VideoFrame *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE void avs_release_frame(AVS_VideoFrame * f)
|
||||
{avs_release_video_frame(f);}
|
||||
AVSC_INLINE AVS_VideoFrame * avs_copy_frame(AVS_VideoFrame * f)
|
||||
{return avs_copy_video_frame(f);}
|
||||
#endif
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Value
|
||||
//
|
||||
|
||||
// Treat AVS_Value as a fat pointer. That is use avs_copy_value
|
||||
// and avs_release_value appropiaty as you would if AVS_Value was
|
||||
// a pointer.
|
||||
|
||||
// To maintain source code compatibility with future versions of the
|
||||
// avisynth_c API don't use the AVS_Value directly. Use the helper
|
||||
// functions below.
|
||||
|
||||
// AVS_Value is layed out identicly to AVSValue
|
||||
typedef struct AVS_Value AVS_Value;
|
||||
struct AVS_Value {
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
// for some function e'rror
|
||||
short array_size;
|
||||
union {
|
||||
void * clip; // do not use directly, use avs_take_clip
|
||||
char boolean;
|
||||
int integer;
|
||||
INT64 integer64; // match addition of __int64 to avxplugin.h
|
||||
float floating_pt;
|
||||
const char * string;
|
||||
const AVS_Value * array;
|
||||
} d;
|
||||
};
|
||||
|
||||
// AVS_Value should be initilized with avs_void.
|
||||
// Should also set to avs_void after the value is released
|
||||
// with avs_copy_value. Consider it the equalvent of setting
|
||||
// a pointer to NULL
|
||||
static const AVS_Value avs_void = {'v'};
|
||||
|
||||
AVSC_API(void, avs_copy_value)(AVS_Value * dest, AVS_Value src);
|
||||
AVSC_API(void, avs_release_value)(AVS_Value);
|
||||
|
||||
AVSC_INLINE int avs_defined(AVS_Value v) { return v.type != 'v'; }
|
||||
AVSC_INLINE int avs_is_clip(AVS_Value v) { return v.type == 'c'; }
|
||||
AVSC_INLINE int avs_is_bool(AVS_Value v) { return v.type == 'b'; }
|
||||
AVSC_INLINE int avs_is_int(AVS_Value v) { return v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_float(AVS_Value v) { return v.type == 'f' || v.type == 'i'; }
|
||||
AVSC_INLINE int avs_is_string(AVS_Value v) { return v.type == 's'; }
|
||||
AVSC_INLINE int avs_is_array(AVS_Value v) { return v.type == 'a'; }
|
||||
AVSC_INLINE int avs_is_error(AVS_Value v) { return v.type == 'e'; }
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_take_clip)(AVS_Value, AVS_ScriptEnvironment *);
|
||||
AVSC_API(void, avs_set_to_clip)(AVS_Value *, AVS_Clip *);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
AVSC_INLINE int avs_as_bool(AVS_Value v)
|
||||
{ return v.d.boolean; }
|
||||
AVSC_INLINE int avs_as_int(AVS_Value v)
|
||||
{ return v.d.integer; }
|
||||
AVSC_INLINE const char * avs_as_string(AVS_Value v)
|
||||
{ return avs_is_error(v) || avs_is_string(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE double avs_as_float(AVS_Value v)
|
||||
{ return avs_is_int(v) ? v.d.integer : v.d.floating_pt; }
|
||||
AVSC_INLINE const char * avs_as_error(AVS_Value v)
|
||||
{ return avs_is_error(v) ? v.d.string : 0; }
|
||||
AVSC_INLINE const AVS_Value * avs_as_array(AVS_Value v)
|
||||
{ return v.d.array; }
|
||||
AVSC_INLINE int avs_array_size(AVS_Value v)
|
||||
{ return avs_is_array(v) ? v.array_size : 1; }
|
||||
AVSC_INLINE AVS_Value avs_array_elt(AVS_Value v, int index)
|
||||
{ return avs_is_array(v) ? v.d.array[index] : v; }
|
||||
|
||||
// only use these functions on am AVS_Value that does not already have
|
||||
// an active value. Remember, treat AVS_Value as a fat pointer.
|
||||
AVSC_INLINE AVS_Value avs_new_value_bool(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'b'; v.d.boolean = v0 == 0 ? 0 : 1; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_int(int v0)
|
||||
{ AVS_Value v = {0}; v.type = 'i'; v.d.integer = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_string(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 's'; v.d.string = v0; return v; }
|
||||
AVSC_INLINE AVS_Value avs_new_value_float(float v0)
|
||||
{ AVS_Value v = {0}; v.type = 'f'; v.d.floating_pt = v0; return v;}
|
||||
AVSC_INLINE AVS_Value avs_new_value_error(const char * v0)
|
||||
{ AVS_Value v = {0}; v.type = 'e'; v.d.string = v0; return v; }
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE AVS_Value avs_new_value_clip(AVS_Clip * v0)
|
||||
{ AVS_Value v = {0}; avs_set_to_clip(&v, v0); return v; }
|
||||
#endif
|
||||
AVSC_INLINE AVS_Value avs_new_value_array(AVS_Value * v0, int size)
|
||||
{ AVS_Value v = {0}; v.type = 'a'; v.d.array = v0; v.array_size = size; return v; }
|
||||
|
||||
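The comment block above says to treat AVS_Value as a fat pointer and to go through the helper constructors and accessors rather than touching the struct directly. As a hedged illustration only (not part of the header added by this diff), a caller using the inline helpers declared above might look like this; the include path is an assumption.

/* Illustrative sketch: build and inspect AVS_Value instances purely through
 * the inline helpers declared above. */
#include <stdio.h>
#include "avxsynth_c.h"   /* assumed include path for the header added in this diff */

static void demo_values(void)
{
    AVS_Value i = avs_new_value_int(25);
    AVS_Value s = avs_new_value_string("hello");

    if (avs_is_int(i))
        printf("int value: %d\n", avs_as_int(i));
    if (avs_is_string(s))
        printf("string value: %s\n", avs_as_string(s));

    /* plain ints and strings hold no clip reference, so nothing to release here;
     * values returned by avs_invoke()/avs_get_var() would need avs_release_value(). */
}
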
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_Clip
|
||||
//
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_release_clip)(AVS_Clip *);
|
||||
AVSC_API(AVS_Clip *, avs_copy_clip)(AVS_Clip *);
|
||||
|
||||
AVSC_API(const char *, avs_clip_get_error)(AVS_Clip *); // return 0 if no error
|
||||
|
||||
AVSC_API(const AVS_VideoInfo *, avs_get_video_info)(AVS_Clip *);
|
||||
|
||||
AVSC_API(int, avs_get_version)(AVS_Clip *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_get_frame)(AVS_Clip *, int n);
|
||||
// The returned video frame must be released with avs_release_video_frame
|
||||
|
||||
AVSC_API(int, avs_get_parity)(AVS_Clip *, int n);
|
||||
// return field parity if field_based, else parity of first field in frame
|
||||
|
||||
AVSC_API(int, avs_get_audio)(AVS_Clip *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
// start and count are in samples
|
||||
|
||||
AVSC_API(int, avs_set_cache_hints)(AVS_Clip *,
|
||||
int cachehints, size_t frame_range);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// This is the callback type used by avs_add_function
|
||||
typedef AVS_Value (AVSC_CC * AVS_ApplyFunc)
|
||||
(AVS_ScriptEnvironment *, AVS_Value args, void * user_data);
|
||||
|
||||
typedef struct AVS_FilterInfo AVS_FilterInfo;
|
||||
struct AVS_FilterInfo
|
||||
{
|
||||
// these members should not be modified outside of the AVS_ApplyFunc callback
|
||||
AVS_Clip * child;
|
||||
AVS_VideoInfo vi;
|
||||
AVS_ScriptEnvironment * env;
|
||||
AVS_VideoFrame * (AVSC_CC * get_frame)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_parity)(AVS_FilterInfo *, int n);
|
||||
int (AVSC_CC * get_audio)(AVS_FilterInfo *, void * buf,
|
||||
INT64 start, INT64 count);
|
||||
int (AVSC_CC * set_cache_hints)(AVS_FilterInfo *, int cachehints,
|
||||
int frame_range);
|
||||
void (AVSC_CC * free_filter)(AVS_FilterInfo *);
|
||||
|
||||
// Should be set when ever there is an error to report.
|
||||
// It is cleared before any of the above methods are called
|
||||
const char * error;
|
||||
// this is to store whatever and may be modified at will
|
||||
void * user_data;
|
||||
};
|
||||
|
||||
// Create a new filter
|
||||
// fi is set to point to the AVS_FilterInfo so that you can
|
||||
// modify it once it is initilized.
|
||||
// store_child should generally be set to true. If it is not
|
||||
// set than ALL methods (the function pointers) must be defined
|
||||
// If it is set than you do not need to worry about freeing the child
|
||||
// clip.
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(AVS_Clip *, avs_new_c_filter)(AVS_ScriptEnvironment * e,
|
||||
AVS_FilterInfo * * fi,
|
||||
AVS_Value child, int store_child);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
|
||||
/////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// AVS_ScriptEnvironment
|
||||
//
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
AVS_CPU_FORCE = 0x01, // N/A
|
||||
AVS_CPU_FPU = 0x02, // 386/486DX
|
||||
AVS_CPU_MMX = 0x04, // P55C, K6, PII
|
||||
AVS_CPU_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
AVS_CPU_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
AVS_CPU_SSE2 = 0x20, // PIV, Hammer
|
||||
AVS_CPU_3DNOW = 0x40, // K6-2
|
||||
AVS_CPU_3DNOW_EXT = 0x80, // Athlon
|
||||
AVS_CPU_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2,
|
||||
// which only Hammer will have anyway)
|
||||
};
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(const char *, avs_get_error)(AVS_ScriptEnvironment *); // return 0 if no error
|
||||
|
||||
AVSC_API(long, avs_get_cpu_flags)(AVS_ScriptEnvironment *);
|
||||
AVSC_API(int, avs_check_version)(AVS_ScriptEnvironment *, int version);
|
||||
|
||||
AVSC_API(char *, avs_save_string)(AVS_ScriptEnvironment *, const char* s, int length);
|
||||
AVSC_API(char *, avs_sprintf)(AVS_ScriptEnvironment *, const char * fmt, ...);
|
||||
|
||||
AVSC_API(char *, avs_vsprintf)(AVS_ScriptEnvironment *, const char * fmt, va_list val);
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
|
||||
AVSC_API(int, avs_add_function)(AVS_ScriptEnvironment *,
|
||||
const char * name, const char * params,
|
||||
AVS_ApplyFunc apply, void * user_data);
|
||||
|
||||
AVSC_API(int, avs_function_exists)(AVS_ScriptEnvironment *, const char * name);
|
||||
|
||||
AVSC_API(AVS_Value, avs_invoke)(AVS_ScriptEnvironment *, const char * name,
|
||||
AVS_Value args, const char** arg_names);
|
||||
// The returned value must be be released with avs_release_value
|
||||
|
||||
AVSC_API(AVS_Value, avs_get_var)(AVS_ScriptEnvironment *, const char* name);
|
||||
// The returned value must be be released with avs_release_value
|
||||
|
||||
AVSC_API(int, avs_set_var)(AVS_ScriptEnvironment *, const char* name, AVS_Value val);
|
||||
|
||||
AVSC_API(int, avs_set_global_var)(AVS_ScriptEnvironment *, const char* name, const AVS_Value val);
|
||||
|
||||
//void avs_push_context(AVS_ScriptEnvironment *, int level=0);
|
||||
//void avs_pop_context(AVS_ScriptEnvironment *);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_new_video_frame_a)(AVS_ScriptEnvironment *,
|
||||
const AVS_VideoInfo * vi, int align);
|
||||
// align should be at least 16
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#ifndef AVSC_NO_DECLSPEC
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_video_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
|
||||
AVSC_INLINE
|
||||
AVS_VideoFrame * avs_new_frame(AVS_ScriptEnvironment * env,
|
||||
const AVS_VideoInfo * vi)
|
||||
{return avs_new_video_frame_a(env,vi,AVS_FRAME_ALIGN);}
|
||||
#endif
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(int, avs_make_writable)(AVS_ScriptEnvironment *, AVS_VideoFrame * * pvf);
|
||||
|
||||
AVSC_API(void, avs_bit_blt)(AVS_ScriptEnvironment *, unsigned char* dstp, int dst_pitch, const unsigned char* srcp, int src_pitch, int row_size, int height);
|
||||
|
||||
typedef void (AVSC_CC *AVS_ShutdownFunc)(void* user_data, AVS_ScriptEnvironment * env);
|
||||
AVSC_API(void, avs_at_exit)(AVS_ScriptEnvironment *, AVS_ShutdownFunc function, void * user_data);
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height);
|
||||
// The returned video frame must be be released
|
||||
|
||||
AVSC_API(int, avs_set_memory_max)(AVS_ScriptEnvironment *, int mem);
|
||||
|
||||
AVSC_API(int, avs_set_working_dir)(AVS_ScriptEnvironment *, const char * newdir);
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
AVSC_API(AVS_ScriptEnvironment *, avs_create_script_environment)(int version);
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
// this symbol is the entry point for the plugin and must
|
||||
// be defined
|
||||
AVSC_EXPORT
|
||||
const char * AVSC_CC avisynth_c_plugin_init(AVS_ScriptEnvironment* env);
|
||||
|
||||
|
||||
#if defined __cplusplus
|
||||
extern "C"
|
||||
{
|
||||
#endif // __cplusplus
|
||||
AVSC_API(void, avs_delete_script_environment)(AVS_ScriptEnvironment *);
|
||||
|
||||
|
||||
AVSC_API(AVS_VideoFrame *, avs_subframe_planar)(AVS_ScriptEnvironment *, AVS_VideoFrame * src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV);
|
||||
// The returned video frame must be be released
|
||||
#if defined __cplusplus
|
||||
}
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif //__AVXSYNTH_C__
|
||||
compat/avisynth/windowsPorts/basicDataTypeConversions.h | 85 lines (new file)
@@ -0,0 +1,85 @@
|
||||
#ifndef __DATA_TYPE_CONVERSIONS_H__
|
||||
#define __DATA_TYPE_CONVERSIONS_H__
|
||||
|
||||
#include <stdint.h>
|
||||
#include <wchar.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
|
||||
typedef int64_t __int64;
|
||||
typedef int32_t __int32;
|
||||
#ifdef __cplusplus
|
||||
typedef bool BOOL;
|
||||
#else
|
||||
typedef uint32_t BOOL;
|
||||
#endif // __cplusplus
|
||||
typedef void* HMODULE;
|
||||
typedef void* LPVOID;
|
||||
typedef void* PVOID;
|
||||
typedef PVOID HANDLE;
|
||||
typedef HANDLE HWND;
|
||||
typedef HANDLE HINSTANCE;
|
||||
typedef void* HDC;
|
||||
typedef void* HBITMAP;
|
||||
typedef void* HICON;
|
||||
typedef void* HFONT;
|
||||
typedef void* HGDIOBJ;
|
||||
typedef void* HBRUSH;
|
||||
typedef void* HMMIO;
|
||||
typedef void* HACMSTREAM;
|
||||
typedef void* HACMDRIVER;
|
||||
typedef void* HIC;
|
||||
typedef void* HACMOBJ;
|
||||
typedef HACMSTREAM* LPHACMSTREAM;
|
||||
typedef void* HACMDRIVERID;
|
||||
typedef void* LPHACMDRIVER;
|
||||
typedef unsigned char BYTE;
|
||||
typedef BYTE* LPBYTE;
|
||||
typedef char TCHAR;
|
||||
typedef TCHAR* LPTSTR;
|
||||
typedef const TCHAR* LPCTSTR;
|
||||
typedef char* LPSTR;
|
||||
typedef LPSTR LPOLESTR;
|
||||
typedef const char* LPCSTR;
|
||||
typedef LPCSTR LPCOLESTR;
|
||||
typedef wchar_t WCHAR;
|
||||
typedef unsigned short WORD;
|
||||
typedef unsigned int UINT;
|
||||
typedef UINT MMRESULT;
|
||||
typedef uint32_t DWORD;
|
||||
typedef DWORD COLORREF;
|
||||
typedef DWORD FOURCC;
|
||||
typedef DWORD HRESULT;
|
||||
typedef DWORD* LPDWORD;
|
||||
typedef DWORD* DWORD_PTR;
|
||||
typedef int32_t LONG;
|
||||
typedef int32_t* LONG_PTR;
|
||||
typedef LONG_PTR LRESULT;
|
||||
typedef uint32_t ULONG;
|
||||
typedef uint32_t* ULONG_PTR;
|
||||
//typedef __int64_t intptr_t;
|
||||
typedef uint64_t _fsize_t;
|
||||
|
||||
|
||||
//
|
||||
// Structures
|
||||
//
|
||||
|
||||
typedef struct _GUID {
|
||||
DWORD Data1;
|
||||
WORD Data2;
|
||||
WORD Data3;
|
||||
BYTE Data4[8];
|
||||
} GUID;
|
||||
|
||||
typedef GUID REFIID;
|
||||
typedef GUID CLSID;
|
||||
typedef CLSID* LPCLSID;
|
||||
typedef GUID IID;
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
#endif // __DATA_TYPE_CONVERSIONS_H__
|
||||
compat/avisynth/windowsPorts/windows2linux.h | 77 lines (new file)
@@ -0,0 +1,77 @@
|
||||
#ifndef __WINDOWS2LINUX_H__
|
||||
#define __WINDOWS2LINUX_H__
|
||||
|
||||
/*
|
||||
* LINUX SPECIFIC DEFINITIONS
|
||||
*/
|
||||
//
|
||||
// Data types conversions
|
||||
//
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include "basicDataTypeConversions.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
namespace avxsynth {
|
||||
#endif // __cplusplus
|
||||
//
|
||||
// purposefully define the following MSFT definitions
|
||||
// to mean nothing (as they do not mean anything on Linux)
|
||||
//
|
||||
#define __stdcall
|
||||
#define __cdecl
|
||||
#define noreturn
|
||||
#define __declspec(x)
|
||||
#define STDAPI extern "C" HRESULT
|
||||
#define STDMETHODIMP HRESULT __stdcall
|
||||
#define STDMETHODIMP_(x) x __stdcall
|
||||
|
||||
#define STDMETHOD(x) virtual HRESULT x
|
||||
#define STDMETHOD_(a, x) virtual a x
|
||||
|
||||
#ifndef TRUE
|
||||
#define TRUE true
|
||||
#endif
|
||||
|
||||
#ifndef FALSE
|
||||
#define FALSE false
|
||||
#endif
|
||||
|
||||
#define S_OK (0x00000000)
|
||||
#define S_FALSE (0x00000001)
|
||||
#define E_NOINTERFACE (0X80004002)
|
||||
#define E_POINTER (0x80004003)
|
||||
#define E_FAIL (0x80004005)
|
||||
#define E_OUTOFMEMORY (0x8007000E)
|
||||
|
||||
#define INVALID_HANDLE_VALUE ((HANDLE)((LONG_PTR)-1))
|
||||
#define FAILED(hr) ((hr) & 0x80000000)
|
||||
#define SUCCEEDED(hr) (!FAILED(hr))
|
||||
|
||||
|
||||
//
|
||||
// Functions
|
||||
//
|
||||
#define MAKEDWORD(a,b,c,d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
|
||||
#define MAKEWORD(a,b) (((a) << 8) | (b))
|
||||
|
||||
#define lstrlen strlen
|
||||
#define lstrcpy strcpy
|
||||
#define lstrcmpi strcasecmp
|
||||
#define _stricmp strcasecmp
|
||||
#define InterlockedIncrement(x) __sync_fetch_and_add((x), 1)
|
||||
#define InterlockedDecrement(x) __sync_fetch_and_sub((x), 1)
|
||||
// Windows uses (new, old) ordering but GCC has (old, new)
|
||||
#define InterlockedCompareExchange(x,y,z) __sync_val_compare_and_swap(x,z,y)
|
||||
|
||||
#define UInt32x32To64(a, b) ( (uint64_t) ( ((uint64_t)((uint32_t)(a))) * ((uint32_t)(b)) ) )
|
||||
#define Int64ShrlMod32(a, b) ( (uint64_t) ( (uint64_t)(a) >> (b) ) )
|
||||
#define Int32x32To64(a, b) ((__int64)(((__int64)((long)(a))) * ((long)(b))))
|
||||
|
||||
#define MulDiv(nNumber, nNumerator, nDenominator) (int32_t) (((int64_t) (nNumber) * (int64_t) (nNumerator) + (int64_t) ((nDenominator)/2)) / (int64_t) (nDenominator))
|
||||
|
||||
#ifdef __cplusplus
|
||||
}; // namespace avxsynth
|
||||
#endif // __cplusplus
|
||||
|
||||
#endif // __WINDOWS2LINUX_H__
|
||||
@@ -1,192 +0,0 @@
|
||||
/*
|
||||
* Minimum CUDA compatibility definitions header
|
||||
*
|
||||
* Copyright (c) 2019 rcombs
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
#define COMPAT_CUDA_CUDA_RUNTIME_H
|
||||
|
||||
// Common macros
|
||||
#define __global__ __attribute__((global))
|
||||
#define __device__ __attribute__((device))
|
||||
#define __device_builtin__ __attribute__((device_builtin))
|
||||
#define __align__(N) __attribute__((aligned(N)))
|
||||
#define __inline__ __inline__ __attribute__((always_inline))
|
||||
|
||||
#define max(a, b) ((a) > (b) ? (a) : (b))
|
||||
#define min(a, b) ((a) < (b) ? (a) : (b))
|
||||
#define abs(x) ((x) < 0 ? -(x) : (x))
|
||||
|
||||
#define atomicAdd(a, b) (__atomic_fetch_add(a, b, __ATOMIC_SEQ_CST))
|
||||
|
||||
// Basic typedefs
|
||||
typedef __device_builtin__ unsigned long long cudaTextureObject_t;
|
||||
|
||||
typedef struct __device_builtin__ __align__(2) uchar2
|
||||
{
|
||||
unsigned char x, y;
|
||||
} uchar2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) ushort2
|
||||
{
|
||||
unsigned short x, y;
|
||||
} ushort2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) float2
|
||||
{
|
||||
float x, y;
|
||||
} float2;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) int2
|
||||
{
|
||||
int x, y;
|
||||
} int2;
|
||||
|
||||
typedef struct __device_builtin__ uint3
|
||||
{
|
||||
unsigned int x, y, z;
|
||||
} uint3;
|
||||
|
||||
typedef struct uint3 dim3;
|
||||
|
||||
typedef struct __device_builtin__ __align__(4) uchar4
|
||||
{
|
||||
unsigned char x, y, z, w;
|
||||
} uchar4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(8) ushort4
|
||||
{
|
||||
unsigned short x, y, z, w;
|
||||
} ushort4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) int4
|
||||
{
|
||||
int x, y, z, w;
|
||||
} int4;
|
||||
|
||||
typedef struct __device_builtin__ __align__(16) float4
|
||||
{
|
||||
float x, y, z, w;
|
||||
} float4;
|
||||
|
||||
// Accessors for special registers
|
||||
#define GETCOMP(reg, comp) \
|
||||
asm("mov.u32 %0, %%" #reg "." #comp ";" : "=r"(tmp)); \
|
||||
ret.comp = tmp;
|
||||
|
||||
#define GET(name, reg) static inline __device__ uint3 name() {\
|
||||
uint3 ret; \
|
||||
unsigned tmp; \
|
||||
GETCOMP(reg, x) \
|
||||
GETCOMP(reg, y) \
|
||||
GETCOMP(reg, z) \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
GET(getBlockIdx, ctaid)
|
||||
GET(getBlockDim, ntid)
|
||||
GET(getThreadIdx, tid)
|
||||
|
||||
// Instead of externs for these registers, we turn access to them into calls into trivial ASM
|
||||
#define blockIdx (getBlockIdx())
|
||||
#define blockDim (getBlockDim())
|
||||
#define threadIdx (getThreadIdx())
|
||||
|
||||
// Basic initializers (simple macros rather than inline functions)
|
||||
#define make_int2(a, b) ((int2){.x = a, .y = b})
|
||||
#define make_uchar2(a, b) ((uchar2){.x = a, .y = b})
|
||||
#define make_ushort2(a, b) ((ushort2){.x = a, .y = b})
|
||||
#define make_float2(a, b) ((float2){.x = a, .y = b})
|
||||
#define make_int4(a, b, c, d) ((int4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_uchar4(a, b, c, d) ((uchar4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_ushort4(a, b, c, d) ((ushort4){.x = a, .y = b, .z = c, .w = d})
|
||||
#define make_float4(a, b, c, d) ((float4){.x = a, .y = b, .z = c, .w = d})
|
||||
|
||||
// Conversions from the tex instruction's 4-register output to various types
|
||||
#define TEX2D(type, ret) static inline __device__ void conv(type* out, unsigned a, unsigned b, unsigned c, unsigned d) {*out = (ret);}
|
||||
|
||||
TEX2D(unsigned char, a & 0xFF)
|
||||
TEX2D(unsigned short, a & 0xFFFF)
|
||||
TEX2D(float, a)
|
||||
TEX2D(uchar2, make_uchar2(a & 0xFF, b & 0xFF))
|
||||
TEX2D(ushort2, make_ushort2(a & 0xFFFF, b & 0xFFFF))
|
||||
TEX2D(float2, make_float2(a, b))
|
||||
TEX2D(uchar4, make_uchar4(a & 0xFF, b & 0xFF, c & 0xFF, d & 0xFF))
|
||||
TEX2D(ushort4, make_ushort4(a & 0xFFFF, b & 0xFFFF, c & 0xFFFF, d & 0xFFFF))
|
||||
TEX2D(float4, make_float4(a, b, c, d))
|
||||
|
||||
// Template calling tex instruction and converting the output to the selected type
|
||||
template<typename T>
|
||||
inline __device__ T tex2D(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
T ret;
|
||||
unsigned ret1, ret2, ret3, ret4;
|
||||
asm("tex.2d.v4.u32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret1), "=r"(ret2), "=r"(ret3), "=r"(ret4) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
conv(&ret, ret1, ret2, ret3, ret4);
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float4 tex2D<float4>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret;
|
||||
asm("tex.2d.v4.f32.f32 {%0, %1, %2, %3}, [%4, {%5, %6}];" :
|
||||
"=r"(ret.x), "=r"(ret.y), "=r"(ret.z), "=r"(ret.w) :
|
||||
"l"(texObject), "f"(x), "f"(y));
|
||||
return ret;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float tex2D<float>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
return tex2D<float4>(texObject, x, y).x;
|
||||
}
|
||||
|
||||
template<>
|
||||
inline __device__ float2 tex2D<float2>(cudaTextureObject_t texObject, float x, float y)
|
||||
{
|
||||
float4 ret = tex2D<float4>(texObject, x, y);
|
||||
return make_float2(ret.x, ret.y);
|
||||
}
|
||||
|
||||
// Math helper functions
|
||||
static inline __device__ float floorf(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ float floor(float a) { return __builtin_floorf(a); }
|
||||
static inline __device__ double floor(double a) { return __builtin_floor(a); }
|
||||
static inline __device__ float ceilf(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ float ceil(float a) { return __builtin_ceilf(a); }
|
||||
static inline __device__ double ceil(double a) { return __builtin_ceil(a); }
|
||||
static inline __device__ float truncf(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ float trunc(float a) { return __builtin_truncf(a); }
|
||||
static inline __device__ double trunc(double a) { return __builtin_trunc(a); }
|
||||
static inline __device__ float fabsf(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ float fabs(float a) { return __builtin_fabsf(a); }
|
||||
static inline __device__ double fabs(double a) { return __builtin_fabs(a); }
|
||||
static inline __device__ float sqrtf(float a) { return __builtin_sqrtf(a); }
|
||||
|
||||
static inline __device__ float __saturatef(float a) { return __nvvm_saturate_f(a); }
|
||||
static inline __device__ float __sinf(float a) { return __nvvm_sin_approx_f(a); }
|
||||
static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a); }
|
||||
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
|
||||
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }
|
||||
|
||||
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
|
||||
@@ -16,8 +16,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef COMPAT_CUDA_DYNLINK_LOADER_H
#define COMPAT_CUDA_DYNLINK_LOADER_H
#ifndef AV_COMPAT_CUDA_DYNLINK_LOADER_H
#define AV_COMPAT_CUDA_DYNLINK_LOADER_H

#include "libavutil/log.h"
#include "compat/w32dlfcn.h"
@@ -30,4 +30,4 @@

#include <ffnvcodec/dynlink_loader.h>

#endif /* COMPAT_CUDA_DYNLINK_LOADER_H */
#endif

compat/cuda/ptx2c.sh | 36 lines (new executable file)
@@ -0,0 +1,36 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a
|
||||
# copy of this software and associated documentation files (the "Software"),
|
||||
# to deal in the Software without restriction, including without limitation
|
||||
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
# and/or sell copies of the Software, and to permit persons to whom the
|
||||
# Software is furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
# DEALINGS IN THE SOFTWARE.
|
||||
|
||||
set -e
|
||||
|
||||
OUT="$1"
|
||||
IN="$2"
|
||||
NAME="$(basename "$IN" | sed 's/\..*//')"
|
||||
|
||||
printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
|
||||
while read LINE
|
||||
do
|
||||
printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
|
||||
done < "$IN"
|
||||
printf ";\n" >> "$OUT"
|
||||
|
||||
exit 0
|
||||
@@ -1,47 +0,0 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#define FUN(name, type, op) \
|
||||
type name(type x, type y) \
|
||||
{ \
|
||||
if (fpclassify(x) == FP_NAN) return y; \
|
||||
if (fpclassify(y) == FP_NAN) return x; \
|
||||
return x op y ? x : y; \
|
||||
}
|
||||
|
||||
FUN(fmin, double, <)
|
||||
FUN(fmax, double, >)
|
||||
FUN(fminf, float, <)
|
||||
FUN(fmaxf, float, >)
|
||||
|
||||
long double fmodl(long double x, long double y)
|
||||
{
|
||||
return fmod(x, y);
|
||||
}
|
||||
|
||||
long double scalbnl(long double x, int exp)
|
||||
{
|
||||
return scalbn(x, exp);
|
||||
}
|
||||
|
||||
long double copysignl(long double x, long double y)
|
||||
{
|
||||
return copysign(x, y);
|
||||
}
|
||||
@@ -1,25 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

double fmin(double, double);
double fmax(double, double);
float fminf(float, float);
float fmaxf(float, float);
long double fmodl(long double, long double);
long double scalbnl(long double, int);
long double copysignl(long double, long double);
@@ -59,7 +59,7 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
     * recommends to provide _snprintf/_vsnprintf() a buffer size that
     * is one less than the actual buffer, and zero it before calling
     * _snprintf/_vsnprintf() to workaround this problem.
     * See https://web.archive.org/web/20151214111935/http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
     * See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
    memset(s, 0, n);
    va_copy(ap_copy, ap);
    ret = _vsnprintf(s, n - 1, fmt, ap_copy);
@@ -27,19 +27,15 @@
|
||||
#define COMPAT_OS2THREADS_H
|
||||
|
||||
#define INCL_DOS
|
||||
#define INCL_DOSERRORS
|
||||
#include <os2.h>
|
||||
|
||||
#undef __STRICT_ANSI__ /* for _beginthread() */
|
||||
#include <stdlib.h>
|
||||
#include <time.h>
|
||||
|
||||
#include <sys/builtin.h>
|
||||
#include <sys/fmutex.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct {
|
||||
TID tid;
|
||||
@@ -167,28 +163,6 @@ static av_always_inline int pthread_cond_broadcast(pthread_cond_t *cond)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_timedwait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
ULONG t = av_clip64(abs_milli - av_gettime() / 1000, 0, ULONG_MAX);
|
||||
|
||||
__atomic_increment(&cond->wait_count);
|
||||
|
||||
pthread_mutex_unlock(mutex);
|
||||
|
||||
APIRET ret = DosWaitEventSem(cond->event_sem, t);
|
||||
|
||||
__atomic_decrement(&cond->wait_count);
|
||||
|
||||
DosPostEventSem(cond->ack_sem);
|
||||
|
||||
pthread_mutex_lock(mutex);
|
||||
|
||||
return (ret == ERROR_TIMEOUT) ? ETIMEDOUT : 0;
|
||||
}
|
||||
|
||||
static av_always_inline int pthread_cond_wait(pthread_cond_t *cond,
|
||||
pthread_mutex_t *mutex)
|
||||
{
|
||||
|
||||
@@ -20,40 +20,11 @@
|
||||
#define COMPAT_W32DLFCN_H
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <stdint.h>
|
||||
|
||||
#include <windows.h>
|
||||
|
||||
#include "config.h"
|
||||
#include "libavutil/macros.h"
|
||||
#if (_WIN32_WINNT < 0x0602) || HAVE_WINRT
|
||||
#include "libavutil/wchar_filename.h"
|
||||
|
||||
static inline wchar_t *get_module_filename(HMODULE module)
|
||||
{
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD path_size = 0, path_len;
|
||||
|
||||
do {
|
||||
path_size = path_size ? FFMIN(2 * path_size, INT16_MAX + 1) : MAX_PATH;
|
||||
new_path = av_realloc_array(path, path_size, sizeof *path);
|
||||
if (!new_path) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
path = new_path;
|
||||
// Returns path_size in case of insufficient buffer.
|
||||
// Whether the error is set or not and whether the output
|
||||
// is null-terminated or not depends on the version of Windows.
|
||||
path_len = GetModuleFileNameW(module, path, path_size);
|
||||
} while (path_len && path_size <= INT16_MAX && path_size <= path_len);
|
||||
|
||||
if (!path_len) {
|
||||
av_free(path);
|
||||
return NULL;
|
||||
}
|
||||
return path;
|
||||
}
|
||||
|
||||
#endif
|
||||
/**
|
||||
* Safe function used to open dynamic libs. This attempts to improve program security
|
||||
* by removing the current directory from the dll search path. Only dll's found in the
|
||||
@@ -63,53 +34,29 @@ static inline wchar_t *get_module_filename(HMODULE module)
|
||||
*/
|
||||
static inline HMODULE win32_dlopen(const char *name)
|
||||
{
|
||||
wchar_t *name_w;
|
||||
HMODULE module = NULL;
|
||||
if (utf8towchar(name, &name_w))
|
||||
name_w = NULL;
|
||||
#if _WIN32_WINNT < 0x0602
|
||||
// On Win7 and earlier we check if KB2533623 is available
|
||||
// Need to check if KB2533623 is available
|
||||
if (!GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetDefaultDllDirectories")) {
|
||||
wchar_t *path = NULL, *new_path;
|
||||
DWORD pathlen, pathsize, namelen;
|
||||
if (!name_w)
|
||||
HMODULE module = NULL;
|
||||
wchar_t *path = NULL, *name_w = NULL;
|
||||
DWORD pathlen;
|
||||
if (utf8towchar(name, &name_w))
|
||||
goto exit;
|
||||
namelen = wcslen(name_w);
|
||||
path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t));
|
||||
// Try local directory first
|
||||
path = get_module_filename(NULL);
|
||||
if (!path)
|
||||
pathlen = GetModuleFileNameW(NULL, path, MAX_PATH);
|
||||
pathlen = wcsrchr(path, '\\') - path;
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
new_path = wcsrchr(path, '\\');
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
pathlen = new_path - path;
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
if (module == NULL) {
|
||||
// Next try System32 directory
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
pathlen = GetSystemDirectoryW(path, MAX_PATH);
|
||||
if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
|
||||
goto exit;
|
||||
// Buffer is not enough in two cases:
|
||||
// 1. system directory + \ + module name
|
||||
// 2. system directory even without the module name.
|
||||
if (pathlen + namelen + 2 > pathsize) {
|
||||
pathsize = pathlen + namelen + 2;
|
||||
new_path = av_realloc_array(path, pathsize, sizeof *path);
|
||||
if (!new_path)
|
||||
goto exit;
|
||||
path = new_path;
|
||||
// Query again to handle the case #2.
|
||||
pathlen = GetSystemDirectoryW(path, pathsize);
|
||||
if (!pathlen)
|
||||
goto exit;
|
||||
}
|
||||
path[pathlen] = L'\\';
|
||||
path[pathlen] = '\\';
|
||||
wcscpy(path + pathlen + 1, name_w);
|
||||
module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
|
||||
}
|
||||
@@ -126,19 +73,16 @@ exit:
|
||||
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
|
||||
#endif
|
||||
#if HAVE_WINRT
|
||||
if (!name_w)
|
||||
wchar_t *name_w = NULL;
|
||||
int ret;
|
||||
if (utf8towchar(name, &name_w))
|
||||
return NULL;
|
||||
module = LoadPackagedLibrary(name_w, 0);
|
||||
#else
|
||||
#define LOAD_FLAGS (LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32)
|
||||
/* filename may be be in CP_ACP */
|
||||
if (!name_w)
|
||||
return LoadLibraryExA(name, NULL, LOAD_FLAGS);
|
||||
module = LoadLibraryExW(name_w, NULL, LOAD_FLAGS);
|
||||
#undef LOAD_FLAGS
|
||||
#endif
|
||||
ret = LoadPackagedLibrary(name_w, 0);
|
||||
av_free(name_w);
|
||||
return module;
|
||||
return ret;
|
||||
#else
|
||||
return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32);
|
||||
#endif
|
||||
}
|
||||
#define dlopen(name, flags) win32_dlopen(name)
|
||||
#define dlclose FreeLibrary
|
||||
|
||||
@@ -35,15 +35,14 @@
|
||||
* As most functions here are used without checking return values,
|
||||
* only implement return values as necessary. */
|
||||
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#include <windows.h>
|
||||
#include <process.h>
|
||||
#include <time.h>
|
||||
|
||||
#include "libavutil/attributes.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/time.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
@@ -62,17 +61,7 @@ typedef CONDITION_VARIABLE pthread_cond_t;
|
||||
#define InitializeCriticalSection(x) InitializeCriticalSectionEx(x, 0, 0)
|
||||
#define WaitForSingleObject(a, b) WaitForSingleObjectEx(a, b, FALSE)
|
||||
|
||||
#define PTHREAD_CANCEL_ENABLE 1
|
||||
#define PTHREAD_CANCEL_DISABLE 0
|
||||
|
||||
#if HAVE_WINRT
|
||||
#define THREADFUNC_RETTYPE DWORD
|
||||
#else
|
||||
#define THREADFUNC_RETTYPE unsigned
|
||||
#endif
|
||||
|
||||
static av_unused THREADFUNC_RETTYPE
|
||||
__stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t *h = (pthread_t*)arg;
|
||||
h->ret = h->func(h->arg);
|
||||
@@ -167,31 +156,10 @@ static inline int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
|
||||
const struct timespec *abstime)
|
||||
{
|
||||
int64_t abs_milli = abstime->tv_sec * 1000LL + abstime->tv_nsec / 1000000;
|
||||
DWORD t = av_clip64(abs_milli - av_gettime() / 1000, 0, UINT32_MAX);
|
||||
|
||||
if (!SleepConditionVariableSRW(cond, mutex, t, 0)) {
|
||||
DWORD err = GetLastError();
|
||||
if (err == ERROR_TIMEOUT)
|
||||
return ETIMEDOUT;
|
||||
else
|
||||
return EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_cond_signal(pthread_cond_t *cond)
|
||||
{
|
||||
WakeConditionVariable(cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pthread_setcancelstate(int state, int *oldstate)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* COMPAT_W32PTHREADS_H */
|
||||
|
||||
@@ -48,7 +48,7 @@ trap 'rm -f -- $libname' EXIT
|
||||
if [ -n "$AR" ]; then
|
||||
$AR rcs ${libname} $@ >/dev/null
|
||||
else
|
||||
lib.exe -out:${libname} $@ >/dev/null
|
||||
lib -out:${libname} $@ >/dev/null
|
||||
fi
|
||||
if [ $? != 0 ]; then
|
||||
echo "Could not create temporary library." >&2
|
||||
@@ -108,7 +108,7 @@ if [ -n "$NM" ]; then
|
||||
cut -d' ' -f3 |
|
||||
sed -e "s/^${prefix}//")
|
||||
else
|
||||
dump=$(dumpbin.exe -linkermember:1 ${libname} |
|
||||
dump=$(dumpbin -linkermember:1 ${libname} |
|
||||
sed -e '/public symbols/,$!d' -e '/^ \{1,\}Summary/,$d' -e "s/ \{1,\}${prefix}/ /" -e 's/ \{1,\}/ /g' |
|
||||
tail -n +2 |
|
||||
cut -d' ' -f3)
|
||||
|
||||
@@ -4,6 +4,6 @@ LINK_EXE_PATH=$(dirname "$(command -v cl)")/link
|
||||
if [ -x "$LINK_EXE_PATH" ]; then
|
||||
"$LINK_EXE_PATH" $@
|
||||
else
|
||||
link.exe $@
|
||||
link $@
|
||||
fi
|
||||
exit $?
|
||||
|
||||
@@ -1,32 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
if [ "$1" = "--version" ]; then
|
||||
rc.exe -?
|
||||
exit $?
|
||||
fi
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: mswindres [-I/include/path ...] [-DSOME_DEFINE ...] [-o output.o] input.rc [output.o]" >&2
|
||||
exit 0
|
||||
fi
|
||||
|
||||
EXTRA_OPTS="-nologo"
|
||||
|
||||
while [ $# -gt 2 ]; do
|
||||
case $1 in
|
||||
-D*) EXTRA_OPTS="$EXTRA_OPTS -d$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-I*) EXTRA_OPTS="$EXTRA_OPTS -i$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
|
||||
-o) OPT_OUT="$2"; shift ;;
|
||||
esac
|
||||
shift
|
||||
done
|
||||
|
||||
IN="$1"
|
||||
if [ -z "$OPT_OUT" ]; then
|
||||
OUT="$2"
|
||||
else
|
||||
OUT="$OPT_OUT"
|
||||
fi
|
||||
|
||||
eval set -- $EXTRA_OPTS
|
||||
rc.exe "$@" -fo "$OUT" "$IN"
|
||||
doc/APIchanges: 1039 changed lines (diff suppressed because it is too large)
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
||||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER = 7.0.1
|
||||
PROJECT_NUMBER = 4.0
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
@@ -1980,7 +1980,6 @@ PREDEFINED = __attribute__(x)= \
|
||||
av_alloc_size(...)= \
|
||||
AV_GCC_VERSION_AT_LEAST(x,y)=1 \
|
||||
AV_GCC_VERSION_AT_MOST(x,y)=0 \
|
||||
"FF_PAD_STRUCTURE(name,size,...)=typedef struct name { __VA_ARGS__ } name;" \
|
||||
__GNUC__
|
||||
|
||||
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
|
||||
|
||||
@@ -19,7 +19,6 @@ MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
|
||||
MANPAGES = $(MANPAGES1) $(MANPAGES3)
|
||||
PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
doc/community.html \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
@@ -28,9 +27,6 @@ HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMP
|
||||
doc/mailing-list-faq.html \
|
||||
doc/nut.html \
|
||||
doc/platform.html \
|
||||
$(SRC_PATH)/doc/bootstrap.min.css \
|
||||
$(SRC_PATH)/doc/style.min.css \
|
||||
$(SRC_PATH)/doc/default.css \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
@@ -106,7 +102,7 @@ DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
|
||||
|
||||
doc/doxy/html: TAG = DOXY
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
|
||||
$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@
The FFmpeg developers.

For details about the authorship, see the Git history of the project
(https://git.ffmpeg.org/ffmpeg), e.g. by typing the command
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
@command{git log} in the FFmpeg source directory, or browsing the
online repository at @url{https://git.ffmpeg.org/ffmpeg}.
online repository at @url{http://source.ffmpeg.org}.

Maintainers for the specific components are listed in the file
@file{MAINTAINERS} in the source code tree.
|
||||
@@ -37,61 +37,6 @@ raw ADTS AAC or an MPEG-TS container to MP4A-LATM, to an FLV file, or
|
||||
to MOV/MP4 files and related formats such as 3GP or M4A. Please note
|
||||
that it is auto-inserted for MP4A-LATM and MOV/MP4 and related formats.
|
||||
|
||||
@section av1_metadata
|
||||
|
||||
Modify metadata embedded in an AV1 stream.
|
||||
|
||||
@table @option
|
||||
@item td
|
||||
Insert or remove temporal delimiter OBUs in all temporal units of the
|
||||
stream.
|
||||
|
||||
@table @samp
|
||||
@item insert
|
||||
Insert a TD at the beginning of every TU which does not already have one.
|
||||
@item remove
|
||||
Remove the TD from the beginning of every TU which has one.
|
||||
@end table
|
||||
|
||||
@item color_primaries
|
||||
@item transfer_characteristics
|
||||
@item matrix_coefficients
|
||||
Set the color description fields in the stream (see AV1 section 6.4.2).
|
||||
|
||||
@item color_range
|
||||
Set the color range in the stream (see AV1 section 6.4.2; note that
|
||||
this cannot be set for streams using BT.709 primaries, sRGB transfer
|
||||
characteristic and identity (RGB) matrix coefficients).
|
||||
@table @samp
|
||||
@item tv
|
||||
Limited range.
|
||||
@item pc
|
||||
Full range.
|
||||
@end table
|
||||
|
||||
@item chroma_sample_position
|
||||
Set the chroma sample location in the stream (see AV1 section 6.4.2).
|
||||
This can only be set for 4:2:0 streams.
|
||||
|
||||
@table @samp
|
||||
@item vertical
|
||||
Left position (matching the default in MPEG-2 and H.264).
|
||||
@item colocated
|
||||
Top-left position.
|
||||
@end table
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (@emph{time_scale / num_units_in_display_tick}) in
|
||||
the timing info in the sequence header.
|
||||
@item num_ticks_per_picture
|
||||
Set the number of ticks in each picture, to indicate that the stream
|
||||
has a fixed framerate. Ignored if @option{tick_rate} is not also set.
|
||||
|
||||
@item delete_padding
|
||||
Deletes Padding OBUs.
|
||||
|
||||
@end table
|
||||
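As a hedged illustration of how the av1_metadata options above combine on a stream copy (the particular values are arbitrary examples, not recommendations):
@example
ffmpeg -i INPUT -c:v copy -bsf:v av1_metadata=td=insert:color_range=pc OUTPUT
@end example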
|
||||
@section chomp
|
||||
|
||||
Remove zero padding at the end of a packet.
|
||||
@@ -103,9 +48,7 @@ DTS-HD.
|
||||
|
||||
@section dump_extra
|
||||
|
||||
Add extradata to the beginning of the filtered packets except when
|
||||
said packets already exactly begin with the extradata that is intended
|
||||
to be added.
|
||||
Add extradata to the beginning of the filtered packets.
|
||||
|
||||
@table @option
|
||||
@item freq
|
||||
@@ -122,7 +65,7 @@ add extradata to all packets
|
||||
@end table
|
||||
@end table
|
||||
|
||||
If not specified it is assumed @samp{k}.
|
||||
If not specified it is assumed @samp{e}.
|
||||
|
||||
For example the following @command{ffmpeg} command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
@@ -132,36 +75,6 @@ the header stored in extradata to the key packets:
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
@end example
|
||||
|
||||
@section dv_error_marker
|
||||
|
||||
Blocks in DV which are marked as damaged are replaced by blocks of the specified color.
|
||||
|
||||
@table @option
|
||||
@item color
|
||||
The color to replace damaged blocks by
|
||||
@item sta
|
||||
A 16 bit mask which specifies which of the 16 possible error status values are
|
||||
to be replaced by colored blocks. 0xFFFE is the default which replaces all non 0
|
||||
error status values.
|
||||
@table @samp
|
||||
@item ok
|
||||
No error, no concealment
|
||||
@item err
|
||||
Error, No concealment
|
||||
@item res
|
||||
Reserved
|
||||
@item notok
|
||||
Error or concealment
|
||||
@item notres
|
||||
Not reserved
|
||||
@item Aa, Ba, Ca, Ab, Bb, Cb, A, B, C, a, b, erri, erru
|
||||
The specific error status code
|
||||
@end table
|
||||
see page 44-46 or section 5.5 of
|
||||
@url{http://web.archive.org/web/20060927044735/http://www.smpte.org/smpte_store/standards/pdf/s314m.pdf}
|
||||
|
||||
@end table
|
||||
|
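A minimal sketch of invoking dv_error_marker while copying the video stream (the color value is an arbitrary example):
@example
ffmpeg -i INPUT -c:v copy -bsf:v dv_error_marker=color=green OUTPUT
@end example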
||||
@section eac3_core
|
||||
|
||||
Extract the core from a E-AC-3 stream, dropping extra channels.
|
||||
@@ -199,13 +112,6 @@ Identical to @option{pass_types}, except the units in the given set
|
||||
removed and all others passed through.
|
||||
@end table
|
||||
|
||||
The types used by pass_types and remove_types correspond to NAL unit types
|
||||
(nal_unit_type) in H.264, HEVC and H.266 (see Table 7-1 in the H.264
|
||||
and HEVC specifications or Table 5 in the H.266 specification), to
|
||||
marker values for JPEG (without 0xFF prefix) and to start codes without
|
||||
start code prefix (i.e. the byte following the 0x000001) for MPEG-2.
|
||||
For VP8 and VP9, every unit has type zero.
|
||||
|
||||
Extradata is unchanged by this transformation, but note that if the stream
|
||||
contains inline parameter sets then the output may be unusable if they are
|
||||
removed.
|
||||
@@ -220,21 +126,6 @@ To remove all AUDs, SEI and filler from an H.265 stream:
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=35|38-40' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all user data from a MPEG-2 stream, including Closed Captions:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=178' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all SEI from a H264 stream, including Closed Captions:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=6' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all prefix and suffix SEI from a HEVC stream, including Closed Captions and dynamic HDR:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=39|40' OUTPUT
|
||||
@end example
|
||||
|
||||
@section hapqa_extract
|
||||
|
||||
Extract Rgb or Alpha part of an HAPQA file, without recompression, in order to create an HAPQ or an HAPAlphaOnly file.
|
||||
@@ -269,20 +160,12 @@ Modify metadata embedded in an H.264 stream.
|
||||
Insert or remove AUD NAL units in all access units of the stream.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
@item sample_aspect_ratio
|
||||
Set the sample aspect ratio of the stream in the VUI parameters.
|
||||
See H.264 table E-1.
|
||||
|
||||
@item overscan_appropriate_flag
|
||||
Set whether the stream is suitable for display using overscan
|
||||
or not (see H.264 section E.2.1).
|
||||
|
||||
@item video_format
|
||||
@item video_full_range_flag
|
||||
@@ -300,7 +183,7 @@ Set the chroma sample location in the stream (see H.264 section
|
||||
E.2.1 and figure E-1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (time_scale / num_units_in_tick) in the VUI
|
||||
Set the tick rate (num_units_in_tick / time_scale) in the VUI
|
||||
parameters. This is the smallest time unit representable in the
|
||||
stream, and in many cases represents the field rate of the stream
|
||||
(double the frame rate).
|
||||
@@ -309,11 +192,6 @@ Set whether the stream has fixed framerate - typically this indicates
|
||||
that the framerate is exactly half the tick rate, but the exact
|
||||
meaning is dependent on interlacing and the picture structure (see
|
||||
H.264 section E.2.1 and table E-6).
|
||||
@item zero_new_constraint_set_flags
|
||||
Zero constraint_set4_flag and constraint_set5_flag in the SPS. These
|
||||
bits were reserved in a previous version of the H.264 spec, and thus
|
||||
some hardware decoders require these to be zero. The result of zeroing
|
||||
this is still a valid bitstream.
|
||||
|
||||
@item crop_left
|
||||
@item crop_right
|
||||
@@ -337,46 +215,6 @@ insert the string ``hello'' associated with the given UUID.
|
||||
@item delete_filler
|
||||
Deletes both filler NAL units and filler SEI messages.
|
||||
|
||||
@item display_orientation
|
||||
Insert, extract or remove Display orientation SEI messages.
|
||||
See H.264 section D.1.27 and D.2.27 for syntax and semantics.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@item extract
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
Insert mode works in conjunction with @code{rotate} and @code{flip} options.
|
||||
Any pre-existing Display orientation messages will be removed in insert or remove mode.
|
||||
Extract mode attaches the display matrix to the packet as side data.
|
||||
|
||||
@item rotate
|
||||
Set rotation in display orientation SEI (anticlockwise angle in degrees).
|
||||
Range is -360 to +360. Default is NaN.
|
||||
|
||||
@item flip
|
||||
Set flip in display orientation SEI.
|
||||
|
||||
@table @samp
|
||||
@item horizontal
|
||||
@item vertical
|
||||
@end table
|
||||
|
||||
Default is unset.
|
||||
|
||||
@item level
|
||||
Set the level in the SPS. Refer to H.264 section A.3 and tables A-1
|
||||
to A-5.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{4.2}), a
|
||||
level_idc value (for example, @samp{42}), or the special name @samp{auto}
|
||||
indicating that the filter should attempt to guess the level from the
|
||||
input stream properties.
|
||||
|
||||
@end table
|
||||
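A hedged example combining a few of the h264_metadata options above on a stream copy (the values are chosen only to show the option syntax):
@example
ffmpeg -i INPUT -c:v copy -bsf:v h264_metadata=aud=insert:sample_aspect_ratio=1/1 OUTPUT
@end example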
|
||||
@section h264_mp4toannexb
|
||||
@@ -404,6 +242,9 @@ This applies a specific fixup to some Blu-ray streams which contain
|
||||
redundant PPSs modifying irrelevant parameters of the stream which
|
||||
confuse other transformations which require correct extradata.
|
||||
|
||||
A new single global PPS is created, and all of the redundant PPSs
|
||||
within the stream are removed.
|
||||
|
||||
@section hevc_metadata
|
||||
|
||||
Modify metadata embedded in an HEVC stream.
|
||||
@@ -436,8 +277,8 @@ Set the chroma sample location in the stream (see H.265 section
|
||||
E.3.1 and figure E.1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate in the VPS and VUI parameters (time_scale /
|
||||
num_units_in_tick). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
Set the tick rate in the VPS and VUI parameters (num_units_in_tick /
|
||||
time_scale). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
set a constant framerate in the stream. Note that it is likely to be
|
||||
overridden by container parameters when the stream is in a container.
|
||||
|
||||
@@ -456,15 +297,6 @@ will replace the current ones if the stream is already cropped.
|
||||
These fields are set in pixels. Note that some sizes may not be
|
||||
representable if the chroma is subsampled (H.265 section 7.4.3.2.1).
|
||||
|
||||
@item level
|
||||
Set the level in the VPS and SPS. See H.265 section A.4 and tables
|
||||
A.6 and A.7.
|
||||
|
||||
The argument must be the name of a level (for example, @samp{5.1}), a
|
||||
@emph{general_level_idc} value (for example, @samp{153} for level 5.1),
|
||||
or the special name @samp{auto} indicating that the filter should
|
||||
attempt to guess the level from the input stream properties.
|
||||
|
||||
@end table
|
||||
|
||||
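A comparable hedged example for hevc_metadata (again, illustrative values only):
@example
ffmpeg -i INPUT -c:v copy -bsf:v hevc_metadata=aud=insert:level=5.1 OUTPUT
@end example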
@section hevc_mp4toannexb
|
||||
@@ -550,6 +382,10 @@ metadata header from each subtitle packet.
|
||||
|
||||
See also the @ref{text2movsub} filter.
|
||||
|
||||
@section mp3decomp
|
||||
|
||||
Decompress non-standard compressed MP3 audio headers.
|
||||
|
||||
@section mpeg2_metadata
|
||||
|
||||
Modify metadata embedded in an MPEG-2 stream.
|
||||
@@ -614,185 +450,25 @@ container. Can be used for fuzzing or testing error resilience/concealment.
|
||||
Parameters:
|
||||
@table @option
|
||||
@item amount
|
||||
Accepts an expression whose evaluation per-packet determines how often bytes in that
|
||||
packet will be modified. A value below 0 will result in a variable frequency.
|
||||
Default is 0 which results in no modification. However, if neither amount nor drop is specified,
|
||||
amount will be set to @var{-1}. See below for accepted variables.
|
||||
@item drop
|
||||
Accepts an expression evaluated per-packet whose value determines whether that packet is dropped.
|
||||
Evaluation to a positive value results in the packet being dropped. Evaluation to a negative
|
||||
value results in a variable chance of it being dropped, roughly inverse in proportion to the magnitude
|
||||
of the value. Default is 0 which results in no drops. See below for accepted variables.
|
||||
A numeral string, whose value is related to how often output bytes will
|
||||
be modified. Therefore, values below or equal to 0 are forbidden, and
|
||||
the lower the more frequent bytes will be modified, with 1 meaning
|
||||
every byte is modified.
|
||||
@item dropamount
|
||||
Accepts a non-negative integer, which assigns a variable chance of it being dropped, roughly inverse
|
||||
in proportion to the value. Default is 0 which results in no drops. This option is kept for backwards
|
||||
compatibility and is equivalent to setting drop to a negative value with the same magnitude
|
||||
i.e. @code{dropamount=4} is the same as @code{drop=-4}. Ignored if drop is also specified.
|
||||
A numeral string, whose value is related to how often packets will be dropped.
|
||||
Therefore, values below or equal to 0 are forbidden, and the lower the more
|
||||
frequent packets will be dropped, with 1 meaning every packet is dropped.
|
||||
@end table
|
||||
|
||||
Both @code{amount} and @code{drop} accept expressions containing the following variables:
|
||||
|
||||
@table @samp
|
||||
@item n
|
||||
The index of the packet, starting from zero.
|
||||
@item tb
|
||||
The timebase for packet timestamps.
|
||||
@item pts
|
||||
Packet presentation timestamp.
|
||||
@item dts
|
||||
Packet decoding timestamp.
|
||||
@item nopts
|
||||
Constant representing AV_NOPTS_VALUE.
|
||||
@item startpts
|
||||
First non-AV_NOPTS_VALUE PTS seen in the stream.
|
||||
@item startdts
|
||||
First non-AV_NOPTS_VALUE DTS seen in the stream.
|
||||
@item duration
|
||||
@itemx d
|
||||
Packet duration, in timebase units.
|
||||
@item pos
|
||||
Packet position in input; may be -1 when unknown or not set.
|
||||
@item size
|
||||
Packet size, in bytes.
|
||||
@item key
|
||||
Whether packet is marked as a keyframe.
|
||||
@item state
|
||||
A pseudo random integer, primarily derived from the content of packet payload.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
Apply modification to every byte but don't drop any packets.
|
||||
The following example applies the modification to every byte but does not drop
|
||||
any packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
|
||||
@end example
|
||||
|
||||
Drop every video packet not marked as a keyframe after timestamp 30s but do not
|
||||
modify any of the remaining packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
|
||||
@end example
|
||||
|
||||
Drop one second of audio every 10 seconds and add some random noise to the rest.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
@end example
|
||||
|
||||
@section null
|
||||
This bitstream filter passes the packets through unchanged.
|
||||
|
||||
@section pcm_rechunk
|
||||
|
||||
Repacketize PCM audio to a fixed number of samples per packet or a fixed packet
|
||||
rate per second. This is similar to the @ref{asetnsamples,,asetnsamples audio
|
||||
filter,ffmpeg-filters} but works on audio packets instead of audio frames.
|
||||
|
||||
@table @option
|
||||
@item nb_out_samples, n
|
||||
Set the number of samples per each output audio packet. The number is intended
|
||||
as the number of samples @emph{per each channel}. Default value is 1024.
|
||||
|
||||
@item pad, p
|
||||
If set to 1, the filter will pad the last audio packet with silence, so that it
|
||||
will contain the same number of samples (or roughly the same number of samples,
|
||||
see @option{frame_rate}) as the previous ones. Default value is 1.
|
||||
|
||||
@item frame_rate, r
|
||||
This option makes the filter output a fixed number of packets per second instead
|
||||
of a fixed number of samples per packet. If the audio sample rate is not
|
||||
divisible by the frame rate then the number of samples will not be constant but
|
||||
will vary slightly so that each packet will start as close to the frame
|
||||
boundary as possible. Using this option has precedence over @option{nb_out_samples}.
|
||||
@end table
|
||||
|
||||
You can generate the well known 1602-1601-1602-1601-1602 pattern of 48kHz audio
|
||||
for NTSC frame rate using the @option{frame_rate} option.
|
||||
@example
|
||||
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc -
|
||||
@end example
|
||||
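For the sample-count form of pcm_rechunk, a hedged companion example (512 samples per packet is an arbitrary choice):
@example
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=n=512 -f framecrc -
@end example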
|
||||
@section pgs_frame_merge
|
||||
|
||||
Merge a sequence of PGS Subtitle segments ending with an "end of display set"
|
||||
segment into a single packet.
|
||||
|
||||
This is required by some containers that support PGS subtitles
|
||||
(muxer @code{matroska}).
|
||||
|
||||
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
|
||||
@table @option
|
||||
@item color_primaries
|
||||
Set the color primaries.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same color primaries property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item bt470bg
|
||||
BT601 625
|
||||
|
||||
@item smpte170m
|
||||
BT601 525
|
||||
|
||||
@item bt2020
|
||||
@item smpte431
|
||||
DCI P3
|
||||
|
||||
@item smpte432
|
||||
P3 D65
|
||||
|
||||
@end table
|
||||
|
||||
@item transfer_characteristics
|
||||
Set the color transfer.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same transfer characteristics property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
BT 601, BT 709, BT 2020
|
||||
@item smpte2084
|
||||
SMPTE ST 2084
|
||||
@item arib-std-b67
|
||||
ARIB STD-B67
|
||||
@end table
|
||||
|
||||
|
||||
@item matrix_coefficients
|
||||
Set the matrix coefficient.
|
||||
Available values are:
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Keep the same colorspace property (default).
|
||||
|
||||
@item unknown
|
||||
@item bt709
|
||||
@item smpte170m
|
||||
BT 601
|
||||
|
||||
@item bt2020nc
|
||||
@end table
|
||||
@end table
|
||||
|
||||
Set Rec709 colorspace for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt709:color_trc=bt709:colorspace=bt709 output.mov
|
||||
@end example
|
||||
|
||||
Set Hybrid Log-Gamma parameters for each frame of the file
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v prores_metadata=color_primaries=bt2020:color_trc=arib-std-b67:colorspace=bt2020nc output.mov
|
||||
@end example
|
||||
|
||||
@section remove_extra
|
||||
|
||||
Remove extradata from packets.
|
||||
@@ -815,100 +491,6 @@ Remove extradata from all frames.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section setts
|
||||
Set PTS and DTS in packets.
|
||||
|
||||
It accepts the following parameters:
|
||||
@table @option
|
||||
@item ts
|
||||
@item pts
|
||||
@item dts
|
||||
Set expressions for PTS, DTS or both.
|
||||
@item duration
|
||||
Set expression for duration.
|
||||
@item time_base
|
||||
Set output time base.
|
||||
@end table
|
||||
|
||||
The expressions are evaluated through the eval API and can contain the following
|
||||
constants:
|
||||
|
||||
@table @option
|
||||
@item N
|
||||
The count of the input packet. Starting from 0.
|
||||
|
||||
@item TS
|
||||
The demux timestamp in input in case of @code{ts} or @code{dts} option or presentation
|
||||
timestamp in case of @code{pts} option.
|
||||
|
||||
@item POS
|
||||
The original position in the file of the packet, or undefined if undefined
|
||||
for the current packet
|
||||
|
||||
@item DTS
|
||||
The demux timestamp in input.
|
||||
|
||||
@item PTS
|
||||
The presentation timestamp in input.
|
||||
|
||||
@item DURATION
|
||||
The duration in input.
|
||||
|
||||
@item STARTDTS
|
||||
The DTS of the first packet.
|
||||
|
||||
@item STARTPTS
|
||||
The PTS of the first packet.
|
||||
|
||||
@item PREV_INDTS
|
||||
The previous input DTS.
|
||||
|
||||
@item PREV_INPTS
|
||||
The previous input PTS.
|
||||
|
||||
@item PREV_INDURATION
|
||||
The previous input duration.
|
||||
|
||||
@item PREV_OUTDTS
|
||||
The previous output DTS.
|
||||
|
||||
@item PREV_OUTPTS
|
||||
The previous output PTS.
|
||||
|
||||
@item PREV_OUTDURATION
|
||||
The previous output duration.
|
||||
|
||||
@item NEXT_DTS
|
||||
The next input DTS.
|
||||
|
||||
@item NEXT_PTS
|
||||
The next input PTS.
|
||||
|
||||
@item NEXT_DURATION
|
||||
The next input duration.
|
||||
|
||||
@item TB
|
||||
The timebase of stream packet belongs.
|
||||
|
||||
@item TB_OUT
|
||||
The output timebase.
|
||||
|
||||
@item SR
|
||||
The sample rate of stream packet belongs.
|
||||
|
||||
@item NOPTS
|
||||
The AV_NOPTS_VALUE constant.
|
||||
@end table
|
||||
|
||||
For example, to set PTS equal to DTS (not recommended if B-frames are involved):
|
||||
@example
|
||||
ffmpeg -i INPUT -c:a copy -bsf:a setts=pts=DTS out.mkv
|
||||
@end example
|
||||
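Another hedged setts sketch, shifting both timestamps so the output starts at zero (expressions chosen purely for illustration, using the constants listed above):
@example
ffmpeg -i INPUT -c copy -bsf setts=pts=PTS-STARTPTS:dts=DTS-STARTDTS out.mkv
@end example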
|
||||
@section showinfo
|
||||
Log basic packet information. Mainly useful for testing, debugging,
|
||||
and development.
|
||||
|
||||
@anchor{text2movsub}
|
||||
@section text2movsub
|
||||
|
||||
@@ -923,40 +505,7 @@ Log trace output containing all syntax elements in the coded stream
|
||||
headers (everything above the level of individual coded blocks).
|
||||
This can be useful for debugging low-level stream issues.
|
||||
|
||||
Supports AV1, H.264, H.265, (M)JPEG, MPEG-2 and VP9, but depending
|
||||
on the build only a subset of these may be available.
|
||||
|
||||
@section truehd_core
|
||||
|
||||
Extract the core from a TrueHD stream, dropping ATMOS data.
|
||||
|
||||
@section vp9_metadata
|
||||
|
||||
Modify metadata embedded in a VP9 stream.
|
||||
|
||||
@table @option
|
||||
@item color_space
|
||||
Set the color space value in the frame header. Note that any frame
|
||||
set to RGB will be implicitly set to PC range and that RGB is
|
||||
incompatible with profiles 0 and 2.
|
||||
@table @samp
|
||||
@item unknown
|
||||
@item bt601
|
||||
@item bt709
|
||||
@item smpte170
|
||||
@item smpte240
|
||||
@item bt2020
|
||||
@item rgb
|
||||
@end table
|
||||
|
||||
@item color_range
|
||||
Set the color range value in the frame header. Note that any value
|
||||
imposed by the color space will take precedence over this value.
|
||||
@table @samp
|
||||
@item tv
|
||||
@item pc
|
||||
@end table
|
||||
@end table
|
||||
Supports H.264, H.265 and MPEG-2.
|
||||
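A hedged example of setting the vp9_metadata fields described above on a stream copy (the values are illustrative only):
@example
ffmpeg -i INPUT -c:v copy -bsf:v vp9_metadata=color_space=bt709:color_range=tv OUTPUT
@end example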
|
||||
@section vp9_superframe
|
||||
|
||||
|
||||
doc/bootstrap.min.css (vendored): 2 changed lines (diff suppressed because one or more lines are too long)
@@ -36,11 +36,11 @@ install
|
||||
examples
|
||||
Build all examples located in doc/examples.
|
||||
|
||||
checkheaders
|
||||
Check headers dependencies.
|
||||
libavformat/output-example
|
||||
Build the libavformat basic example.
|
||||
|
||||
alltools
|
||||
Build all tools in tools directory.
|
||||
libswscale/swscale-test
|
||||
Build the swscale self-test (useful also as an example).
|
||||
|
||||
config
|
||||
Reconfigure the project with the current configuration.
|
||||
@@ -48,8 +48,6 @@ config
|
||||
tools/target_dec_<decoder>_fuzzer
|
||||
Build fuzzer to fuzz the specified decoder.
|
||||
|
||||
tools/target_bsf_<filter>_fuzzer
|
||||
Build fuzzer to fuzz the specified bitstream filter.
|
||||
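As a sketch of the fuzzer targets above, with a decoder name filled in purely as an example:

    make tools/target_dec_mjpeg_fuzzer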
|
||||
Useful standard make commands:
|
||||
make -t <target>
|
||||
|
||||
375
doc/codecs.texi
375
doc/codecs.texi
@@ -3,7 +3,7 @@
|
||||
@c man begin CODEC OPTIONS
|
||||
|
||||
libavcodec provides some generic global options, which can be set on
|
||||
all the encoders and decoders. In addition, each codec may support
|
||||
all the encoders and decoders. In addition each codec may support
|
||||
so-called private options, which are specific for a given codec.
|
||||
|
||||
Sometimes, a global option may only affect a specific kind of codec,
|
||||
@@ -50,13 +50,11 @@ Use internal 2pass ratecontrol in first pass mode.
|
||||
Use internal 2pass ratecontrol in second pass mode.
|
||||
@item gray
|
||||
Only decode/encode grayscale.
|
||||
@item emu_edge
|
||||
Do not draw edges.
|
||||
@item psnr
|
||||
Set error[?] variables during encoding.
|
||||
@item truncated
|
||||
Input bitstream might be randomly truncated.
|
||||
@item drop_changed
|
||||
Don't output frames whose parameters differ from first decoded frame in stream.
|
||||
Error AVERROR_INPUT_CHANGED is returned when a frame is dropped.
|
||||
|
||||
@item ildct
|
||||
Use interlaced DCT.
|
||||
@@ -70,14 +68,50 @@ This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item aic
|
||||
Apply H263 advanced intra coding / mpeg4 ac prediction.
|
||||
@item cbp
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item qprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item ilme
|
||||
Apply interlaced motion estimation.
|
||||
@item cgop
|
||||
Use closed gop.
|
||||
@item output_corrupt
|
||||
Output even potentially corrupted frames.
|
||||
@end table
|
||||
|
||||
@item me_method @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item zero
|
||||
zero motion estimation (fastest)
|
||||
@item full
|
||||
full motion estimation (slowest)
|
||||
@item epzs
|
||||
EPZS motion estimation (default)
|
||||
@item esa
|
||||
esa motion estimation (alias for full)
|
||||
@item tesa
|
||||
tesa motion estimation
|
||||
@item dia
|
||||
dia motion estimation (alias for epzs)
|
||||
@item log
|
||||
log motion estimation
|
||||
@item phods
|
||||
phods motion estimation
|
||||
@item x1
|
||||
X1 motion estimation
|
||||
@item hex
|
||||
hex motion estimation
|
||||
@item umh
|
||||
umh motion estimation
|
||||
@item iter
|
||||
iter motion estimation
|
||||
@end table
|
||||
|
||||
@item extradata_size @var{integer}
|
||||
Set extradata size.
|
||||
|
||||
@item time_base @var{rational number}
|
||||
Set codec time base.
|
||||
|
||||
@@ -144,6 +178,24 @@ Default value is 0.
|
||||
@item b_qfactor @var{float} (@emph{encoding,video})
|
||||
Set qp factor between P and B frames.
|
||||
|
||||
@item rc_strategy @var{integer} (@emph{encoding,video})
|
||||
Set ratecontrol method.
|
||||
|
||||
@item b_strategy @var{integer} (@emph{encoding,video})
|
||||
Set strategy to choose between I/P/B-frames.
|
||||
|
||||
@item ps @var{integer} (@emph{encoding,video})
|
||||
Set RTP payload size in bytes.
|
||||
|
||||
@item mv_bits @var{integer}
|
||||
@item header_bits @var{integer}
|
||||
@item i_tex_bits @var{integer}
|
||||
@item p_tex_bits @var{integer}
|
||||
@item i_count @var{integer}
|
||||
@item p_count @var{integer}
|
||||
@item skip_count @var{integer}
|
||||
@item misc_bits @var{integer}
|
||||
@item frame_bits @var{integer}
|
||||
@item codec_tag @var{integer}
|
||||
@item bug @var{flags} (@emph{decoding,video})
|
||||
Workaround not auto detected encoder bugs.
|
||||
@@ -152,6 +204,8 @@ Possible values:
|
||||
@table @samp
|
||||
@item autodetect
|
||||
|
||||
@item old_msmpeg4
|
||||
some old lavc generated msmpeg4v3 files (no autodetection)
|
||||
@item xvid_ilace
|
||||
Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
@item ump4
|
||||
@@ -160,6 +214,8 @@ Xvid interlacing bug (autodetected if fourcc==XVIX)
|
||||
padding bug (autodetected)
|
||||
@item amv
|
||||
|
||||
@item ac_vlc
|
||||
illegal vlc bug (autodetected per fourcc)
|
||||
@item qpel_chroma
|
||||
|
||||
@item std_qpel
|
||||
@@ -180,6 +236,14 @@ Workaround various bugs in microsoft broken decoders.
|
||||
trancated frames
|
||||
@end table
|
||||
|
||||
@item lelim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for luminance (negative
|
||||
values also consider DC coefficient).
|
||||
|
||||
@item celim @var{integer} (@emph{encoding,video})
|
||||
Set single coefficient elimination threshold for chrominance (negative
|
||||
values also consider dc coefficient)
|
||||
|
||||
@item strict @var{integer} (@emph{decoding/encoding,audio,video})
|
||||
Specify how strictly to follow the standards.
|
||||
|
||||
@@ -233,8 +297,29 @@ consider things that a sane encoder should not do as an error
|
||||
|
||||
@item block_align @var{integer}
|
||||
|
||||
@item mpeg_quant @var{integer} (@emph{encoding,video})
|
||||
Use MPEG quantizers instead of H.263.
|
||||
|
||||
@item qsquish @var{float} (@emph{encoding,video})
|
||||
How to keep quantizer between qmin and qmax (0 = clip, 1 = use
|
||||
differentiable function).
|
||||
|
||||
@item rc_qmod_amp @var{float} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_qmod_freq @var{integer} (@emph{encoding,video})
|
||||
Set experimental quantizer modulation.
|
||||
|
||||
@item rc_override_count @var{integer}
|
||||
|
||||
@item rc_eq @var{string} (@emph{encoding,video})
|
||||
Set rate control equation. When computing the expression, besides the
|
||||
standard functions defined in the section 'Expression Evaluation', the
|
||||
following functions are available: bits2qp(bits), qp2bits(qp). Also
|
||||
the following constants are available: iTex pTex tex mv fCode iCount
|
||||
mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
|
||||
avgTex.
|
||||
|
||||
@item maxrate @var{integer} (@emph{encoding,audio,video})
|
||||
Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
|
||||
|
||||
@@ -245,12 +330,18 @@ encode. It is of little use elsewise.
|
||||
@item bufsize @var{integer} (@emph{encoding,audio,video})
|
||||
Set ratecontrol buffer size (in bits).
|
||||
|
||||
@item rc_buf_aggressivity @var{float} (@emph{encoding,video})
|
||||
Currently useless.
|
||||
|
||||
@item i_qfactor @var{float} (@emph{encoding,video})
|
||||
Set QP factor between P and I frames.
|
||||
|
||||
@item i_qoffset @var{float} (@emph{encoding,video})
|
||||
Set QP offset between P and I frames.
|
||||
|
||||
@item rc_init_cplx @var{float} (@emph{encoding,video})
|
||||
Set initial complexity for 1-pass encoding.
|
||||
|
||||
@item dct @var{integer} (@emph{encoding,video})
|
||||
Set DCT algorithm.
|
||||
|
||||
@@ -315,7 +406,11 @@ Automatically pick a IDCT compatible with the simple one
|
||||
|
||||
@item simpleneon
|
||||
|
||||
@item xvid
|
||||
@item simplealpha
|
||||
|
||||
@item ipp
|
||||
|
||||
@item xvidmmx
|
||||
|
||||
@item faani
|
||||
floating point AAN IDCT
|
||||
@@ -338,6 +433,19 @@ favor predicting from the previous frame instead of the current
|
||||
|
||||
@item bits_per_coded_sample @var{integer}
|
||||
|
||||
@item pred @var{integer} (@emph{encoding,video})
|
||||
Set prediction method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item left
|
||||
|
||||
@item plane
|
||||
|
||||
@item median
|
||||
|
||||
@end table
|
||||
|
||||
@item aspect @var{rational number} (@emph{encoding,video})
|
||||
Set sample aspect ratio.
|
||||
|
||||
@@ -532,28 +640,13 @@ noise preserving sum of squared differences
|
||||
|
||||
@item dia_size @var{integer} (@emph{encoding,video})
|
||||
Set diamond type & size for motion estimation.
|
||||
@table @samp
|
||||
@item (1024, INT_MAX)
|
||||
full motion estimation(slowest)
|
||||
@item (768, 1024]
|
||||
umh motion estimation
|
||||
@item (512, 768]
|
||||
hex motion estimation
|
||||
@item (256, 512]
|
||||
l2s diamond motion estimation
|
||||
@item [2,256]
|
||||
var diamond motion estimation
|
||||
@item (-1, 2)
|
||||
small diamond motion estimation
|
||||
@item -1
|
||||
funny diamond motion estimation
|
||||
@item (INT_MIN, -1)
|
||||
sab diamond motion estimation
|
||||
@end table
|
||||
|
||||
@item last_pred @var{integer} (@emph{encoding,video})
|
||||
Set amount of motion predictors from the previous frame.
|
||||
|
||||
@item preme @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation.
|
||||
|
||||
@item precmp @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation compare function.
|
||||
|
||||
@@ -597,11 +690,40 @@ Set diamond type & size for motion estimation pre-pass.
|
||||
@item subq @var{integer} (@emph{encoding,video})
|
||||
Set sub pel motion estimation quality.
|
||||
|
||||
@item dtg_active_format @var{integer}
|
||||
|
||||
@item me_range @var{integer} (@emph{encoding,video})
|
||||
Set limit motion vectors range (1023 for DivX player).
|
||||
|
||||
@item ibias @var{integer} (@emph{encoding,video})
|
||||
Set intra quant bias.
|
||||
|
||||
@item pbias @var{integer} (@emph{encoding,video})
|
||||
Set inter quant bias.
|
||||
|
||||
@item color_table_id @var{integer}
|
||||
|
||||
@item global_quality @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
@item coder @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item vlc
|
||||
variable length coder / huffman coder
|
||||
@item ac
|
||||
arithmetic coder
|
||||
@item raw
|
||||
raw (no encoding)
|
||||
@item rle
|
||||
run-length coder
|
||||
@item deflate
|
||||
deflate-based coder
|
||||
@end table
|
||||
|
||||
@item context @var{integer} (@emph{encoding,video})
|
||||
Set context model.
|
||||
|
||||
@item slice_flags @var{integer}
|
||||
|
||||
@item mbd @var{integer} (@emph{encoding,video})
|
||||
@@ -617,16 +739,32 @@ use fewest bits
|
||||
use best rate distortion
|
||||
@end table
|
||||
|
||||
@item stream_codec_tag @var{integer}
|
||||
|
||||
@item sc_threshold @var{integer} (@emph{encoding,video})
|
||||
Set scene change threshold.
|
||||
|
||||
@item lmin @var{integer} (@emph{encoding,video})
|
||||
Set min lagrange factor (VBR).
|
||||
|
||||
@item lmax @var{integer} (@emph{encoding,video})
|
||||
Set max lagrange factor (VBR).
|
||||
|
||||
@item nr @var{integer} (@emph{encoding,video})
|
||||
Set noise reduction.
|
||||
|
||||
@item rc_init_occupancy @var{integer} (@emph{encoding,video})
|
||||
Set number of bits which should be loaded into the rc buffer before
|
||||
decoding starts.
|
||||
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
@item flags2 @var{flags} (@emph{decoding/encoding,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item fast
|
||||
Allow non spec compliant speedup tricks.
|
||||
@item sgop
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item noout
|
||||
Skip bitstream encoding.
|
||||
@item ignorecrop
|
||||
@@ -637,34 +775,17 @@ Place global headers at every keyframe instead of in extradata.
|
||||
Frame data might be split into multiple chunks.
|
||||
@item showall
|
||||
Show all frames before the first keyframe.
|
||||
@item skiprd
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
@item export_mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item skip_manual
|
||||
Do not skip samples and export skip information as frame side data.
|
||||
@item ass_ro_flush_noop
|
||||
Do not reset ASS ReadOrder field on flush.
|
||||
@item icc_profiles
|
||||
Generate/parse embedded ICC profiles from/to colorimetry tags.
|
||||
@end table
|
||||
|
||||
@item export_side_data @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
@item error @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item mvs
|
||||
Export motion vectors into frame side-data (see @code{AV_FRAME_DATA_MOTION_VECTORS})
|
||||
for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
@item prft
|
||||
Export encoder Producer Reference Time into packet side-data (see @code{AV_PKT_DATA_PRFT})
|
||||
for codecs that support it.
|
||||
@item venc_params
|
||||
Export video encoding parameters through frame side data (see @code{AV_FRAME_DATA_VIDEO_ENC_PARAMS})
|
||||
for codecs that support it. At present, those are H.264 and VP9.
|
||||
@item film_grain
|
||||
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
|
||||
Supported at present by AV1 decoders.
|
||||
@end table
|
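As a hedged illustration of the motion-vector export flag described above (the @code{codecview} filter invocation is an assumption used here only for visualisation, not part of this option's definition):
@example
ffplay -flags2 +export_mvs INPUT -vf codecview=mv=pf+bf+bb
@end example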
||||
@item qns @var{integer} (@emph{encoding,video})
|
||||
Deprecated, use mpegvideo private options instead.
|
||||
|
||||
@item threads @var{integer} (@emph{decoding/encoding,video})
|
||||
Set the number of threads to be used, in case the selected codec
|
||||
@@ -678,6 +799,12 @@ automatically select the number of threads to set
|
||||
|
||||
Default value is @samp{auto}.
|
||||
|
||||
@item me_threshold @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation threshold.
|
||||
|
||||
@item mb_threshold @var{integer} (@emph{encoding,video})
|
||||
Set macroblock threshold.
|
||||
|
||||
@item dc @var{integer} (@emph{encoding,video})
|
||||
Set intra_dc_precision.
|
||||
|
||||
@@ -692,29 +819,122 @@ Set number of macroblock rows at the bottom which are skipped.
|
||||
|
||||
@item profile @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set encoder codec profile. Default value is @samp{unknown}. Encoder specific
|
||||
profiles are documented in the relevant encoder documentation.
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@item aac_main
|
||||
|
||||
@item aac_low
|
||||
|
||||
@item aac_ssr
|
||||
|
||||
@item aac_ltp
|
||||
|
||||
@item aac_he
|
||||
|
||||
@item aac_he_v2
|
||||
|
||||
@item aac_ld
|
||||
|
||||
@item aac_eld
|
||||
|
||||
@item mpeg2_aac_low
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item mpeg4_sp
|
||||
|
||||
@item mpeg4_core
|
||||
|
||||
@item mpeg4_main
|
||||
|
||||
@item mpeg4_asp
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
||||
@item dts_96_24
|
||||
|
||||
@item dts_hd_hra
|
||||
|
||||
@item dts_hd_ma
|
||||
|
||||
@end table
|
||||
|
||||
@item level @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set the encoder level. This level depends on the specific codec, and
|
||||
might correspond to the profile level. It is set by default to
|
||||
@samp{unknown}.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@end table
|
||||
|
||||
@item lowres @var{integer} (@emph{decoding,audio,video})
|
||||
Decode at 1= 1/2, 2=1/4, 3=1/8 resolutions.
|
||||
|
||||
@item skip_threshold @var{integer} (@emph{encoding,video})
|
||||
Set frame skip threshold.
|
||||
|
||||
@item skip_factor @var{integer} (@emph{encoding,video})
|
||||
Set frame skip factor.
|
||||
|
||||
@item skip_exp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip exponent.
|
||||
Negative values behave identical to the corresponding positive ones, except
|
||||
that the score is normalized.
|
||||
Positive values exist primarily for compatibility reasons and are not so useful.
|
||||
|
||||
@item skipcmp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip compare function.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item w53
|
||||
5/3 wavelet, only used in snow
|
||||
@item w97
|
||||
9/7 wavelet, only used in snow
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
|
||||
@item border_mask @var{float} (@emph{encoding,video})
|
||||
Increase the quantizer for macroblocks close to borders.
|
||||
|
||||
@item mblmin @var{integer} (@emph{encoding,video})
|
||||
Set min macroblock lagrange factor (VBR).
|
||||
|
||||
@item mblmax @var{integer} (@emph{encoding,video})
|
||||
Set max macroblock lagrange factor (VBR).
|
||||
|
||||
@item mepc @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation bitrate penalty compensation (1.0 = 256).
|
||||
|
||||
@item skip_loop_filter @var{integer} (@emph{decoding,video})
|
||||
@item skip_idct @var{integer} (@emph{decoding,video})
|
||||
@item skip_frame @var{integer} (@emph{decoding,video})
|
||||
@@ -742,9 +962,6 @@ Discard all bidirectional frames.
|
||||
@item nokey
|
||||
Discard all frames excepts keyframes.
|
||||
|
||||
@item nointra
|
||||
Discard all frames except I frames.
|
||||
|
||||
@item all
|
||||
Discard all frames.
|
||||
@end table
|
||||
@@ -754,24 +971,52 @@ Default value is @samp{default}.
@item bidir_refine @var{integer} (@emph{encoding,video})
Refine the two motion vectors used in bidirectional macroblocks.

@item brd_scale @var{integer} (@emph{encoding,video})
Downscale frames for dynamic B-frame decision.

@item keyint_min @var{integer} (@emph{encoding,video})
Set minimum interval between IDR-frames.

@item refs @var{integer} (@emph{encoding,video})
Set reference frames to consider for motion compensation.

@item chromaoffset @var{integer} (@emph{encoding,video})
Set chroma qp offset from luma.

@item trellis @var{integer} (@emph{encoding,audio,video})
Set rate-distortion optimal quantization.

@item sc_factor @var{integer} (@emph{encoding,video})
Set value multiplied by qscale for each frame and added to
scene_change_score.

@item mv0_threshold @var{integer} (@emph{encoding,video})
@item b_sensitivity @var{integer} (@emph{encoding,video})
Adjust sensitivity of b_frame_strategy 1.

@item compression_level @var{integer} (@emph{encoding,audio,video})
@item min_prediction_order @var{integer} (@emph{encoding,audio})
@item max_prediction_order @var{integer} (@emph{encoding,audio})
@item timecode_frame_start @var{integer} (@emph{encoding,video})
Set GOP timecode frame start number, in non drop frame format.

@item request_channels @var{integer} (@emph{decoding,audio})
Set desired number of audio channels.

@item bits_per_raw_sample @var{integer}
@item channel_layout @var{integer} (@emph{decoding/encoding,audio})
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
for the required syntax.

Possible values:
@table @samp
@end table
@item request_channel_layout @var{integer} (@emph{decoding,audio})

Possible values:
@table @samp
@end table
@item rc_max_vbv_use @var{float} (@emph{encoding,video})
@item rc_min_vbv_use @var{float} (@emph{encoding,video})
@item ticks_per_frame @var{integer} (@emph{decoding/encoding,audio,video})

@item color_primaries @var{integer} (@emph{decoding/encoding,video})
Possible values:
@@ -871,12 +1116,6 @@ BT.2020 NCL
BT.2020 CL
@item smpte2085
SMPTE 2085
@item chroma-derived-nc
Chroma-derived NCL
@item chroma-derived-c
Chroma-derived CL
@item ictcp
ICtCp
@end table

@item color_range @var{integer} (@emph{decoding/encoding,video})
@@ -886,11 +1125,9 @@ Possible values:
@table @samp
@item tv
@item mpeg
@item limited
MPEG (219*2^(n-8))
@item pc
@item jpeg
@item full
JPEG (2^n-1)
@end table

@@ -999,7 +1236,7 @@ instead of alpha. Default is 0.
@item dump_separator @var{string} (@emph{input})
Separator used to separate the fields printed on the command line about the
Stream parameters.
For example, to separate the fields with newlines and indentation:
For example to separate the fields with newlines and indention:
@example
ffprobe -dump_separator "
" -i ~/videos/matrixbench_mpeg2.mpg

@@ -1,177 +0,0 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle Community
@titlepage
@center @titlefont{Community}
@end titlepage

@top

@contents

@anchor{Organisation}
@chapter Organisation

The FFmpeg project is organized through a community working on global consensus.

Decisions are taken by the ensemble of active members, through voting, and are aided by two committees.

@anchor{General Assembly}
@chapter General Assembly

The ensemble of active members is called the General Assembly (GA).

The General Assembly is sovereign and legitimate for all its decisions regarding the FFmpeg project.

The General Assembly is made up of active contributors.

Contributors are considered "active contributors" if they have authored more than 20 patches in the last 36 months in the main FFmpeg repository, or if they have been voted in by the GA.

The list of active contributors is updated twice each year, on 1st January and 1st July, 0:00 UTC.

Additional members are added to the General Assembly through a vote after proposal by a member of the General Assembly. They are part of the GA for two years, after which they need a confirmation by the GA.

A script to generate the current members of the general assembly (minus members voted in) can be found in `tools/general_assembly.pl`.

@anchor{Voting}
@chapter Voting

Voting is done using a ranked voting system, currently running on https://vote.ffmpeg.org/ .

Majority vote means more than 50% of the expressed ballots.

@anchor{Technical Committee}
@chapter Technical Committee

The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project. They will consider the merits of all the positions, judge them and make a decision.

The TC resolves technical conflicts but is not a technical steering committee.

Decisions by the TC are binding for all the contributors.

Decisions made by the TC can be re-opened after 1 year or by a majority vote of the General Assembly, requested by one of the members of the GA.

The TC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the TC.

The members of the TC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.

The conflict resolution process is detailed in the resolution process document.

The TC can be contacted at <tc@@ffmpeg>.

@anchor{Resolution Process}
@section Resolution Process

The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project.

The TC's main role is to resolve technical conflicts. It is therefore not a technical steering committee, but it is understood that some decisions might impact the future of the project.

@subsection Seizing

The TC can take possession of any technical matter that it sees fit.

To involve the TC in a matter, email tc@ or CC them on an ongoing discussion.

As members of the TC are developers, they can also email tc@ to raise an issue.
@subsection Announcement

The TC, once seized, must announce itself on the main mailing list, with a [TC] tag.

The TC has 2 modes of operation: an RFC one and an internal one.

If the TC thinks it needs the input from the larger community, the TC can call for an RFC. Otherwise, it can decide by itself.

If the disagreement involves a member of the TC, that member should recuse themselves from the decision.

The decision to use an RFC process or an internal discussion is a discretionary decision of the TC.

The TC can also reject a seizure for a few reasons such as: the matter was not discussed enough previously; it lacks expertise to reach a beneficial decision on the matter; or the matter is too trivial.
@subsection RFC call

In the RFC mode, one person from the TC posts on the mailing list the technical question and will request input from the community.

The mail will have the following specification:

a precise title
a specific tag [TC RFC]
a top-level email
contain a precise question that does not exceed 100 words and that is answerable by developers
may have an extra description, or a link to a previous discussion, if deemed necessary,
contain a precise end date for the answers.

The answers from the community must be on the main mailing list and must have the following specification:

keep the tag and the title unchanged
limited to 400 words
a first-level reply, answering directly to the main email
answering the question.

Further replies to answers are permitted, as long as they conform to the community standards of politeness, they are limited to 100 words, and are not nested more than once. (max-depth=2)

After the end date, mails on the thread will be ignored.

Violations of those rules will be escalated through the Community Committee.

After all the emails are in, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay, which will be announced on the mailing list.
@subsection Within TC

In the internal case, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay.
@subsection Decisions

The decisions from the TC will be sent on the mailing list, with the [TC] tag.

Internally, the TC should take decisions with a majority, or using ranked-choice voting.

The decision from the TC should be published with a summary of the reasons that led to this decision.

The decisions from the TC are final, until the matters are reopened after no less than one year.

@anchor{Community Committee}
@chapter Community Committee

The Community Committee (CC) is here to arbitrate and make decisions when inter-personal conflicts occur in the project. It will decide quickly and take actions, for the sake of the project.

The CC can remove privileges of offending members, including removal of commit access and temporary ban from the community.

Decisions made by the CC can be re-opened after 1 year or by a majority vote of the General Assembly. Indefinite bans from the community must be confirmed by the General Assembly, in a majority vote.

The CC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the CC.

The members of the CC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.

The CC is governed by and responsible for enforcing the Code of Conduct.

The CC can be contacted at <cc@@ffmpeg>.

@anchor{Code of Conduct}
@chapter Code of Conduct

Be friendly and respectful towards others and third parties.
Treat others the way you yourself want to be treated.

Be considerate. Not everyone shares the same viewpoint and priorities as you do.
Different opinions and interpretations help the project.
Looking at issues from a different perspective assists development.

Do not assume malice for things that can be attributed to incompetence. Even if
it is malice, it's rarely good to start with that as the initial assumption.

Stay friendly even if someone acts contrarily. Everyone has a bad day
once in a while.
If you yourself have a bad day or are angry then try to take a break and reply
once you are calm and without anger if you have to.

Try to help other team members and cooperate if you can.

The goal of software development is to create technical excellence, not for any
individual to be better and "win" against the others. Large software projects
are only possible and successful through teamwork.

If someone struggles, do not put them down. Give them a helping hand
instead and point them in the right direction.

Finally, keep in mind the immortal words of Bill and Ted,
"Be excellent to each other."

@bye
@@ -25,19 +25,6 @@ enabled decoders.
A description of some of the currently available video decoders
follows.

@section av1

AOMedia Video 1 (AV1) decoder.

@subsection Options

@table @option

@item operating_point
Select an operating point of a scalable AV1 bitstream (0 - 31). Default is 0.

@end table

@section rawvideo

Raw video decoder.
@@ -60,157 +47,6 @@ top-field-first is assumed

@end table

@section libdav1d

dav1d AV1 decoder.

libdav1d allows libavcodec to decode the AOMedia Video 1 (AV1) codec.
Requires the presence of the libdav1d headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libdav1d}.

@subsection Options

The following options are supported by the libdav1d wrapper.

@table @option

@item framethreads
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
option @code{max_frame_delay} and the global option @code{threads} instead.

@item tilethreads
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
global option @code{threads} instead.

@item max_frame_delay
Set max amount of frames the decoder may buffer internally. The default value is 0
(autodetect).

@item filmgrain
Apply film grain to the decoded video if present in the bitstream. Defaults to the
internal default of the library.
This option is deprecated and will be removed in the future. See the global option
@code{export_side_data} to export Film Grain parameters instead of applying it.

@item oppoint
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
internal default of the library.

@item alllayers
Output all spatial layers of a scalable AV1 bitstream. The default value is false.

@end table
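
A hedged usage sketch (the file name is a placeholder); the wrapper is selected
explicitly and its private options are placed before the input they apply to:

@example
ffmpeg -c:v libdav1d -oppoint 1 -max_frame_delay 1 -i input.ivf -f null -
@end example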

@section libdavs2

AVS2-P2/IEEE1857.4 video decoder wrapper.

This decoder allows libavcodec to decode AVS2 streams with the davs2 library.

@c man end VIDEO DECODERS

@section libuavs3d

AVS3-P2/IEEE1857.10 video decoder.

libuavs3d allows libavcodec to decode AVS3 streams.
Requires the presence of the libuavs3d headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libuavs3d}.

@subsection Options

The following option is supported by the libuavs3d wrapper.

@table @option

@item frame_threads
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).

@end table
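
A minimal sketch (the file name is a placeholder; the build must include the
wrapper):

@example
ffmpeg -c:v libuavs3d -frame_threads 4 -i input.avs3 output.mp4
@end example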

@section libxevd

eXtra-fast Essential Video Decoder (XEVD) MPEG-5 EVC decoder wrapper.

This decoder requires the presence of the libxevd headers and library
during configuration. You need to explicitly configure the build with
@option{--enable-libxevd}.

The xevd project website is at @url{https://github.com/mpeg5/xevd}.

@subsection Options

The following options are supported by the libxevd wrapper.
The xevd-equivalent options or values are listed in parentheses for easy migration.

To get a more accurate and extensive documentation of the libxevd options,
invoke the command @code{xevd_app --help} or consult the libxevd documentation.

@table @option
@item threads (@emph{threads})
Force to use a specific number of threads

@end table
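
An illustrative sketch (the raw EVC file name is a placeholder):

@example
ffmpeg -c:v libxevd -threads 4 -i input.evc output.mp4
@end example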

@section QSV Decoders

The family of Intel QuickSync Video decoders (VC1, MPEG-2, H.264, HEVC,
JPEG/MJPEG, VP8, VP9, AV1).

@subsection Common Options

The following options are supported by all qsv decoders.

@table @option

@item @var{async_depth}
Internal parallelization depth; the higher the value, the higher the latency.

@item @var{gpu_copy}
A GPU-accelerated copy between video and system memory
@table @samp
@item default
@item on
@item off
@end table

@end table

@subsection HEVC Options
Extra options for hevc_qsv.

@table @option

@item @var{load_plugin}
A user plugin to load in an internal session
@table @samp
@item none
@item hevc_sw
@item hevc_hw
@end table

@item @var{load_plugins}
A :-separated list of hexadecimal plugin UIDs to load in an internal session

@end table
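
A hedged decoding sketch with the common options (hardware setup specifics vary
by system; file names are placeholders):

@example
ffmpeg -hwaccel qsv -c:v hevc_qsv -async_depth 4 -gpu_copy on -i input.mp4 -f null -
@end example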

@section v210

Uncompressed 4:2:2 10-bit decoder.

@subsection Options

@table @option

@item custom_stride
Set the line size of the v210 data in bytes. The default value is 0
(autodetect). You can use the special -1 value for a strideless v210 as seen in
BOXX files.

@end table
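
A hedged sketch (the file name is a placeholder and assumes a MOV file that
already carries v210 data); the decoder option is placed before the input:

@example
ffmpeg -c:v v210 -custom_stride -1 -i input.mov -c:v ffv1 output.mkv
@end example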

@c man end VIDEO DECODERS

@chapter Audio Decoders
@@ -232,7 +68,7 @@ the undocumented RealAudio 3 (a.k.a. dnet).

@item -drc_scale @var{value}
Dynamic Range Scale Factor. The factor to apply to dynamic range values
from the AC-3 stream. This factor is applied exponentially. The default value is 1.
from the AC-3 stream. This factor is applied exponentially.
There are 3 notable scale factor ranges:
@table @option
@item drc_scale == 0
@@ -352,194 +188,6 @@ without this library.
@chapter Subtitles Decoders
@c man begin SUBTILES DECODERS

@section libaribb24

ARIB STD-B24 caption decoder.

Implements profiles A and C of the ARIB STD-B24 standard.

@subsection libaribb24 Decoder Options

@table @option

@item -aribb24-base-path @var{path}
Sets the base path for the libaribb24 library. This is utilized for reading of
configuration files (for custom unicode conversions), and for dumping of
non-text symbols as images under that location.

Unset by default.

@item -aribb24-skip-ruby-text @var{boolean}
Tells the decoder wrapper to skip text blocks that contain half-height ruby
text.

Enabled by default.

@end table

@section libaribcaption

Yet another ARIB STD-B24 caption decoder using the external @dfn{libaribcaption}
library.

Implements profiles A and C of the Japanese ARIB STD-B24 standard,
Brazilian ABNT NBR 15606-1, and the Philippines version of ISDB-T.

Requires the presence of the libaribcaption headers and library
(@url{https://github.com/xqq/libaribcaption}) during configuration.
You need to explicitly configure the build with @code{--enable-libaribcaption}.
If both @dfn{libaribb24} and @dfn{libaribcaption} are enabled, the @dfn{libaribcaption}
decoder takes precedence.

@subsection libaribcaption Decoder Options

@table @option

@item -sub_type @var{subtitle_type}
Specifies the format of the decoded subtitles.

@table @samp
@item bitmap
Graphical image.
@item ass
ASS formatted text.
@item text
Simple text based output without formatting.
@end table

The default is @dfn{ass}, the same as the @dfn{libaribb24} decoder.
Some players (e.g., @dfn{mpv}) expect ASS format for ARIB captions.

@item -caption_encoding @var{encoding_scheme}
Specifies the encoding scheme of input subtitle text.

@table @samp
@item auto
Automatically detect text encoding (default).
@item jis
8bit-char JIS encoding defined in ARIB STD B24.
This encoding is used in Japan for ISDB captions.
@item utf8
UTF-8 encoding defined in ARIB STD B24.
This encoding is used in the Philippines for ISDB-T captions.
@item latin
Latin character encoding defined in ABNT NBR 15606-1.
This encoding is used in South America for SBTVD / ISDB-Tb captions.
@end table

@item -font @var{font_name[,font_name2,...]}
Specify comma-separated list of font family names to be used for @dfn{bitmap}
or @dfn{ass} type subtitle rendering.
Only the first font name is used for @dfn{ass} type subtitles.

If not specified, the internally defined default font family is used.

@item -ass_single_rect @var{boolean}
ARIB STD-B24 specifies that some captions may be displayed at different
positions at a time (multi-rectangle subtitle).
Since some players (e.g., old @dfn{mpv}) can't handle multiple ASS rectangles
in a single AVSubtitle, or multiple ASS rectangles of indeterminate duration
with the same start timestamp, this option can change the behavior so that
all the text is displayed in a single ASS rectangle.

The default is @var{false}.

If your player cannot handle AVSubtitles with multiple ASS rectangles properly,
set this option to @var{true} or define @env{ASS_SINGLE_RECT=1} to change
the default behavior at compilation time.

@item -force_outline_text @var{boolean}
Specify whether to always render outline text for all characters regardless of
the character style indication.

The default is @var{false}.

@item -outline_width @var{number} (0.0 - 3.0)
Specify width for outline text, in dots (relative).

The default is @var{1.5}.

@item -ignore_background @var{boolean}
Specify whether to ignore background color rendering.

The default is @var{false}.

@item -ignore_ruby @var{boolean}
Specify whether to ignore rendering for ruby-like (furigana) characters.

The default is @var{false}.

@item -replace_drcs @var{boolean}
Specify whether to render replaced DRCS characters as Unicode characters.

The default is @var{true}.

@item -replace_msz_ascii @var{boolean}
Specify whether to replace MSZ (Middle Size; half width) fullwidth
alphanumerics with halfwidth alphanumerics.

The default is @var{true}.

@item -replace_msz_japanese @var{boolean}
Specify whether to replace some MSZ (Middle Size; half width) fullwidth
Japanese special characters with halfwidth ones.

The default is @var{true}.

@item -replace_msz_glyph @var{boolean}
Specify whether to replace MSZ (Middle Size; half width) characters
with halfwidth glyphs if the font supports it.
This option works with the FreeType or DirectWrite renderer
and Adobe-Japan1 compliant fonts,
e.g., IBM Plex Sans JP, Morisawa BIZ UDGothic, Morisawa BIZ UDMincho,
Yu Gothic, Yu Mincho, and Meiryo.

The default is @var{true}.

@item -canvas_size @var{image_size}
Specify the resolution of the canvas to render subtitles to; usually, this
should be the frame size of the input video.
This only applies when @code{-subtitle_type} is set to @var{bitmap}.

The libaribcaption decoder assumes the input frame size for bitmap rendering as below:
@enumerate
@item
PROFILE_A : 1440 x 1080 with SAR (PAR) 4:3
@item
PROFILE_C : 320 x 180 with SAR (PAR) 1:1
@end enumerate

If the actual frame size of the input video does not match the above assumption,
the rendered captions may be distorted.
To make the captions undistorted, add the @code{-canvas_size} option to specify
the actual input video size.

Note that the @code{-canvas_size} option is not required for video with
a different size but the same aspect ratio.
In such cases, the caption will be stretched or shrunk to the actual video size
if the @code{-canvas_size} option is not specified.
If the @code{-canvas_size} option is specified with a different size,
the caption will be stretched or shrunk to the specified size with a calculated SAR.

@end table

@subsection libaribcaption decoder usage examples

Display MPEG-TS file with ARIB subtitle by @code{ffplay} tool:
@example
ffplay -sub_type bitmap MPEG.TS
@end example

Display MPEG-TS file with input frame size 1920x1080 by @code{ffplay} tool:
@example
ffplay -sub_type bitmap -canvas_size 1920x1080 MPEG.TS
@end example

Embed ARIB subtitle in transcoded video:
@example
ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h264 dest.mp4
@end example

@section dvbsub

@subsection Options
@@ -547,8 +195,6 @@ ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h
@table @option
@item compute_clut
@table @option
@item -2
Compute clut once if no matching CLUT is in the stream.
@item -1
Compute clut if no matching CLUT is in the stream.
@item 0
@@ -577,7 +223,7 @@ palette is stored in the IFO file, and therefore not available when reading
from dumped VOB files.

The format for this option is a string containing 16 24-bit hexadecimal
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
numbers (without 0x prefix) separated by comas, for example @code{0d00ee,
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
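
As an illustrative sketch using the example values above (the VOB file name is a
placeholder), the palette can be passed to the dvdsub decoder before the input
and the subtitles burned into the video:

@example
ffmpeg -palette 0d00ee,ee450d,101010,eaeaea,0ce60b,ec14ed,ebff0b,0d617a,7b7b7b,d1d1d1,7b2a0e,0d950c,0f007b,cf0dec,cfa80c,7c127b -i dump.vob -filter_complex "[0:v][0:s]overlay" -c:v libx264 output.mp4
@end example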

@@ -602,30 +248,18 @@ configuration. You need to explicitly configure the build with

@table @option
@item txt_page
List of teletext page numbers to decode. Pages that do not match the specified
list are dropped. You may use the special @code{*} string to match all pages,
or @code{subtitle} to match all subtitle pages.
List of teletext page numbers to decode. You may use the special * string to
match all pages. Pages that do not match the specified list are dropped.
Default value is *.
@item txt_default_region
Set default character set used for decoding, a value between 0 and 87 (see
ETS 300 706, Section 15, Table 32). Default value is -1, which does not
override the libzvbi default. This option is needed for some legacy level 1.0
transmissions which cannot signal the proper charset.
@item txt_chop_top
Discards the top teletext line. Default value is 1.
@item txt_format
Specifies the format of the decoded subtitles.
@table @option
@item bitmap
The default format, you should use this for teletext pages, because certain
graphics and colors cannot be expressed in simple text or even ASS.
@item text
Simple text based output without formatting.
@item ass
Formatted ASS output, subtitle pages and teletext pages are returned in
different styles, subtitle pages are stripped down to text, but an effort is
made to keep the text alignment and the formatting.
@end table
Specifies the format of the decoded subtitles. The teletext decoder is capable
of decoding the teletext pages to bitmaps or to simple text, you should use
"bitmap" for teletext pages, because certain graphics and colors cannot be
expressed in simple text. You might use "text" for teletext based subtitles if
your application can handle simple text based subtitles. Default value is
bitmap.
@item txt_left
X offset of generated bitmaps, default is 0.
@item txt_top
@@ -638,8 +272,7 @@ present between the subtitle lines because of double-sized teletext characters.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in
milliseconds. Default value is -1 which means infinity or until the next
subtitle event comes.
milliseconds. Default value is 30000 which is 30 seconds.
@item txt_transparent
Force transparent background of the generated teletext bitmaps. Default value
is 0 which means an opaque background.
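
As an illustrative sketch (the input file and the page number are placeholders),
the decoder options are passed before the input; with @code{txt_format} set to
text the result can be written to an SRT file:

@example
ffmpeg -txt_format text -txt_page 888 -i input.ts -map 0:s:0 output.srt
@end example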

@@ -25,12 +25,16 @@ Audible Format 2, 3, and 4 demuxer.

This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.

@section aac
@section applehttp

Raw Audio Data Transport Stream AAC demuxer.
Apple HTTP Live Streaming demuxer.

This demuxer is used to demux an ADTS input containing a single AAC stream
along with any ID3v1/2 or APE tags in it.
This demuxer presents all AVStreams from all variant streams.
The id field is set to the bitrate variant index number. By setting
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".

@section apng

@@ -44,15 +48,12 @@ between the last fcTL and IEND chunks.

@table @option
@item -ignore_loop @var{bool}
Ignore the loop variable in the file if set. Default is enabled.

Ignore the loop variable in the file if set.
@item -max_fps @var{int}
Maximum framerate in frames per second. Default of 0 imposes no limit.

Maximum framerate in frames per second (0 for no limit).
@item -default_fps @var{int}
Default framerate in frames per second when none is specified in the file
(0 meaning as fast as possible). Default is 15.

(0 meaning as fast as possible).
@end table
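
A minimal sketch using these options (file names are placeholders):

@example
ffmpeg -ignore_loop 0 -max_fps 30 -i input.apng output.gif
@end example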

@section asf
@@ -103,7 +104,8 @@ backslash or single quotes.
All subsequent file-related directives apply to that file.

@item @code{ffconcat version 1.0}
Identify the script type and version.
Identify the script type and version. It also sets the @option{safe} option
to 1 if it was -1.

To make FFmpeg recognize the format automatically, this directive must
appear exactly as is (no extra space or byte-order-mark) on the very first
@@ -157,16 +159,6 @@ directive) will be reduced based on their specified Out point.
Metadata of the packets of the file. The specified metadata will be set for
each file packet. You can specify this directive multiple times to add multiple
metadata entries.
This directive is deprecated, use @code{file_packet_meta} instead.

@item @code{file_packet_meta @var{key} @var{value}}
Metadata of the packets of the file. The specified metadata will be set for
each file packet. You can specify this directive multiple times to add multiple
metadata entries.

@item @code{option @var{key} @var{value}}
Option to access, open and probe the file.
Can be present multiple times.

@item @code{stream}
Introduce a stream in the virtual file.
@@ -184,20 +176,6 @@ subfiles will be used.
This is especially useful for MPEG-PS (VOB) files, where the order of the
streams is not reliable.

@item @code{stream_meta @var{key} @var{value}}
Metadata for the stream.
Can be present multiple times.

@item @code{stream_codec @var{value}}
Codec for the stream.

@item @code{stream_extradata @var{hex_string}}
Extradata for the stream, encoded in hexadecimal.

@item @code{chapter @var{id} @var{start} @var{end}}
Add a chapter. @var{id} is a unique identifier, possibly small and
consecutive.

@end table
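
To illustrate how these directives fit together, here is a small sketch of an
ffconcat script (the file names are placeholders, not taken from this document):

@example
# list.ffconcat
ffconcat version 1.0
file part1.mp4
file part2.mp4
@end example

It can then be concatenated losslessly with
@code{ffmpeg -f concat -i list.ffconcat -c copy joined.mp4}.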

@subsection Options
@@ -207,8 +185,7 @@ This demuxer accepts the following option:
@table @option

@item safe
If set to 1, reject unsafe file paths and directives.
A file path is considered safe if it
If set to 1, reject unsafe file paths. A file path is considered safe if it
does not contain a protocol specification and is relative and all components
only contain characters from the portable character set (letters, digits,
period, underscore and hyphen) and have no period at the beginning of a
@@ -218,6 +195,9 @@ If set to 0, any file name is accepted.

The default is 1.

-1 is equivalent to 1 if the format was automatically
probed and 0 otherwise.

@item auto_convert
If set to 1, try to perform automatic conversions on packet data to make the
streams concatenable.
@@ -274,217 +254,11 @@ which streams to actually receive.
Each stream mirrors the @code{id} and @code{bandwidth} properties from the
@code{<Representation>} as metadata keys named "id" and "variant_bitrate" respectively.

@subsection Options

This demuxer accepts the following option:

@table @option

@item cenc_decryption_key
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).

@end table
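
A hedged sketch of reading a CENC-protected manifest (the URL and the
32-hex-digit key are placeholders, not real values):

@example
ffmpeg -cenc_decryption_key 00112233445566778899aabbccddeeff -i https://example.com/manifest.mpd -c copy output.mp4
@end example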

@section dvdvideo

DVD-Video demuxer, powered by libdvdnav and libdvdread.

Can directly ingest DVD titles, specifically sequential PGCs, into
a conversion pipeline. Menu assets, such as background video or audio,
can also be demuxed given the menu's coordinates (at best effort).
Seeking is not supported at this time.

Block devices (DVD drives), ISO files, and directory structures are accepted.
Activate with @code{-f dvdvideo} in front of one of these inputs.

This demuxer does NOT have decryption code of any kind. You are on your own
working with encrypted DVDs, and should not expect support on the matter.

Underlying playback is handled by libdvdnav, and structure parsing by libdvdread.
FFmpeg must be built with GPL library support available as well as the
configure switches @code{--enable-libdvdnav} and @code{--enable-libdvdread}.

You will need to provide either the desired "title number" or exact PGC/PG coordinates.
Many open-source DVD players and tools can aid in providing this information.
If not specified, the demuxer will default to title 1 which works for many discs.
However, due to the flexibility of the format, it is recommended to check manually.
There are many discs that are authored strangely or with invalid headers.

If the input is a real DVD drive, please note that there are some drives which may
silently fail on reading bad sectors from the disc, returning random bits instead
which is effectively corrupt data. This is especially prominent on aging or rotting discs.
A second pass and integrity checks would be needed to detect the corruption.
This is not an FFmpeg issue.

@subsection Background

DVD-Video is not a directly accessible, linear container format in the
traditional sense. Instead, it allows for complex and programmatic playback of
carefully muxed MPEG-PS streams that are stored in headerless VOB files.
To the end-user, these streams are known simply as "titles", but the actual
logical playback sequence is defined by one or more "PGCs", or Program Group Chains,
within the title. The PGC is in turn comprised of multiple "PGs", or "Programs",
which are the actual video segments (and for a typical video feature, sequentially
ordered). The PGC structure, along with stream layout and metadata, are stored in
IFO files that need to be parsed. PGCs can be thought of as playlists in easier terms.

An actual DVD player relies on user GUI interaction via menus and an internal VM
to drive the direction of demuxing. Generally, the user would either navigate (via menus)
or automatically be redirected to the PGC of their choice. During this process and
the subsequent playback, the DVD player's internal VM also maintains a state and
executes instructions that can create jumps to different sectors during playback.
This is why libdvdnav is involved, as a linear read of the MPEG-PS blobs on the
disc (VOBs) is not enough to produce the right sequence in many cases.

There are many other DVD structures (a long subject) that will not be discussed here.
NAV packets, in particular, are handled by this demuxer to build accurate timing
but not emitted as a stream. For a good high-level understanding, refer to:
@url{https://code.videolan.org/videolan/libdvdnav/-/blob/master/doc/dvd_structures}

@subsection Options

This demuxer accepts the following options:

@table @option

@item title @var{int}
The title number to play. Must be set if @option{pgc} and @option{pg} are not set.
Not applicable to menus.
Default is 0 (auto), which currently only selects the first available title (title 1)
and notifies the user about the implications.

@item chapter_start @var{int}
The chapter, or PTT (part-of-title), number to start at. Not applicable to menus.
Default is 1.

@item chapter_end @var{int}
The chapter, or PTT (part-of-title), number to end at. Not applicable to menus.
Default is 0, which is a special value to signal end at the last possible chapter.

@item angle @var{int}
The video angle number, referring to what is essentially an additional
video stream that is composed from alternate frames interleaved in the VOBs.
Not applicable to menus.
Default is 1.

@item region @var{int}
The region code to use for playback. Some discs may use this to default playback
at a particular angle in different regions. This option will not affect the region code
of a real DVD drive, if used as an input. Not applicable to menus.
Default is 0, "world".

@item menu @var{bool}
Demux menu assets instead of navigating a title. Requires exact coordinates
of the menu (@option{menu_lu}, @option{menu_vts}, @option{pgc}, @option{pg}).
Default is false.

@item menu_lu @var{int}
The menu language to demux. In DVD, menus are grouped by language.
Default is 0, the first language unit.

@item menu_vts @var{int}
The VTS where the menu lives, or 0 if it is a VMG menu (root-level).
Default is 0, VMG menu.

@item pgc @var{int}
The entry PGC to start playback, in conjunction with @option{pg}.
Alternative to setting @option{title}.
Chapter markers are not supported at this time.
Must be explicitly set for menus.
Default is 0, automatically resolve from value of @option{title}.

@item pg @var{int}
The entry PG to start playback, in conjunction with @option{pgc}.
Alternative to setting @option{title}.
Chapter markers are not supported at this time.
Default is 0, automatically resolve from value of @option{title}, or
start from the beginning (PG 1) of the menu.

@item preindex @var{bool}
Enable this to have accurate chapter (PTT) markers and duration measurement,
which requires a slow second pass read in order to index the chapter marker
timestamps from NAV packets. This is non-ideal extra work for real optical drives.
It is recommended and faster to use this option with a backup of the DVD structure
stored on a hard drive. Not compatible with @option{pgc} and @option{pg}.
Not applicable to menus.
Default is 0, false.

@item trim @var{bool}
Skip padding cells (i.e. cells shorter than 1 second) from the beginning.
There exist many discs with filler segments at the beginning of the PGC,
often with junk data intended for controlling a real DVD player's
buffering speed and with no other material data value.
Not applicable to menus.
Default is 1, true.

@end table

@subsection Examples

@itemize
@item
Open title 3 from a given DVD structure:
@example
ffmpeg -f dvdvideo -title 3 -i <path to DVD> ...
@end example

@item
Open chapters 3-6 from title 1 from a given DVD structure:
@example
ffmpeg -f dvdvideo -chapter_start 3 -chapter_end 6 -title 1 -i <path to DVD> ...
@end example

@item
Open only chapter 5 from title 1 from a given DVD structure:
@example
ffmpeg -f dvdvideo -chapter_start 5 -chapter_end 5 -title 1 -i <path to DVD> ...
@end example

@item
Demux menu with language 1 from VTS 1, PGC 1, starting at PG 1:
@example
ffmpeg -f dvdvideo -menu 1 -menu_lu 1 -menu_vts 1 -pgc 1 -pg 1 -i <path to DVD> ...
@end example
@end itemize

@section ea

Electronic Arts Multimedia format demuxer.

This format is used by various Electronic Arts games.

@subsection Options

@table @option

@item merge_alpha @var{bool}

Normally the VP6 alpha channel (if it exists) is returned as a secondary video
stream; by setting this option you can make the demuxer return a single video
stream which contains the alpha channel in addition to the ordinary video.

@end table

@section imf

Interoperable Master Format demuxer.

This demuxer presents audio and video streams found in an IMF Composition, as
specified in @url{https://doi.org/10.5594/SMPTE.ST2067-2.2020, SMPTE ST 2067-2}.

@example
ffmpeg [-assetmaps <path of ASSETMAP1>,<path of ASSETMAP2>,...] -i <path of CPL> ...
@end example

If @code{-assetmaps} is not specified, the demuxer looks for a file called
@file{ASSETMAP.xml} in the same directory as the CPL.

@section flv, live_flv, kux
@section flv, live_flv

Adobe Flash Video Format demuxer.

This demuxer is used to demux FLV files and RTMP network streams. In case of live network streams, if you force the format, you may use the live_flv option instead of flv to survive timestamp discontinuities.
KUX is an FLV variant used on the Youku platform.

@example
ffmpeg -f flv -i myfile.flv ...
@@ -495,12 +269,6 @@ ffmpeg -f live_flv -i rtmp://<any.server>/anything/key ....
@table @option
@item -flv_metadata @var{bool}
Allocate the streams according to the onMetaData array content.

@item -flv_ignore_prevtag @var{bool}
Ignore the size of previous tag value.

@item -flv_full_metadata @var{bool}
Output all context of the onMetaData.
@end table

@section gif
@@ -546,24 +314,12 @@ infinitely.

HLS demuxer

Apple HTTP Live Streaming demuxer.

This demuxer presents all AVStreams from all variant streams.
The id field is set to the bitrate variant index number. By setting
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".

It accepts the following options:

@table @option
@item live_start_index
segment index to start live streams at (negative values are from the end).

@item prefer_x_start
prefer to use #EXT-X-START if it's in playlist instead of live_start_index.

@item allowed_extensions
',' separated list of file extensions that hls is allowed to access.

@@ -571,10 +327,6 @@ prefer to use #EXT-X-START if it's in playlist instead of live_start_index.
Maximum number of times an insufficient list is attempted to be reloaded.
Default value is 1000.

@item m3u8_hold_counters
The maximum number of times to load m3u8 when it refreshes without new segments.
Default value is 1000.

@item http_persistent
Use persistent HTTP connections. Applicable only for HTTP streams.
Enabled by default.
@@ -582,17 +334,6 @@ Enabled by default.
@item http_multiple
Use multiple HTTP connections for downloading HTTP segments.
Enabled by default for HTTP/1.1 servers.

@item http_seekable
Use HTTP partial requests for downloading HTTP segments.
0 = disable, 1 = enable, -1 = auto, Default is auto.

@item seg_format_options
Set options for the demuxer of media segments using a list of key=value pairs separated by @code{:}.

@item seg_max_retry
Maximum number of times to reload a segment on error, useful when segment skip on network error is not desired.
Default value is 0.
@end table
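
A hedged sketch (the playlist URL is a placeholder) that starts a live stream
three segments from the end and copies it to a local file:

@example
ffmpeg -live_start_index -3 -i https://example.com/stream/index.m3u8 -c copy output.ts
@end example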

@section image2
@@ -703,17 +444,6 @@ nanosecond precision.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.
@item export_path_metadata
If set to 1, will add two extra fields to the metadata found in input, making them
also available for other filters (see @var{drawtext} filter for examples). Default
value is 0. The extra fields are described below:
@table @option
@item lavf.image2dec.source_path
Corresponds to the full path to the input file being read.
@item lavf.image2dec.source_basename
Corresponds to the name of the file being read.
@end table

@end table

@subsection Examples
@@ -745,84 +475,14 @@ ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv

The Game Music Emu library is a collection of video game music file emulators.

See @url{https://bitbucket.org/mpyne/game-music-emu/overview} for more information.
See @url{http://code.google.com/p/game-music-emu/} for more information.

It accepts the following options:
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.

@table @option

@item track_index
Set the index of which track to demux. The demuxer can only export one track.
Track indexes start at 0. Default is to pick the first track. Number of tracks
is exported as @var{tracks} metadata entry.

@item sample_rate
Set the sampling rate of the exported track. Range is 1000 to 999999. Default is 44100.

@item max_size @emph{(bytes)}
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
which, in turn, acts as a ceiling for the size of files that can be read.
Default is 50 MiB.

@end table
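
An illustrative sketch (the module file name and the track number are
placeholders):

@example
ffmpeg -track_index 2 -sample_rate 48000 -i music.nsf output.flac
@end example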

@section libmodplug

ModPlug-based module demuxer

See @url{https://github.com/Konstanty/libmodplug}

It will export one 2-channel 16-bit 44.1 kHz audio stream.
Optionally, a @code{pal8} 16-color video stream can be exported with or without printed metadata.

It accepts the following options:

@table @option
@item noise_reduction
Apply a simple low-pass filter. Can be 1 (on) or 0 (off). Default is 0.

@item reverb_depth
Set amount of reverb. Range 0-100. Default is 0.

@item reverb_delay
Set delay in ms, clamped to 40-250 ms. Default is 0.

@item bass_amount
Apply bass expansion a.k.a. XBass or megabass. Range is 0 (quiet) to 100 (loud). Default is 0.

@item bass_range
Set cutoff i.e. upper-bound for bass frequencies. Range is 10-100 Hz. Default is 0.

@item surround_depth
Apply a Dolby Pro-Logic surround effect. Range is 0 (quiet) to 100 (heavy). Default is 0.

@item surround_delay
Set surround delay in ms, clamped to 5-40 ms. Default is 0.

@item max_size
The demuxer buffers the entire file into memory. Adjust this value to set the maximum buffer size,
which, in turn, acts as a ceiling for the size of files that can be read. Range is 0 to 100 MiB.
0 removes the buffer size limit (not recommended). Default is 5 MiB.

@item video_stream_expr
String which is evaluated using the eval API to assign colors to the generated video stream.
Variables which can be used are @code{x}, @code{y}, @code{w}, @code{h}, @code{t}, @code{speed},
@code{tempo}, @code{order}, @code{pattern} and @code{row}.

@item video_stream
Generate video stream. Can be 1 (on) or 0 (off). Default is 0.

@item video_stream_w
Set video frame width in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.

@item video_stream_h
Set video frame height in 'chars' where one char indicates 8 pixels. Range is 20-512. Default is 30.

@item video_stream_ptxt
Print metadata on video stream. Includes @code{speed}, @code{tempo}, @code{order}, @code{pattern},
@code{row} and @code{ts} (time in ms). Can be 1 (on) or 0 (off). Default is 1.

@end table
For very large files, the @option{max_size} option may have to be adjusted.
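
As a hedged sketch (the module file name is a placeholder), the optional video
stream can be enabled alongside the audio:

@example
ffmpeg -video_stream 1 -noise_reduction 1 -i song.mod -map 0 output.mkv
@end example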

@section libopenmpt

@@ -851,13 +511,9 @@ Set the sample rate for libopenmpt to output.
Range is from 1000 to INT_MAX. The default value is 48000.
@end table

@section mov/mp4/3gp
@section mov/mp4/3gp/QuickTime

Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).

Registered extensions: mov, mp4, m4a, 3gp, 3g2, mj2, psp, m4b, ism, ismv, isma, f4v

@subsection Options
QuickTime / MP4 demuxer.

This demuxer accepts the following options:
@table @option
@@ -868,95 +524,10 @@ Enabling this can theoretically leak information in some use cases.
@item use_absolute_path
Allows loading of external tracks via absolute paths, disabled by default.
Enabling this poses a security risk. It should only be enabled if the source
is known to be non-malicious.

@item seek_streams_individually
When seeking, identify the closest point in each stream individually and demux packets in
that stream from the identified point. This can lead to a different sequence of packets compared
to demuxing linearly from the beginning. Default is true.

@item ignore_editlist
Ignore any edit list atoms. The demuxer, by default, modifies the stream index to reflect the
timeline described by the edit list. Default is false.

@item advanced_editlist
Modify the stream index to reflect the timeline described by the edit list. @code{ignore_editlist}
must be set to false for this option to be effective.
If both @code{ignore_editlist} and this option are set to false, then only the
start of the stream index is modified to reflect initial dwell time or starting timestamp
described by the edit list. Default is true.

@item ignore_chapters
Don't parse chapters. This includes GoPro 'HiLight' tags/moments. Note that chapters are
only parsed when the input is seekable. Default is false.

@item use_mfra_for
For seekable fragmented input, set fragment's starting timestamp from media fragment random access box, if present.

The following options are available:
@table @samp
@item auto
Auto-detect whether to set mfra timestamps as PTS or DTS @emph{(default)}

@item dts
Set mfra timestamps as DTS

@item pts
Set mfra timestamps as PTS

@item 0
Don't use mfra box to set timestamps
@end table

@item use_tfdt
For fragmented input, set fragment's starting timestamp to @code{baseMediaDecodeTime} from the @code{tfdt} box.
Default is enabled, which will prefer to use the @code{tfdt} box to set DTS. Disable to use the @code{earliest_presentation_time} from the @code{sidx} box.
In either case, the timestamp from the @code{mfra} box will be used if it's available and @code{use_mfra_for} is
set to pts or dts.

@item export_all
Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
characters of the box type are set as the key. Default is false.

@item export_xmp
Export entire contents of @var{XMP_} box and @var{uuid} box as a string with key @code{xmp}. Note that
if @code{export_all} is set and this option isn't, the contents of @var{XMP_} box are still exported
but with key @code{XMP_}. Default is false.

@item activation_bytes
4-byte key required to decrypt Audible AAX and AAX+ files. See Audible AAX subsection below.

@item audible_fixed_key
Fixed key used for handling Audible AAX/AAX+ files. It has been pre-set so should not be necessary to
specify.

@item decryption_key
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).

@item max_stts_delta
Very high sample deltas written in a trak's stts box may occasionally be intended but usually they are written in
error or used to store a negative value for dts correction when treated as signed 32-bit integers. This option lets
the user set an upper limit, beyond which the delta is clamped to 1. Values greater than the limit if negative when
cast to int32 are used to adjust onward dts.

Unit is the track time scale. Range is 0 to UINT_MAX. Default is @code{UINT_MAX - 48000*10} which allows up to
a 10 second dts correction for 48 kHz audio streams while accommodating 99.9% of @code{uint32} range.

@item interleaved_read
Interleave packets from multiple tracks at demuxer level. For badly interleaved files, this prevents playback issues
caused by large gaps between packets in different tracks, as MOV/MP4 do not have packet placement requirements.
However, this can cause excessive seeking on very badly interleaved files, due to seeking between tracks, so disabling
it may prevent I/O issues, at the expense of playback.
is known to be non malicious.

@end table

@subsection Audible AAX

Audible AAX files are encrypted M4B files, and they can be decrypted by specifying a 4 byte activation secret.
@example
ffmpeg -activation_bytes 1CEB00DA -i test.aax -vn -c:a copy output.mp4
@end example

@section mpegts

MPEG-2 transport stream demuxer.
@@ -967,9 +538,6 @@ This demuxer accepts the following options:
Set size limit for looking up a new synchronization. Default value is
65536.

@item skip_unknown_pmt
Skip PMTs for programs not defined in the PAT. Default value is 0.

@item fix_teletext_pts
Override teletext packet PTS and DTS values with the timestamps calculated
from the PCR of the first program which the teletext stream is part of and is
@@ -984,14 +552,6 @@ Show the detected raw packet size, cannot be set by the user.
Scan and combine all PMTs. The value is an integer with value from -1
to 1 (-1 means automatic setting, 1 means enabled, 0 means
disabled). Default value is -1.

@item merge_pmt_versions
Re-use existing streams when a PMT's version is updated and elementary
streams move to different PIDs. Default value is 0.

@item max_packet_size
Set maximum size, in bytes, of packet emitted by the demuxer. Payloads above this size
are split across multiple packets. Range is 1 to INT_MAX/2. Default is 204800 bytes.
@end table
|
||||
|
||||
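
As with other demuxer private options, these are given before the input; for
instance, a sketch combining two of the options above:
@example
ffmpeg -merge_pmt_versions 1 -skip_unknown_pmt 1 -i input.ts -c copy output.mkv
@end example
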
@section mpjpeg
@@ -1089,43 +649,4 @@ Example: convert the captions to a format most players understand:
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
@end example

@section vapoursynth

Vapoursynth wrapper.

Due to security concerns, Vapoursynth scripts will not
be autodetected so the input format has to be forced. For ff* CLI tools,
add @code{-f vapoursynth} before the input @code{-i yourscript.vpy}.
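
For instance (a minimal sketch; as noted above, the format must be forced):
@example
ffmpeg -f vapoursynth -i yourscript.vpy output.mkv
@end example
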
This demuxer accepts the following option:
@table @option
@item max_script_size
The demuxer buffers the entire script into memory. Adjust this value to set the maximum buffer size,
which, in turn, acts as a ceiling for the size of scripts that can be read.
Default is 1 MiB.
@end table

@section w64

Sony Wave64 Audio demuxer.

This demuxer accepts the following options:
@table @option
@item max_size
See the same option for the @ref{wav} demuxer.
@end table

@anchor{wav}
@section wav

RIFF Wave Audio demuxer.

This demuxer accepts the following options:
@table @option
@item max_size
Specify the maximum packet size in bytes for the demuxed packets. By default
this is set to 0, which means that a sensible value is chosen based on the
input format.
@end table

@c man end DEMUXERS

@@ -10,109 +10,41 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
@chapter Notes for external developers
|
||||
|
||||
This text is concerned with the development @emph{of} FFmpeg itself. Information
|
||||
on using the FFmpeg libraries in other programs can be found elsewhere, e.g. in:
|
||||
@itemize @bullet
|
||||
@item
|
||||
the installed header files
|
||||
@item
|
||||
@url{http://ffmpeg.org/doxygen/trunk/index.html, the Doxygen documentation}
|
||||
generated from the headers
|
||||
@item
|
||||
the examples under @file{doc/examples}
|
||||
@end itemize
|
||||
This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in @file{doc/examples} and in the source code to
|
||||
see how the public API is employed.
|
||||
|
||||
You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to @emph{publish any patch you make}. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
|
||||
For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the @file{LICENSE} file in the source tree and
|
||||
consult @url{https://ffmpeg.org/legal.html}.
|
||||
|
||||
If you modify FFmpeg code for your own use case, you are highly encouraged to
|
||||
@emph{submit your changes back to us}, using this document as a guide. There are
|
||||
both pragmatic and ideological reasons to do so:
|
||||
@chapter Contributing
|
||||
|
||||
There are 2 ways by which code gets into FFmpeg:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Maintaining external changes to keep up with upstream development is
|
||||
time-consuming and error-prone. With your code in the main tree, it will be
|
||||
maintained by FFmpeg developers.
|
||||
@item
|
||||
FFmpeg developers include leading experts in the field who can find bugs or
|
||||
design flaws in your code.
|
||||
@item
|
||||
By supporting the project you find useful you ensure it continues to be
|
||||
maintained and developed.
|
||||
@item Submitting patches to the ffmpeg-devel mailing list.
|
||||
See @ref{Submitting patches} for details.
|
||||
@item Directly committing changes to the main tree.
|
||||
@end itemize
|
||||
|
||||
All proposed code changes should be submitted for review to
|
||||
@url{mailto:ffmpeg-devel@@ffmpeg.org, the development mailing list}, as
|
||||
described in more detail in the @ref{Submitting patches} chapter. The code
|
||||
should comply with the @ref{Development Policy} and follow the @ref{Coding Rules}.
|
||||
Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed. And they should follow the @ref{Coding Rules}.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
|
||||
@anchor{Coding Rules}
|
||||
@chapter Coding Rules
|
||||
|
||||
@section Language
|
||||
|
||||
FFmpeg is mainly programmed in the ISO C11 language, except for the public
|
||||
headers which must stay C99 compatible.
|
||||
|
||||
Compiler-specific extensions may be used with good reason, but must not be
|
||||
depended on, i.e. the code must still compile and work with compilers lacking
|
||||
the extension.
|
||||
|
||||
The following C99 features must not be used anywhere in the codebase:
|
||||
@itemize @bullet
|
||||
@item
|
||||
variable-length arrays;
|
||||
|
||||
@item
|
||||
complex numbers;
|
||||
|
||||
@item
|
||||
mixed statements and declarations.
|
||||
@end itemize
|
||||
|
||||
@subsection SIMD/DSP
|
||||
@anchor{SIMD/DSP}
|
||||
|
||||
As modern compilers are unable to generate efficient SIMD or other
|
||||
performance-critical DSP code from plain C, handwritten assembly is used.
|
||||
Usually such code is isolated in a separate function. Then the standard approach
|
||||
is writing multiple versions of this function – a plain C one that works
|
||||
everywhere and may also be useful for debugging, and potentially multiple
|
||||
architecture-specific optimized implementations. Initialization code then
|
||||
chooses the best available version at runtime and loads it into a function
|
||||
pointer; the function in question is then always called through this pointer.
|
||||
|
||||
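A condensed sketch of this pattern (all names are invented for illustration; the
x86 init hook is assumed to be declared elsewhere):
@example
typedef struct FooDSPContext @{
    void (*add_samples)(int16_t *dst, const int16_t *src, int len);
@} FooDSPContext;

/* plain C version: works everywhere, useful for debugging */
static void add_samples_c(int16_t *dst, const int16_t *src, int len)
@{
    for (int i = 0; i < len; i++)
        dst[i] += src[i];
@}

void ff_foodsp_init(FooDSPContext *c)
@{
    c->add_samples = add_samples_c;
#if ARCH_X86
    ff_foodsp_init_x86(c); /* may overwrite the pointer with a SIMD version */
#endif
@}
@end example
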
The specific syntax used for writing assembly is:
|
||||
@itemize @bullet
|
||||
@item
|
||||
NASM on x86;
|
||||
|
||||
@item
|
||||
GAS on ARM and RISC-V.
|
||||
@end itemize
|
||||
|
||||
A unit testing framework for assembly called @code{checkasm} lives under
|
||||
@file{tests/checkasm}. All new assembly should come with @code{checkasm} tests;
|
||||
adding tests for existing assembly that lacks them is also strongly encouraged.
|
||||
|
||||
@subsection Other languages
|
||||
|
||||
Other languages than C may be used in special cases:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Compiler intrinsics or inline assembly when the code in question cannot be
|
||||
written in the standard way described in the @ref{SIMD/DSP} section. This
|
||||
typically applies to code that needs to be inlined.
|
||||
|
||||
@item
|
||||
Objective-C where required for interacting with macOS-specific interfaces.
|
||||
@end itemize
|
||||
|
||||
@section Code formatting conventions
|
||||
|
||||
There are the following guidelines regarding the indentation in files:
|
||||
@@ -135,39 +67,8 @@ K&R coding style is used.
|
||||
@end itemize
|
||||
The presentation is one inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
@subsection Vim configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
@subsection Emacs configuration
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
|
||||
@section Comments
|
||||
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
@@ -209,52 +110,86 @@ int myfunc(int my_parameter)
|
||||
...
|
||||
@end example
|
||||
|
||||
@anchor{Naming conventions}
|
||||
@section Naming conventions
|
||||
@section C language features
|
||||
|
||||
Names of functions, variables, and struct members must be lowercase, using
|
||||
underscores (_) to separate words. For example, @samp{avfilter_get_video_buffer}
|
||||
is an acceptable function name and @samp{AVFilterGetVideo} is not.
|
||||
FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
|
||||
Struct, union, enum, and typedeffed type names must use CamelCase. All structs
|
||||
and unions should be typedeffed to the same name as the struct/union tag, e.g.
|
||||
@code{typedef struct AVFoo @{ ... @} AVFoo;}. Enums are typically not
|
||||
typedeffed.
|
||||
|
||||
Enumeration constants and macros must be UPPERCASE, except for macros
|
||||
masquerading as functions, which should use the function naming convention.
|
||||
|
||||
All identifiers in the libraries should be namespaced as follows:
|
||||
@itemize @bullet
|
||||
@item
|
||||
No namespacing for identifiers with file and lower scope (e.g. local variables,
|
||||
static functions), and struct and union members,
|
||||
the @samp{inline} keyword;
|
||||
|
||||
@item
|
||||
The @code{ff_} prefix must be used for variables and functions visible outside
|
||||
of file scope, but only used internally within a single library, e.g.
|
||||
@samp{ff_w64_demuxer}. This prevents name collisions when FFmpeg is statically
|
||||
linked.
|
||||
@samp{//} comments;
|
||||
|
||||
@item
|
||||
designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
|
||||
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
|
||||
@item
|
||||
Implementation defined behavior for signed integers is assumed to match the
|
||||
expected behavior for two's complement. Non representable values in integer
|
||||
casts are binary truncated. Shift right of signed values uses sign extension.
|
||||
@end itemize
|
||||
|
||||
These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
|
||||
All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
mixing statements and declarations;
|
||||
|
||||
@item
|
||||
@samp{long long} (use @samp{int64_t} instead);
|
||||
|
||||
@item
|
||||
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
|
||||
|
||||
@item
|
||||
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@section Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exception from this are type names, like
|
||||
for example structs and enums; they should always be in CamelCase.
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_report_missing_feature}.
|
||||
|
||||
@item
|
||||
All other internal identifiers, like private type or macro names, should be
|
||||
namespaced only to avoid possible internal conflicts. E.g. @code{H264_NAL_SPS}
|
||||
vs. @code{HEVC_NAL_SPS}.
|
||||
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
|
||||
@item
|
||||
Other public identifiers (struct, union, enum, macro, type names) must use their
|
||||
library's public prefix (@code{AV}, @code{Sws}, or @code{Swr}).
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
@end itemize
|
||||
|
||||
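As a condensed illustration of the conventions above (all names below are made up):
@example
typedef struct AVFoo @{ int x; @} AVFoo;  /* public type: AV prefix, CamelCase  */
int ff_foo_parse_header(AVFoo *foo);    /* internal to one library: ff_       */
int avpriv_foo_shared_helper(void);     /* shared between libraries: avpriv_  */
#define AV_FOO_FLAG_FAST 1              /* public macro: UPPERCASE, AV_ prefix */
@end example
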
Furthermore, name space reserved for the system should not be invaded.
|
||||
@@ -268,50 +203,50 @@ symbols. If in doubt, just avoid names starting with @code{_} altogether.
|
||||
@section Miscellaneous conventions
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec,
|
||||
please use av_log() instead.
|
||||
|
||||
@item
|
||||
Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don't make the code easier to understand.
|
||||
@end itemize
|
||||
|
||||
@anchor{Development Policy}
|
||||
@section Editor configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
|
||||
@chapter Development Policy
|
||||
|
||||
@section Code behaviour
|
||||
|
||||
@subheading Correctness
|
||||
The code must be valid. It must not crash, abort, access invalid pointers, leak
|
||||
memory, cause data races or signed integer overflow, or otherwise cause
|
||||
undefined behaviour. Error codes should be checked and, when applicable,
|
||||
forwarded to the caller.
|
||||
|
||||
@subheading Thread- and library-safety
|
||||
Our libraries may be called by multiple independent callers in the same process.
|
||||
These calls may happen from any number of threads and the different call sites
|
||||
may not be aware of each other - e.g. a user program may be calling our
|
||||
libraries directly, and use one or more libraries that also call our libraries.
|
||||
The code must behave correctly under such conditions.
|
||||
|
||||
@subheading Robustness
|
||||
The code must treat as untrusted any bytestream received from a caller or read
|
||||
from a file, network, etc. It must not misbehave when arbitrary data is sent to
|
||||
it - typically it should print an error message and return
|
||||
@code{AVERROR_INVALIDDATA} on encountering invalid input data.
|
||||
|
||||
@subheading Memory allocation
The code must use the @code{av_malloc()} family of functions from
@file{libavutil/mem.h} to perform all memory allocation, except in special cases
(e.g. when interacting with an external library that requires a specific
allocator to be used).

All allocations should be checked and @code{AVERROR(ENOMEM)} returned on
failure. A common mistake is that error paths leak memory - make sure that does
not happen.
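
A typical allocation therefore follows this minimal pattern:
@example
buf = av_malloc(size);
if (!buf)
    return AVERROR(ENOMEM);
@end example
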
@subheading stdio
|
||||
Our libraries must not access the stdio streams stdin/stdout/stderr directly
|
||||
(e.g. via @code{printf()} family of functions), as that is not library-safe. For
|
||||
logging, use @code{av_log()}.
|
||||
|
||||
@section Patches/Committing
|
||||
@subheading Licenses for patches must be compatible with FFmpeg.
|
||||
Contributions should be licensed under the
|
||||
@@ -334,24 +269,13 @@ missing samples or an implementation with a small subset of features.
|
||||
Always check the mailing list for any reviewers with issues and test
|
||||
FATE before you push.
|
||||
|
||||
@subheading Commit messages
|
||||
Commit messages are highly important tools for informing other developers on
|
||||
what a given change does and why. Every commit must always have a properly
|
||||
filled out commit message with the following format:
|
||||
@example
|
||||
area changed: short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
If the commit addresses a known bug on our bug tracker or other external issue
|
||||
(e.g. CVE), the commit message should include the relevant bug ID(s) or other
|
||||
external identifiers. Note that this should be done in addition to a proper
|
||||
explanation and not instead of it. Comments such as "fixed!" or "Changed it."
|
||||
are not acceptable.
|
||||
|
||||
When applying patches that have been discussed at length on the mailing list,
|
||||
reference the thread in the commit message.
|
||||
@subheading Keep the main commit message short with an extended description below.
|
||||
The commit message should have a short first line in the form of
|
||||
a @samp{topic: short description} as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
@subheading Testing must be adequate but not excessive.
|
||||
If it works for you, others, and passes FATE then it should be OK to commit
|
||||
@@ -370,6 +294,15 @@ later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
@subheading Ask before you change the build system (configure, etc).
|
||||
Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
@subheading Cosmetic changes should be kept in separate patches.
|
||||
We refuse source indentation and other cosmetic changes if they are mixed
|
||||
with functional changes, such commits will be rejected and removed. Every
|
||||
@@ -384,15 +317,27 @@ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
|
||||
then either do NOT change the indentation of the inner part within (do not
|
||||
move it to the right)! or do so in a separate commit
|
||||
|
||||
@subheading Commit messages should always be filled out properly.
|
||||
Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
@example
|
||||
area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
@subheading Credit the author of the patch.
|
||||
Make sure the author of the commit is set correctly. (see git commit --author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
@subheading Credit any researchers
|
||||
If a commit/patch fixes an issue found by some researcher, always credit the
|
||||
researcher in the commit message for finding/reporting the issue.
|
||||
@subheading Complex patches should refer to discussion surrounding them.
|
||||
When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
@subheading Always wait long enough before pushing changes
|
||||
Do NOT commit to code actively maintained by others without permission.
|
||||
@@ -402,6 +347,22 @@ time-frame (12h for build failures and security fixes, 3 days small changes,
|
||||
Also note, the maintainer can simply ask for more time to review!
|
||||
|
||||
@section Code
|
||||
@subheading API/ABI changes should be discussed before they are made.
|
||||
Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove widely used functionality or features (redundant code can be removed).
|
||||
|
||||
@subheading Remember to check if you need to bump versions for libav*.
|
||||
Depending on the change, you may need to change the version integer.
|
||||
Incrementing the first component means no backward compatibility to
|
||||
previous versions (e.g. removal of a function from the public API).
|
||||
Incrementing the second component means backward compatible change
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
@subheading Warnings for correct code may be disabled if there is no other option.
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
@@ -411,150 +372,10 @@ If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
@section Library public interfaces
|
||||
Every library in FFmpeg provides a set of public APIs in its installed headers,
|
||||
which are those listed in the variable @code{HEADERS} in that library's
|
||||
@file{Makefile}. All identifiers defined in those headers (except for those
|
||||
explicitly documented otherwise), and corresponding symbols exported from
|
||||
compiled shared or static libraries are considered public interfaces and must
|
||||
comply with the API and ABI compatibility rules described in this section.
|
||||
|
||||
Public APIs must be backward compatible within a given major version. I.e. any
|
||||
valid user code that compiles and works with a given library version must still
|
||||
compile and work with any later version, as long as the major version number is
|
||||
unchanged. "Valid user code" here means code that is calling our APIs in a
|
||||
documented and/or intended manner and is not relying on any undefined behavior.
|
||||
Incrementing the major version may break backward compatibility, but only to the
|
||||
extent described in @ref{Major version bumps}.
|
||||
|
||||
We also guarantee backward ABI compatibility for shared and static libraries.
|
||||
I.e. it should be possible to replace a shared or static build of our library
|
||||
with a build of any later version (re-linking the user binary in the static
|
||||
case) without breaking any valid user binaries, as long as the major version
|
||||
number remains unchanged.
|
||||
|
||||
@subsection Adding new interfaces
|
||||
Any new public identifiers in installed headers are considered new API - this
|
||||
includes new functions, structs, macros, enum values, typedefs, new fields in
|
||||
existing structs, new installed headers, etc. Consider the following
|
||||
guidelines when adding new APIs.
|
||||
|
||||
@subsubheading Motivation
|
||||
While new APIs can be added relatively easily, changing or removing them is much
|
||||
harder due to the above-mentioned compatibility requirements. You should then
|
||||
consider carefully whether the functionality you are adding really needs to be
|
||||
exposed to our callers as new public API.
|
||||
|
||||
Your new API should have at least one well-established use case outside of the
|
||||
library that cannot be easily achieved with existing APIs. Every library in
|
||||
FFmpeg also has a defined scope - your new API must fit within it.
|
||||
|
||||
@subsubheading Replacing existing APIs
|
||||
If your new API is replacing an existing one, it should be strictly superior to
|
||||
it, so that the advantages of using the new API outweigh the cost to the
|
||||
callers of changing their code. After adding the new API you should then
|
||||
deprecate the old one and schedule it for removal, as described in
|
||||
@ref{Removing interfaces}.
|
||||
|
||||
If you deem an existing API deficient and want to fix it, the preferred approach
|
||||
in most cases is to add a differently-named replacement and deprecate the
|
||||
existing API rather than modify it. It is important to make the changes visible
|
||||
to our callers (e.g. through compile- or run-time deprecation warnings) and make
|
||||
it clear how to transition to the new API (e.g. in the Doxygen documentation or
|
||||
on the wiki).
|
||||
|
||||
@subsubheading API design
|
||||
The FFmpeg libraries are used by a variety of callers to perform a wide range of
|
||||
multimedia-related processing tasks. You should therefore - within reason - try
|
||||
to design your new API for the broadest feasible set of use cases and avoid
|
||||
unnecessarily limiting it to a specific type of callers (e.g. just media
|
||||
playback or just transcoding).
|
||||
|
||||
@subsubheading Consistency
|
||||
Check whether similar APIs already exist in FFmpeg. If they do, try to model
|
||||
your new addition on them to achieve better overall consistency.
|
||||
|
||||
The naming of your new identifiers should follow the @ref{Naming conventions}
|
||||
and be aligned with other similar APIs, if applicable.
|
||||
|
||||
@subsubheading Extensibility
|
||||
You should also consider how your API might be extended in the future in a
|
||||
backward-compatible way. If you are adding a new struct @code{AVFoo}, the
|
||||
standard approach is requiring the caller to always allocate it through a
|
||||
constructor function, typically named @code{av_foo_alloc()}. This way new fields
|
||||
may be added to the end of the struct without breaking ABI compatibility.
|
||||
Typically you will also want a destructor - @code{av_foo_free(AVFoo**)} that
|
||||
frees the indirectly supplied object (and its contents, if applicable) and
|
||||
writes @code{NULL} to the supplied pointer, thus eliminating the potential
|
||||
dangling pointer in the caller's memory.
|
||||
|
||||
If you are adding new functions, consider whether it might be desirable to tweak
|
||||
their behavior in the future - you may want to add a flags argument, even though
|
||||
it would be unused initially.
|
||||
|
||||
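A minimal sketch of this alloc/free pattern, for a hypothetical @code{AVFoo}:
@example
AVFoo *av_foo_alloc(void)
@{
    return av_mallocz(sizeof(AVFoo)); /* new fields can later be appended to AVFoo */
@}

void av_foo_free(AVFoo **pfoo)
@{
    if (!pfoo || !*pfoo)
        return;
    /* free any owned contents of (*pfoo) here */
    av_freep(pfoo); /* frees the struct and writes NULL to *pfoo */
@}
@end example
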
@subsubheading Documentation
|
||||
All new APIs must be documented as Doxygen-formatted comments above the
|
||||
identifiers you add to the public headers. You should also briefly mention the
|
||||
change in @file{doc/APIchanges}.
|
||||
|
||||
@subsubheading Bump the version
|
||||
Backward-incompatible API or ABI changes require incrementing (bumping) the
|
||||
major version number, as described in @ref{Major version bumps}. Major
|
||||
bumps are significant events that happen on a schedule - so if your change
|
||||
strictly requires one, you should add it under @code{#if} preprocessor guards that
|
||||
disable it until the next major bump happens.
|
||||
|
||||
New APIs that can be added without breaking API or ABI compatibility require
|
||||
bumping the minor version number.
|
||||
|
||||
Incrementing the third (micro) version component means a noteworthy binary
|
||||
compatible change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
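For reference, a sketch of how the three components appear as per-library version
macros (the numbers are purely illustrative):
@example
#define LIBAVCODEC_VERSION_MAJOR  61
#define LIBAVCODEC_VERSION_MINOR  12
#define LIBAVCODEC_VERSION_MICRO 100
@end example
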
@anchor{Removing interfaces}
|
||||
@subsection Removing interfaces
|
||||
Due to the above-mentioned compatibility guarantees, removing APIs is an involved
|
||||
process that should only be undertaken with good reason. Typically a deficient,
|
||||
restrictive, or otherwise inadequate API is replaced by a superior one, though
|
||||
it does at times happen that we remove an API without any replacement (e.g. when
|
||||
the feature it provides is deemed not worth the maintenance effort, out of scope
|
||||
of the project, fundamentally flawed, etc.).
|
||||
|
||||
The removal has two steps - first the API is deprecated and scheduled for
|
||||
removal, but remains present and functional. The second step is actually
|
||||
removing the API - this is described in @ref{Major version bumps}.
|
||||
|
||||
To deprecate an API you should signal to our users that they should stop using
it. E.g. if you intend to remove struct members or functions, you should mark
them with @code{attribute_deprecated}. When this cannot be done, it may be
possible to detect the use of the deprecated API at runtime and print a warning
(though take care not to print it too often). You should also document the
deprecation (and the replacement, if applicable) in the relevant Doxygen
documentation block.

Finally, you should define a deprecation guard along the lines of
@code{#define FF_API_<FOO> (LIBAVBAR_VERSION_MAJOR < XX)} (where XX is the major
version in which the API will be removed) in @file{libavbar/version_major.h}
(@file{version.h} in case of @code{libavutil}). Then wrap all uses of the
deprecated API in @code{#if FF_API_<FOO> .... #endif}, so that the code will
automatically get disabled once the major version reaches XX. You can also use
@code{FF_DISABLE_DEPRECATION_WARNINGS} and @code{FF_ENABLE_DEPRECATION_WARNINGS}
to suppress compiler deprecation warnings inside these guards. You should test
that the code compiles and works with the guard macro evaluating to both true
and false.
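
A minimal sketch of how such a guard is typically laid out (the names and the
version number are made up for illustration):
@example
/* libavbar/version_major.h: drop the API once the major version reaches 60 */
#define FF_API_OLD_THING (LIBAVBAR_VERSION_MAJOR < 60)

/* libavbar/bar.h */
#if FF_API_OLD_THING
attribute_deprecated
int av_bar_old_thing(void);
#endif

/* call sites inside the library */
#if FF_API_OLD_THING
FF_DISABLE_DEPRECATION_WARNINGS
    av_bar_old_thing();
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@end example
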
@anchor{Major version bumps}
|
||||
@subsection Major version bumps
|
||||
A major version bump signifies an API and/or ABI compatibility break. To reduce
|
||||
the negative effects on our callers, who are required to adapt their code,
|
||||
backward-incompatible changes during a major bump should be limited to:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Removing previously deprecated APIs.
|
||||
|
||||
@item
|
||||
Performing ABI- but not API-breaking changes, like reordering struct contents.
|
||||
@end itemize
|
||||
@subheading Check untrusted input properly.
|
||||
Never write to unallocated memory, never write over the end of arrays,
|
||||
always check values read from some untrusted source before using them
|
||||
as array index or other risky things.
|
||||
|
||||
@section Documentation/Other
|
||||
@subheading Subscribe to the ffmpeg-devel mailing list.
|
||||
@@ -598,6 +419,35 @@ finding a new maintainer and also don't forget to update the @file{MAINTAINERS}
|
||||
|
||||
We think our rules are not too hard. If you have comments, contact us.
|
||||
|
||||
@chapter Code of conduct
|
||||
|
||||
Be friendly and respectful towards others and third parties.
|
||||
Treat others the way you yourself want to be treated.
|
||||
|
||||
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
|
||||
Different opinions and interpretations help the project.
|
||||
Looking at issues from a different perspective assists development.
|
||||
|
||||
Do not assume malice for things that can be attributed to incompetence. Even if
|
||||
it is malice, it's rarely good to start with that as initial assumption.
|
||||
|
||||
Stay friendly even if someone acts contrarily. Everyone has a bad day
|
||||
once in a while.
|
||||
If you yourself have a bad day or are angry then try to take a break and reply
|
||||
once you are calm and without anger if you have to.
|
||||
|
||||
Try to help other team members and cooperate if you can.
|
||||
|
||||
The goal of software development is to create technical excellence, not for any
|
||||
individual to be better and "win" against the others. Large software projects
|
||||
are only possible and successful through teamwork.
|
||||
|
||||
If someone struggles do not put them down. Give them a helping hand
|
||||
instead and point them in the right direction.
|
||||
|
||||
Finally, keep in mind the immortal words of Bill and Ted,
|
||||
"Be excellent to each other."
|
||||
|
||||
@anchor{Submitting patches}
|
||||
@chapter Submitting patches
|
||||
|
||||
@@ -638,22 +488,6 @@ patch is inline or attached per mail.
|
||||
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show up, its MIME type
was likely wrong.
|
||||
|
||||
@subheading Sending patches from email clients
|
||||
Using @code{git send-email} might not be desirable for everyone. The
following trick allows you to send patches via email clients in a safe
way. It has been tested with Outlook and Thunderbird (with the X-Unsent
extension) and might work with other applications.
|
||||
|
||||
Create your patch like this:
|
||||
|
||||
@verbatim
|
||||
git format-patch -s -o "outputfolder" --add-header "X-Unsent: 1" --suffix .eml --to ffmpeg-devel@ffmpeg.org -1 1a2b3c4d
|
||||
@end verbatim
|
||||
|
||||
Now you'll just need to open the eml file with the email application
|
||||
and execute 'Send'.
|
||||
|
||||
@subheading Reviews
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
@@ -785,7 +619,7 @@ If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note please do not attach samples >100k to mails but rather provide a
|
||||
URL, you can upload to @url{https://streams.videolan.org/upload/}.
|
||||
URL, you can upload to ftp://upload.ffmpeg.org.
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary about what the patch does change?
|
||||
@@ -814,14 +648,16 @@ Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
@item
|
||||
Consider adding a regression test for your code. All new modules
should be covered by tests. That includes demuxers, muxers, decoders, encoders,
filters, bitstream filters, and parsers. If it's not possible to do that, add
an explanation why to your patchset; it's OK not to test if there's a reason.
|
||||
Consider adding a regression test for your code.
|
||||
|
||||
@item
|
||||
If you added YASM code please check that things still work with --disable-yasm.
|
||||
|
||||
@item
|
||||
Make sure you check the return values of functions and return appropriate
|
||||
error codes. Especially memory allocation functions like @code{av_malloc()}
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
|
||||
@item
|
||||
Test your code with valgrind and/or Address Sanitizer to ensure it's free
|
||||
of leaks, out of array accesses, etc.
|
||||
@@ -871,8 +707,6 @@ accordingly].
|
||||
|
||||
@section Adding files to the fate-suite dataset
|
||||
|
||||
If you need a sample uploaded send a mail to samples-request.
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
|
||||
specific test then the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
|
||||
@@ -1,13 +1,10 @@
|
||||
#!/bin/sh
|
||||
|
||||
OUT_DIR="${1}"
|
||||
SRC_DIR="${2}"
|
||||
DOXYFILE="${3}"
|
||||
DOXYGEN="${4}"
|
||||
DOXYFILE="${2}"
|
||||
DOXYGEN="${3}"
|
||||
|
||||
shift 4
|
||||
|
||||
cd ${SRC_DIR}
|
||||
shift 3
|
||||
|
||||
if [ -e "VERSION" ]; then
|
||||
VERSION=`cat "VERSION"`
|
||||
|
||||
doc/encoders.texi: 1904 lines changed (diff suppressed because it is too large)

doc/examples/.gitignore (vendored): 5 lines changed
@@ -1,4 +1,4 @@
|
||||
/avio_list_dir
|
||||
/avio_dir_cmd
|
||||
/avio_reading
|
||||
/decode_audio
|
||||
/decode_video
|
||||
@@ -20,6 +20,3 @@
|
||||
/scaling_video
|
||||
/transcode_aac
|
||||
/transcoding
|
||||
/vaapi_encode
|
||||
/vaapi_transcode
|
||||
/qsv_transcode
|
||||
|
||||
@@ -1,27 +1,26 @@
|
||||
EXAMPLES-$(CONFIG_AVIO_HTTP_SERVE_FILES) += avio_http_serve_files
|
||||
EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
|
||||
EXAMPLES-$(CONFIG_AVIO_READ_CALLBACK_EXAMPLE) += avio_read_callback
|
||||
EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
|
||||
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
|
||||
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_AUDIO_EXAMPLE) += decode_filter_audio
|
||||
EXAMPLES-$(CONFIG_DECODE_FILTER_VIDEO_EXAMPLE) += decode_filter_video
|
||||
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
|
||||
EXAMPLES-$(CONFIG_DEMUX_DECODE_EXAMPLE) += demux_decode
|
||||
EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
|
||||
EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
|
||||
EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
|
||||
EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
|
||||
EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
|
||||
EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
|
||||
EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
|
||||
EXAMPLES-$(CONFIG_HW_DECODE_EXAMPLE) += hw_decode
|
||||
EXAMPLES-$(CONFIG_MUX_EXAMPLE) += mux
|
||||
EXAMPLES-$(CONFIG_QSV_DECODE_EXAMPLE) += qsv_decode
|
||||
EXAMPLES-$(CONFIG_REMUX_EXAMPLE) += remux
|
||||
EXAMPLES-$(CONFIG_RESAMPLE_AUDIO_EXAMPLE) += resample_audio
|
||||
EXAMPLES-$(CONFIG_SCALE_VIDEO_EXAMPLE) += scale_video
|
||||
EXAMPLES-$(CONFIG_SHOW_METADATA_EXAMPLE) += show_metadata
|
||||
EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
|
||||
EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
|
||||
EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
|
||||
EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
|
||||
EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
|
||||
EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
|
||||
EXAMPLES-$(CONFIG_TRANSCODE_EXAMPLE) += transcode
|
||||
EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
|
||||
EXAMPLES-$(CONFIG_VAAPI_ENCODE_EXAMPLE) += vaapi_encode
|
||||
EXAMPLES-$(CONFIG_VAAPI_TRANSCODE_EXAMPLE) += vaapi_transcode
|
||||
EXAMPLES-$(CONFIG_QSV_TRANSCODE_EXAMPLE) += qsv_transcode
|
||||
|
||||
EXAMPLES := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
|
||||
EXAMPLES_G := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
|
||||
@@ -38,7 +37,7 @@ $(EXAMPLES_G): %$(PROGSSUF)_g$(EXESUF): %.o
|
||||
examples: $(EXAMPLES)
|
||||
|
||||
$(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.o): | doc/examples
|
||||
OUTDIRS += doc/examples
|
||||
OBJDIRS += doc/examples
|
||||
|
||||
DOXY_INPUT += $(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.c)
|
||||
|
||||
|
||||
@@ -11,40 +11,33 @@ CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
# missing the following targets, since they need special options in the FFmpeg build:
|
||||
# qsv_decode
|
||||
# qsv_transcode
|
||||
# vaapi_encode
|
||||
# vaapi_transcode
|
||||
|
||||
EXAMPLES=\
|
||||
avio_http_serve_files \
|
||||
avio_list_dir \
|
||||
avio_read_callback \
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
avio_reading \
|
||||
decode_audio \
|
||||
decode_filter_audio \
|
||||
decode_filter_video \
|
||||
decode_video \
|
||||
demux_decode \
|
||||
demuxing_decoding \
|
||||
encode_audio \
|
||||
encode_video \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
http_multiclient \
|
||||
hw_decode \
|
||||
mux \
|
||||
remux \
|
||||
resample_audio \
|
||||
scale_video \
|
||||
show_metadata \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcode
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
encode_audio: LDLIBS += -lm
|
||||
mux: LDLIBS += -lm
|
||||
resample_audio: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
|
||||
@@ -7,10 +7,8 @@ that you have them installed and working on your system.
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and run:
|
||||
make -f Makefile.example
|
||||
|
||||
It will link to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
@@ -22,4 +20,4 @@ examples using "make examplesclean"
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make -f Makefile.example
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
||||
|
||||
@@ -20,13 +20,6 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext list directory API usage example
|
||||
* @example avio_list_dir.c
|
||||
*
|
||||
* Show how to list directories through the libavformat AVIOContext API.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
@@ -109,15 +102,38 @@ static int list_op(const char *input_dir)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s input_dir\n"
|
||||
"API example program to show how to list files in directory "
|
||||
"accessed through AVIOContext.\n", program_name);
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
@@ -129,7 +145,32 @@ int main(int argc, char *argv[])
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
ret = list_op(argv[1]);
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avformat_network_deinit();
|
||||
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext read callback API usage example
|
||||
* @example avio_read_callback.c
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
@@ -116,12 +117,11 @@ int main(int argc, char *argv[])
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx)
|
||||
if (avio_ctx) {
|
||||
av_freep(&avio_ctx->buffer);
|
||||
avio_context_free(&avio_ctx);
|
||||
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec audio decoding API usage example
|
||||
* @example decode_audio.c
|
||||
* @file
|
||||
* audio decoding with libavcodec API example
|
||||
*
|
||||
* Decode data from an MP2 input file and generate a raw audio file to
|
||||
* be played with ffplay.
|
||||
* @example decode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -40,35 +39,6 @@
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
FILE *outfile)
|
||||
{
|
||||
@@ -98,7 +68,7 @@ static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
exit(1);
|
||||
}
|
||||
for (i = 0; i < frame->nb_samples; i++)
|
||||
for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++)
|
||||
for (ch = 0; ch < dec_ctx->channels; ch++)
|
||||
fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
}
|
||||
@@ -116,9 +86,6 @@ int main(int argc, char **argv)
|
||||
size_t data_size;
|
||||
AVPacket *pkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
enum AVSampleFormat sfmt;
|
||||
int n_channels = 0;
|
||||
const char *fmt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@@ -205,26 +172,6 @@ int main(int argc, char **argv)
|
||||
pkt->size = 0;
|
||||
decode(c, pkt, decoded_frame, outfile);
|
||||
|
||||
/* print output PCM information, because raw PCM has no metadata */
|
||||
sfmt = c->sample_fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
}
|
||||
|
||||
n_channels = c->ch_layout.nb_channels;
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, c->sample_rate,
|
||||
outfilename);
|
||||
end:
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec video decoding API usage example
|
||||
* @example decode_video.c *
|
||||
* @file
|
||||
* video decoding with libavcodec API example
|
||||
*
|
||||
* Read from an MPEG1 video file, decode frames, and generate PGM images as
|
||||
* output.
|
||||
* @example decode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -42,7 +41,7 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"wb");
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
@@ -70,12 +69,12 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
printf("saving frame %3"PRId64"\n", dec_ctx->frame_num);
|
||||
printf("saving frame %3d\n", dec_ctx->frame_number);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), "%s-%"PRId64, filename, dec_ctx->frame_num);
|
||||
snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
}
|
||||
@@ -93,12 +92,10 @@ int main(int argc, char **argv)
|
||||
uint8_t *data;
|
||||
size_t data_size;
|
||||
int ret;
|
||||
int eof;
|
||||
AVPacket *pkt;
|
||||
|
||||
if (argc <= 2) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n"
|
||||
"And check your input file is encoded by mpeg1video please.\n", argv[0]);
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
exit(0);
|
||||
}
|
||||
filename = argv[1];
|
||||
@@ -152,16 +149,15 @@ int main(int argc, char **argv)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
do {
|
||||
while (!feof(f)) {
|
||||
/* read raw data from the input file */
|
||||
data_size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (ferror(f))
|
||||
if (!data_size)
|
||||
break;
|
||||
eof = !data_size;
|
||||
|
||||
/* use the parser to split the data into frames */
|
||||
data = inbuf;
|
||||
while (data_size > 0 || eof) {
|
||||
while (data_size > 0) {
|
||||
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
|
||||
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
|
||||
if (ret < 0) {
|
||||
@@ -173,10 +169,8 @@ int main(int argc, char **argv)
|
||||
|
||||
if (pkt->size)
|
||||
decode(c, frame, pkt, outfilename);
|
||||
else if (eof)
|
||||
break;
|
||||
}
|
||||
} while (!eof);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
decode(c, frame, NULL, outfilename);
|
||||
|
||||
@@ -21,18 +21,17 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat and libavcodec demuxing and decoding API usage example
|
||||
* @example demux_decode.c
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and decode audio
|
||||
* and video data. Write the output as raw audio and input files to be played by
|
||||
* ffplay.
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -52,95 +51,99 @@ static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket *pkt = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
/* Enable or disable frame reference counting. You are not supposed to support
|
||||
* both paths in your application but pick the one most appropriate to your
|
||||
* needs. Look for the use of refcount in this example to see what are the
|
||||
* differences of API usage between them. */
|
||||
static int refcount = 0;
|
||||
|
||||
printf("video_frame n:%d\n",
|
||||
video_frame_count++);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy2(video_dst_data, video_dst_linesize,
|
||||
frame->data, frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
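A hedged sketch of the conversion suggested in the comment above, using libswresample to interleave a planar frame into packed S16 before writing it; the helper name and the reuse of the frame's own channel layout and sample rate are assumptions for illustration, not part of the example:

    #include <stdio.h>
    #include <libavutil/frame.h>
    #include <libavutil/mem.h>
    #include <libavutil/samplefmt.h>
    #include <libswresample/swresample.h>

    static int write_packed(FILE *out, const AVFrame *frame)
    {
        SwrContext *swr = NULL;
        uint8_t *buf = NULL;
        int linesize, ret;

        /* same layout and rate on both sides: only the sample format changes */
        ret = swr_alloc_set_opts2(&swr,
                                  &frame->ch_layout, AV_SAMPLE_FMT_S16, frame->sample_rate,
                                  &frame->ch_layout, (enum AVSampleFormat)frame->format,
                                  frame->sample_rate, 0, NULL);
        if (ret < 0 || (ret = swr_init(swr)) < 0)
            goto end;

        ret = av_samples_alloc(&buf, &linesize, frame->ch_layout.nb_channels,
                               frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
        if (ret < 0)
            goto end;

        /* interleave all channels into the single packed buffer */
        ret = swr_convert(swr, &buf, frame->nb_samples,
                          (const uint8_t **)frame->extended_data, frame->nb_samples);
        if (ret > 0)
            fwrite(buf, 1, ret * frame->ch_layout.nb_channels *
                           av_get_bytes_per_sample(AV_SAMPLE_FMT_S16), out);
    end:
        av_freep(&buf);
        swr_free(&swr);
        return ret < 0 ? ret : 0;
    }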
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
*got_frame = 0;
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
if (*got_frame) {
|
||||
|
||||
av_frame_unref(frame);
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
/* If we use frame reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && refcount)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
@@ -148,7 +151,8 @@ static int open_codec_context(int *stream_idx,
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
@@ -182,8 +186,9 @@ static int open_codec_context(int *stream_idx,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
|
||||
/* Init the decoders, with or without reference counting */
|
||||
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -225,17 +230,24 @@ static int get_format_from_sample_fmt(const char **fmt,
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
|
||||
refcount = 1;
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
@@ -301,12 +313,10 @@ int main (int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
@@ -314,23 +324,24 @@ int main (int argc, char **argv)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, pkt);
|
||||
else if (pkt->stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, pkt);
|
||||
av_packet_unref(pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_packet_unref(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
@@ -343,7 +354,7 @@ int main (int argc, char **argv)
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->ch_layout.nb_channels;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
@@ -372,7 +383,6 @@ end:
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_packet_free(&pkt);
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding audio API usage examples
|
||||
* @example encode_audio.c
|
||||
* @file
|
||||
* audio encoding with libavcodec API example.
|
||||
*
|
||||
* Generate a synthetic audio signal and encode it to an output MP2 file.
|
||||
* @example encode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
@@ -70,25 +70,26 @@ static int select_sample_rate(const AVCodec *codec)
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst)
|
||||
static int select_channel_layout(const AVCodec *codec)
|
||||
{
|
||||
const AVChannelLayout *p, *best_ch_layout;
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->ch_layouts)
|
||||
return av_channel_layout_copy(dst, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->ch_layouts;
|
||||
while (p->nb_channels) {
|
||||
int nb_channels = p->nb_channels;
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = p;
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return av_channel_layout_copy(dst, best_ch_layout);
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
|
||||
@@ -163,9 +164,8 @@ int main(int argc, char **argv)
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
ret = select_channel_layout(codec, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
@@ -195,9 +195,7 @@ int main(int argc, char **argv)
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
ret = av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* allocate the data buffers */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
@@ -220,7 +218,7 @@ int main(int argc, char **argv)
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->ch_layout.nb_channels; k++)
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding video API usage example
|
||||
* @example encode_video.c
|
||||
* @file
|
||||
* video encoding with libavcodec API example
|
||||
*
|
||||
* Generate synthetic video data and encode it to an output file.
|
||||
* @example encode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -145,7 +145,7 @@ int main(int argc, char **argv)
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
ret = av_frame_get_buffer(frame, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate the video frame data\n");
|
||||
exit(1);
|
||||
@@ -155,25 +155,12 @@ int main(int argc, char **argv)
|
||||
for (i = 0; i < 25; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
/* Make sure the frame data is writable.
|
||||
On the first round, the frame is fresh from av_frame_get_buffer()
|
||||
and therefore we know it is writable.
|
||||
But on the next rounds, encode() will have called
|
||||
avcodec_send_frame(), and the codec may have kept a reference to
|
||||
the frame in its internal structures, that makes the frame
|
||||
unwritable.
|
||||
av_frame_make_writable() checks that and allocates a new buffer
|
||||
for the frame only if necessary.
|
||||
*/
|
||||
/* make sure the frame data is writable */
|
||||
ret = av_frame_make_writable(frame);
|
||||
if (ret < 0)
|
||||
exit(1);
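A small sketch of the rule spelled out in the comment above (fill_luma is a hypothetical helper, not part of the example): any code that touches the frame data after the frame has been sent to the encoder should go through av_frame_make_writable() first, which reallocates the buffers only when they are still referenced elsewhere.

    #include <string.h>
    #include <libavutil/frame.h>

    /* overwrite the Y plane of an 8-bit YUV frame after making sure
     * we own writable buffers */
    static int fill_luma(AVFrame *frame, int value)
    {
        /* copy-on-write: fresh buffers are allocated only if the encoder
         * (or anyone else) still holds a reference to the current ones */
        int ret = av_frame_make_writable(frame);
        if (ret < 0)
            return ret;
        for (int y = 0; y < frame->height; y++)
            memset(frame->data[0] + y * frame->linesize[0], value, frame->width);
        return 0;
    }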
|
||||
|
||||
/* Prepare a dummy image.
|
||||
In real code, this is where you would have your own logic for
|
||||
filling the frame. FFmpeg does not care what you put in the
|
||||
frame.
|
||||
*/
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
@@ -198,14 +185,8 @@ int main(int argc, char **argv)
|
||||
/* flush the encoder */
|
||||
encode(c, NULL, pkt, f);
|
||||
|
||||
/* Add sequence end code to have a real MPEG file.
|
||||
It only makes sense because this tiny example writes packets
|
||||
directly. This is called "elementary stream" and only works for some
|
||||
codecs. To create a valid file, you usually need to write packets
|
||||
into a proper file format or protocol; see mux.c.
|
||||
*/
|
||||
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
/* add sequence end code to have a real MPEG file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_free_context(&c);
|
||||
|
||||
@@ -21,16 +21,7 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec motion vectors extraction API usage example
|
||||
* @example extract_mvs.c
|
||||
*
|
||||
* Read from input file, decode video stream and print a motion vectors
|
||||
* representation to stdout.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -69,11 +60,10 @@ static int decode_packet(const AVPacket *pkt)
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64",%4d,%4d,%4d\n",
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags,
|
||||
mv->motion_x, mv->motion_y, mv->motion_scale);
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
@@ -88,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
|
||||
@@ -114,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
ret = avcodec_open2(dec_ctx, dec, &opts);
|
||||
av_dict_free(&opts);
|
||||
if (ret < 0) {
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -133,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
@@ -168,20 +156,13 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags,motion_x,motion_y,motion_scale\n");
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(pkt);
|
||||
av_packet_unref(pkt);
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(&pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -193,6 +174,5 @@ end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_packet_free(&pkt);
|
||||
return ret < 0;
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavfilter audio filtering API usage example
|
||||
* @example filter_audio.c
|
||||
* @file
|
||||
* libavfilter API usage example.
|
||||
*
|
||||
* This example will generate a sine wave audio, pass it through a simple filter
|
||||
* chain, and then compute the MD5 checksum of the output data.
|
||||
* @example filter_audio.c
|
||||
* This example will generate a sine wave audio,
|
||||
* pass it through a simple filter chain, and then compute the MD5 checksum of
|
||||
* the output data.
|
||||
*
|
||||
* The filter chain it uses is:
|
||||
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
|
||||
@@ -53,7 +55,7 @@
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
@@ -98,7 +100,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
@@ -152,8 +154,9 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
@@ -212,7 +215,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = frame->ch_layout.nb_channels;
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
@@ -245,7 +248,7 @@ static int get_input(AVFrame *frame, int frame_num)
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
|
||||
@@ -23,11 +23,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio decoding and filtering usage example
|
||||
* @example decode_filter_audio.c
|
||||
*
|
||||
* Demux, decode and filter audio input file, generate a raw audio
|
||||
* file to be played with ffplay.
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
@@ -36,7 +34,6 @@
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
@@ -51,8 +48,8 @@ static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -77,6 +74,7 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
@@ -96,6 +94,7 @@ static int init_filters(const char *filters_descr)
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
@@ -107,13 +106,12 @@ static int init_filters(const char *filters_descr)
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
ret = snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=",
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt));
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, args + ret, sizeof(args) - ret);
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
@@ -136,7 +134,7 @@ static int init_filters(const char *filters_descr)
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
@@ -187,7 +185,7 @@ static int init_filters(const char *filters_descr)
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_channel_layout_describe(&outlink->ch_layout, args, sizeof(args));
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
@@ -202,7 +200,7 @@ end:
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * frame->ch_layout.nb_channels;
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
@@ -217,12 +215,12 @@ static void print_frame(const AVFrame *frame)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = av_packet_alloc();
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (!packet || !frame || !filt_frame) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
@@ -237,11 +235,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -277,13 +275,12 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_packet_free(&packet);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
@@ -24,13 +24,11 @@
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example decode_filter_video.c
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
@@ -53,8 +51,8 @@ static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
@@ -79,6 +77,7 @@ static int open_input_file(const char *filename)
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
@@ -210,20 +209,16 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet;
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
packet = av_packet_alloc();
|
||||
if (!frame || !filt_frame || !packet) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -234,11 +229,11 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
@@ -253,28 +248,30 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
if (ret >= 0) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
@@ -282,7 +279,6 @@ end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
av_packet_free(&packet);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat multi-client network API usage example
|
||||
* @example avio_http_serve_files.c
|
||||
* @file
|
||||
* libavformat multi-client network API usage example.
|
||||
*
|
||||
* Serve a file without decoding or demuxing it over the HTTP protocol. Multiple
|
||||
* clients can connect and will receive the same file.
|
||||
* @example http_multiclient.c
|
||||
* This example will serve a file without decoding or demuxing it over http.
|
||||
* Multiple clients can connect and will receive the same file.
|
||||
*/
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
@@ -4,31 +4,30 @@
|
||||
*
|
||||
* HW Acceleration API (video decoding) decode sample
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file HW-accelerated decoding API usage example
|
||||
* @example hw_decode.c
|
||||
* @file
|
||||
* HW-Accelerated decoding example.
|
||||
*
|
||||
* Perform HW-accelerated decoding with output frames from HW video
|
||||
* surfaces.
|
||||
* @example hw_decode.c
|
||||
* This example shows how to do HW-accelerated decoding with output
|
||||
* frames from the HW video surfaces.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -151,8 +150,8 @@ int main(int argc, char *argv[])
|
||||
int video_stream, ret;
|
||||
AVStream *video = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVPacket *packet = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVPacket packet;
|
||||
enum AVHWDeviceType type;
|
||||
int i;
|
||||
|
||||
@@ -171,12 +170,6 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
}
|
||||
|
||||
packet = av_packet_alloc();
|
||||
if (!packet) {
|
||||
fprintf(stderr, "Failed to allocate AVPacket\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
|
||||
@@ -218,6 +211,7 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
|
||||
decoder_ctx->get_format = get_hw_format;
|
||||
av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
if (hw_decoder_init(decoder_ctx, type) < 0)
|
||||
return -1;
|
||||
@@ -228,25 +222,27 @@ int main(int argc, char *argv[])
|
||||
}
|
||||
|
||||
/* open the file to dump raw data */
|
||||
output_file = fopen(argv[3], "w+b");
|
||||
output_file = fopen(argv[3], "w+");
|
||||
|
||||
/* actual decoding and dump the raw data */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(input_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(input_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == packet->stream_index)
|
||||
ret = decode_write(decoder_ctx, packet);
|
||||
if (video_stream == packet.stream_index)
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_write(decoder_ctx, NULL);
|
||||
packet.data = NULL;
|
||||
packet.size = 0;
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
av_packet_unref(&packet);
|
||||
|
||||
if (output_file)
|
||||
fclose(output_file);
|
||||
av_packet_free(&packet);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avformat_close_input(&input_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
|
||||
@@ -21,10 +21,9 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat metadata extraction API usage example
|
||||
* @example show_metadata.c
|
||||
*
|
||||
* Show metadata from an input file.
|
||||
* @file
|
||||
* Shows how the metadata API can be used in application programs.
|
||||
* @example metadata.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -35,7 +34,7 @@
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
const AVDictionaryEntry *tag = NULL;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
@@ -48,12 +47,7 @@ int main (int argc, char **argv)
|
||||
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
|
||||
return ret;
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
while ((tag = av_dict_iterate(fmt_ctx->metadata, tag)))
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
@@ -21,11 +21,12 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat muxing API usage example
|
||||
* @example mux.c
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Generate a synthetic audio and video signal and mux them to a media file in
|
||||
* any supported libavformat format. The default codecs are used.
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
@@ -38,7 +39,6 @@
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
@@ -61,8 +61,6 @@ typedef struct OutputStream {
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
AVPacket *tmp_pkt;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
@@ -80,50 +78,20 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame, AVPacket *pkt)
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(c, pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, c->time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
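For comparison with the comment above, a hedged sketch of the non-interleaved path (hypothetical helper, not part of the patch, assuming the usual libavformat headers): av_write_frame() leaves ownership of the packet with the caller, so it has to be unreferenced explicitly.

    static int write_packet_noninterleaved(AVFormatContext *oc, AVPacket *pkt)
    {
        int ret = av_write_frame(oc, pkt); /* does not take ownership of pkt */
        av_packet_unref(pkt);              /* so the caller releases it */
        return ret;
    }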
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
const AVCodec **codec,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
@@ -137,12 +105,6 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->tmp_pkt = av_packet_alloc();
|
||||
if (!ost->tmp_pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, NULL);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
@@ -169,7 +131,16 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
@@ -199,7 +170,7 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
@@ -214,22 +185,25 @@ static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
const AVChannelLayout *channel_layout,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
av_channel_layout_copy(&frame->ch_layout, channel_layout);
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
if (av_frame_get_buffer(frame, 0) < 0) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
@@ -238,8 +212,7 @@ static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
@@ -268,9 +241,9 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
@@ -281,25 +254,25 @@ static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout (ost->swr_ctx, "in_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_chlayout (ost->swr_ctx, "out_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
@@ -312,12 +285,12 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
|
||||
for (i = 0; i < ost->enc->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
@@ -336,20 +309,23 @@ static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
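For the general case that the assert above rules out (input and output rates are equal in this example), a hypothetical helper showing how the same formula sizes the destination buffer when the rates differ; the helper and its in_rate/out_rate parameters are assumptions for illustration only.

    #include <libavutil/mathematics.h>
    #include <libswresample/swresample.h>

    static int dst_samples_for(SwrContext *swr, int nb_in, int in_rate, int out_rate)
    {
        /* count samples still buffered inside the resampler, then rescale */
        return (int)av_rescale_rnd(swr_get_delay(swr, in_rate) + nb_in,
                                   out_rate, in_rate, AV_ROUND_UP);
    }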
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
@@ -373,37 +349,51 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *frame;
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame)
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
frame->format = pix_fmt;
|
||||
frame->width = width;
|
||||
frame->height = height;
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return frame;
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->enc;
|
||||
@@ -420,7 +410,7 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
@@ -431,9 +421,9 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_frame(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary video frame\n");
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
@@ -474,7 +464,7 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
@@ -516,7 +506,37 @@ static AVFrame *get_video_frame(OutputStream *ost)
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
@@ -524,7 +544,6 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
avcodec_free_context(&ost->enc);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
av_packet_free(&ost->tmp_pkt);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
@@ -535,10 +554,10 @@ static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const AVOutputFormat *fmt;
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
const AVCodec *audio_codec, *video_codec;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
@@ -625,6 +644,10 @@ int main(int argc, char **argv)
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
@@ -1,435 +0,0 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated video transcoding API usage example
|
||||
* @example qsv_transcode.c
|
||||
*
|
||||
* Perform QSV-accelerated transcoding and show how to dynamically change
|
||||
* encoder's options.
|
||||
*
|
||||
* Usage: qsv_transcode input_stream codec output_stream initial option
|
||||
* { frame_number new_option }
|
||||
* e.g: - qsv_transcode input.mp4 h264_qsv output_h264.mp4 "g 60"
|
||||
* - qsv_transcode input.mp4 hevc_qsv output_hevc.mp4 "g 60 async_depth 1"
|
||||
* 100 "g 120"
|
||||
* (initialize codec with gop_size 60 and change it to 120 after 100
|
||||
* frames)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
static AVBufferRef *hw_device_ctx = NULL;
|
||||
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
|
||||
static int video_stream = -1;
|
||||
|
||||
typedef struct DynamicSetting {
|
||||
int frame_number;
|
||||
char* optstr;
|
||||
} DynamicSetting;
|
||||
static DynamicSetting *dynamic_setting;
|
||||
static int setting_number;
|
||||
static int current_setting_number;
|
||||
|
||||
static int str_to_dict(char* optstr, AVDictionary **opt)
|
||||
{
|
||||
char *key, *value;
|
||||
if (strlen(optstr) == 0)
|
||||
return 0;
|
||||
key = strtok(optstr, " ");
|
||||
if (key == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
do {
|
||||
key = strtok(NULL, " ");
|
||||
if (key == NULL)
|
||||
return 0;
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
} while(1);
|
||||
}
|
||||
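
For illustration, a minimal sketch of how an option string such as "g 60 async_depth 1" ends up in the dictionary; strtok() modifies its argument, so the string must be writable:

    AVDictionary *opts = NULL;
    char optstr[] = "g 60 async_depth 1";
    if (str_to_dict(optstr, &opts) == 0) {
        /* opts now maps "g" -> "60" and "async_depth" -> "1" */
    }
    av_dict_free(&opts);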

static int dynamic_set_parameter(AVCodecContext *avctx)
{
AVDictionary *opts = NULL;
int ret = 0;
static int frame_number = 0;
frame_number++;
if (current_setting_number < setting_number &&
frame_number == dynamic_setting[current_setting_number].frame_number) {
AVDictionaryEntry *e = NULL;
ret = str_to_dict(dynamic_setting[current_setting_number++].optstr, &opts);
if (ret < 0) {
fprintf(stderr, "The dynamic parameter is wrong\n");
goto fail;
}
/* Set common option. The dictionary will be freed and replaced
* by a new one containing all options not found in common option list.
* Then this new dictionary is used to set private option. */
if ((ret = av_opt_set_dict(avctx, &opts)) < 0)
goto fail;
/* Set codec specific option */
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
goto fail;
/* There is no "framerate" option in commom option list. Use "-r" to set
* framerate, which is compatible with ffmpeg commandline. The video is
* assumed to be average frame rate, so set time_base to 1/framerate. */
e = av_dict_get(opts, "r", NULL, 0);
if (e) {
avctx->framerate = av_d2q(atof(e->value), INT_MAX);
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
}
}
fail:
av_dict_free(&opts);
return ret;
}
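
A small sketch of the framerate/time_base relation used above, assuming a dynamic option of "r 30" (INT_MAX comes from <limits.h>):

    AVRational framerate = av_d2q(30.0, INT_MAX);  /* -> 30/1 */
    AVRational time_base = av_inv_q(framerate);    /* -> 1/30 */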

static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) {
return AV_PIX_FMT_QSV;
}

pix_fmts++;
}

fprintf(stderr, "The QSV pixel format not offered in get_format()\n");

return AV_PIX_FMT_NONE;
}

static int open_input_file(char *filename)
{
int ret;
const AVCodec *decoder = NULL;
AVStream *video = NULL;

if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
filename, av_err2str(ret));
return ret;
}

if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
av_err2str(ret));
return ret;
}

ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Cannot find a video stream in the input file. "
"Error code: %s\n", av_err2str(ret));
return ret;
}
video_stream = ret;
video = ifmt_ctx->streams[video_stream];

switch(video->codecpar->codec_id) {
case AV_CODEC_ID_H264:
decoder = avcodec_find_decoder_by_name("h264_qsv");
break;
case AV_CODEC_ID_HEVC:
decoder = avcodec_find_decoder_by_name("hevc_qsv");
break;
case AV_CODEC_ID_VP9:
decoder = avcodec_find_decoder_by_name("vp9_qsv");
break;
case AV_CODEC_ID_VP8:
decoder = avcodec_find_decoder_by_name("vp8_qsv");
break;
case AV_CODEC_ID_AV1:
decoder = avcodec_find_decoder_by_name("av1_qsv");
break;
case AV_CODEC_ID_MPEG2VIDEO:
decoder = avcodec_find_decoder_by_name("mpeg2_qsv");
break;
case AV_CODEC_ID_MJPEG:
decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
break;
default:
fprintf(stderr, "Codec is not supportted by qsv\n");
return AVERROR(EINVAL);
}

if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
return AVERROR(ENOMEM);

if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
av_err2str(ret));
return ret;
}
decoder_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video, NULL);

decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
if (!decoder_ctx->hw_device_ctx) {
fprintf(stderr, "A hardware device reference create failed.\n");
return AVERROR(ENOMEM);
}
decoder_ctx->get_format = get_format;
decoder_ctx->pkt_timebase = video->time_base;
if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
av_err2str(ret));

return ret;
}

static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
{
int ret = 0;

av_packet_unref(enc_pkt);

if((ret = dynamic_set_parameter(encoder_ctx)) < 0) {
fprintf(stderr, "Failed to set dynamic parameter. Error code: %s\n",
av_err2str(ret));
goto end;
}

if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
goto end;
}
while (1) {
if (ret = avcodec_receive_packet(encoder_ctx, enc_pkt))
break;
enc_pkt->stream_index = 0;
av_packet_rescale_ts(enc_pkt, encoder_ctx->time_base,
ofmt_ctx->streams[0]->time_base);
if ((ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt)) < 0) {
fprintf(stderr, "Error during writing data to output file. "
"Error code: %s\n", av_err2str(ret));
return ret;
}
}

end:
if (ret == AVERROR_EOF)
return 0;
ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
return ret;
}
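
The condensed loop above follows the usual send/receive contract; spelled out as a sketch, where AVERROR(EAGAIN) means the encoder needs more input and AVERROR_EOF means it is fully drained:

    while ((ret = avcodec_receive_packet(encoder_ctx, enc_pkt)) >= 0) {
        /* rescale timestamps and write enc_pkt here */
        av_packet_unref(enc_pkt);
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;   /* both are expected conditions, not errors */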

static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
{
AVFrame *frame;
int ret = 0;

ret = avcodec_send_packet(decoder_ctx, pkt);
if (ret < 0) {
fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
return ret;
}

while (ret >= 0) {
if (!(frame = av_frame_alloc()))
return AVERROR(ENOMEM);

ret = avcodec_receive_frame(decoder_ctx, frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
av_frame_free(&frame);
return 0;
} else if (ret < 0) {
fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
goto fail;
}
if (!encoder_ctx->hw_frames_ctx) {
AVDictionaryEntry *e = NULL;
AVDictionary *opts = NULL;
AVStream *ost;
/* we need to ref hw_frames_ctx of decoder to initialize encoder's codec.
Only after we get a decoded frame, can we obtain its hw_frames_ctx */
encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
if (!encoder_ctx->hw_frames_ctx) {
ret = AVERROR(ENOMEM);
goto fail;
}
/* set AVCodecContext Parameters for encoder, here we keep them stay
* the same as decoder.
*/
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
encoder_ctx->pix_fmt = AV_PIX_FMT_QSV;
encoder_ctx->width = decoder_ctx->width;
encoder_ctx->height = decoder_ctx->height;
if ((ret = str_to_dict(optstr, &opts)) < 0) {
fprintf(stderr, "Failed to set encoding parameter.\n");
goto fail;
}
/* There is no "framerate" option in commom option list. Use "-r" to
* set framerate, which is compatible with ffmpeg commandline. The
* video is assumed to be average frame rate, so set time_base to
* 1/framerate. */
e = av_dict_get(opts, "r", NULL, 0);
if (e) {
encoder_ctx->framerate = av_d2q(atof(e->value), INT_MAX);
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
}
if ((ret = avcodec_open2(encoder_ctx, enc_codec, &opts)) < 0) {
fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
av_err2str(ret));
av_dict_free(&opts);
goto fail;
}
av_dict_free(&opts);

if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
fprintf(stderr, "Failed to allocate stream for output format.\n");
ret = AVERROR(ENOMEM);
goto fail;
}

ost->time_base = encoder_ctx->time_base;
ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
if (ret < 0) {
fprintf(stderr, "Failed to copy the stream parameters. "
"Error code: %s\n", av_err2str(ret));
goto fail;
}

/* write the stream header */
if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
fprintf(stderr, "Error while writing stream header. "
"Error code: %s\n", av_err2str(ret));
goto fail;
}
}
frame->pts = av_rescale_q(frame->pts, decoder_ctx->pkt_timebase,
encoder_ctx->time_base);
if ((ret = encode_write(pkt, frame)) < 0)
fprintf(stderr, "Error during encoding and writing.\n");

fail:
av_frame_free(&frame);
}
return ret;
}

int main(int argc, char **argv)
{
const AVCodec *enc_codec;
int ret = 0;
AVPacket *dec_pkt = NULL;

if (argc < 5 || (argc - 5) % 2) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <encoder> <output file>"
" <\"encoding option set 0\"> [<frame_number> <\"encoding options set 1\">]...\n", argv[0]);
return 1;
}
setting_number = (argc - 5) / 2;
dynamic_setting = av_malloc(setting_number * sizeof(*dynamic_setting));
current_setting_number = 0;
for (int i = 0; i < setting_number; i++) {
dynamic_setting[i].frame_number = atoi(argv[i*2 + 5]);
dynamic_setting[i].optstr = argv[i*2 + 6];
}

ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Failed to create a QSV device. Error code: %s\n", av_err2str(ret));
goto end;
}

dec_pkt = av_packet_alloc();
if (!dec_pkt) {
fprintf(stderr, "Failed to allocate decode packet\n");
goto end;
}

if ((ret = open_input_file(argv[1])) < 0)
goto end;

if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
ret = -1;
goto end;
}

if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
"%s\n", av_err2str(ret));
goto end;
}

if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
ret = AVERROR(ENOMEM);
goto end;
}

ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Cannot open output file. "
"Error code: %s\n", av_err2str(ret));
goto end;
}

/* read all packets and only transcoding video */
while (ret >= 0) {
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
break;

if (video_stream == dec_pkt->stream_index)
ret = dec_enc(dec_pkt, enc_codec, argv[4]);

av_packet_unref(dec_pkt);
}

/* flush decoder */
av_packet_unref(dec_pkt);
if ((ret = dec_enc(dec_pkt, enc_codec, argv[4])) < 0) {
fprintf(stderr, "Failed to flush decoder %s\n", av_err2str(ret));
goto end;
}

/* flush encoder */
if ((ret = encode_write(dec_pkt, NULL)) < 0) {
fprintf(stderr, "Failed to flush encoder %s\n", av_err2str(ret));
goto end;
}

/* write the trailer for output stream */
if ((ret = av_write_trailer(ofmt_ctx)) < 0)
fprintf(stderr, "Failed to write trailer %s\n", av_err2str(ret));

end:
avformat_close_input(&ifmt_ctx);
avformat_close_input(&ofmt_ctx);
avcodec_free_context(&decoder_ctx);
avcodec_free_context(&encoder_ctx);
av_buffer_unref(&hw_device_ctx);
av_packet_free(&dec_pkt);
av_freep(&dynamic_setting);
return ret;
}
@@ -21,13 +21,16 @@
*/

/**
* @file Intel QSV-accelerated H.264 decoding API usage example
* @example qsv_decode.c
* @file
* Intel QSV-accelerated H.264 decoding example.
*
* Perform QSV-accelerated H.264 decoding with output frames in the
* GPU video surfaces, write the decoded frames to an output file.
* @example qsvdec.c
* This example shows how to do QSV-accelerated H.264 decoding with output
* frames in the GPU video surfaces.
*/

#include "config.h"

#include <stdio.h>

#include "libavformat/avformat.h"
@@ -41,10 +44,38 @@
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"

typedef struct DecodeContext {
AVBufferRef *hw_device_ref;
} DecodeContext;

static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) {
DecodeContext *decode = avctx->opaque;
AVHWFramesContext *frames_ctx;
AVQSVFramesContext *frames_hwctx;
int ret;

/* create a pool of surfaces to be used by the decoder */
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
if (!avctx->hw_frames_ctx)
return AV_PIX_FMT_NONE;
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
frames_hwctx = frames_ctx->hwctx;

frames_ctx->format = AV_PIX_FMT_QSV;
frames_ctx->sw_format = avctx->sw_pix_fmt;
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
frames_ctx->initial_pool_size = 32;

frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;

ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
if (ret < 0)
return AV_PIX_FMT_NONE;

return AV_PIX_FMT_QSV;
}

@@ -56,7 +87,7 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
return AV_PIX_FMT_NONE;
}

static int decode_packet(AVCodecContext *decoder_ctx,
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
AVFrame *frame, AVFrame *sw_frame,
AVPacket *pkt, AVIOContext *output_ctx)
{
@@ -110,15 +141,15 @@ int main(int argc, char **argv)
AVCodecContext *decoder_ctx = NULL;
const AVCodec *decoder;

AVPacket *pkt = NULL;
AVPacket pkt = { 0 };
AVFrame *frame = NULL, *sw_frame = NULL;

DecodeContext decode = { NULL };

AVIOContext *output_ctx = NULL;

int ret, i;

AVBufferRef *device_ref = NULL;

if (argc < 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
@@ -146,7 +177,7 @@ int main(int argc, char **argv)
}

/* open the hardware device */
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
"auto", NULL, 0);
if (ret < 0) {
fprintf(stderr, "Cannot open the hardware device\n");
@@ -178,8 +209,7 @@ int main(int argc, char **argv)
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
}


decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
decoder_ctx->opaque = &decode;
decoder_ctx->get_format = get_format;

ret = avcodec_open2(decoder_ctx, NULL, NULL);
@@ -197,26 +227,27 @@ int main(int argc, char **argv)

frame = av_frame_alloc();
sw_frame = av_frame_alloc();
pkt = av_packet_alloc();
if (!frame || !sw_frame || !pkt) {
if (!frame || !sw_frame) {
ret = AVERROR(ENOMEM);
goto finish;
}

/* actual decoding */
while (ret >= 0) {
ret = av_read_frame(input_ctx, pkt);
ret = av_read_frame(input_ctx, &pkt);
if (ret < 0)
break;

if (pkt->stream_index == video_st->index)
ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
if (pkt.stream_index == video_st->index)
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);

av_packet_unref(pkt);
av_packet_unref(&pkt);
}

/* flush the decoder */
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
pkt.data = NULL;
pkt.size = 0;
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);

finish:
if (ret < 0) {
@@ -229,11 +260,10 @@ finish:

av_frame_free(&frame);
av_frame_free(&sw_frame);
av_packet_free(&pkt);

avcodec_free_context(&decoder_ctx);

av_buffer_unref(&device_ref);
av_buffer_unref(&decode.hw_device_ref);

avio_close(output_ctx);

@@ -21,11 +21,11 @@
*/

/**
* @file libavformat/libavcodec demuxing and muxing API usage example
* @example remux.c
* @file
* libavformat/libavcodec demuxing and muxing API example.
*
* Remux streams from one container format to another. Data is copied from the
* input to the output without transcoding.
* Remux streams from one container format to another.
* @example remuxing.c
*/

#include <libavutil/timestamp.h>
@@ -45,9 +45,9 @@ static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, cons

int main(int argc, char **argv)
{
const AVOutputFormat *ofmt = NULL;
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket *pkt = NULL;
AVPacket pkt;
const char *in_filename, *out_filename;
int ret, i;
int stream_index = 0;
@@ -65,12 +65,6 @@ int main(int argc, char **argv)
in_filename = argv[1];
out_filename = argv[2];

pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
return 1;
}

if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
@@ -91,7 +85,7 @@ int main(int argc, char **argv)
}

stream_mapping_size = ifmt_ctx->nb_streams;
stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping));
stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
if (!stream_mapping) {
ret = AVERROR(ENOMEM);
goto end;
@@ -146,39 +140,38 @@ int main(int argc, char **argv)
while (1) {
AVStream *in_stream, *out_stream;

ret = av_read_frame(ifmt_ctx, pkt);
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
break;

in_stream = ifmt_ctx->streams[pkt->stream_index];
if (pkt->stream_index >= stream_mapping_size ||
stream_mapping[pkt->stream_index] < 0) {
av_packet_unref(pkt);
in_stream = ifmt_ctx->streams[pkt.stream_index];
if (pkt.stream_index >= stream_mapping_size ||
stream_mapping[pkt.stream_index] < 0) {
av_packet_unref(&pkt);
continue;
}

pkt->stream_index = stream_mapping[pkt->stream_index];
out_stream = ofmt_ctx->streams[pkt->stream_index];
log_packet(ifmt_ctx, pkt, "in");
pkt.stream_index = stream_mapping[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");

/* copy packet */
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
pkt->pos = -1;
log_packet(ofmt_ctx, pkt, "out");
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(ofmt_ctx, &pkt, "out");

ret = av_interleaved_write_frame(ofmt_ctx, pkt);
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
* its contents and resets pkt), so that no unreferencing is necessary.
* This would be different if one used av_write_frame(). */
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_packet_unref(&pkt);
}

av_write_trailer(ofmt_ctx);
end:
av_packet_free(&pkt);

avformat_close_input(&ifmt_ctx);

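The av_packet_rescale_ts() call in the newer variant is shorthand for the manual rescaling in the older one; a sketch of the equivalence:

    static void rescale_packet(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
    {
        pkt->pts      = av_rescale_q_rnd(pkt->pts, src_tb, dst_tb,
                                         AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        pkt->dts      = av_rescale_q_rnd(pkt->dts, src_tb, dst_tb,
                                         AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
    }
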
@@ -21,12 +21,8 @@
*/

/**
* @file audio resampling API usage example
* @example resample_audio.c
*
* Generate a synthetic audio signal, and Use libswresample API to perform audio
* resampling. The output is written to a raw audio file to be played with
* ffplay.
* @example resampling_audio.c
* libswresample API use example.
*/

#include <libavutil/opt.h>
@@ -84,7 +80,7 @@ static void fill_samples(double *dst, int nb_samples, int nb_channels, int sampl

int main(int argc, char **argv)
{
AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_STEREO, dst_ch_layout = AV_CHANNEL_LAYOUT_SURROUND;
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
int src_rate = 48000, dst_rate = 44100;
uint8_t **src_data = NULL, **dst_data = NULL;
int src_nb_channels = 0, dst_nb_channels = 0;
@@ -96,7 +92,6 @@ int main(int argc, char **argv)
int dst_bufsize;
const char *fmt;
struct SwrContext *swr_ctx;
char buf[64];
double t;
int ret;

@@ -125,11 +120,11 @@ int main(int argc, char **argv)
}

/* set options */
av_opt_set_chlayout(swr_ctx, "in_chlayout", &src_ch_layout, 0);
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

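The two generations of the channel-layout option API seen in this hunk, side by side (sketch):

    AVChannelLayout layout = AV_CHANNEL_LAYOUT_STEREO;
    av_opt_set_chlayout(swr_ctx, "in_chlayout", &layout, 0);               /* AVChannelLayout API */
    av_opt_set_int(swr_ctx, "in_channel_layout", AV_CH_LAYOUT_STEREO, 0);  /* legacy bitmask API */
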
@@ -141,7 +136,7 @@ int main(int argc, char **argv)

/* allocate source and destination samples buffers */

src_nb_channels = src_ch_layout.nb_channels;
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
src_nb_samples, src_sample_fmt, 0);
if (ret < 0) {
@@ -156,7 +151,7 @@ int main(int argc, char **argv)
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

/* buffer is going to be directly written to a rawaudio file, no alignment */
dst_nb_channels = dst_ch_layout.nb_channels;
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 0);
if (ret < 0) {
@@ -199,10 +194,9 @@ int main(int argc, char **argv)

if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
goto end;
av_channel_layout_describe(&dst_ch_layout, buf, sizeof(buf));
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
"ffplay -f %s -channel_layout %s -channels %d -ar %d %s\n",
fmt, buf, dst_nb_channels, dst_rate, dst_filename);
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
fclose(dst_file);
@@ -21,10 +21,9 @@
*/

/**
* @file libswscale API usage example
* @example scale_video.c
*
* Generate a synthetic video signal and use libswscale to perform rescaling.
* @file
* libswscale API use example.
* @example scaling_video.c
*/

#include <libavutil/imgutils.h>
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2022 Andreas Unterweger
* Copyright (c) 2013-2018 Andreas Unterweger
*
* This file is part of FFmpeg.
*
@@ -19,11 +19,12 @@
*/

/**
* @file audio transcoding to MPEG/AAC API usage example
* @example transcode_aac.c
* @file
* Simple audio converter
*
* Convert an input audio file to AAC in an MP4 container. Formats other than
* MP4 are supported based on the output file extension.
* @example transcode_aac.c
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
* Formats other than MP4 are supported based on the output file extension.
* @author Andreas Unterweger (dustsigns@gmail.com)
*/

@@ -37,7 +38,6 @@
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"

@@ -60,8 +60,7 @@ static int open_input_file(const char *filename,
AVCodecContext **input_codec_context)
{
AVCodecContext *avctx;
const AVCodec *input_codec;
const AVStream *stream;
AVCodec *input_codec;
int error;

/* Open the input file to read from it. */
@@ -89,10 +88,8 @@ static int open_input_file(const char *filename,
return AVERROR_EXIT;
}

stream = (*input_format_context)->streams[0];

/* Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder(stream->codecpar->codec_id))) {
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context);
return AVERROR_EXIT;
@@ -107,7 +104,7 @@ static int open_input_file(const char *filename,
}

/* Initialize the stream parameters with demuxer information. */
error = avcodec_parameters_to_context(avctx, stream->codecpar);
error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
if (error < 0) {
avformat_close_input(input_format_context);
avcodec_free_context(&avctx);
@@ -123,9 +120,6 @@ static int open_input_file(const char *filename,
return error;
}

/* Set the packet timebase for the decoder. */
avctx->pkt_timebase = stream->time_base;

/* Save the decoder context for easier access later. */
*input_codec_context = avctx;

@@ -150,7 +144,7 @@ static int open_output_file(const char *filename,
AVCodecContext *avctx = NULL;
AVIOContext *output_io_context = NULL;
AVStream *stream = NULL;
const AVCodec *output_codec = NULL;
AVCodec *output_codec = NULL;
int error;

/* Open the output file to write to it. */
@@ -205,11 +199,15 @@ static int open_output_file(const char *filename,

/* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion. */
av_channel_layout_default(&avctx->ch_layout, OUTPUT_CHANNELS);
avctx->channels = OUTPUT_CHANNELS;
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
avctx->sample_rate = input_codec_context->sample_rate;
avctx->sample_fmt = output_codec->sample_fmts[0];
avctx->bit_rate = OUTPUT_BIT_RATE;

/* Allow the use of the experimental AAC encoder. */
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

/* Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate;
stream->time_base.num = 1;
@@ -247,16 +245,14 @@ cleanup:

/**
* Initialize one data packet for reading or writing.
* @param[out] packet Packet to be initialized
* @return Error code (0 if successful)
* @param packet Packet to be initialized
*/
static int init_packet(AVPacket **packet)
static void init_packet(AVPacket *packet)
{
if (!(*packet = av_packet_alloc())) {
fprintf(stderr, "Could not allocate packet\n");
return AVERROR(ENOMEM);
}
return 0;
av_init_packet(packet);
/* Set the packet data and size so that it is recognized as being empty. */
packet->data = NULL;
packet->size = 0;
}

/**
@@ -291,18 +287,21 @@ static int init_resampler(AVCodecContext *input_codec_context,
/*
* Create a resampler context for the conversion.
* Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/
error = swr_alloc_set_opts2(resample_context,
&output_codec_context->ch_layout,
*resample_context = swr_alloc_set_opts(NULL,
av_get_default_channel_layout(output_codec_context->channels),
output_codec_context->sample_fmt,
output_codec_context->sample_rate,
&input_codec_context->ch_layout,
av_get_default_channel_layout(input_codec_context->channels),
input_codec_context->sample_fmt,
input_codec_context->sample_rate,
0, NULL);
if (error < 0) {
if (!*resample_context) {
fprintf(stderr, "Could not allocate resample context\n");
return error;
return AVERROR(ENOMEM);
}
/*
* Perform a sanity check so that the number of converted samples is
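A minimal sketch of the newer allocator used above, which reports failure through its return value rather than a NULL context (the in_/out_ variables are placeholders):

    SwrContext *swr = NULL;
    int err = swr_alloc_set_opts2(&swr,
                                  &out_ch_layout, out_sample_fmt, out_sample_rate,
                                  &in_ch_layout,  in_sample_fmt,  in_sample_rate,
                                  0, NULL);
    if (err < 0 || (err = swr_init(swr)) < 0) {
        /* handle the error */
    }
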
@@ -330,7 +329,7 @@ static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{
/* Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
output_codec_context->ch_layout.nb_channels, 1))) {
output_codec_context->channels, 1))) {
fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM);
}
@@ -372,33 +371,28 @@ static int decode_audio_frame(AVFrame *frame,
int *data_present, int *finished)
{
/* Packet used for temporary storage. */
AVPacket *input_packet;
AVPacket input_packet;
int error;
init_packet(&input_packet);

error = init_packet(&input_packet);
if (error < 0)
return error;

*data_present = 0;
*finished = 0;
/* Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, input_packet)) < 0) {
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
/* If we are at the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {
fprintf(stderr, "Could not read frame (error '%s')\n",
av_err2str(error));
goto cleanup;
return error;
}
}

/* Send the audio frame stored in the temporary packet to the decoder.
* The input audio stream decoder is used to do this. */
if ((error = avcodec_send_packet(input_codec_context, input_packet)) < 0) {
if ((error = avcodec_send_packet(input_codec_context, &input_packet)) < 0) {
fprintf(stderr, "Could not send packet for decoding (error '%s')\n",
av_err2str(error));
goto cleanup;
return error;
}

/* Receive one frame from the decoder. */
@@ -424,7 +418,7 @@ static int decode_audio_frame(AVFrame *frame,
}

cleanup:
av_packet_free(&input_packet);
av_packet_unref(&input_packet);
return error;
}

@@ -447,17 +441,26 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
int error;

/* Allocate as many pointers as there are audio channels.
* Each pointer will point to the audio samples of the corresponding
* Each pointer will later point to the audio samples of the corresponding
* channels (although it may be NULL for interleaved formats).
* Allocate memory for the samples of all channels in one consecutive
*/
if (!(*converted_input_samples = calloc(output_codec_context->channels,
sizeof(**converted_input_samples)))) {
fprintf(stderr, "Could not allocate converted input sample pointers\n");
return AVERROR(ENOMEM);
}

/* Allocate memory for the samples of all channels in one consecutive
* block for convenience. */
if ((error = av_samples_alloc_array_and_samples(converted_input_samples, NULL,
output_codec_context->ch_layout.nb_channels,
if ((error = av_samples_alloc(*converted_input_samples, NULL,
output_codec_context->channels,
frame_size,
output_codec_context->sample_fmt, 0)) < 0) {
fprintf(stderr,
"Could not allocate converted input samples (error '%s')\n",
av_err2str(error));
av_freep(&(*converted_input_samples)[0]);
free(*converted_input_samples);
return error;
}
return 0;
@@ -550,7 +553,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFrame *input_frame = NULL;
/* Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL;
int data_present;
int data_present = 0;
int ret = AVERROR_EXIT;

/* Initialize temporary storage for one input frame. */
@@ -589,9 +592,10 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
ret = 0;

cleanup:
if (converted_input_samples)
if (converted_input_samples) {
av_freep(&converted_input_samples[0]);
av_freep(&converted_input_samples);
free(converted_input_samples);
}
av_frame_free(&input_frame);

return ret;
@@ -623,7 +627,7 @@ static int init_output_frame(AVFrame **frame,
* Default channel layouts based on the number of channels
* are assumed for simplicity. */
(*frame)->nb_samples = frame_size;
av_channel_layout_copy(&(*frame)->ch_layout, &output_codec_context->ch_layout);
(*frame)->channel_layout = output_codec_context->channel_layout;
(*frame)->format = output_codec_context->sample_fmt;
(*frame)->sample_rate = output_codec_context->sample_rate;

@@ -657,12 +661,9 @@ static int encode_audio_frame(AVFrame *frame,
int *data_present)
{
/* Packet used for temporary storage. */
AVPacket *output_packet;
AVPacket output_packet;
int error;

error = init_packet(&output_packet);
if (error < 0)
return error;
init_packet(&output_packet);

/* Set a timestamp based on the sample rate for the container. */
if (frame) {
@@ -670,20 +671,21 @@ static int encode_audio_frame(AVFrame *frame,
pts += frame->nb_samples;
}

*data_present = 0;
/* Send the audio frame stored in the temporary packet to the encoder.
* The output audio stream encoder is used to do this. */
error = avcodec_send_frame(output_codec_context, frame);
/* Check for errors, but proceed with fetching encoded samples if the
* encoder signals that it has nothing more to encode. */
if (error < 0 && error != AVERROR_EOF) {
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
goto cleanup;
/* The encoder signals that it has nothing more to encode. */
if (error == AVERROR_EOF) {
error = 0;
goto cleanup;
} else if (error < 0) {
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
av_err2str(error));
return error;
}

/* Receive one encoded frame from the encoder. */
error = avcodec_receive_packet(output_codec_context, output_packet);
error = avcodec_receive_packet(output_codec_context, &output_packet);
/* If the encoder asks for more data to be able to provide an
* encoded frame, return indicating that no data is present. */
if (error == AVERROR(EAGAIN)) {
@@ -704,14 +706,14 @@ static int encode_audio_frame(AVFrame *frame,

/* Write one audio frame from the temporary packet to the output file. */
if (*data_present &&
(error = av_write_frame(output_format_context, output_packet)) < 0) {
(error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
av_err2str(error));
goto cleanup;
}

cleanup:
av_packet_free(&output_packet);
av_packet_unref(&output_packet);
return error;
}

@@ -850,6 +852,7 @@ int main(int argc, char **argv)
int data_written;
/* Flush the encoder as it may have delayed frames. */
do {
data_written = 0;
if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written))
goto cleanup;

@@ -23,18 +23,15 @@
*/

/**
* @file demuxing, decoding, filtering, encoding and muxing API usage example
* @example transcode.c
*
* Convert input to output file, applying some hard-coded filter-graph on both
* audio and video streams.
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

@@ -44,17 +41,12 @@ typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;

AVPacket *enc_pkt;
AVFrame *filtered_frame;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
AVCodecContext *dec_ctx;
AVCodecContext *enc_ctx;

AVFrame *dec_frame;
} StreamContext;
static StreamContext *stream_ctx;

@@ -74,13 +66,13 @@ static int open_input_file(const char *filename)
return ret;
}

stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
if (!stream_ctx)
return AVERROR(ENOMEM);

for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream = ifmt_ctx->streams[i];
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *codec_ctx;
if (!dec) {
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
@@ -97,11 +89,6 @@ static int open_input_file(const char *filename)
"for stream #%u\n", i);
return ret;
}

/* Inform the decoder about the timebase for the packet timestamps.
* This is highly recommended, but not mandatory. */
codec_ctx->pkt_timebase = stream->time_base;

/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
@@ -115,10 +102,6 @@ static int open_input_file(const char *filename)
}
}
stream_ctx[i].dec_ctx = codec_ctx;

stream_ctx[i].dec_frame = av_frame_alloc();
if (!stream_ctx[i].dec_frame)
return AVERROR(ENOMEM);
}

av_dump_format(ifmt_ctx, 0, filename, 0);
@@ -130,7 +113,7 @@ static int open_output_file(const char *filename)
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
const AVCodec *encoder;
AVCodec *encoder;
int ret;
unsigned int i;

@@ -182,21 +165,17 @@ static int open_output_file(const char *filename)
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
if (ret < 0)
return ret;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}

if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open %s encoder for stream #%u\n", encoder->name, i);
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
@@ -204,6 +183,8 @@ static int open_output_file(const char *filename)
av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
return ret;
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

out_stream->time_base = enc_ctx->time_base;
stream_ctx[i].enc_ctx = enc_ctx;
@@ -271,7 +252,7 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num,
dec_ctx->sample_aspect_ratio.den);

@@ -297,7 +278,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
char buf[64];
buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) {
@@ -306,14 +286,14 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}

if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf));
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout =
av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s",
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den, dec_ctx->sample_rate,
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
buf);
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
@@ -336,9 +316,9 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end;
}

av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf));
ret = av_opt_set(buffersink_ctx, "ch_layouts",
buf, AV_OPT_SEARCH_CHILDREN);
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
(uint8_t*)&enc_ctx->channel_layout,
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
@@ -417,67 +397,54 @@ static int init_filters(void)
stream_ctx[i].enc_ctx, filter_spec);
if (ret)
return ret;

filter_ctx[i].enc_pkt = av_packet_alloc();
if (!filter_ctx[i].enc_pkt)
return AVERROR(ENOMEM);

filter_ctx[i].filtered_frame = av_frame_alloc();
if (!filter_ctx[i].filtered_frame)
return AVERROR(ENOMEM);
}
return 0;
}

static int encode_write_frame(unsigned int stream_index, int flush)
{
StreamContext *stream = &stream_ctx[stream_index];
FilteringContext *filter = &filter_ctx[stream_index];
AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
AVPacket *enc_pkt = filter->enc_pkt;
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

if (!got_frame)
got_frame = &got_frame_local;

av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
av_packet_unref(enc_pkt);

if (filt_frame && filt_frame->pts != AV_NOPTS_VALUE)
filt_frame->pts = av_rescale_q(filt_frame->pts, filt_frame->time_base,
stream->enc_ctx->time_base);

ret = avcodec_send_frame(stream->enc_ctx, filt_frame);

enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;

while (ret >= 0) {
ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);

if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
return 0;

/* prepare packet for muxing */
enc_pkt->stream_index = stream_index;
av_packet_rescale_ts(enc_pkt,
stream->enc_ctx->time_base,
ofmt_ctx->streams[stream_index]->time_base);

av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
}
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
stream_ctx[stream_index].enc_ctx->time_base,
ofmt_ctx->streams[stream_index]->time_base);

av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}

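The newer variant above replaces the avcodec_encode_video2()/avcodec_encode_audio2() calls with the generic send/receive API; the core pattern, reduced to a sketch:

    ret = avcodec_send_frame(enc_ctx, frame);          /* frame == NULL flushes the encoder */
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;                                     /* needs more input, or fully drained */
        /* rescale pkt timestamps and mux it here */
        av_packet_unref(pkt);
    }
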
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
FilteringContext *filter = &filter_ctx[stream_index];
|
||||
int ret;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
|
||||
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
@@ -486,9 +453,14 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!filt_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter->buffersink_ctx,
|
||||
filter->filtered_frame);
|
||||
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
|
||||
filt_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
@@ -496,13 +468,12 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
av_frame_free(&filt_frame);
|
||||
break;
|
||||
}
|
||||
|
||||
filter->filtered_frame->time_base = av_buffersink_get_time_base(filter->buffersink_ctx);;
|
||||
filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(stream_index, 0);
|
||||
av_frame_unref(filter->filtered_frame);
|
||||
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(filt_frame, stream_index, NULL);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -512,20 +483,34 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
int ret;
|
||||
int got_frame;
|
||||
|
||||
if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
|
||||
AV_CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
return encode_write_frame(stream_index, 1);
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
ret = encode_write_frame(NULL, stream_index, &got_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
if (!got_frame)
|
||||
return 0;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = NULL;
|
||||
AVPacket packet = { .data = NULL, .size = 0 };
|
||||
AVFrame *frame = NULL;
|
||||
enum AVMediaType type;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
int got_frame;
|
||||
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
@@ -538,85 +523,63 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
if (!(packet = av_packet_alloc()))
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
stream_index = packet->stream_index;
|
||||
stream_index = packet.stream_index;
|
||||
type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
StreamContext *stream = &stream_ctx[stream_index];
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
|
||||
ret = avcodec_send_packet(stream->dec_ctx, packet);
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
break;
|
||||
}
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
stream_ctx[stream_index].dec_ctx->time_base);
|
||||
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
|
||||
avcodec_decode_audio4;
|
||||
ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
|
||||
&got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, stream_index);
|
||||
if (got_frame) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(frame, stream_index);
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
} else {
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
av_packet_rescale_ts(packet,
|
||||
av_packet_rescale_ts(&packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, packet);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush decoders, filters and encoders */
|
||||
/* flush filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
StreamContext *stream;
|
||||
|
||||
/* flush filter */
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
|
||||
stream = &stream_ctx[i];
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream %u decoder\n", i);
|
||||
|
||||
/* flush decoder */
|
||||
ret = avcodec_send_packet(stream->dec_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing decoding failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, i);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush filter */
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
@@ -633,18 +596,14 @@ int main(int argc, char **argv)
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&packet);
|
||||
av_packet_unref(&packet);
|
||||
av_frame_free(&frame);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_free_context(&stream_ctx[i].dec_ctx);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
|
||||
avcodec_free_context(&stream_ctx[i].enc_ctx);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph) {
|
||||
if (filter_ctx && filter_ctx[i].filter_graph)
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
av_packet_free(&filter_ctx[i].enc_pkt);
|
||||
av_frame_free(&filter_ctx[i].filtered_frame);
|
||||
}
|
||||
|
||||
av_frame_free(&stream_ctx[i].dec_frame);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
av_free(stream_ctx);
|
||||
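The transcoding example above is moved from avcodec_decode_video2()/avcodec_decode_audio4() and a stack AVPacket to the send/receive API with a heap-allocated packet. Below is a minimal sketch of that decode pattern in the spirit of the diff; decode_packet and its parameters are illustrative names, not part of the example itself.

#include <libavcodec/avcodec.h>

/* Sketch only: decode one packet (or flush with pkt == NULL) using the
 * send/receive API; dec_ctx and frame are assumed to be already set up. */
static int decode_packet(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* more input needed, or fully drained */
        else if (ret < 0)
            return ret;        /* real decoding error */

        frame->pts = frame->best_effort_timestamp;
        /* ... pass the frame to the filter graph / encoder here ... */
        av_frame_unref(frame);
    }
    return 0;
}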
@@ -1,30 +1,31 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* Video Acceleration API (video encoding) encode sample
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel VAAPI-accelerated encoding API usage example
|
||||
* @example vaapi_encode.c
|
||||
* @file
|
||||
* Intel VAAPI-accelerated encoding example.
|
||||
*
|
||||
* @example vaapi_encode.c
|
||||
* This example shows how to do VAAPI-accelerated encoding. now only support NV12
|
||||
* raw file, usage like: vaapi_encode 1920 1080 input.yuv output.h264
|
||||
*
|
||||
* Perform VAAPI-accelerated encoding. Read input from an NV12 raw
|
||||
* file, and write the H.264 encoded data to an output raw file.
|
||||
* Usage: vaapi_encode 1920 1080 input.yuv output.h264
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -71,27 +72,27 @@ static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
|
||||
static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *enc_pkt;
|
||||
AVPacket enc_pkt;
|
||||
|
||||
if (!(enc_pkt = av_packet_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
av_init_packet(&enc_pkt);
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
|
||||
if ((ret = avcodec_send_frame(avctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
ret = avcodec_receive_packet(avctx, enc_pkt);
|
||||
ret = avcodec_receive_packet(avctx, &enc_pkt);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
enc_pkt->stream_index = 0;
|
||||
ret = fwrite(enc_pkt->data, enc_pkt->size, 1, fout);
|
||||
av_packet_unref(enc_pkt);
|
||||
enc_pkt.stream_index = 0;
|
||||
ret = fwrite(enc_pkt.data, enc_pkt.size, 1, fout);
|
||||
av_packet_unref(&enc_pkt);
|
||||
}
|
||||
|
||||
end:
|
||||
av_packet_free(&enc_pkt);
|
||||
ret = ((ret == AVERROR(EAGAIN)) ? 0 : -1);
|
||||
return ret;
|
||||
}
|
||||
@@ -102,7 +103,7 @@ int main(int argc, char *argv[])
|
||||
FILE *fin = NULL, *fout = NULL;
|
||||
AVFrame *sw_frame = NULL, *hw_frame = NULL;
|
||||
AVCodecContext *avctx = NULL;
|
||||
const AVCodec *codec = NULL;
|
||||
AVCodec *codec = NULL;
|
||||
const char *enc_name = "h264_vaapi";
|
||||
|
||||
if (argc < 5) {
|
||||
@@ -169,7 +170,7 @@ int main(int argc, char *argv[])
|
||||
sw_frame->width = width;
|
||||
sw_frame->height = height;
|
||||
sw_frame->format = AV_PIX_FMT_NV12;
|
||||
if ((err = av_frame_get_buffer(sw_frame, 0)) < 0)
|
||||
if ((err = av_frame_get_buffer(sw_frame, 32)) < 0)
|
||||
goto close;
|
||||
if ((err = fread((uint8_t*)(sw_frame->data[0]), size, 1, fin)) <= 0)
|
||||
break;
|
||||
|
||||
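The vaapi_encode change above swaps the stack AVPacket plus av_init_packet() for av_packet_alloc()/av_packet_free() around the receive loop. A minimal sketch of that drain loop follows, assuming an opened encoder context and output file as in the example; drain_encoder itself is an illustrative name.

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Sketch only: drain all pending packets from an encoder after
 * avcodec_send_frame(); avctx and fout are assumed to exist as in the example. */
static int drain_encoder(AVCodecContext *avctx, FILE *fout)
{
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_packet(avctx, pkt)) >= 0) {
        fwrite(pkt->data, 1, pkt->size, fout);
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);

    /* EAGAIN (needs more input) and EOF (fully flushed) are expected here */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}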
@@ -1,28 +1,29 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
* Video Acceleration API (video transcoding) transcode sample
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel VAAPI-accelerated transcoding API usage example
|
||||
* @example vaapi_transcode.c
|
||||
* @file
|
||||
* Intel VAAPI-accelerated transcoding example.
|
||||
*
|
||||
* Perform VAAPI-accelerated transcoding.
|
||||
* @example vaapi_transcode.c
|
||||
* This example shows how to do VAAPI-accelerated transcoding.
|
||||
* Usage: vaapi_transcode input_stream codec output_stream
|
||||
* e.g: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
|
||||
* - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
|
||||
@@ -59,7 +60,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVStream *video = NULL;
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
@@ -106,25 +107,28 @@ static int open_input_file(const char *filename)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
|
||||
static int encode_write(AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket enc_pkt;
|
||||
|
||||
av_packet_unref(enc_pkt);
|
||||
av_init_packet(&enc_pkt);
|
||||
enc_pkt.data = NULL;
|
||||
enc_pkt.size = 0;
|
||||
|
||||
if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
ret = avcodec_receive_packet(encoder_ctx, enc_pkt);
|
||||
ret = avcodec_receive_packet(encoder_ctx, &enc_pkt);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
enc_pkt->stream_index = 0;
|
||||
av_packet_rescale_ts(enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
|
||||
enc_pkt.stream_index = 0;
|
||||
av_packet_rescale_ts(&enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
|
||||
ofmt_ctx->streams[0]->time_base);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during writing data to output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
@@ -139,7 +143,7 @@ end:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
|
||||
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret = 0;
|
||||
@@ -173,7 +177,7 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
|
||||
}
|
||||
/* set AVCodecContext Parameters for encoder, here we keep them stay
|
||||
* the same as decoder.
|
||||
* xxx: now the sample can't handle resolution change case.
|
||||
* xxx: now the the sample can't handle resolution change case.
|
||||
*/
|
||||
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
|
||||
encoder_ctx->pix_fmt = AV_PIX_FMT_VAAPI;
|
||||
@@ -210,20 +214,22 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
|
||||
initialized = 1;
|
||||
}
|
||||
|
||||
if ((ret = encode_write(pkt, frame)) < 0)
|
||||
if ((ret = encode_write(frame)) < 0)
|
||||
fprintf(stderr, "Error during encoding and writing.\n");
|
||||
|
||||
fail:
|
||||
av_frame_free(&frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVCodec *enc_codec;
|
||||
int ret = 0;
|
||||
AVPacket *dec_pkt;
|
||||
AVPacket dec_pkt;
|
||||
AVCodec *enc_codec;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
|
||||
@@ -238,12 +244,6 @@ int main(int argc, char **argv)
|
||||
return -1;
|
||||
}
|
||||
|
||||
dec_pkt = av_packet_alloc();
|
||||
if (!dec_pkt) {
|
||||
fprintf(stderr, "Failed to allocate decode packet\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
|
||||
@@ -273,21 +273,23 @@ int main(int argc, char **argv)
|
||||
|
||||
/* read all packets and only transcoding video */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
|
||||
if ((ret = av_read_frame(ifmt_ctx, &dec_pkt)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == dec_pkt->stream_index)
|
||||
ret = dec_enc(dec_pkt, enc_codec);
|
||||
if (video_stream == dec_pkt.stream_index)
|
||||
ret = dec_enc(&dec_pkt, enc_codec);
|
||||
|
||||
av_packet_unref(dec_pkt);
|
||||
av_packet_unref(&dec_pkt);
|
||||
}
|
||||
|
||||
/* flush decoder */
|
||||
av_packet_unref(dec_pkt);
|
||||
ret = dec_enc(dec_pkt, enc_codec);
|
||||
dec_pkt.data = NULL;
|
||||
dec_pkt.size = 0;
|
||||
ret = dec_enc(&dec_pkt, enc_codec);
|
||||
av_packet_unref(&dec_pkt);
|
||||
|
||||
/* flush encoder */
|
||||
ret = encode_write(dec_pkt, NULL);
|
||||
ret = encode_write(NULL);
|
||||
|
||||
/* write the trailer for output stream */
|
||||
av_write_trailer(ofmt_ctx);
|
||||
@@ -298,6 +300,5 @@ end:
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avcodec_free_context(&encoder_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
av_packet_free(&dec_pkt);
|
||||
return ret;
|
||||
}
|
||||
|
||||
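The vaapi_transcode change above replaces the zeroed flush packet with the NULL-packet/NULL-frame draining convention. A rough sketch of that end-of-stream order, assuming opened decoder/encoder contexts and a pre-allocated frame and packet; flush_codecs is an illustrative name, where the example instead routes this through dec_enc() and encode_write().

#include <libavcodec/avcodec.h>

/* Sketch only: end-of-stream draining order; dec_ctx, enc_ctx, frame and pkt
 * are assumed to be already allocated and opened. */
static void flush_codecs(AVCodecContext *dec_ctx, AVCodecContext *enc_ctx,
                         AVFrame *frame, AVPacket *pkt)
{
    /* a NULL packet puts the decoder into draining mode */
    avcodec_send_packet(dec_ctx, NULL);
    while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
        /* ... encode the remaining decoded frames here ... */
        av_frame_unref(frame);
    }

    /* a NULL frame then drains the encoder */
    avcodec_send_frame(enc_ctx, NULL);
    while (avcodec_receive_packet(enc_ctx, pkt) >= 0) {
        /* ... write the remaining packets here ... */
        av_packet_unref(pkt);
    }
}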
16 doc/faq.texi
@@ -76,7 +76,7 @@ the gcc developers. Note that we will not add workarounds for gcc bugs.
|
||||
|
||||
Also note that (some of) the gcc developers believe this is not a bug or
|
||||
not a bug they should fix:
|
||||
@url{https://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@@ -257,13 +257,13 @@ default.
|
||||
@section Which are good parameters for encoding high quality MPEG-4?
|
||||
|
||||
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
|
||||
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
|
||||
|
||||
'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2'
|
||||
but beware the '-g 100' might cause problems with some decoders.
|
||||
Things to try: '-bf 2', '-mpv_flags qp_rd', '-mpv_flags mv0', '-mpv_flags skip_rd'.
|
||||
Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd.
|
||||
|
||||
@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong?
|
||||
|
||||
@@ -450,7 +450,7 @@ work with streams that were detected during the initial scan; streams that
|
||||
are detected later are ignored.
|
||||
|
||||
The size of the initial scan is controlled by two options: @code{probesize}
|
||||
(default ~5@tie{}Mo) and @code{analyzeduration} (default 5,000,000@tie{}µs = 5@tie{}s). For
|
||||
(default ~5 Mo) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
|
||||
the subtitle stream to be detected, both values must be large enough.
|
||||
|
||||
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
|
||||
@@ -467,7 +467,7 @@ point acceptable for your tastes. The most common options to do that are
|
||||
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
|
||||
of the encoder you chose.
|
||||
|
||||
@section I have a stretched video, why does scaling not fix it?
|
||||
@section I have a stretched video, why does scaling does not fix it?
|
||||
|
||||
A lot of video codecs and formats can store the @emph{aspect ratio} of the
|
||||
video: this is the ratio between the width and the height of either the full
|
||||
@@ -516,7 +516,7 @@ in the ffmpeg invocation. This is effective whether you run ffmpeg in a shell
|
||||
or invoke ffmpeg in its own process via an operating system API.
|
||||
|
||||
As an alternative, when you are running ffmpeg in a shell, you can redirect
|
||||
standard input to @code{/dev/null} (on Linux and macOS)
|
||||
standard input to @code{/dev/null} (on Linux and Mac OS)
|
||||
or @code{NUL} (on Windows). You can do this redirect either
|
||||
on the ffmpeg invocation, or from a shell script which calls ffmpeg.
|
||||
|
||||
@@ -526,7 +526,7 @@ For example:
|
||||
ffmpeg -nostdin -i INPUT OUTPUT
|
||||
@end example
|
||||
|
||||
or (on Linux, macOS, and other UNIX-like shells):
|
||||
or (on Linux, Mac OS, and other UNIX-like shells):
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT OUTPUT </dev/null
|
||||
@@ -601,7 +601,7 @@ No. These tools are too bloated and they complicate the build.
|
||||
FFmpeg is already organized in a highly modular manner and does not need to
|
||||
be rewritten in a formal object language. Further, many of the developers
|
||||
favor straight C; it works for them. For more arguments on this matter,
|
||||
read @uref{https://web.archive.org/web/20111004021423/http://kernel.org/pub/linux/docs/lkml/#s15, "Programming Religion"}.
|
||||
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
|
||||
|
||||
@section Why are the ffmpeg programs devoid of debugging symbols?
|
||||
|
||||
|
||||
@@ -79,29 +79,6 @@ Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
Beware that some assertions are disabled by default, so mind setting
|
||||
@option{--assert-level=<level>} at configuration time, e.g. when seeking
|
||||
the highest possible test coverage:
|
||||
@example
|
||||
./configure --assert-level=2
|
||||
@end example
|
||||
Note that raising the assert level could have a performance impact.
|
||||
|
||||
To get the complete list of tests, run the command:
|
||||
@example
|
||||
make fate-list
|
||||
@end example
|
||||
|
||||
You can specify a subset of tests to run by specifying the
|
||||
corresponding elements from the list with the @code{fate-} prefix,
|
||||
e.g. as in:
|
||||
@example
|
||||
make fate-ffprobe_compact fate-ffprobe_xml
|
||||
@end example
|
||||
|
||||
This makes it easier to run a few tests in case of failure without
|
||||
running the complete test suite.
|
||||
|
||||
To use a custom wrapper to run the test, pass @option{--target-exec} to
|
||||
@command{configure} or set the @var{TARGET_EXEC} Make variable.
|
||||
|
||||
@@ -172,18 +149,12 @@ the synchronisation of the samples directory.
|
||||
|
||||
@chapter Uploading new samples to the fate suite
|
||||
|
||||
If you need a sample uploaded send a mail to samples-request.
|
||||
|
||||
This is for developers who have an account on the fate suite server.
|
||||
If you upload new samples, please make sure they are as small as possible,
|
||||
space on each client, network bandwidth and so on benefit from smaller test cases.
|
||||
Also keep in mind older checkouts use existing sample files, that means in
|
||||
practice generally do not replace, remove or overwrite files as it likely would
|
||||
break older checkouts or releases.
|
||||
Also all needed samples for a commit should be uploaded, ideally 24
|
||||
hours, before the push.
|
||||
If you need an account for frequently uploading samples or you wish to help
|
||||
others by doing that send a mail to ffmpeg-devel.
|
||||
|
||||
@example
|
||||
#First update your local samples copy:
|
||||
@@ -231,14 +202,6 @@ meaning only while running the regression tests.
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
|
||||
This variable may be set to the string "random", optionally followed by a
|
||||
number, like "random99", This will cause each test to use a random number of
|
||||
threads. If a number is specified, it is used as a maximum number of threads,
|
||||
otherwise 16 is the maximum.
|
||||
|
||||
In case a test fails, the thread count used for it will be written into the
|
||||
errfile.
|
||||
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy test, either @samp{slice} or @samp{frame},
|
||||
by default @samp{slice+frame}
|
||||
@@ -259,11 +222,6 @@ Set to @samp{1} to generate the missing or mismatched references.
|
||||
Specify which hardware acceleration to use while running regression tests,
|
||||
by default @samp{none} is used.
|
||||
|
||||
@item KEEP
|
||||
Set to @samp{1} to keep temp files generated by fate test(s) when test is successful.
|
||||
Default is @samp{0}, which removes these files. Files are always kept when a test
|
||||
fails.
|
||||
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
@@ -31,25 +31,3 @@ makeopts= # extra options passed to 'make'
|
||||
# defaulting to makeopts above if this is not set
|
||||
#tar= # command to create a tar archive from its arguments on stdout,
|
||||
# defaults to 'tar c'
|
||||
#fate_targets= # targets to make when running fate; defaults to "fate",
|
||||
# can be set to run a subset of tests, e.g. "fate-checkasm".
|
||||
|
||||
#fate_environments= # a list of names of configurations to run tests for;
|
||||
# each round is run with variables from ${${name}_env} set.
|
||||
|
||||
# One example of using fate_environments:
|
||||
|
||||
# target_exec="qemu-aarch64-static"
|
||||
# fate_targets="fate-checkasm fate-cpu"
|
||||
# fate_environments="sve128 sve256"
|
||||
# sve128_env="QEMU_CPU=max,sve128=on"
|
||||
# sve256_env="QEMU_CPU=max,sve256=on"
|
||||
|
||||
# The variables set by fate_environments can also be used explicitly
|
||||
# by target_exec, e.g. like this:
|
||||
|
||||
# target_exec="qemu-aarch64-static -cpu \$(MY_CPU)"
|
||||
# fate_targets="fate-checkasm fate-cpu"
|
||||
# fate_environments="sve128 sve256"
|
||||
# sve128_env="MY_CPU=max,sve128=on"
|
||||
# sve256_env="MY_CPU=max,sve256=on"
|
||||
|
||||
1566 doc/ffmpeg.texi
File diff suppressed because it is too large
@@ -34,6 +34,10 @@ various FFmpeg APIs.
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -fs
|
||||
Start in fullscreen mode.
|
||||
@item -an
|
||||
@@ -56,14 +60,10 @@ Play @var{duration} seconds of audio/video.
|
||||
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -seek_interval
|
||||
Set custom interval, in seconds, for seeking using left/right keys. Default is 10 seconds.
|
||||
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -noborder
|
||||
Borderless window.
|
||||
@item -alwaysontop
|
||||
Window always on top. Available on: X11 with SDL >= 2.0.5, Windows SDL >= 2.0.6.
|
||||
@item -volume
|
||||
Set the startup volume. 0 means silence, 100 means no volume reduction or
|
||||
amplification. Negative values are treated as 0, values above 100 are treated
|
||||
@@ -72,10 +72,6 @@ as 100.
|
||||
Force format.
|
||||
@item -window_title @var{title}
|
||||
Set window title (default is the input filename).
|
||||
@item -left @var{title}
|
||||
Set the x position for the left of the window (default is a centered window).
|
||||
@item -top @var{title}
|
||||
Set the y position for the top of the window (default is a centered window).
|
||||
@item -loop @var{number}
|
||||
Loops movie playback <number> times. 0 means forever.
|
||||
@item -showmode @var{mode}
|
||||
@@ -122,12 +118,15 @@ Read @var{input_url}.
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is shown by default, unless the
|
||||
log level is lower than @code{info}. Its display can be forced by manually
|
||||
specifying this option. To disable it, you need to specify @code{-nostats}.
|
||||
the audio/video synchronisation drift. It is on by default, to
|
||||
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@@ -190,24 +189,6 @@ input as soon as possible. Enabled by default for realtime streams, where data
|
||||
may be dropped if not read in time. Use this option to enable infinite buffers
|
||||
for all inputs, use @option{-noinfbuf} to disable it.
|
||||
|
||||
@item -filter_threads @var{nb_threads}
|
||||
Defines how many threads are used to process a filter pipeline. Each pipeline
|
||||
will produce a thread pool with this many threads available for parallel
|
||||
processing. The default is 0 which means that the thread count will be
|
||||
determined by the number of available CPUs.
|
||||
|
||||
@item -enable_vulkan
|
||||
Use vulkan renderer rather than SDL builtin renderer. Depends on libplacebo.
|
||||
|
||||
@item -vulkan_params
|
||||
|
||||
Vulkan configuration using a list of @var{key}=@var{value} pairs separated by
|
||||
":".
|
||||
|
||||
@item -hwaccel
|
||||
Use HW accelerated decoding. Enable this option will enable vulkan renderer
|
||||
automatically.
|
||||
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
@@ -226,6 +207,8 @@ Pause.
|
||||
Toggle mute.
|
||||
|
||||
@item 9, 0
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@item /, *
|
||||
Decrease and increase volume respectively.
|
||||
|
||||
@@ -297,7 +280,6 @@ Toggle full screen.
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] @file{input_url}
|
||||
ffprobe [@var{options}] [@file{input_url}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
@@ -28,9 +28,6 @@ If a url is specified in input, ffprobe will try to open and
|
||||
probe the url content. If the url cannot be opened or recognized as
|
||||
a multimedia file, a positive exit code is returned.
|
||||
|
||||
If no output is specified as output with @option{o} ffprobe will write
|
||||
to stdout.
|
||||
|
||||
ffprobe may be employed both as a standalone application or in
|
||||
combination with a textual filter, which may perform more
|
||||
sophisticated processing, e.g. statistical processing or plotting.
|
||||
@@ -41,15 +38,15 @@ ffprobe will show it.
|
||||
|
||||
ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{output_format} option.
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and an unique
|
||||
name. See the output of @option{sections}.
|
||||
|
||||
Metadata tags stored in the container or in the streams are recognized
|
||||
and printed in the corresponding "FORMAT", "STREAM", "STREAM_GROUP_STREAM"
|
||||
or "PROGRAM_STREAM" section.
|
||||
and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
|
||||
section.
|
||||
|
||||
@c man end
|
||||
|
||||
@@ -83,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
Prettify the format of the displayed values, it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -output_format, -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@@ -91,7 +88,7 @@ Set the output printing format.
|
||||
|
||||
For example for printing the output in JSON format, specify:
|
||||
@example
|
||||
-output_format json
|
||||
-print_format json
|
||||
@end example
|
||||
|
||||
For more details on the available output printing formats, see the
|
||||
@@ -232,13 +229,6 @@ multimedia stream.
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "PROGRAM_STREAM".
|
||||
|
||||
@item -show_stream_groups
|
||||
Show information about stream groups and their streams contained in the
|
||||
input multimedia stream.
|
||||
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM_GROUP_STREAM".
|
||||
|
||||
@item -show_chapters
|
||||
Show information about chapters stored in the format.
|
||||
|
||||
@@ -345,12 +335,6 @@ Show information about all pixel formats supported by FFmpeg.
|
||||
Pixel format information for each format is printed within a section
|
||||
with name "PIXEL_FORMAT".
|
||||
|
||||
@item -show_optional_fields @var{value}
|
||||
Some writers viz. JSON and XML, omit the printing of fields with invalid or non-applicable values,
|
||||
while other writers always print them. This option enables one to control this behaviour.
|
||||
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
|
||||
Default is @var{auto}.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
@@ -358,10 +342,6 @@ on the specific build.
|
||||
@item -i @var{input_url}
|
||||
Read @var{input_url}.
|
||||
|
||||
@item -o @var{output_url}
|
||||
Write output to @var{output_url}. If not specified, the output is sent
|
||||
to stdout.
|
||||
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@@ -422,9 +402,8 @@ keyN=valN
|
||||
[/SECTION]
|
||||
@end example
|
||||
|
||||
Metadata tags are printed as a line in the corresponding FORMAT, STREAM,
|
||||
STREAM_GROUP_STREAM or PROGRAM_STREAM section, and are prefixed by the
|
||||
string "TAG:".
|
||||
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
|
||||
PROGRAM_STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@@ -446,7 +425,7 @@ The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
If no option is specifid, the output has the form:
|
||||
@example
|
||||
section|key1=val1| ... |keyN=valN
|
||||
@end example
|
||||
@@ -605,14 +584,14 @@ value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_strict, x
|
||||
@item xsd_compliant, x
|
||||
If set to 1 perform more checks for ensuring that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{https://www.w3.org/XML/}.
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@@ -663,7 +642,6 @@ DV, GXF and AVI timecodes are available in format metadata
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@include general_contents.texi
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
898 doc/ffprobe.xsd
@@ -1,527 +1,379 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="stream_groups" type="ffprobe:StreamGroupsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_datum" type="ffprobe:packetSideDatumType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="type" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDatumType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="crop_top" type="xsd:long" />
|
||||
<xsd:attribute name="crop_bottom" type="xsd:long" />
|
||||
<xsd:attribute name="crop_left" type="xsd:long" />
|
||||
<xsd:attribute name="crop_right" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="logsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="logType">
|
||||
<xsd:attribute name="context" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int" />
|
||||
<xsd:attribute name="category" type="xsd:int" />
|
||||
<xsd:attribute name="parent_context" type="xsd:string"/>
|
||||
<xsd:attribute name="parent_category" type="xsd:int" />
|
||||
<xsd:attribute name="message" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecodes" type="ffprobe:frameSideDataTimecodeList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:frameSideDataComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDatumType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecode" type="ffprobe:frameSideDataTimecodeType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeType">
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:frameSideDataComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pieces" type="ffprobe:frameSideDataPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="piece" type="ffprobe:frameSideDataPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="StreamGroupsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream_group" type="ffprobe:streamGroupType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="non_diegetic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="captions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="descriptions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="metadata" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dependent" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="still_image" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_size" type="xsd:int" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="closed_captions" type="xsd:boolean"/>
|
||||
<xsd:attribute name="film_grain" type="xsd:boolean"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="initial_padding" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:streamGroupComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="type" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:streamGroupComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subcomponents" type="ffprobe:streamGroupSubComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="component_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subcomponent" type="ffprobe:streamGroupSubComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pieces" type="ffprobe:streamGroupPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="subcomponent_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="piece" type="ffprobe:streamGroupPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subpieces" type="ffprobe:streamGroupSubPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="piece_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subpiece" type="ffprobe:streamGroupSubPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="blocks" type="ffprobe:streamGroupBlockList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="subpiece_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupBlockList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="block" type="ffprobe:streamGroupBlockType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupBlockType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="block_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupEntryType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_stream_groups" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:sequence>
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:choice>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="logsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="logType">
|
||||
<xsd:attribute name="context" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int" />
|
||||
<xsd:attribute name="category" type="xsd:int" />
|
||||
<xsd:attribute name="parent_context" type="xsd:string"/>
|
||||
<xsd:attribute name="parent_category" type="xsd:int" />
|
||||
<xsd:attribute name="message" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
||||
|
||||
@@ -13,15 +13,6 @@ corresponding value to true. They can be set to false by prefixing
|
||||
the option name with "no". For example using "-nofoo"
|
||||
will set the boolean option with name "foo" to false.
|
||||
|
||||
Options that take arguments support a special syntax where the argument given on
|
||||
the command line is interpreted as a path to the file from which the actual
|
||||
argument value is loaded. To use this feature, add a forward slash '/'
|
||||
immediately before the option name (after the leading dash). E.g.
|
||||
@example
|
||||
ffmpeg -i INPUT -/filter:v filter.script OUTPUT
|
||||
@end example
|
||||
will load a filtergraph description from the file named @file{filter.script}.
|
||||
|
||||
@anchor{Stream specifiers}
|
||||
@section Stream specifiers
|
||||
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
|
||||
@@ -43,35 +34,27 @@ Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
|
||||
thread count for the second stream to 4. If @var{stream_index} is used as an
|
||||
additional stream specifier (see below), then it selects stream number
|
||||
@var{stream_index} from the matching streams. Stream numbering is based on the
|
||||
order of the streams as detected by libavformat except when a stream group
|
||||
specifier or program ID is also specified. In this case it is based on the
|
||||
ordering of the streams in the group or program.
|
||||
@item @var{stream_type}[:@var{additional_stream_specifier}]
|
||||
thread count for the second stream to 4.
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of following: 'v' or 'V' for video, 'a' for audio, 's'
|
||||
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
|
||||
streams, 'V' only matches video streams which are not attached pictures, video
|
||||
thumbnails or cover arts. If @var{additional_stream_specifier} is used, then
|
||||
it matches streams which both have this type and match the
|
||||
@var{additional_stream_specifier}. Otherwise, it matches all streams of the
|
||||
specified type.
|
||||
@item g:@var{group_specifier}[:@var{additional_stream_specifier}]
|
||||
Matches streams which are in the group with the specifier @var{group_specifier}.
|
||||
if @var{additional_stream_specifier} is used, then it matches streams which both
|
||||
are part of the group and match the @var{additional_stream_specifier}.
|
||||
@var{group_specifier} may be one of the following:
|
||||
@table @option
|
||||
@item @var{group_index}
|
||||
Match the stream with this group index.
|
||||
@item #@var{group_id} or i:@var{group_id}
|
||||
Match the stream with this group id.
|
||||
@end table
|
||||
@item p:@var{program_id}[:@var{additional_stream_specifier}]
|
||||
Matches streams which are in the program with the id @var{program_id}. If
|
||||
@var{additional_stream_specifier} is used, then it matches streams which both
|
||||
are part of the program and match the @var{additional_stream_specifier}.
|
||||
thumbnails or cover arts. If @var{stream_index} is given, then it matches
|
||||
stream number @var{stream_index} of this type. Otherwise, it matches all
|
||||
streams of this type.
|
||||
@item p:@var{program_id}[:@var{stream_index}] or p:@var{program_id}[:@var{stream_type}[:@var{stream_index}]] or
|
||||
p:@var{program_id}:m:@var{key}[:@var{value}]
|
||||
In first version, if @var{stream_index} is given, then it matches the stream with number @var{stream_index}
|
||||
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
|
||||
program. In the second version, @var{stream_type} is one of following: 'v' for video, 'a' for audio, 's'
|
||||
for subtitle, 'd' for data. If @var{stream_index} is also given, then it matches
|
||||
stream number @var{stream_index} of this type in the program with the id @var{program_id}.
|
||||
Otherwise, if only @var{stream_type} is given, it matches all
|
||||
streams of this type in the program with the id @var{program_id}.
|
||||
In the third version matches streams in the program with the id @var{program_id} with the metadata
|
||||
tag @var{key} having the specified value. If
|
||||
@var{value} is not given, matches streams that contain the given tag with any
|
||||
value.
|
||||
|
||||
@item #@var{stream_id} or i:@var{stream_id}
|
||||
Match the stream by stream id (e.g. PID in MPEG-TS container).
|
||||
@@ -127,24 +110,13 @@ Print detailed information about the muxer named @var{muxer_name}. Use the
|
||||
@option{-formats} option to get a list of all muxers and demuxers.
|
||||
|
||||
@item filter=@var{filter_name}
|
||||
Print detailed information about the filter named @var{filter_name}. Use the
|
||||
Print detailed information about the filter name @var{filter_name}. Use the
|
||||
@option{-filters} option to get a list of all filters.
|
||||
|
||||
@item bsf=@var{bitstream_filter_name}
|
||||
Print detailed information about the bitstream filter named @var{bitstream_filter_name}.
|
||||
Use the @option{-bsfs} option to get a list of all bitstream filters.
|
||||
|
||||
@item protocol=@var{protocol_name}
|
||||
Print detailed information about the protocol named @var{protocol_name}.
|
||||
Use the @option{-protocols} option to get a list of all protocols.
|
||||
@end table
|
||||
|
||||
@item -version
|
||||
Show version.
|
||||
|
||||
@item -buildconf
|
||||
Show the build configuration, one option per line.
|
||||
|
||||
@item -formats
|
||||
Show available formats (including devices).
|
||||
|
||||
@@ -187,9 +159,6 @@ Show available sample formats.
|
||||
@item -layouts
|
||||
Show channel names and standard channel layouts.
|
||||
|
||||
@item -dispositions
|
||||
Show stream dispositions.
|
||||
|
||||
@item -colors
|
||||
Show recognized color names.
|
||||
|
||||
@@ -266,15 +235,17 @@ ffmpeg [...] -loglevel +repeat
|
||||
By default the program logs to stderr. If coloring is supported by the
|
||||
terminal, colors are used to mark errors and warnings. Log coloring
|
||||
can be disabled setting the environment variable
|
||||
@env{AV_LOG_FORCE_NOCOLOR}, or can be forced setting
|
||||
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
|
||||
the environment variable @env{AV_LOG_FORCE_COLOR}.
|
||||
The use of the environment variable @env{NO_COLOR} is deprecated and
|
||||
will be dropped in a future FFmpeg version.
|
||||
|
||||
@item -report
|
||||
Dump full command line and log output to a file named
|
||||
Dump full command line and console output to a file named
|
||||
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
|
||||
directory.
|
||||
This file can be useful for bug reports.
|
||||
It also implies @code{-loglevel debug}.
|
||||
It also implies @code{-loglevel verbose}.
|
||||
|
||||
Setting the environment variable @env{FFREPORT} to any value has the
|
||||
same effect. If the value is a ':'-separated key=value sequence, these
|
||||
@@ -375,19 +346,6 @@ Possible flags for this option are:
|
||||
@item k8
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@item -cpucount @var{count} (@emph{global})
|
||||
Override detection of CPU count. This option is intended
|
||||
for testing. Do not use it unless you know what you're doing.
|
||||
@example
|
||||
ffmpeg -cpucount 2
|
||||
@end example
|
||||
|
||||
@item -max_alloc @var{bytes}
|
||||
Set the maximum size limit for allocating a block on the heap by ffmpeg's
|
||||
family of malloc functions. Exercise @strong{extreme caution} when using
|
||||
this option. Don't use if you do not understand the full consequence of doing so.
|
||||
Default is INT_MAX.
|
||||
@end table
|
||||
|
||||
@section AVOptions
|
||||
@@ -413,15 +371,7 @@ ffmpeg -i input.flac -id3v2_version 3 out.mp3
|
||||
@end example
|
||||
|
||||
All codec AVOptions are per-stream, and thus a stream specifier
|
||||
should be attached to them:
|
||||
@example
|
||||
ffmpeg -i multichannel.mxf -map 0:v:0 -map 0:a:0 -map 0:a:0 -c:a:0 ac3 -b:a:0 640k -ac:a:1 2 -c:a:1 aac -b:2 128k out.mp4
|
||||
@end example
|
||||
|
||||
In the above example, a multichannel audio stream is mapped twice for output.
|
||||
The first instance is encoded with codec ac3 and bitrate 640k.
|
||||
The second instance is downmixed to 2 channels and encoded with codec aac. A bitrate of 128k is specified for it using
|
||||
absolute index of the output stream.
|
||||
should be attached to them.
|
||||
|
||||
Note: the @option{-nooption} syntax cannot be used for boolean
|
||||
AVOptions, use @option{-option 0}/@option{-option 1}.
|
||||
|
||||
14251
doc/filters.texi
14251
doc/filters.texi
File diff suppressed because it is too large
Load Diff
@@ -27,49 +27,40 @@ stream information. A higher value will enable detecting more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
|
||||
@item max_probe_packets @var{integer} (@emph{input})
|
||||
Set the maximum number of buffered packets when probing a codec.
|
||||
Default is 2500 packets.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags}
|
||||
Set format flags. Some are implemented for a limited number of formats.
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
|
||||
Possible values for input files:
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item discardcorrupt
|
||||
Discard corrupted packets.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item fastseek
|
||||
Enable fast, but inaccurate seeks for some formats.
|
||||
@item genpts
|
||||
Generate missing PTS if DTS is present.
|
||||
@item igndts
|
||||
Ignore DTS if PTS is also set. In case the PTS is set, the DTS value
|
||||
is set to NOPTS. This is ignored when the @code{nofillin} flag is set.
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by buffering during initial input streams analysis.
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
Do not fill in missing values in packet fields that can be exactly calculated.
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers, this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS. At present, available only for AVIs with an index.
|
||||
@end table
|
||||
|
||||
Possible values for output files:
|
||||
@table @samp
|
||||
@item autobsf
|
||||
Automatically apply bitstream filters as required by the output format. Enabled by default.
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
@item bitexact
|
||||
Only write platform-, build- and time-independent data.
|
||||
This ensures that file and data checksums are reproducible and match between
|
||||
platforms. Its primary use is for regression testing.
|
||||
@item flush_packets
|
||||
Write out packets immediately.
|
||||
@item shortest
|
||||
Stop muxing at the end of the shortest stream.
|
||||
It may be needed to increase max_interleave_delta to avoid flushing the longer
|
||||
@@ -142,7 +133,7 @@ Consider things that a sane encoder should not do as an error.
|
||||
|
||||
@item max_interleave_delta @var{integer} (@emph{output})
|
||||
Set maximum buffering duration for interleaving. The duration is
|
||||
expressed in microseconds, and defaults to 10000000 (10 seconds).
|
||||
expressed in microseconds, and defaults to 1000000 (1 second).
|
||||
|
||||
To ensure all the streams are interleaved correctly, libavformat will
|
||||
wait until it has at least one packet for each stream before actually
|
||||
@@ -214,7 +205,7 @@ is @code{0} (meaning that no offset is applied).
|
||||
@item dump_separator @var{string} (@emph{input})
|
||||
Separator used to separate the fields printed on the command line about the
|
||||
Stream parameters.
|
||||
For example, to separate the fields with newlines and indentation:
|
||||
For example to separate the fields with newlines and indention:
|
||||
@example
|
||||
ffprobe -dump_separator "
|
||||
" -i ~/videos/matrixbench_mpeg2.mpg
|
||||
@@ -223,32 +214,6 @@ ffprobe -dump_separator "
|
||||
@item max_streams @var{integer} (@emph{input})
|
||||
Specifies the maximum number of streams. This can be used to reject files that
|
||||
would require too many resources due to a large number of streams.
|
||||
|
||||
@item skip_estimate_duration_from_pts @var{bool} (@emph{input})
|
||||
Skip estimation of input duration when calculated using PTS.
|
||||
At present, applicable for MPEG-PS and MPEG-TS.
|
||||
|
||||
@item strict, f_strict @var{integer} (@emph{input/output})
|
||||
Specify how strictly to follow the standards. @code{f_strict} is deprecated and
|
||||
should be used only via the @command{ffmpeg} tool.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item very
|
||||
strictly conform to an older more strict version of the spec or reference software
|
||||
@item strict
|
||||
strictly conform to all the things in the spec no matter what consequences
|
||||
@item normal
|
||||
|
||||
@item unofficial
|
||||
allow unofficial extensions
|
||||
@item experimental
|
||||
allow non standardized experimental things, experimental
|
||||
(unfinished/work in progress/not well tested) decoders and encoders.
|
||||
Note: experimental decoders can pose a security risk, do not use this for
|
||||
decoding untrusted input.
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@c man end FORMAT OPTIONS
|
||||
@@ -259,10 +224,30 @@ decoding untrusted input.
|
||||
Format stream specifiers allow selection of one or more streams that
|
||||
match specific properties.
|
||||
|
||||
Possible forms of stream specifiers are:
|
||||
@table @option
|
||||
@item @var{stream_index}
|
||||
Matches the stream with this index.
|
||||
|
||||
@item @var{stream_type}[:@var{stream_index}]
|
||||
@var{stream_type} is one of following: 'v' for video, 'a' for audio,
|
||||
's' for subtitle, 'd' for data, and 't' for attachments. If
|
||||
@var{stream_index} is given, then it matches the stream number
|
||||
@var{stream_index} of this type. Otherwise, it matches all streams of
|
||||
this type.
|
||||
|
||||
@item p:@var{program_id}[:@var{stream_index}]
|
||||
If @var{stream_index} is given, then it matches the stream with number
|
||||
@var{stream_index} in the program with the id
|
||||
@var{program_id}. Otherwise, it matches all streams in the program.
|
||||
|
||||
@item #@var{stream_id}
|
||||
Matches the stream by a format-specific ID.
|
||||
@end table
|
||||
|
||||
The exact semantics of stream specifiers is defined by the
|
||||
@code{avformat_match_stream_specifier()} function declared in the
|
||||
@file{libavformat/avformat.h} header and documented in the
|
||||
@ref{Stream specifiers,,Stream specifiers section in the ffmpeg(1) manual,ffmpeg}.
|
||||
@file{libavformat/avformat.h} header.
|
||||
|
||||
@ifclear config-writeonly
|
||||
@include demuxers.texi
|
||||
|
||||
1289
doc/general.texi
1289
doc/general.texi
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it.
|
||||
@section Cloning the source tree
|
||||
|
||||
@example
|
||||
git clone https://git.ffmpeg.org/ffmpeg.git <target>
|
||||
git clone git://source.ffmpeg.org/ffmpeg <target>
|
||||
@end example
|
||||
|
||||
This will put the FFmpeg sources into the directory @var{<target>}.
|
||||
@@ -66,7 +66,7 @@ This will put the FFmpeg sources into the directory @var{<target>} and let
|
||||
you push back your changes to the remote repository.
|
||||
|
||||
@example
|
||||
git clone git@@ffmpeg.org:ffmpeg-web <target>
|
||||
git clone gil@@ffmpeg.org:ffmpeg-web <target>
|
||||
@end example
|
||||
|
||||
This will put the source of the FFmpeg website into the directory
|
||||
@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions.
|
||||
git add [-i|-p|-A] <filenames/dirnames>
|
||||
@end example
|
||||
|
||||
Make sure you have told Git your name, email address and GPG key
|
||||
Make sure you have told Git your name and email address
|
||||
|
||||
@example
|
||||
git config --global user.name "My Name"
|
||||
git config --global user.email my@@email.invalid
|
||||
git config --global user.signingkey ABCDEF0123245
|
||||
@end example
|
||||
|
||||
Enable signing all commits or use -S
|
||||
|
||||
@example
|
||||
git config --global commit.gpgsign true
|
||||
@end example
|
||||
|
||||
Use @option{--global} to set the global configuration for all your Git checkouts.
|
||||
@@ -224,46 +217,16 @@ git config --global core.editor
|
||||
or set by one of the following environment variables:
|
||||
@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.
|
||||
|
||||
@section Writing a commit message
|
||||
Log messages should be concise but descriptive. Explain why you made a change,
|
||||
what you did will be obvious from the changes themselves most of the time.
|
||||
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
|
||||
levels look at and educate themselves while reading through your code. Don't
|
||||
include filenames in log messages, Git provides that information.
|
||||
|
||||
Log messages should be concise but descriptive.
|
||||
|
||||
The first line must contain the context, a colon and a very short
|
||||
summary of what the commit does. Details can be added, if necessary,
|
||||
separated by an empty line. These details should not exceed 60-72 characters
|
||||
per line, except when containing code.
|
||||
|
||||
Example of a good commit message:
|
||||
|
||||
@example
|
||||
avcodec/cbs: add a helper to read extradata within packet side data
|
||||
|
||||
Using ff_cbs_read() on the raw buffer will not parse it as extradata,
|
||||
resulting in parsing errors for example when handling ISOBMFF avcC.
|
||||
This helper works around that.
|
||||
@end example
|
||||
|
||||
@example
|
||||
ptr might be NULL
|
||||
@end example
|
||||
|
||||
If the summary on the first line is not enough, in the body of the message,
|
||||
explain why you made a change, what you did will be obvious from the changes
|
||||
themselves most of the time. Saying just "bug fix" or "10l" is bad. Remember
|
||||
that people of varying skill levels look at and educate themselves while
|
||||
reading through your code. Don't include filenames in log messages except in
|
||||
the context, Git provides that information.
|
||||
|
||||
If the commit fixes a registered issue, state it in a separate line of the
|
||||
body: @code{Fix Trac ticket #42.}
|
||||
|
||||
The first line will be used to name
|
||||
Possibly make the commit message have a terse, descriptive first line, an
|
||||
empty line and then a full description. The first line will be used to name
|
||||
the patch by @command{git format-patch}.
|
||||
|
||||
Common mistakes for the first line, as seen in @command{git log --oneline}
|
||||
include: missing context at the beginning; description of what the code did
|
||||
before the patch; line too long or wrapped to the second line.
|
||||
|
||||
@section Preparing a patchset
|
||||
|
||||
@example
|
||||
@@ -430,19 +393,6 @@ git checkout -b svn_23456 $SHA1
|
||||
where @var{$SHA1} is the commit hash from the @command{git log} output.
|
||||
|
||||
|
||||
@chapter gpg key generation
|
||||
|
||||
If you have no gpg key yet, we recommend that you create a ed25519 based key as it
|
||||
is small, fast and secure. Especially it results in small signatures in git.
|
||||
|
||||
@example
|
||||
gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com"
|
||||
@end example
|
||||
|
||||
When generating a key, make sure the email specified matches the email used in git as some sites like
|
||||
github consider mismatches a reason to declare such commits unverified. After generating a key you
|
||||
can add it to the MAINTAINER file and upload it to a keyserver.
|
||||
|
||||
@chapter Pre-push checklist
|
||||
|
||||
Once you have a set of commits that you feel are ready for pushing,
|
||||
|
||||
345
doc/indevs.texi
345
doc/indevs.texi
@@ -178,9 +178,6 @@ Capture the mouse pointer. Default is 0.
|
||||
@item -capture_mouse_clicks
|
||||
Capture the screen mouse clicks. Default is 0.
|
||||
|
||||
@item -capture_raw_data
|
||||
Capture the raw device data. Default is 0.
|
||||
Using this option may result in receiving the underlying data delivered to the AVFoundation framework. E.g. for muxed devices that sends raw DV data to the framework (like tape-based camcorders), setting this option to false results in extracted video frames captured in the designated pixel format only. Setting this option to true results in receiving the raw DV stream untouched.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -211,19 +208,11 @@ Record video from the system default video device using the pixel format bgr0 an
|
||||
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
Record raw DV data from a suitable input device and write the output into out.dv:
|
||||
@example
|
||||
$ ffmpeg -f avfoundation -capture_raw_data true -i "zr100:none" out.dv
|
||||
@end example
|
||||
|
||||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
|
||||
BSD video input device. Deprecated and will be removed - please contact
|
||||
the developers if you are interested in maintaining it.
|
||||
BSD video input device.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@@ -278,8 +267,7 @@ audio track.
|
||||
|
||||
@item list_devices
|
||||
If set to @option{true}, print a list of devices and exit.
|
||||
Defaults to @option{false}. This option is deprecated, please use the
|
||||
@code{-sources} option of ffmpeg to list the available input devices.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item list_formats
|
||||
If set to @option{true}, print a list of supported formats and exit.
|
||||
@@ -293,35 +281,25 @@ as @option{pal} (3 letters).
|
||||
Default behavior is autodetection of the input video format, if the hardware
|
||||
supports it.
|
||||
|
||||
@item bm_v210
|
||||
This is a deprecated option, you can use @option{raw_format} instead.
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item raw_format
|
||||
Set the pixel format of the captured video.
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item auto
|
||||
|
||||
This is the default which means 8-bit YUV 422 or 8-bit ARGB if format
|
||||
autodetection is used, 8-bit YUV 422 otherwise.
|
||||
|
||||
@item uyvy422
|
||||
|
||||
8-bit YUV 422.
|
||||
|
||||
@item yuv422p10
|
||||
|
||||
10-bit YUV 422.
|
||||
|
||||
@item argb
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item bgra
|
||||
|
||||
8-bit RGB.
|
||||
|
||||
@item rgb10
|
||||
|
||||
10-bit RGB.
|
||||
|
||||
@end table
|
||||
|
||||
@item teletext_lines
|
||||
@@ -345,34 +323,9 @@ Defines number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp
|
||||
Defaults to @samp{2}.
|
||||
|
||||
@item duplex_mode
|
||||
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
Sets the decklink device duplex mode. Must be @samp{unset}, @samp{half} or @samp{full}.
|
||||
Defaults to @samp{unset}.
|
||||
|
||||
Note: DeckLink SDK 11.0 have replaced the duplex property by a profile property.
|
||||
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
|
||||
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
|
||||
is shared between all 4 sub-devices. So DeckLink 8K Pro support four profiles.
|
||||
|
||||
Valid profile modes for DeckLink 8K Pro(with DeckLink SDK >= 11.0):
|
||||
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
|
||||
@samp{four_sub_device_half}
|
||||
|
||||
Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
|
||||
@samp{half}, @samp{full}
|
||||
|
||||
@item timecode_format
|
||||
Timecode type to include in the frame and video stream metadata. Must be
|
||||
@samp{none}, @samp{rp188vitc}, @samp{rp188vitc2}, @samp{rp188ltc},
|
||||
@samp{rp188hfr}, @samp{rp188any}, @samp{vitc}, @samp{vitc2}, or @samp{serial}.
|
||||
Defaults to @samp{none} (not included).
|
||||
|
||||
In order to properly support 50/60 fps timecodes, the ordering of the queried
|
||||
timecode types for @samp{rp188any} is HFR, VITC1, VITC2 and LTC for >30 fps
|
||||
content. Note that this is slightly different to the ordering used by the
|
||||
DeckLink API, which is HFR, VITC1, LTC, VITC2.
|
||||
|
||||
@item video_input
|
||||
Sets the video input source. Must be @samp{unset}, @samp{sdi}, @samp{hdmi},
|
||||
@samp{optical_sdi}, @samp{component}, @samp{composite} or @samp{s_video}.
|
||||
@@ -411,34 +364,6 @@ If set to @option{true}, timestamps are forwarded as they are without removing
|
||||
the initial offset.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item timestamp_align
|
||||
Capture start time alignment in seconds. If set to nonzero, input frames are
|
||||
dropped till the system timestamp aligns with configured value.
|
||||
Alignment difference of up to one frame duration is tolerated.
|
||||
This is useful for maintaining input synchronization across N different
|
||||
hardware devices deployed for 'N-way' redundancy. The system time of different
|
||||
hardware devices should be synchronized with protocols such as NTP or PTP,
|
||||
before using this option.
|
||||
Note that this method is not foolproof. In some border cases input
|
||||
synchronization may not happen due to thread scheduling jitters in the OS.
|
||||
Either sync could go wrong by 1 frame or in a rarer case
|
||||
@option{timestamp_align} seconds.
|
||||
Defaults to @samp{0}.
|
||||
|
||||
@item wait_for_tc (@emph{bool})
|
||||
Drop frames till a frame with timecode is received. Sometimes serial timecode
|
||||
isn't received with the first input frame. If that happens, the stored stream
|
||||
timecode will be inaccurate. If this option is set to @option{true}, input frames
|
||||
are dropped till a frame with timecode is received.
|
||||
Option @var{timecode_format} must be specified.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item enable_klv(@emph{bool})
|
||||
If set to @option{true}, extracts KLV data from VANC and outputs KLV packets.
|
||||
KLV VANC packets are joined based on MID and PSC fields and aggregated into
|
||||
one KLV packet.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -448,7 +373,7 @@ Defaults to @option{false}.
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -sources decklink
|
||||
ffmpeg -f decklink -list_devices 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -466,7 +391,7 @@ ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy outp
|
||||
@item
|
||||
Capture video clip at 1080i50 10 bit:
|
||||
@example
|
||||
ffmpeg -raw_format yuv422p10 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
|
||||
@end example
|
||||
|
||||
@item
|
||||
@@ -477,6 +402,116 @@ ffmpeg -channels 16 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder'
|
||||
|
||||
@end itemize
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. Defaults to @option{bgr0}.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs, but can't be autodetected. See the libdrm documentation for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
Capture from the first active plane, download the result to normal frames and encode.
|
||||
This will only work if the framebuffer is both linear and mappable - if not, the result
|
||||
may be scrambled or fail to download.
|
||||
@example
|
||||
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
|
||||
@end example
|
||||
|
||||
@item
|
||||
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
|
||||
@example
|
||||
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section libndi_newtek
|
||||
|
||||
The libndi_newtek input device provides capture capabilities for using NDI (Network
|
||||
Device Interface, standard created by NewTek).
|
||||
|
||||
Input filename is a NDI source name that could be found by sending -find_sources 1
|
||||
to command line - it has no specific syntax but human-readable formatted.
|
||||
|
||||
To enable this input device, you need the NDI SDK and you
|
||||
need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item find_sources
|
||||
If set to @option{true}, print a list of found/available NDI sources and exit.
|
||||
Defaults to @option{false}.
|
||||
|
||||
@item wait_sources
|
||||
Override time to wait until the number of online sources have changed.
|
||||
Defaults to @option{0.5}.
|
||||
|
||||
@item allow_video_fields
|
||||
When this flag is @option{false}, all video that you receive will be progressive.
|
||||
Defaults to @option{true}.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
List input devices:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -find_sources 1 -i dummy
|
||||
@end example
|
||||
|
||||
@item
|
||||
Restream to NDI:
|
||||
@example
|
||||
ffmpeg -f libndi_newtek -i "DEV-5.INTERNAL.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
@section dshow
|
||||
|
||||
Windows DirectShow input device.
|
||||
@@ -626,12 +661,6 @@ Save the currently used video capture filter device and its
|
||||
parameters (if the filter supports it) to a file.
|
||||
If a file with the same name exists it will be overwritten.
|
||||
|
||||
@item use_video_device_timestamps
|
||||
If set to @option{false}, the timestamp for video frames will be
|
||||
derived from the wallclock instead of the timestamp provided by
|
||||
the capture device. This allows working around devices that
|
||||
provide unreliable timestamps.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@@ -723,7 +752,7 @@ Win32 GDI-based screen capture device.
|
||||
|
||||
This device allows you to capture a region of the display on Windows.
|
||||
|
||||
Amongst options for the imput filenames are such elements as:
|
||||
There are two options for the input filename:
|
||||
@example
|
||||
desktop
|
||||
@end example
|
||||
@@ -731,13 +760,9 @@ or
|
||||
@example
|
||||
title=@var{window_title}
|
||||
@end example
|
||||
or
|
||||
@example
|
||||
hwnd=@var{window_hwnd}
|
||||
@end example
|
||||
|
||||
The first option will capture the entire desktop, or a fixed region of the
|
||||
desktop. The second and third options will instead capture the contents of a single
|
||||
desktop. The second option will instead capture the contents of a single
|
||||
window, regardless of its position on the screen.
|
||||
|
||||
For example, to grab the entire desktop using @command{ffmpeg}:
|
||||
@@ -851,7 +876,7 @@ ffplay -f iec61883 -i auto
|
||||
Grab and record the input of a FireWire DV/HDV device,
|
||||
using a packet buffer of 100000 packets if the source is HDV.
|
||||
@example
|
||||
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
|
||||
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
@@ -914,80 +939,6 @@ Set the number of channels. Default is 2.
|
||||
|
||||
@end table
|
||||
|
||||
@section kmsgrab
|
||||
|
||||
KMS video input device.
|
||||
|
||||
Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
|
||||
DRM object that can be passed to other hardware functions.
|
||||
|
||||
Requires either DRM master or CAP_SYS_ADMIN to run.
|
||||
|
||||
If you don't understand what all of that means, you probably don't want this. Look at
|
||||
@option{x11grab} instead.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item device
|
||||
DRM device to capture on. Defaults to @option{/dev/dri/card0}.
|
||||
|
||||
@item format
|
||||
Pixel format of the framebuffer. This can be autodetected if you are running Linux 5.7
|
||||
or later, but needs to be provided for earlier versions. Defaults to @option{bgr0},
|
||||
which is the most common format used by the Linux console and Xorg X server.
|
||||
|
||||
@item format_modifier
|
||||
Format modifier to signal on output frames. This is necessary to import correctly into
|
||||
some APIs. It can be autodetected if you are running Linux 5.7 or later, but will need
|
||||
to be provided explicitly when needed in earlier versions. See the libdrm documentation
|
||||
for possible values.
|
||||
|
||||
@item crtc_id
|
||||
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
|
||||
will be used.
|
||||
|
||||
@item plane_id
|
||||
KMS plane ID to define the capture source. Defaults to the first active plane found if
|
||||
neither @option{crtc_id} nor @option{plane_id} are specified.
|
||||
|
||||
@item framerate
|
||||
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
|
||||
changes - it just defines the interval at which the framebuffer is sampled. Sampling
|
||||
faster than the framebuffer update rate will generate independent frames with the same
|
||||
content. Defaults to @code{30}.
|
||||
|
||||
@end table

@subsection Examples

@itemize

@item
Capture from the first active plane, download the result to normal frames and encode.
This will only work if the framebuffer is both linear and mappable - if not, the result
may be scrambled or fail to download.
@example
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
@end example

@item
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
@example
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
@end example

@item
To capture only part of a plane the output can be cropped - this can be used to capture
a single window, as long as it has a known absolute position and size. For example, to
capture and encode the middle quarter of a 1920x1080 plane:
@example
ffmpeg -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,crop=960:540:480:270,scale_vaapi=960:540:nv12' -c:v h264_vaapi output.mp4
@end example

@end itemize
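
If the kernel is older than 5.7 and autodetection is unavailable, the framebuffer format can
be supplied explicitly. A minimal sketch, assuming a linear, mappable @option{bgr0}
framebuffer on the default DRM device:
@example
ffmpeg -device /dev/dri/card0 -format bgr0 -framerate 30 -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
@end example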

@section lavfi

Libavfilter input virtual device.
@@ -996,8 +947,9 @@ This input device reads data from the open output pads of a libavfilter
filtergraph.

For each filtergraph open output, the input device will create a
corresponding stream which is mapped to the generated output.
The filtergraph is specified through the option @option{graph}.
corresponding stream which is mapped to the generated output. Currently
only video data is supported. The filtergraph is specified through the
option @option{graph}.
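
As an illustrative sketch, a graph with a single video output pad can be played directly
(the pink color source and the dummy output name are arbitrary choices):
@example
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
@end example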

@subsection Options

@@ -1125,21 +1077,6 @@ IIDC1394 input device, based on libdc1394 and libraw1394.

Requires the configure option @code{--enable-libdc1394}.

@subsection Options
@table @option

@item framerate
Set the frame rate. Default is @code{ntsc}, corresponding to a frame
rate of @code{30000/1001}.

@item pixel_format
Select the pixel format. Default is @code{uyvy422}.

@item video_size
Set the video size given as a string such as @code{640x480} or @code{hd720}.
Default is @code{qvga}.
@end table

@section openal

The OpenAL input device provides audio capture on all systems with a
@@ -1258,6 +1195,7 @@ Set the number of channels. Default is 2.

@end table


@section pulse

PulseAudio input device.
@@ -1293,11 +1231,11 @@ Specify the samplerate in Hz, by default 48kHz is used.
Specify the channels in use, by default 2 (stereo) is set.

@item frame_size
This option does nothing and is deprecated.
Specify the number of bytes per frame, by default it is set to 1024.

@item fragment_size
Specify the size in bytes of the minimal buffering fragment in PulseAudio, it
will affect the audio latency. By default it is set to 50 ms amount of data.
Specify the minimal buffering fragment in PulseAudio, it will affect the
audio latency. By default it is unset.

@item wallclock
Set the initial PTS using the current time. Default is 1.
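
For example, a minimal sketch that records ten seconds from the default PulseAudio source
(@samp{default} resolves to whatever the server reports as its default source):
@example
ffmpeg -f pulse -i default -t 10 out.wav
@end example
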
@@ -1532,14 +1470,6 @@ ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@subsection Options

@table @option
@item select_region
Specify whether to select the grabbing area graphically using the pointer.
A value of @code{1} prompts the user to select the grabbing area graphically
by clicking and dragging. A single click with no dragging will select the
whole screen. A region with zero width or height will also select the whole
screen. This option overwrites the @var{video_size}, @var{grab_x}, and
@var{grab_y} options. Default value is @code{0}.

@item draw_mouse
Specify whether to draw the mouse pointer. A value of @code{0} specifies
not to draw the pointer. Default value is @code{1}.
@@ -1588,21 +1518,8 @@ With @var{follow_mouse}:
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
@end example

@item window_id
Grab this window, instead of the whole screen. Default value is 0, which maps to
the whole screen (root window).

The id of a window can be found using the @command{xwininfo} program, possibly with options -tree and
-root.

If the window is later enlarged, the new area is not recorded. Video ends when
the window is closed, unmapped (i.e., iconified) or shrunk beyond the video
size (which defaults to the initial window size).

This option disables options @option{follow_mouse} and @option{select_region}.
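
As a sketch, grabbing a single window by its id as printed by @command{xwininfo}
(the id value below is only a placeholder):
@example
ffmpeg -f x11grab -framerate 25 -window_id 0x3c00041 -i :0.0 out.mpg
@end example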

@item video_size
Set the video frame size. Default is the full desktop or window.
Set the video frame size. Default value is @code{vga}.

@item grab_x
@item grab_y

@@ -1,94 +0,0 @@
FFmpeg Infrastructure:
======================


Servers:
~~~~~~~~

Main Server:
------------
Our main server is hosted at telepoint.bg;
for more details see: https://www.ffmpeg.org/#thanks_sponsor_0001
Nothing runs on our main server directly; instead, several VMs run on it.

ffmpeg.org VM:
--------------
Web, mail, and public facing git, also website git

fftrac VM:
----------
trac.ffmpeg.org Issue tracking

ffaux VM:
---------
patchwork.ffmpeg.org Patch tracking
vote.ffmpeg.org Condorcet voting

fate:
-----
fate.ffmpeg.org FFmpeg automated testing environment

coverage:
---------
coverage.ffmpeg.org FATE code coverage

The main and fate servers, as well as the VMs, currently run Ubuntu.


Cronjobs:
~~~~~~~~~
Part of the docs is in the main ffmpeg repository as texi files; this part is built by a cronjob. So is the
doxygen stuff as well as the FFmpeg git snapshot.
These three scripts run under the ffcron user.


Git:
~~~~
Public facing git is provided by our infra (https://git.ffmpeg.org/gitweb).
The main developer ffmpeg git repository is, for historic reasons, provided by (git@source.ffmpeg.org:ffmpeg).
Other developer git repositories are provided via git@git.ffmpeg.org:<NAME_OF_REPOSITORY>
git mirrors are available on https://github.com/FFmpeg
(there are some exceptions where primary repositories are on github or elsewhere instead of the mirrors)

Github mirrors are redundantly synced by multiple people.

You need a new git repository related to FFmpeg? Contact root at ffmpeg.org.


Fate:
~~~~~
FATE samples are provided via rsync. Every FFmpeg developer who has a shell account on ffmpeg.org
should be in the samples group and be able to upload samples.
See https://www.ffmpeg.org/fate.html#Uploading-new-samples-to-the-fate-suite


Accounts:
~~~~~~~~~
You need an account for some FFmpeg work? Send mail to root at ffmpeg.org.


VMs:
~~~~
You need a VM or docker container for FFmpeg? Contact root at ffmpeg.org
(for docker, CC Andriy).


IRC:
~~~~
IRC channels are at https://libera.chat/
IRC channel archives are at https://libera.irclog.whitequark.org
@@ -95,16 +95,17 @@ Stuff that didn't reach the codebase:
- 0cef06df0 checkasm: add HEVC MC tests
- e7078e842 hevcdsp: add x86 SIMD for MC
- 7993ec19a hevc: Add hevc_get_pixel_4/8/12/16/24/32/48/64
- new bitstream reader (see http://ffmpeg.org/pipermail/ffmpeg-devel/2017-April/209609.html)
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
- Support recovery from an already present HLS playlist (see 16cb06bb30)
- Remove all output devices (see 8e7e042d41, 8d3db95f20, 6ce13070bd, d46cd24986 and https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/216904.html)
- avcodec/libaomenc: export the Sequence Header OBU as extradata (See a024c3ce9a)

Collateral damage that needs work locally:
------------------------------------------

- Merge proresdec2.c and proresdec_lgpl.c
- Merge proresenc_anatoliy.c and proresenc_kostya.c
- Fix MIPS AC3 downmix

@@ -47,8 +47,7 @@ We cannot provide help for scripts and/or third-party tools.
@anchor{How do I ask a question or send a message to a mailing list?}
@section How do I ask a question or send a message to a mailing list?

First you must @ref{How do I subscribe?, subscribe}. Then all you have to do is
send an email:
All you have to do is send an email:

@itemize
@item
@@ -58,14 +57,49 @@ ffmpeg-user mailing list.
@item
Email @email{libav-user@@ffmpeg.org} to send a message to the
libav-user mailing list.

@item
Email @email{ffmpeg-devel@@ffmpeg.org} to send a message to the
ffmpeg-devel mailing list.
@end itemize

If you are not subscribed to the mailing list then your question must be
manually approved. Approval may take several days, but the wait is
usually less. If you want the message to be sent with no delay then you
must subscribe first. See @ref{How do I subscribe?}

Please do not send a message, subscribe, and re-send the message: this
results in duplicates, causes more work for the admins, and may lower
your chance at getting an answer. However, you may do so if you first
@ref{How do I delete my message in the moderation queue?, delete your original message from the moderation queue}.

@chapter Subscribing / Unsubscribing

@section What does subscribing do?

Subscribing allows two things:

@itemize
@item
Your messages will show up in the mailing list without waiting in the
moderation queue and needing to be manually approved by a mailing list
admin.

@item
You will receive all messages to the mailing list including replies to
your messages. Non-subscribed users do not receive any messages.
@end itemize

@section Do I need to subscribe?

No. You can still send a message to the mailing list without
subscribing. See @ref{How do I ask a question or send a message to a mailing list?}

However, your message will need to be manually approved by a mailing
list admin, and you will not receive any mailing list messages or
replies.

You can ask to be CCd in your message, but replying users will
sometimes forget to do so.

You may also view and reply to messages via the @ref{Where are the archives?, archives}.

@anchor{How do I subscribe?}
@section How do I subscribe?

@@ -90,9 +124,6 @@ The process is the same for the other mailing lists.
Please avoid asking a mailing list admin to unsubscribe you unless you
are absolutely unable to do so by yourself. See @ref{Who do I contact if I have a problem with the mailing list?}

Note that it is possible to temporarily halt message delivery (vacation mode).
See @ref{How do I disable mail delivery without unsubscribing?}

@chapter Moderation Queue
@anchor{Why is my message awaiting moderator approval?}
@section Why is my message awaiting moderator approval?
@@ -103,6 +134,8 @@ must be manually approved by a mailing list admin:
These are:

@itemize
@item
Messages from users who are @strong{not} subscribed.

@item
Messages that exceed the @ref{What is the message size limit?, message size limit}.
@@ -115,13 +148,13 @@ or is abusive towards others).

@section How long does it take for my message in the moderation queue to be approved?

The queue is not checked on a regular basis. You can ask on the
@t{#ffmpeg-devel} IRC channel on Libera Chat for someone to approve your message.
The queue is usually checked once or twice a day, but on occasion
several days may pass before someone checks the queue.

@anchor{How do I delete my message in the moderation queue?}
@section How do I delete my message in the moderation queue?

You should have received an email with the subject @emph{Your message to <mailing list name> awaits moderator approval}.
You should have received an email with the subject @emph{Your message to ffmpeg-user awaits moderator approval}.
A link is in the message that will allow you to delete your message
unless a mailing list admin already approved or rejected it.

@@ -142,9 +175,6 @@ Click the email link at the top of the message just under the subject
title. The link will provide the proper headers to keep the message
within the thread.

Note that you must be subscribed to send a message to the ffmpeg-user or
libav-user mailing lists.

@section How do I search the archives?

Perform a site search using your favorite search engine. Example:
@@ -155,14 +185,13 @@ Perform a site search using your favorite search engine. Example:

@section Is there an alternative to the mailing list?

You can ask for help in the official @t{#ffmpeg} IRC channel on Libera Chat.
You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.

Some users prefer the third-party @url{http://www.ffmpeg-archive.org/, Nabble}
interface which presents the mailing lists in a typical forum layout.
Some users prefer the third-party Nabble interface which presents the
mailing lists in a typical forum layout.

There are also numerous third-party help sites such as
@url{https://superuser.com/tags/ffmpeg, Super User} and
@url{https://www.reddit.com/r/ffmpeg/, r/ffmpeg on reddit}.
There are also numerous third-party help sites such as Super User and
r/ffmpeg on reddit.

@anchor{What is top-posting?}
@section What is top-posting?
@@ -174,15 +203,16 @@ Instead, use trimmed interleaved/inline replies (@url{https://lists.ffmpeg.org/p
@anchor{What is the message size limit?}
@section What is the message size limit?

The message size limit is 1000 kilobytes. Please provide links to larger files
instead of attaching them.
The message size limit is 500 kilobytes for the user lists and 1000
kilobytes for ffmpeg-devel. Please provide links to larger files instead
of attaching them.

@section Where can I upload sample files?

Anywhere that is not too annoying for us to use.

Google Drive and Dropbox are acceptable if you need a file host, and
@url{https://0x0.st/, 0x0.st} is good for files under 256 MiB.
0x0.st is good for files under 256 MiB.

Small, short samples are preferred if possible.

@@ -229,54 +259,6 @@ or headers.

You can then filter the mailing list messages to their own folder.

@anchor{How do I disable mail delivery without unsubscribing?}
@section How do I disable mail delivery without unsubscribing?

Sometimes you may want to temporarily stop receiving all mailing list
messages. This "vacation mode" is simple to do:

@enumerate
@item
Go to the @url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user mailing list info page}

@item
Enter your email address in the box at the very bottom of the page and click the
@emph{Unsubscribe or edit options} box.

@item
Enter your password and click the @emph{Log in} button.

@item
Look for the @emph{Mail delivery} option. Here you can disable/enable mail
delivery. If you check @emph{Set globally} it will apply your choice to all
other FFmpeg mailing lists you are subscribed to.
@end enumerate

Alternatively, from your subscribed address, send a message to @email{ffmpeg-user-request@@ffmpeg.org}
with the subject @emph{set delivery off}. To re-enable mail delivery send a
message to @email{ffmpeg-user-request@@ffmpeg.org} with the subject
@emph{set delivery on}.

@anchor{Why is the mailing list munging my address?}
@section Why is the mailing list munging my address?

This is due to subscribers that use an email service with a DMARC reject policy
which adds difficulties to mailing list operators.

The mailing list must re-write (munge) the @emph{From:} header for such users;
otherwise their email service will reject and bounce the message resulting in
automatic unsubscribing from the mailing list.

When sending a message these users will see @emph{via <mailing list name>}
added to their name and the @emph{From:} address munged to the address of
the particular mailing list.

If you want to avoid this then please use a different email service.

Note that ffmpeg-devel does not apply any munging as it causes issues with
patch authorship. As a result users with an email service with a DMARC reject
policy may be automatically unsubscribed due to rejected and bounced messages.

@chapter Rules and Etiquette

@section What are the rules and the proper etiquette?
@@ -344,7 +326,7 @@ recommended.
Avoid sending the same message to multiple mailing lists.

@item
Please follow our @url{https://ffmpeg.org/community.html#Code-of-conduct, Code of Conduct}.
Please follow our @url{https://ffmpeg.org/developer.html#Code-of-conduct, Code of Conduct}.
@end itemize

@chapter Help
@@ -375,15 +357,6 @@ form a multi-part message is recommended by email standards.
Check your spam folder.
@end itemize

@anchor{Why do I keep getting unsubscribed from ffmpeg-devel?}
@section Why do I keep getting unsubscribed from ffmpeg-devel?

Users with an email service that has a DMARC reject or quarantine policy may be
automatically unsubscribed from the ffmpeg-devel mailing list due to the mailing
list messages being continuously rejected and bounced back.

Consider using a different email service.

@anchor{Who do I contact if I have a problem with the mailing list?}
@section Who do I contact if I have a problem with the mailing list?

@@ -1,4 +1,3 @@
@anchor{metadata}
@chapter Metadata
@c man begin METADATA

@@ -34,7 +33,7 @@ At the beginning of a chapter section there may be an optional timebase to be
used for start/end values. It must be in form
@samp{TIMEBASE=@var{num}/@var{den}}, where @var{num} and @var{den} are
integers. If the timebase is missing then start/end times are assumed to
be in nanoseconds.
be in milliseconds.

Next a chapter section must contain chapter start and end times in form
@samp{START=@var{num}}, @samp{END=@var{num}}, where @var{num} is a positive
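
As an illustrative sketch, a chapter section in an ffmetadata file could look like this
(a millisecond timebase and arbitrary chapter bounds are assumed):
@example
;FFMETADATA1
[CHAPTER]
TIMEBASE=1/1000
START=0
END=60000
title=Chapter One
@end example
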
@@ -48,6 +48,11 @@ Files that have MIPS copyright notice in them:
    float_dsp_mips.c
    libm_mips.h
    softfloat_tables.h
* libavcodec/
    fft_fixed_32.c
    fft_init_table.c
    fft_table.h
    mdct_fixed_32.c
* libavcodec/mips/
    aacdec_fixed.c
    aacsbr_fixed.c
@@ -65,6 +70,9 @@ Files that have MIPS copyright notice in them:
    compute_antialias_float.h
    lsp_mips.h
    dsputil_mips.c
    fft_mips.c
    fft_table.h
    fft_init_table.c
    fmtconvert_mips.c
    iirfilter_mips.c
    mpegaudiodsp_mips_fixed.c

@@ -20,7 +20,8 @@ Slice threading -

Frame threading -
* Restrictions with slice threading also apply.
* Custom get_buffer2() and get_format() callbacks must be thread-safe.
* For best performance, the client should set thread_safe_callbacks if it
  provides a thread-safe get_buffer() callback.
* There is one frame of delay added for every thread beyond the first one.
  Clients must be able to handle this; the pkt_dts and pkt_pts fields in
  AVFrame will work as usual.
@@ -50,13 +51,16 @@ the decode process starts. Call ff_thread_finish_setup() afterwards. If
some code can't be moved, have update_thread_context() run it in the next
thread.

If the codec allocates writable tables in its init(), add an init_thread_copy()
which re-allocates them for other threads.

Add AV_CODEC_CAP_FRAME_THREADS to the codec capabilities. There will be very little
speed gain at this point but it should work.

If there are inter-frame dependencies, so the codec calls
ff_thread_report/await_progress(), set FF_CODEC_CAP_ALLOCATE_PROGRESS in
FFCodec.caps_internal and use ff_thread_get_buffer() to allocate frames.
Otherwise decode directly into the user-supplied frames.
ff_thread_report/await_progress(), set AVCodecInternal.allocate_progress. The
frames must then be freed with ff_thread_release_buffer().
Otherwise leave it at zero and decode directly into the user-supplied frames.

Call ff_thread_report_progress() after some part of the current picture has decoded.
A good place to put this is where draw_horiz_band() is called - add this if it isn't
doc/muxers.texi (2414 changed lines): file diff suppressed because it is too large.
@@ -267,11 +267,6 @@ CELL/SPU:
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/30B3520C93F437AB87257060006FFE5E/$file/Language_Extensions_for_CBEA_2.4.pdf
http://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/9F820A5FFA3ECE8C8725716A0062585F/$file/CBE_Handbook_v1.1_24APR2007_pub.pdf

RISC-V-specific:
----------------
The RISC-V Instruction Set Manual, Volume 1, Unprivileged ISA:
https://riscv.org/technical/specifications/

GCC asm links:
--------------
official doc but quite ugly
doc/outdevs.texi (182 changed lines):
@@ -38,52 +38,6 @@ ffmpeg -i INPUT -f alsa hw:1,7
@end example
@end itemize

@section AudioToolbox

AudioToolbox output device.

Allows native output to CoreAudio devices on OSX.

The output filename can be empty (or @code{-}) to refer to the default system output device or a number that refers to the device index as shown using: @code{-list_devices true}.

Alternatively, the audio output device can be chosen by index using the
@option{-audio_device_index <INDEX>}, overriding any device name or index
given in the output filename.

All available devices can be enumerated by using @option{-list_devices true}, listing
all device names, UIDs and corresponding indices.

@subsection Options

AudioToolbox supports the following options:

@table @option

@item -audio_device_index <INDEX>
Specify the audio device by its index. Overrides anything given in the output filename.

@end table

@subsection Examples

@itemize

@item
Print the list of supported devices and output a sine wave to the default device:
@example
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -list_devices true -
@end example

@item
Output a sine wave to the device with the index 2, overriding any output filename:
@example
$ ffmpeg -f lavfi -i sine=r=44100 -f audiotoolbox -audio_device_index 2 -
@end example

@end itemize

@section caca

CACA output device.
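
For instance, a minimal sketch that renders an input as colored ASCII art in the terminal
(the rawvideo/rgb24 conversion reflects what the device expects):
@example
ffmpeg -i INPUT -c:v rawvideo -pix_fmt rgb24 -f caca -
@end example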

@@ -186,8 +140,7 @@ device with @command{-list_formats 1}. Audio sample rate is always 48 kHz.

@item list_devices
If set to @option{true}, print a list of devices and exit.
Defaults to @option{false}. This option is deprecated, please use the
@code{-sinks} option of ffmpeg to list the available output devices.
Defaults to @option{false}.

@item list_formats
If set to @option{true}, print a list of supported formats and exit.
@@ -197,49 +150,6 @@ Defaults to @option{false}.
Amount of time to preroll video in seconds.
Defaults to @option{0.5}.

@item duplex_mode
Sets the decklink device duplex/profile mode. Must be @samp{unset}, @samp{half}, @samp{full},
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
@samp{four_sub_device_half}
Defaults to @samp{unset}.

Note: DeckLink SDK 11.0 has replaced the duplex property with a profile property.
For the DeckLink Duo 2 and DeckLink Quad 2, a profile is shared between any 2
sub-devices that utilize the same connectors. For the DeckLink 8K Pro, a profile
is shared between all 4 sub-devices. So the DeckLink 8K Pro supports four profiles.

Valid profile modes for DeckLink 8K Pro (with DeckLink SDK >= 11.0):
@samp{one_sub_device_full}, @samp{one_sub_device_half}, @samp{two_sub_device_full},
@samp{four_sub_device_half}

Valid profile modes for DeckLink Quad 2 and DeckLink Duo 2:
@samp{half}, @samp{full}

@item timing_offset
Sets the genlock timing pixel offset on the used output.
Defaults to @samp{unset}.

@item link
Sets the SDI video link configuration on the used output. Must be
@samp{unset}, @samp{single} link SDI, @samp{dual} link SDI or @samp{quad} link
SDI.
Defaults to @samp{unset}.

@item sqd
Enable Square Division Quad Split mode for Quad-link SDI output.
Must be @samp{unset}, @samp{true} or @samp{false}.
Defaults to @option{unset}.

@item level_a
Enable SMPTE Level A mode on the used output.
Must be @samp{unset}, @samp{true} or @samp{false}.
Defaults to @option{unset}.

@item vanc_queue_size
Sets maximum output buffer size in bytes for VANC data. If the buffering reaches this value,
outgoing VANC data will be dropped.
Defaults to @samp{1048576}.

@end table

@subsection Examples
@@ -249,7 +159,7 @@ Defaults to @samp{1048576}.
@item
List output devices:
@example
ffmpeg -sinks decklink
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
@end example

@item
@@ -272,6 +182,51 @@ ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLi

@end itemize

@section libndi_newtek

The libndi_newtek output device provides playback capabilities for using NDI (Network
Device Interface, a standard created by NewTek).

Output filename is an NDI name.

To enable this output device, you need the NDI SDK and you
need to configure with the appropriate @code{--extra-cflags}
and @code{--extra-ldflags}.

NDI uses uyvy422 pixel format natively, but also supports bgra, bgr0, rgba and
rgb0.

@subsection Options

@table @option

@item reference_level
The audio reference level in dB. This specifies how many dB above the
reference level (+4dBU) is the full range of 16 bit audio.
Defaults to @option{0}.

@item clock_video
This specifies whether the video frames "clock" themselves.
Defaults to @option{false}.

@item clock_audio
This specifies whether the audio samples "clock" themselves.
Defaults to @option{false}.

@end table

@subsection Examples

@itemize

@item
Play video clip:
@example
ffmpeg -i "udp://@@239.1.1.1:10480?fifo_size=1000000&overrun_nonfatal=1" -vf "scale=720:576,fps=fps=25,setdar=dar=16/9,format=pix_fmts=uyvy422" -f libndi_newtek NEW_NDI1
@end example

@end itemize

@section fbdev

Linux framebuffer output device.
@@ -302,7 +257,7 @@ ffmpeg -re -i INPUT -c:v rawvideo -pix_fmt bgra -f fbdev /dev/fb0
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).

@section opengl
OpenGL output device. Deprecated and will be removed.
OpenGL output device.

To enable this output device you need to configure FFmpeg with @code{--enable-opengl}.
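
For example, a minimal sketch that plays an input file in an OpenGL window (the window
title is arbitrary):
@example
ffmpeg -i INPUT -f opengl "window title"
@end example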

@@ -408,15 +363,7 @@ ffmpeg -i INPUT -f pulse "stream name"

@section sdl

SDL (Simple DirectMedia Layer) output device. Deprecated and will be removed.

For monitoring purposes in FFmpeg, pipes and a video player such as ffplay can be used:

@example
ffmpeg -i INPUT -f nut -c:v rawvideo - | ffplay -
@end example

"sdl2" can be used as alias for "sdl".
SDL (Simple DirectMedia Layer) output device.

This output device allows one to show a video stream in an SDL
window. Only one SDL window is allowed per application, so you can
@@ -432,18 +379,13 @@ For more information about SDL, check:

@table @option

@item window_borderless
Set SDL window border off.
Default value is 0 (enable window border).
@item window_title
Set the SDL window title; if not specified, it defaults to the filename
specified for the output device.

@item window_enable_quit
Enable quit action (using window button or keyboard key)
when non-zero value is provided.
Default value is 1 (enable quit action).

@item window_fullscreen
Set fullscreen mode when non-zero value is provided.
Default value is zero.
@item icon_title
Set the name of the iconified SDL window, if not specified it is set
to the same value of @var{window_title}.

@item window_size
Set the SDL window size, can be a string of the form
@@ -451,13 +393,9 @@ Set the SDL window size, can be a string of the form
If not specified it defaults to the size of the input video,
downscaled according to the aspect ratio.

@item window_title
Set the SDL window title; if not specified, it defaults to the filename
specified for the output device.

@item window_x
@item window_y
Set the position of the window on the screen.
@item window_fullscreen
Set fullscreen mode when non-zero value is provided.
Default value is zero.
@end table

@subsection Interactive commands
@@ -482,10 +420,6 @@ ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL out

sndio audio output device.

@section v4l2

Video4Linux2 output device.
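
A common use is feeding a V4L2 loopback device. A sketch, assuming the v4l2loopback module
provides @file{/dev/video2} and that the input is converted to a pixel format the device
accepts:
@example
ffmpeg -re -i INPUT -vf format=yuv420p -f v4l2 /dev/video2
@end example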

@section xv

XV (XVideo) output device.
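
For instance, a sketch that decodes an input and displays it through XVideo (the trailing
name is just the output "filename" for the device; the pixel format conversion is assumed
to be required):
@example
ffmpeg -i INPUT -pix_fmt yuv420p -f xv display
@end example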

@@ -92,6 +92,9 @@ For information about compiling FFmpeg on OS/2 see

@chapter Windows

To get help and instructions for building FFmpeg under Windows, check out
the FFmpeg Windows Help Forum at @url{http://ffmpeg.zeranoe.com/forum/}.

@section Native Windows compilation using MinGW or MinGW-w64

FFmpeg can be built to run natively on Windows using the MinGW-w64
@@ -145,11 +148,16 @@ To target 32 bits replace @code{x86_64} with @code{i686} in the command above.

@section Microsoft Visual C++ or Intel C++ Compiler for Windows

FFmpeg can be built with MSVC 2013 or later.
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
and wrapper, or with MSVC 2013 and ICL natively.

You will need the following prerequisites:

@itemize
@item @uref{https://github.com/libav/c99-to-c89/, C99-to-C89 Converter & Wrapper}
(if using MSVC 2012 or earlier)
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
(if using MSVC 2012 or earlier)
@item @uref{http://msys2.github.io/, MSYS2}
@item @uref{http://www.nasm.us/, NASM}
(Also available via MSYS2's package manager.)
@@ -158,13 +166,16 @@ You will need the following prerequisites:
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
the Visual Studio or Intel Compiler command prompt.

Place @code{yasm.exe} somewhere in your @code{PATH}.
Place @code{yasm.exe} somewhere in your @code{PATH}. If using MSVC 2012 or
earlier, place @code{c99wrap.exe} and @code{c99conv.exe} somewhere in your
@code{PATH} as well.

Next, make sure any other headers and libs you want to use, such as zlib, are
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
and @code{INCLUDE} environment variables to include the @strong{Windows-style}
paths to these directories. Alternatively, you can try to use the
@code{--extra-cflags}/@code{--extra-ldflags} configure options.
@code{--extra-cflags}/@code{--extra-ldflags} configure options. If using MSVC
2012 or earlier, place @code{inttypes.h} somewhere the compiler can see too.

Finally, run:
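
A minimal sketch of the usual sequence (additional options such as @code{--prefix} or
@code{--enable-*} flags may be needed for your setup):
@example
./configure --toolchain=msvc
make
make install
@end example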

@@ -206,6 +217,8 @@ can see.

@item FFmpeg has been tested with the following on i686 and x86_64:
@itemize
@item Visual Studio 2010 Pro and Express
@item Visual Studio 2012 Pro and Express
@item Visual Studio 2013 Pro and Express
@item Intel Composer XE 2013
@item Intel Composer XE 2013 SP1
Some files were not shown because too many files have changed in this diff.