Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2026-01-14 10:05:51 +01:00)

Compare commits: release/3. ... n3.3 (39 commits)
| SHA1 |
|---|
| 1968a1eef1 |
| f4f3bf3c94 |
| 6557ea8e2b |
| 9d742f774a |
| 5e84c94f69 |
| e90de50195 |
| 51ca6fda05 |
| d1cae50a04 |
| b51217381d |
| f5f0b2f44c |
| e9fc7a90ba |
| 414d11fff6 |
| 1830b0a6c7 |
| 0ed4f26cf2 |
| 69e35db80d |
| af43c7092c |
| ecdf52745f |
| 07e7ebf52d |
| 37589e6443 |
| ad37fb86d7 |
| 4f325589f9 |
| 707d4c7fb5 |
| c30d0ace65 |
| 0c188bc595 |
| 72e038acaf |
| 83e6a4a32b |
| 7182fbc471 |
| 0b4d87fad1 |
| 7034009f62 |
| 6c9574e490 |
| 37fcf089b4 |
| f4400a92f5 |
| a430ba9925 |
| 1833ec5334 |
| c9c977be27 |
| 3c9e1b89a1 |
| 2ff93effb3 |
| b1377b2d28 |
| e0064df4ff |
.gitignore (vendored, 6 changes)

@@ -18,9 +18,6 @@
*.so.*
*.swp
*.ver
*.version
*.ptx
*.ptx.c
*_g
\#*
.\#*
@@ -30,8 +27,7 @@
/ffplay
/ffprobe
/ffserver
/config.asm
/config.h
/config.*
/coverage.info
/avversion.h
/lcov/

.travis.yml

@@ -6,7 +6,7 @@ os:
addons:
  apt:
    packages:
      - nasm
      - yasm
      - diffutils
compiler:
  - clang
@@ -17,7 +17,7 @@ cache:
before_install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update --all; fi
install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install nasm; fi
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install yasm; fi
script:
  - mkdir -p ffmpeg-samples
  - ./configure --samples=ffmpeg-samples --cc=$CC

CREDITS (4 changes)

@@ -1,6 +1,6 @@
See the Git history of the project (https://git.ffmpeg.org/ffmpeg) to
See the Git history of the project (git://source.ffmpeg.org/ffmpeg) to
get the names of people who have contributed to FFmpeg.

To check the log, you can type the command "git log" in the FFmpeg
source directory, or browse the online repository at
https://git.ffmpeg.org/ffmpeg
http://source.ffmpeg.org.

@@ -15,11 +15,3 @@ NOTICE
------

- Non system dependencies (e.g. libx264, libvpx) are disabled by default.

NOTICE for Package Maintainers
------------------------------

- It is recommended to build FFmpeg twice, first with minimal external dependencies so
  that 3rd party packages, which depend on FFmpegs libavutil/libavfilter/libavcodec/libavformat
  can then be built. And last build FFmpeg with full dependancies (which may in turn depend on
  some of these 3rd party packages). This avoids circular dependencies during build.

MAINTAINERS (29 changes)

@@ -159,10 +159,8 @@ Codecs:
crystalhd.c Philip Langdale
cscd.c Reimar Doeffinger
cuvid.c Timo Rothenpieler
dca* foo86
dirac* Rostislav Pehlivanov
dnxhd* Baptiste Coudurier
dolby_e* foo86
dpcm.c Mike Melanson
dss_sp.c Oleksij Rempel
dv.c Roman Shaposhnik
@@ -194,6 +192,7 @@ Codecs:
libkvazaar.c Arttu Ylä-Outinen
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw
libschroedinger* David Conrad
libtheoraenc.c David Conrad
libvorbis.c David Conrad
libvpx* James Zern
@@ -233,7 +232,6 @@ Codecs:
smvjpegdec.c Ash Hughes
snow* Michael Niedermayer, Loren Merritt
sonic.c Alex Beregszaszi
speedhq.c Steinar H. Gunderson
srt* Aurelien Jacobs
sunrast.c Ivo van Poorten
svq3.c Michael Niedermayer
@@ -282,7 +280,7 @@ libavdevice

avfoundation.m Thilo Borgmann
decklink* Marton Balint
decklink* Deti Fliegl
dshow.c Roger Pack (CC rogerdpack@gmail.com)
fbdev_enc.c Lukasz Marek
gdigrab.c Roger Pack (CC rogerdpack@gmail.com)
@@ -291,6 +289,7 @@ libavdevice
libdc1394.c Roman Shaposhnik
opengl_enc.c Lukasz Marek
pulse_audio_enc.c Lukasz Marek
qtkit.m Thilo Borgmann
sdl Stefano Sabatini
sdl2.c Josh de Kock
v4l2.c Giorgio Vazzana
@@ -329,7 +328,6 @@ Filters:
avf_avectorscope.c Paul B Mahol
avf_showcqt.c Muhammad Faiz
vf_blend.c Paul B Mahol
vf_bwdif Thomas Mundt (CC <thomas.mundt@hr.de>)
vf_chromakey.c Timo Rothenpieler
vf_colorchannelmixer.c Paul B Mahol
vf_colorbalance.c Paul B Mahol
@@ -345,7 +343,6 @@ Filters:
vf_hqx.c Clément Bœsch
vf_idet.c Pascal Massimino
vf_il.c Paul B Mahol
vf_(t)interlace Thomas Mundt (CC <thomas.mundt@hr.de>)
vf_lenscorrection.c Daniel Oberhoff
vf_mergeplanes.c Paul B Mahol
vf_mestimate.c Davinder Singh
@@ -396,10 +393,8 @@ Muxers/Demuxers:
caf* Peter Ross
cdxl.c Paul B Mahol
crc.c Michael Niedermayer
dashdec.c Steven Liu
daud.c Reimar Doeffinger
dss.c Oleksij Rempel
dtsdec.c foo86
dtshddec.c Paul B Mahol
dv.c Roman Shaposhnik
electronicarts.c Peter Ross
@@ -411,7 +406,7 @@ Muxers/Demuxers:
gxf.c Reimar Doeffinger
gxfenc.c Baptiste Coudurier
hls.c Anssi Hannula
hlsenc.c Christian Suloway, Steven Liu
hls encryption (hlsenc.c) Christian Suloway, Steven Liu
idcin.c Mike Melanson
idroqdec.c Mike Melanson
iff.c Jaikrishnan Menon
@@ -421,6 +416,7 @@ Muxers/Demuxers:
iss.c Stefan Gehrer
jvdec.c Peter Ross
libmodplug.c Clément Bœsch
libnut.c Oded Shimon
libopenmpt.c Josh de Kock
lmlm4.c Ivo van Poorten
lvfdec.c Paul B Mahol
@@ -443,6 +439,7 @@ Muxers/Demuxers:
msnwc_tcp.c Ramiro Polla
mtv.c Reynaldo H. Verdejo Pinochet
mxf* Baptiste Coudurier
mxfdec.c Tomas Härdin
nistspheredec.c Paul B Mahol
nsvdec.c Francois Revol
nut* Michael Niedermayer
@@ -471,7 +468,6 @@ Muxers/Demuxers:
rtpdec_vc2hq.*, rtpenc_vc2hq.* Thomas Volkert
rtpdec_vp9.c Thomas Volkert
rtpenc_mpv.*, rtpenc_aac.* Martin Storsjo
s337m.c foo86
sbgdec.c Nicolas George
sdp.c Martin Storsjo
segafilm.c Mike Melanson
@@ -521,7 +517,7 @@ Operating systems / CPU architectures
=====================================

Alpha Falk Hueffner
MIPS Manojkumar Bhosale
MIPS Nedeljko Babic
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
Amiga / PowerPC Colin Ward
Windows MinGW Alex Beregszaszi, Ramiro Polla
@@ -547,7 +543,6 @@ Ganesh Ajjanagadde
Henrik Gramner
Ivan Uskov
James Darnley
Jan Ekström
Joakim Plate
Kieran Kunhya
Kirill Gavrilov
@@ -563,12 +558,10 @@ wm4
Releases
========

7.0 Michael Niedermayer
6.1 Michael Niedermayer
5.1 Michael Niedermayer
4.4 Michael Niedermayer
3.4 Michael Niedermayer
2.8 Michael Niedermayer
2.7 Michael Niedermayer
2.6 Michael Niedermayer
2.5 Michael Niedermayer

If you want to maintain an older release, please contact us

@@ -593,7 +586,6 @@ Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
Lou Logan 7D68 DC73 CBEF EABB 671A B6CF 621C 2E28 82F8 DC3A
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
                    DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
@@ -605,7 +597,6 @@ Reynaldo H. Verdejo Pinochet 6E27 CD34 170C C78E 4D4F 5F40 C18E 077F 3114 452A
Robert Swain EE7A 56EA 4A81 A7B5 2001 A521 67FA 362D A2FC 3E71
Sascha Sommer 38A0 F88B 868E 9D3A 97D4 D6A0 E823 706F 1E07 0D3C
Stefano Sabatini 0D0B AD6B 5330 BBAD D3D6 6A0C 719C 2839 FC43 2D5F
Steinar H. Gunderson C2E9 004F F028 C18E 4EAD DB83 7F61 7561 7797 8F76
Stephan Hilb 4F38 0B3A 5F39 B99B F505 E562 8D5C 5554 4E17 8863
Tiancheng "Timothy" Gu 9456 AFC0 814A 8139 E994 8351 7FE6 B095 B582 B0D4
Tim Nicholson 38CF DB09 3ED0 F607 8B67 6CED 0C0B FC44 8B0B FC83

Makefile (127 changes)

@@ -1,5 +1,5 @@
MAIN_MAKEFILE=1
include ffbuild/config.mak
include config.mak

vpath %.c $(SRC_PATH)
vpath %.cpp $(SRC_PATH)
@@ -11,12 +11,40 @@ vpath %.asm $(SRC_PATH)
vpath %.rc $(SRC_PATH)
vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
vpath %.cu $(SRC_PATH)
vpath %.ptx $(SRC_PATH)
vpath %/fate_config.sh.template $(SRC_PATH)

AVPROGS-$(CONFIG_FFMPEG) += ffmpeg
AVPROGS-$(CONFIG_FFPLAY) += ffplay
AVPROGS-$(CONFIG_FFPROBE) += ffprobe
AVPROGS-$(CONFIG_FFSERVER) += ffserver

AVPROGS := $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
INSTPROGS = $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS += $(AVPROGS)

AVBASENAMES = ffmpeg ffplay ffprobe ffserver
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))

$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog) += cmdutils.o))
$(foreach prog,$(AVBASENAMES),$(eval OBJS-$(prog)-$(CONFIG_OPENCL) += cmdutils_opencl.o))

OBJS-ffmpeg += ffmpeg_opt.o ffmpeg_filter.o
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += ffmpeg_videotoolbox.o
OBJS-ffmpeg-$(CONFIG_LIBMFX) += ffmpeg_qsv.o
OBJS-ffmpeg-$(CONFIG_VAAPI) += ffmpeg_vaapi.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA) += ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_CUVID) += ffmpeg_cuvid.o
OBJS-ffmpeg-$(HAVE_DXVA2_LIB) += ffmpeg_dxva2.o
OBJS-ffmpeg-$(HAVE_VDPAU_X11) += ffmpeg_vdpau.o
OBJS-ffserver += ffserver_config.o

TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
TOOLS = qt-faststart trasher uncoded_frame
TOOLS-$(CONFIG_ZLIB) += cws2fws

# $(FFLIBS-yes) needs to be in linking order
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
@@ -31,46 +59,41 @@ FFLIBS-$(CONFIG_SWSCALE) += swscale
FFLIBS := avutil

DATA_FILES := $(wildcard $(SRC_PATH)/presets/*.ffpreset) $(SRC_PATH)/doc/ffprobe.xsd
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/Makefile $(SRC_PATH)/doc/examples/README

SKIPHEADERS = compat/w32pthreads.h
SKIPHEADERS = cmdutils_common_opts.h \
              compat/w32pthreads.h

# first so "all" becomes default target
all: all-yes

include $(SRC_PATH)/tools/Makefile
include $(SRC_PATH)/ffbuild/common.mak
include $(SRC_PATH)/common.mak

FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)
FF_STATIC_DEP_LIBS := $(STATIC_DEP_LIBS)

$(TOOLS): %$(EXESUF): %.o
all: $(AVPROGS)

$(TOOLS): %$(EXESUF): %.o $(EXEOBJS)
    $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS)

target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
    $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)

tools/cws2fws$(EXESUF): ELIBS = $(ZLIB)
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
tools/target_dec_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)

CONFIGURABLE_COMPONENTS = \
    $(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
    $(SRC_PATH)/libavcodec/bitstream_filters.c \
    $(SRC_PATH)/libavformat/protocols.c \

config.h: ffbuild/.config
ffbuild/.config: $(CONFIGURABLE_COMPONENTS)
config.h: .config
.config: $(CONFIGURABLE_COMPONENTS)
    @-tput bold 2>/dev/null
    @-printf '\nWARNING: $(?) newer than config.h, rerun configure\n\n'
    @-printf '\nWARNING: $(?F) newer than config.h, rerun configure\n\n'
    @-tput sgr0 2>/dev/null

SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
SUBDIR_VARS := CLEANFILES EXAMPLES FFLIBS HOSTPROGS TESTPROGS TOOLS \
               HEADERS ARCH_HEADERS BUILT_HEADERS SKIPHEADERS \
               ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
               ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
               ALTIVEC-OBJS VSX-OBJS MMX-OBJS YASM-OBJS \
               MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
               MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS

@@ -85,32 +108,41 @@ SUBDIR := $(1)/
include $(SRC_PATH)/$(1)/Makefile
-include $(SRC_PATH)/$(1)/$(ARCH)/Makefile
-include $(SRC_PATH)/$(1)/$(INTRINSICS)/Makefile
include $(SRC_PATH)/ffbuild/library.mak
include $(SRC_PATH)/library.mak
endef

$(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))

include $(SRC_PATH)/fftools/Makefile
include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/doc/examples/Makefile

libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
define DOPROG
OBJS-$(1) += $(1).o $(EXEOBJS) $(OBJS-$(1)-yes)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(LIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef

$(foreach P,$(PROGS),$(eval $(call DOPROG,$(P:$(PROGSSUF)$(EXESUF)=))))

ffprobe.o cmdutils.o libavcodec/utils.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h

$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
ifeq ($(STRIPTYPE),direct)
    $(STRIP) -o $@ $<
else
    $(CP) $< $@
    $(STRIP) $@
endif

%$(PROGSSUF)_g$(EXESUF): $(FF_DEP_LIBS)
%$(PROGSSUF)_g$(EXESUF): %.o $(FF_DEP_LIBS)
    $(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)

VERSION_SH = $(SRC_PATH)/ffbuild/version.sh
OBJDIRS += tools

-include $(wildcard tools/*.d)

VERSION_SH = $(SRC_PATH)/version.sh
GIT_LOG = $(SRC_PATH)/.git/logs/HEAD

.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) ffbuild/config.mak
.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) config.mak
.version: M=@

libavutil/ffversion.h .version:
@@ -120,21 +152,38 @@ libavutil/ffversion.h .version:
# force version.sh to run whenever version might have changed
-include .version

ifdef AVPROGS
install: install-progs install-data
endif

install: install-libs install-headers

install-libs: install-libs-yes

install-data: $(DATA_FILES)
    $(Q)mkdir -p "$(DATADIR)"
    $(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
install-progs-yes:
install-progs-$(CONFIG_SHARED): install-libs

uninstall: uninstall-libs uninstall-headers uninstall-data
install-progs: install-progs-yes $(AVPROGS)
    $(Q)mkdir -p "$(BINDIR)"
    $(INSTALL) -c -m 755 $(INSTPROGS) "$(BINDIR)"

install-data: $(DATA_FILES) $(EXAMPLES_FILES)
    $(Q)mkdir -p "$(DATADIR)/examples"
    $(INSTALL) -m 644 $(DATA_FILES) "$(DATADIR)"
    $(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"

uninstall: uninstall-libs uninstall-headers uninstall-progs uninstall-data

uninstall-progs:
    $(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))

uninstall-data:
    $(RM) -r "$(DATADIR)"

clean::
    $(RM) $(ALLAVPROGS) $(ALLAVPROGS_G)
    $(RM) $(CLEANSUFFIXES)
    $(RM) $(CLEANSUFFIXES:%=tools/%)
    $(RM) $(CLEANSUFFIXES:%=compat/msvcrt/%)
    $(RM) $(CLEANSUFFIXES:%=compat/atomics/pthread/%)
    $(RM) $(CLEANSUFFIXES:%=compat/%)
@@ -143,10 +192,7 @@ clean::

distclean::
    $(RM) $(DISTCLEANSUFFIXES)
    $(RM) .version avversion.h config.asm config.h mapfile \
          ffbuild/.config ffbuild/config.* libavutil/avconfig.h \
          version.h libavutil/ffversion.h libavcodec/codec_names.h \
          libavcodec/bsf_list.c libavformat/protocol_list.c
    $(RM) config.* .config libavutil/avconfig.h .version mapfile avversion.h version.h libavutil/ffversion.h libavcodec/codec_names.h libavcodec/bsf_list.c libavformat/protocol_list.c
ifeq ($(SRC_LINK),src)
    $(RM) src
endif
@@ -170,4 +216,5 @@ $(sort $(OBJDIRS)):
# so this saves some time on slow systems.
.SUFFIXES:

.PHONY: all all-yes alltools check *clean config install* testprogs uninstall*
.PHONY: all all-yes alltools check *clean config install*
.PHONY: testprogs uninstall*

RELEASE_NOTES

@@ -1,15 +1,15 @@

┌───────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 3.4 "Cantor" │
└───────────────────────────────────────┘
┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 3.3 "Hilbert" │
└────────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 3.4 "Cantor", about 6
months after the release of FFmpeg 3.3.
The FFmpeg Project proudly presents FFmpeg 3.3 "Hilbert", about 5
months after the release of FFmpeg 3.2.

A complete Changelog is available at the root of the project, and the
complete Git history on http://source.ffmpeg.org.

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.

arch.mak

@@ -14,4 +14,4 @@ OBJS-$(HAVE_ALTIVEC) += $(ALTIVEC-OBJS) $(ALTIVEC-OBJS-yes)
OBJS-$(HAVE_VSX) += $(VSX-OBJS) $(VSX-OBJS-yes)

OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_X86ASM) += $(X86ASM-OBJS) $(X86ASM-OBJS-yes)
OBJS-$(HAVE_YASM) += $(YASM-OBJS) $(YASM-OBJS-yes)

cmdutils.c

@@ -540,7 +540,7 @@ static const AVOption *opt_find(void *obj, const char *name, const char *unit,
    return o;
}

#define FLAGS ((o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0)
#define FLAGS (o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0
int opt_default(void *optctx, const char *opt, const char *arg)
{
    const AVOption *o;

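An aside on the FLAGS hunk above: the outer parentheses are load-bearing whenever a macro expands to a conditional expression. A minimal, self-contained illustration (the macro names and values below are invented for the demo, not FFmpeg's):

#include <stdio.h>

#define COND 1
/* Unparenthesized, like the second FLAGS definition above: */
#define FLAGS_BARE   COND ? 4 : 0
/* Wrapped in outer parentheses, like the first FLAGS definition above: */
#define FLAGS_PAREN  ((COND) ? 4 : 0)

int main(void)
{
    /* '|' binds tighter than '?:', so this expands to COND ? 4 : (0 | 8),
     * which evaluates to 4: the extra bit is silently dropped. */
    int bare  = FLAGS_BARE | 8;
    /* The parenthesized form keeps the intended grouping: (4) | 8 == 12. */
    int paren = FLAGS_PAREN | 8;
    printf("%d %d\n", bare, paren); /* prints: 4 12 */
    return 0;
}
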
cmdutils.h

@@ -19,8 +19,8 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef FFTOOLS_CMDUTILS_H
#define FFTOOLS_CMDUTILS_H
#ifndef CMDUTILS_H
#define CMDUTILS_H

#include <stdint.h>

@@ -206,59 +206,6 @@ typedef struct OptionDef {
void show_help_options(const OptionDef *options, const char *msg, int req_flags,
                       int rej_flags, int alt_flags);

#if CONFIG_OPENCL
#define CMDUTILS_COMMON_OPTIONS_OPENCL \
    { "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, \
      "run benchmark on all OpenCL devices and show results" }, \
    { "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, \
      "set OpenCL environment options" }, \

#else
#define CMDUTILS_COMMON_OPTIONS_OPENCL
#endif

#if CONFIG_AVDEVICE
#define CMDUTILS_COMMON_OPTIONS_AVDEVICE \
    { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources }, \
      "list sources of the input device", "device" }, \
    { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks }, \
      "list sinks of the output device", "device" }, \

#else
#define CMDUTILS_COMMON_OPTIONS_AVDEVICE
#endif

#define CMDUTILS_COMMON_OPTIONS \
    { "L", OPT_EXIT, { .func_arg = show_license }, "show license" }, \
    { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
    { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
    { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
    { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" }, \
    { "version", OPT_EXIT, { .func_arg = show_version }, "show version" }, \
    { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" }, \
    { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" }, \
    { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" }, \
    { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" }, \
    { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" }, \
    { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" }, \
    { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" }, \
    { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" }, \
    { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" }, \
    { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" }, \
    { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" }, \
    { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" }, \
    { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" }, \
    { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" }, \
    { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" }, \
    { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
    { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" }, \
    { "report", 0, { (void*)opt_report }, "generate a report" }, \
    { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" }, \
    { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" }, \
    { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" }, \
    CMDUTILS_COMMON_OPTIONS_OPENCL \
    CMDUTILS_COMMON_OPTIONS_AVDEVICE \

/**
 * Show help for all options with given flags in class and all its
 * children.
@@ -659,4 +606,4 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);

double get_rotation(AVStream *st);

#endif /* FFTOOLS_CMDUTILS_H */
#endif /* CMDUTILS_H */

cmdutils_common_opts.h (new file, 37 lines)

@@ -0,0 +1,37 @@
{ "L" , OPT_EXIT, {.func_arg = show_license}, "show license" },
{ "h" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "?" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "-help" , OPT_EXIT, {.func_arg = show_help}, "show help", "topic" },
{ "version" , OPT_EXIT, {.func_arg = show_version}, "show version" },
{ "buildconf" , OPT_EXIT, {.func_arg = show_buildconf}, "show build configuration" },
{ "formats" , OPT_EXIT, {.func_arg = show_formats }, "show available formats" },
{ "muxers" , OPT_EXIT, {.func_arg = show_muxers }, "show available muxers" },
{ "demuxers" , OPT_EXIT, {.func_arg = show_demuxers }, "show available demuxers" },
{ "devices" , OPT_EXIT, {.func_arg = show_devices }, "show available devices" },
{ "codecs" , OPT_EXIT, {.func_arg = show_codecs }, "show available codecs" },
{ "decoders" , OPT_EXIT, {.func_arg = show_decoders }, "show available decoders" },
{ "encoders" , OPT_EXIT, {.func_arg = show_encoders }, "show available encoders" },
{ "bsfs" , OPT_EXIT, {.func_arg = show_bsfs }, "show available bit stream filters" },
{ "protocols" , OPT_EXIT, {.func_arg = show_protocols}, "show available protocols" },
{ "filters" , OPT_EXIT, {.func_arg = show_filters }, "show available filters" },
{ "pix_fmts" , OPT_EXIT, {.func_arg = show_pix_fmts }, "show available pixel formats" },
{ "layouts" , OPT_EXIT, {.func_arg = show_layouts }, "show standard channel layouts" },
{ "sample_fmts", OPT_EXIT, {.func_arg = show_sample_fmts }, "show available audio sample formats" },
{ "colors" , OPT_EXIT, {.func_arg = show_colors }, "show available color names" },
{ "loglevel" , HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
{ "v", HAS_ARG, {.func_arg = opt_loglevel}, "set logging level", "loglevel" },
{ "report" , 0, {(void*)opt_report}, "generate a report" },
{ "max_alloc" , HAS_ARG, {.func_arg = opt_max_alloc}, "set maximum size of a single allocated block", "bytes" },
{ "cpuflags" , HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
{ "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
#if CONFIG_OPENCL
{ "opencl_bench", OPT_EXIT, {.func_arg = opt_opencl_bench}, "run benchmark on all OpenCL devices and show results" },
{ "opencl_options", HAS_ARG, {.func_arg = opt_opencl}, "set OpenCL environment options" },
#endif
#if CONFIG_AVDEVICE
{ "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
  "list sources of the input device", "device" },
{ "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
  "list sinks of the output device", "device" },
#endif

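The table above follows the designated-initializer pattern used throughout cmdutils: each row binds an option name, flag bits, and a handler, and a generic loop dispatches on the name. A miniature sketch of the same idea (the types and handlers below are hypothetical stand-ins, not FFmpeg's OptionDef machinery):

#include <stdio.h>
#include <string.h>

typedef struct {
    const char *name;
    int         flags;                      /* e.g. OPT_EXIT-style bits */
    int       (*func_arg)(const char *arg); /* handler for the option   */
    const char *help;
} MiniOptionDef;

static int show_version(const char *arg) { (void)arg; puts("v1.0"); return 0; }
static int set_loglevel(const char *arg) { printf("loglevel=%s\n", arg); return 0; }

#define HAS_ARG 1
static const MiniOptionDef options[] = {
    { "version",  0,       show_version, "show version" },
    { "loglevel", HAS_ARG, set_loglevel, "set logging level" },
    { NULL, 0, NULL, NULL } /* sentinel terminates the table */
};

int main(void)
{
    /* Walk the table and dispatch, the way a command-line parser would. */
    for (const MiniOptionDef *o = options; o->name; o++)
        if (!strcmp(o->name, "loglevel"))
            o->func_arg("verbose");
    return 0;
}
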
cmdutils_opencl.c

@@ -129,11 +129,11 @@ static int64_t run_opencl_bench(AVOpenCLExternalEnv *ext_opencl_env)
    cl_int status;
    size_t kernel_len;
    char *inbuf;
    int *mask = NULL;
    int *mask;
    int buf_size = width * height * sizeof(char);
    int mask_size = sizeof(uint32_t) * 128;

    cl_mem cl_mask = NULL, cl_inbuf = NULL, cl_outbuf = NULL;
    cl_mem cl_mask, cl_inbuf, cl_outbuf;
    cl_kernel kernel = NULL;
    cl_program program = NULL;
    size_t local_work_size_2d[2] = {16, 16};

common.mak

@@ -2,12 +2,15 @@
# common bits used by all libraries
#

DEFAULT_X86ASMD=.dbg
# first so "all" becomes default target
all: all-yes

DEFAULT_YASMD=.dbg

ifeq ($(DBG),1)
X86ASMD=$(DEFAULT_X86ASMD)
YASMD=$(DEFAULT_YASMD)
else
X86ASMD=
YASMD=
endif

ifndef SUBDIR
@@ -15,8 +18,8 @@ ifndef SUBDIR
ifndef V
Q      = @
ECHO   = printf "$(1)\t%s\n" $(2)
BRIEF  = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC
SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM
BRIEF  = CC CXX OBJCC HOSTCC HOSTLD AS YASM AR LD STRIP CP WINDRES
SILENT = DEPCC DEPHOSTCC DEPAS DEPYASM RANLIB RM

MSG    = $@
M      = @$(call ECHO,$(TAG),$@);
@@ -37,8 +40,7 @@ OBJCFLAGS += $(EOBJCFLAGS)
OBJCCFLAGS = $(CPPFLAGS) $(CFLAGS) $(OBJCFLAGS)
ASFLAGS := $(CPPFLAGS) $(ASFLAGS)
CXXFLAGS := $(CPPFLAGS) $(CFLAGS) $(CXXFLAGS)
X86ASMFLAGS += $(IFLAGS:%=%/) -I$(<D)/ -Pconfig.asm
NVCCFLAGS += -ptx
YASMFLAGS += $(IFLAGS:%=%/) -Pconfig.asm

HOSTCCFLAGS = $(IFLAGS) $(HOSTCPPFLAGS) $(HOSTCFLAGS)
LDFLAGS := $(ALLFFLIBS:%=$(LD_PATH)lib%) $(LDFLAGS)
@@ -52,9 +54,7 @@ COMPILE_C = $(call COMPILE,CC)
COMPILE_CXX = $(call COMPILE,CXX)
COMPILE_S = $(call COMPILE,AS)
COMPILE_M = $(call COMPILE,OBJCC)
COMPILE_X86ASM = $(call COMPILE,X86ASM)
COMPILE_HOSTC = $(call COMPILE,HOSTCC)
COMPILE_NVCC = $(call COMPILE,NVCC)

%.o: %.c
    $(COMPILE_C)
@@ -74,16 +74,17 @@ COMPILE_NVCC = $(call COMPILE,NVCC)
%_host.o: %.c
    $(COMPILE_HOSTC)

%$(DEFAULT_X86ASMD).asm: %.asm
    $(DEPX86ASM) $(X86ASMFLAGS) -M -o $@ $< > $(@:.asm=.d)
    $(X86ASM) $(X86ASMFLAGS) -e $< | sed '/^%/d;/^$$/d;' > $@
%$(DEFAULT_YASMD).asm: %.asm
    $(DEPYASM) $(YASMFLAGS) -I $(<D)/ -M -o $@ $< > $(@:.asm=.d)
    $(YASM) $(YASMFLAGS) -I $(<D)/ -e $< | sed '/^%/d;/^$$/d;' > $@

%.o: %.asm
    $(COMPILE_X86ASM)
    $(DEPYASM) $(YASMFLAGS) -I $(<D)/ -M -o $@ $< > $(@:.o=.d)
    $(YASM) $(YASMFLAGS) -I $(<D)/ -o $@ $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)
    -$(if $(ASMSTRIPFLAGS), $(STRIP) $(ASMSTRIPFLAGS) $@)

%.o: %.rc
    $(WINDRES) $(IFLAGS) $(foreach ARG,$(CC_DEPFLAGS),--preprocessor-arg "$(ARG)") -o $@ $<
    $(WINDRES) $(IFLAGS) --preprocessor "$(DEPWINDRES) -E -xc-header -DRC_INVOKED $(CC_DEPFLAGS)" -o $@ $<

%.i: %.c
    $(CC) $(CCFLAGS) $(CC_E) $<
@@ -91,13 +92,7 @@ COMPILE_NVCC = $(call COMPILE,NVCC)
%.h.c:
    $(Q)echo '#include "$*.h"' >$@

%.ptx: %.cu
    $(COMPILE_NVCC)

%.ptx.c: %.ptx
    $(Q)sh $(SRC_PATH)/compat/cuda/ptx2c.sh $@ $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<)

%.c %.h %.pc %.ver %.version: TAG = GEN
%.c %.h %.ver: TAG = GEN

# Dummy rule to stop make trying to rebuild removed or renamed headers
%.h:
@@ -111,7 +106,7 @@ COMPILE_NVCC = $(call COMPILE,NVCC)
$(OBJS):
endif

include $(SRC_PATH)/ffbuild/arch.mak
include $(SRC_PATH)/arch.mak

OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
@@ -141,10 +136,8 @@ ALLHEADERS := $(subst $(SRC_DIR)/,$(SUBDIR),$(wildcard $(SRC_DIR)/*.h $(SRC_DIR)
SKIPHEADERS += $(ARCH_HEADERS:%=$(ARCH)/%) $(SKIPHEADERS-)
SKIPHEADERS := $(SKIPHEADERS:%=$(SUBDIR)%)
HOBJS = $(filter-out $(SKIPHEADERS:.h=.h.o),$(ALLHEADERS:.h=.h.o))
PTXOBJS = $(filter %.ptx.o,$(OBJS))
$(HOBJS): CCFLAGS += $(CFLAGS_HEADERS)
checkheaders: $(HOBJS)
.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=)
.SECONDARY: $(HOBJS:.o=.c)

alltools: $(TOOLS)

@@ -152,7 +145,7 @@ $(HOSTOBJS): %.o: %.c
    $(COMPILE_HOSTC)

$(HOSTPROGS): %$(HOSTEXESUF): %.o
    $(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS)
    $(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTLIBS)

$(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS)))
@@ -163,7 +156,7 @@ $(TOOLOBJS): | tools

OBJDIRS := $(OBJDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(TESTOBJS))

CLEANSUFFIXES = *.d *.o *~ *.h.c *.gcda *.gcno *.map *.ver *.version *.ho *$(DEFAULT_X86ASMD).asm *.ptx *.ptx.c
CLEANSUFFIXES = *.d *.o *~ *.h.c *.gcda *.gcno *.map *.ver *.ho *$(DEFAULT_YASMD).asm
DISTCLEANSUFFIXES = *.pc
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a

@@ -174,4 +167,4 @@ endef

$(eval $(RULES))

-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_YASMD).d)

compat/atomics/suncc/stdatomic.h

@@ -108,7 +108,7 @@ static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *exp
                                                 intptr_t desired)
{
    intptr_t old = *expected;
    *expected = (intptr_t)atomic_cas_ptr(object, (void *)old, (void *)desired);
    *expected = atomic_cas_ptr(object, old, desired);
    return *expected == old;
}

compat/atomics/win32/stdatomic.h

@@ -19,7 +19,6 @@
#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define COMPAT_ATOMICS_WIN32_STDATOMIC_H

#define WIN32_LEAN_AND_MEAN
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
@@ -105,8 +104,7 @@ static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *exp
                                                 intptr_t desired)
{
    intptr_t old = *expected;
    *expected = (intptr_t)InterlockedCompareExchangePointer(
        (PVOID *)object, (PVOID)desired, (PVOID)old);
    *expected = InterlockedCompareExchangePointer(object, desired, old);
    return *expected == old;
}

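Both shims above emulate C11 atomic_compare_exchange_strong on top of a native compare-and-swap primitive; the casts being added keep the pointer-sized round trip explicit. For reference, the C11 semantics they mimic (a standalone sketch using standard <stdatomic.h>, not FFmpeg code):

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int obj = 5;
    int expected = 5;

    /* Succeeds: obj still holds *expected, so 9 is stored and true is returned. */
    int ok = atomic_compare_exchange_strong(&obj, &expected, 9);
    printf("ok=%d obj=%d expected=%d\n", ok, atomic_load(&obj), expected); /* ok=1 obj=9 expected=5 */

    expected = 5; /* stale guess */
    /* Fails: obj is now 9, nothing is stored, and the value actually found is
     * written back into expected, exactly what the shims' assignments do. */
    ok = atomic_compare_exchange_strong(&obj, &expected, 7);
    printf("ok=%d obj=%d expected=%d\n", ok, atomic_load(&obj), expected); /* ok=0 obj=9 expected=9 */
    return 0;
}
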
compat/cuda/dynlink_cuda.h

@@ -43,7 +43,6 @@
typedef int CUdevice;
typedef void* CUarray;
typedef void* CUcontext;
typedef void* CUstream;
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
typedef unsigned long long CUdeviceptr;
#else

compat/cuda/dynlink_cuviddec.h

@@ -1,7 +1,7 @@
/*
 * This copyright notice applies to this header file only:
 *
 * Copyright (c) 2010-2017 NVIDIA Corporation
 * Copyright (c) 2010-2016 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -25,17 +25,17 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*****************************************************************************************************/
//! \file cuviddec.h
//! NVDECODE API provides video decoding interface to NVIDIA GPU devices.
//! \date 2015-2017
//! This file contains constants, structure definitions and function prototypes used for decoding.
/*****************************************************************************************************/
/**
 * \file cuviddec.h
 * NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
 * \date 2015-2016
 * This file contains constants, structure definitions and function prototypes used for decoding.
 */

#if !defined(__CUDA_VIDEO_H__)
#define __CUDA_VIDEO_H__

#if defined(_WIN64) || defined(__LP64__) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
#if defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
#if (CUDA_VERSION >= 3020) && (!defined(CUDA_FORCE_API_VERSION) || (CUDA_FORCE_API_VERSION >= 3020))
#define __CUVID_DEVPTR64
#endif
@@ -54,126 +54,93 @@ typedef unsigned long tcu_ulong;
typedef void *CUvideodecoder;
typedef struct _CUcontextlock_st *CUvideoctxlock;

/*********************************************************************************/
//! \enum cudaVideoCodec
//! Video codec enums
//! These enums are used in CUVIDDECODECREATEINFO and CUVIDDECODECAPS structures
/*********************************************************************************/
/**
 * \addtogroup VIDEO_DECODER Video Decoder
 * @{
 */

/*!
 * \enum cudaVideoCodec
 * Video Codec Enums
 */
typedef enum cudaVideoCodec_enum {
    cudaVideoCodec_MPEG1=0, /**< MPEG1 */
    cudaVideoCodec_MPEG2, /**< MPEG2 */
    cudaVideoCodec_MPEG4, /**< MPEG4 */
    cudaVideoCodec_VC1, /**< VC1 */
    cudaVideoCodec_H264, /**< H264 */
    cudaVideoCodec_JPEG, /**< JPEG */
    cudaVideoCodec_H264_SVC, /**< H264-SVC */
    cudaVideoCodec_H264_MVC, /**< H264-MVC */
    cudaVideoCodec_HEVC, /**< HEVC */
    cudaVideoCodec_VP8, /**< VP8 */
    cudaVideoCodec_VP9, /**< VP9 */
    cudaVideoCodec_NumCodecs, /**< Max codecs */
    cudaVideoCodec_MPEG1=0, /**< MPEG1 */
    cudaVideoCodec_MPEG2, /**< MPEG2 */
    cudaVideoCodec_MPEG4, /**< MPEG4 */
    cudaVideoCodec_VC1, /**< VC1 */
    cudaVideoCodec_H264, /**< H264 */
    cudaVideoCodec_JPEG, /**< JPEG */
    cudaVideoCodec_H264_SVC, /**< H264-SVC */
    cudaVideoCodec_H264_MVC, /**< H264-MVC */
    cudaVideoCodec_HEVC, /**< HEVC */
    cudaVideoCodec_VP8, /**< VP8 */
    cudaVideoCodec_VP9, /**< VP9 */
    cudaVideoCodec_NumCodecs, /**< Max COdecs */
    // Uncompressed YUV
    cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */
    cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */
    cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */
    cudaVideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), /**< Y,U,V (4:2:0) */
    cudaVideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,V,U (4:2:0) */
    cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), /**< Y,UV (4:2:0) */
    cudaVideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), /**< YUYV/YUY2 (4:2:2) */
    cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */
    cudaVideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')) /**< UYVY (4:2:2) */
} cudaVideoCodec;

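An aside on the uncompressed-format values in the enum above: they are FourCC codes packed big-endian into an integer, first character in the most significant byte. A tiny sketch of the packing (the MAKE_FOURCC macro is invented here for illustration; the header itself spells the shifts out inline):

#include <stdio.h>

/* Pack four characters the way the enum above does. */
#define MAKE_FOURCC(a, b, c, d) \
    (((unsigned)(a) << 24) | ((unsigned)(b) << 16) | \
     ((unsigned)(c) <<  8) |  (unsigned)(d))

int main(void)
{
    /* Matches cudaVideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')). */
    printf("0x%08X\n", MAKE_FOURCC('N', 'V', '1', '2')); /* 0x4E563132 */
    return 0;
}
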
/*********************************************************************************/
//! \enum cudaVideoSurfaceFormat
//! Video surface format enums used for output format of decoded output
//! These enums are used in CUVIDDECODECREATEINFO structure
/*********************************************************************************/
/*!
 * \enum cudaVideoSurfaceFormat
 * Video Surface Formats Enums
 */
typedef enum cudaVideoSurfaceFormat_enum {
    cudaVideoSurfaceFormat_NV12=0, /**< NV12 format */
    cudaVideoSurfaceFormat_P016=1 /**< 16 bit semiplaner format. Can be used for 10 bit(6LSB bits 0),
                                       12 bit (4LSB bits 0) */
    cudaVideoSurfaceFormat_NV12=0, /**< NV12 */
    cudaVideoSurfaceFormat_P016=1 /**< P016 */
} cudaVideoSurfaceFormat;

/******************************************************************************************************************/
//! \enum cudaVideoDeinterlaceMode
//! Deinterlacing mode enums
//! These enums are used in CUVIDDECODECREATEINFO structure
//! Use cudaVideoDeinterlaceMode_Weave for progressive content and for content that doesn't need deinterlacing
//! cudaVideoDeinterlaceMode_Adaptive needs more video memory than other DImodes
/******************************************************************************************************************/
/*!
 * \enum cudaVideoDeinterlaceMode
 * Deinterlacing Modes Enums
 */
typedef enum cudaVideoDeinterlaceMode_enum {
    cudaVideoDeinterlaceMode_Weave=0, /**< Weave both fields (no deinterlacing) */
    cudaVideoDeinterlaceMode_Bob, /**< Drop one field */
    cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */
    cudaVideoDeinterlaceMode_Bob, /**< Drop one field */
    cudaVideoDeinterlaceMode_Adaptive /**< Adaptive deinterlacing */
} cudaVideoDeinterlaceMode;

/**************************************************************************************************************/
//! \enum cudaVideoChromaFormat
//! Chroma format enums
//! These enums are used in CUVIDDECODECREATEINFO and CUVIDDECODECAPS structures
//! JPEG supports Monochrome, YUV 4:2:0, YUV 4:2:2 and YUV 4:4:4 chroma formats.
//! H264, HEVC, VP9, VP8, VC1, MPEG1, MPEG2 and MPEG4 support YUV 4:2:0 chroma format only.
/**************************************************************************************************************/
/*!
 * \enum cudaVideoChromaFormat
 * Chroma Formats Enums
 */
typedef enum cudaVideoChromaFormat_enum {
    cudaVideoChromaFormat_Monochrome=0, /**< MonoChrome */
    cudaVideoChromaFormat_420, /**< YUV 4:2:0 */
    cudaVideoChromaFormat_422, /**< YUV 4:2:2 */
    cudaVideoChromaFormat_444 /**< YUV 4:4:4 */
    cudaVideoChromaFormat_420, /**< 4:2:0 */
    cudaVideoChromaFormat_422, /**< 4:2:2 */
    cudaVideoChromaFormat_444 /**< 4:4:4 */
} cudaVideoChromaFormat;

/*************************************************************************************************************/
//! \enum cudaVideoCreateFlags
//! Decoder flag enums to select preferred decode path
//! cudaVideoCreate_Default and cudaVideoCreate_PreferCUVID are most optimized, use these whenever possible
/*************************************************************************************************************/
/*!
 * \enum cudaVideoCreateFlags
 * Decoder Flags Enums
 */
typedef enum cudaVideoCreateFlags_enum {
    cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */
    cudaVideoCreate_PreferCUDA = 0x01, /**< Use CUDA-based decoder (requires valid vidLock object for multi-threading) */
    cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */
    cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */
    cudaVideoCreate_Default = 0x00, /**< Default operation mode: use dedicated video engines */
    cudaVideoCreate_PreferCUDA = 0x01, /**< Use a CUDA-based decoder if faster than dedicated engines (requires a valid vidLock object for multi-threading) */
    cudaVideoCreate_PreferDXVA = 0x02, /**< Go through DXVA internally if possible (requires D3D9 interop) */
    cudaVideoCreate_PreferCUVID = 0x04 /**< Use dedicated video engines directly */
} cudaVideoCreateFlags;


/**************************************************************************************************************/
//! \struct CUVIDDECODECAPS;
//! This structure is used in cuvidGetDecoderCaps API
/**************************************************************************************************************/
typedef struct _CUVIDDECODECAPS
{
    cudaVideoCodec eCodecType; /**< IN: cudaVideoCodec_XXX */
    cudaVideoChromaFormat eChromaFormat; /**< IN: cudaVideoChromaFormat_XXX */
    unsigned int nBitDepthMinus8; /**< IN: The Value "BitDepth minus 8" */
    unsigned int reserved1[3]; /**< Reserved for future use - set to zero */

    unsigned char bIsSupported; /**< OUT: 1 if codec supported, 0 if not supported */
    unsigned char reserved2[3]; /**< Reserved for future use - set to zero */
    unsigned int nMaxWidth; /**< OUT: Max supported coded width in pixels */
    unsigned int nMaxHeight; /**< OUT: Max supported coded height in pixels */
    unsigned int nMaxMBCount; /**< OUT: Max supported macroblock count
                                   CodedWidth*CodedHeight/256 must be <= nMaxMBCount */
    unsigned short nMinWidth; /**< OUT: Min supported coded width in pixels */
    unsigned short nMinHeight; /**< OUT: Min supported coded height in pixels */
    unsigned int reserved3[11]; /**< Reserved for future use - set to zero */
} CUVIDDECODECAPS;
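The comment on nMaxMBCount above encodes the one non-obvious capability constraint: coded width times coded height, divided by 256 (16x16 macroblocks), must not exceed the reported count. A hedged, standalone sketch of that check (the struct below is a trimmed stand-in with made-up example values, not data queried from a GPU):

#include <stdio.h>

typedef struct {
    unsigned int   nMaxWidth, nMaxHeight, nMaxMBCount;
    unsigned short nMinWidth, nMinHeight;
    unsigned char  bIsSupported;
} CapsSubset;

static int stream_fits_caps(const CapsSubset *c, unsigned w, unsigned h)
{
    if (!c->bIsSupported)                      return 0;
    if (w < c->nMinWidth || h < c->nMinHeight) return 0;
    if (w > c->nMaxWidth || h > c->nMaxHeight) return 0;
    /* Documented rule: CodedWidth*CodedHeight/256 must be <= nMaxMBCount. */
    return (unsigned long long)w * h / 256 <= c->nMaxMBCount;
}

int main(void)
{
    CapsSubset c = { 4096, 4096, 65536, 48, 16, 1 }; /* example values only */
    printf("%d\n", stream_fits_caps(&c, 1920, 1080)); /* 1: 8100 MBs fit */
    return 0;
}
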
|
||||
/**************************************************************************************************************/
|
||||
//! \struct CUVIDDECODECREATEINFO
|
||||
//! This structure is used in cuvidCreateDecoder API
|
||||
/**************************************************************************************************************/
|
||||
/*!
|
||||
* \struct CUVIDDECODECREATEINFO
|
||||
* Struct used in create decoder
|
||||
*/
|
||||
typedef struct _CUVIDDECODECREATEINFO
|
||||
{
|
||||
tcu_ulong ulWidth; /**< IN: Coded sequence width in pixels */
|
||||
tcu_ulong ulHeight; /**< IN: Coded sequence height in pixels */
|
||||
tcu_ulong ulNumDecodeSurfaces; /**< IN: Maximum number of internal decode surfaces */
|
||||
cudaVideoCodec CodecType; /**< IN: cudaVideoCodec_XXX */
|
||||
cudaVideoChromaFormat ChromaFormat; /**< IN: cudaVideoChromaFormat_XXX */
|
||||
tcu_ulong ulCreationFlags; /**< IN: Decoder creation flags (cudaVideoCreateFlags_XXX) */
|
||||
tcu_ulong bitDepthMinus8; /**< IN: The value "BitDepth minus 8" */
|
||||
tcu_ulong ulIntraDecodeOnly; /**< IN: Set 1 only if video has all intra frames (default value is 0). This will
|
||||
optimize video memory for Intra frames only decoding. The support is limited
|
||||
to specific codecs(H264 rightnow), the flag will be ignored for codecs which
|
||||
are not supported. However decoding might fail if the flag is enabled in case
|
||||
of supported codecs for regular bit streams having P and/or B frames. */
|
||||
tcu_ulong Reserved1[3]; /**< Reserved for future use - set to zero */
|
||||
tcu_ulong ulWidth; /**< Coded Sequence Width */
|
||||
tcu_ulong ulHeight; /**< Coded Sequence Height */
|
||||
tcu_ulong ulNumDecodeSurfaces; /**< Maximum number of internal decode surfaces */
|
||||
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
|
||||
cudaVideoChromaFormat ChromaFormat; /**< cudaVideoChromaFormat_XXX (only 4:2:0 is currently supported) */
|
||||
tcu_ulong ulCreationFlags; /**< Decoder creation flags (cudaVideoCreateFlags_XXX) */
|
||||
tcu_ulong bitDepthMinus8;
|
||||
tcu_ulong Reserved1[4]; /**< Reserved for future use - set to zero */
|
||||
/**
|
||||
* IN: area of the frame that should be displayed
|
||||
* area of the frame that should be displayed
|
||||
*/
|
||||
struct {
|
||||
short left;
|
||||
@@ -182,15 +149,14 @@ typedef struct _CUVIDDECODECREATEINFO
        short bottom;
    } display_area;

    cudaVideoSurfaceFormat OutputFormat; /**< IN: cudaVideoSurfaceFormat_XXX */
    cudaVideoDeinterlaceMode DeinterlaceMode; /**< IN: cudaVideoDeinterlaceMode_XXX */
    tcu_ulong ulTargetWidth; /**< IN: Post-processed output width (Should be aligned to 2) */
    tcu_ulong ulTargetHeight; /**< IN: Post-processed output height (Should be aligbed to 2) */
    tcu_ulong ulNumOutputSurfaces; /**< IN: Maximum number of output surfaces simultaneously mapped */
    CUvideoctxlock vidLock; /**< IN: If non-NULL, context lock used for synchronizing ownership of
                                 the cuda context. Needed for cudaVideoCreate_PreferCUDA decode */
    cudaVideoSurfaceFormat OutputFormat; /**< cudaVideoSurfaceFormat_XXX */
    cudaVideoDeinterlaceMode DeinterlaceMode; /**< cudaVideoDeinterlaceMode_XXX */
    tcu_ulong ulTargetWidth; /**< Post-processed Output Width (Should be aligned to 2) */
    tcu_ulong ulTargetHeight; /**< Post-processed Output Height (Should be aligbed to 2) */
    tcu_ulong ulNumOutputSurfaces; /**< Maximum number of output surfaces simultaneously mapped */
    CUvideoctxlock vidLock; /**< If non-NULL, context lock used for synchronizing ownership of the cuda context */
    /**
     * IN: target rectangle in the output frame (for aspect ratio conversion)
     * target rectangle in the output frame (for aspect ratio conversion)
     * if a null rectangle is specified, {0,0,ulTargetWidth,ulTargetHeight} will be used
     */
    struct {
@@ -202,43 +168,40 @@ typedef struct _CUVIDDECODECREATEINFO
    tcu_ulong Reserved2[5]; /**< Reserved for future use - set to zero */
} CUVIDDECODECREATEINFO;

/*********************************************************/
//! \struct CUVIDH264DPBENTRY
//! H.264 DPB entry
//! This structure is used in CUVIDH264PICPARAMS structure
/*********************************************************/
/*!
 * \struct CUVIDH264DPBENTRY
 * H.264 DPB Entry
 */
typedef struct _CUVIDH264DPBENTRY
{
    int PicIdx; /**< picture index of reference frame */
    int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */
    int is_long_term; /**< 0=short term reference, 1=long term reference */
    int PicIdx; /**< picture index of reference frame */
    int FrameIdx; /**< frame_num(short-term) or LongTermFrameIdx(long-term) */
    int is_long_term; /**< 0=short term reference, 1=long term reference */
    int not_existing; /**< non-existing reference frame (corresponding PicIdx should be set to -1) */
    int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */
    int FieldOrderCnt[2]; /**< field order count of top and bottom fields */
    int used_for_reference; /**< 0=unused, 1=top_field, 2=bottom_field, 3=both_fields */
    int FieldOrderCnt[2]; /**< field order count of top and bottom fields */
} CUVIDH264DPBENTRY;

/************************************************************/
//! \struct CUVIDH264MVCEXT
//! H.264 MVC picture parameters ext
//! This structure is used in CUVIDH264PICPARAMS structure
/************************************************************/
/*!
 * \struct CUVIDH264MVCEXT
 * H.264 MVC Picture Parameters Ext
 */
typedef struct _CUVIDH264MVCEXT
{
    int num_views_minus1; /**< Max number of coded views minus 1 in video : Range - 0 to 1023 */
    int view_id; /**< view identifier */
    unsigned char inter_view_flag; /**< 1 if used for inter-view prediction, 0 if not */
    unsigned char num_inter_view_refs_l0; /**< number of inter-view ref pics in RefPicList0 */
    unsigned char num_inter_view_refs_l1; /**< number of inter-view ref pics in RefPicList1 */
    unsigned char MVCReserved8Bits; /**< Reserved bits */
    int InterViewRefsL0[16]; /**< view id of the i-th view component for inter-view prediction in RefPicList0 */
    int InterViewRefsL1[16]; /**< view id of the i-th view component for inter-view prediction in RefPicList1 */
    int num_views_minus1;
    int view_id;
    unsigned char inter_view_flag;
    unsigned char num_inter_view_refs_l0;
    unsigned char num_inter_view_refs_l1;
    unsigned char MVCReserved8Bits;
    int InterViewRefsL0[16];
    int InterViewRefsL1[16];
} CUVIDH264MVCEXT;

/*********************************************************/
//! \struct CUVIDH264SVCEXT
//! H.264 SVC picture parameters ext
//! This structure is used in CUVIDH264PICPARAMS structure
/*********************************************************/
/*!
 * \struct CUVIDH264SVCEXT
 * H.264 SVC Picture Parameters Ext
 */
typedef struct _CUVIDH264SVCEXT
{
    unsigned char profile_idc;
@@ -264,22 +227,22 @@ typedef struct _CUVIDH264SVCEXT
    unsigned char store_ref_base_pic_flag;
    unsigned char Reserved8BitsA;
    unsigned char Reserved8BitsB;

    // For the 4 scaled_ref_layer_XX fields below,
    // if (extended_spatial_scalability_idc == 1), SPS field, G.7.3.2.1.4, add prefix "seq_"
    // if (extended_spatial_scalability_idc == 2), SLH field, G.7.3.3.4,
    short scaled_ref_layer_left_offset;
    short scaled_ref_layer_top_offset;
    short scaled_ref_layer_right_offset;
    short scaled_ref_layer_bottom_offset;
    unsigned short Reserved16Bits;
    struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded.
                                             Linked list ends at the target layer. */
    struct _CUVIDPICPARAMS *pNextLayer; /**< Points to the picparams for the next layer to be decoded. Linked list ends at the target layer. */
    int bRefBaseLayer; /**< whether to store ref base pic */
} CUVIDH264SVCEXT;

/******************************************************/
//! \struct CUVIDH264PICPARAMS
//! H.264 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/******************************************************/
/*!
 * \struct CUVIDH264PICPARAMS
 * H.264 Picture Parameters
 */
typedef struct _CUVIDH264PICPARAMS
{
    // SPS
@@ -328,21 +291,20 @@ typedef struct _CUVIDH264PICPARAMS
        unsigned long long slice_group_map_addr;
        const unsigned char *pMb2SliceGroupMap;
    } fmo;
    unsigned int Reserved[12];
    unsigned int Reserved[12];
    // SVC/MVC
    union
    {
        CUVIDH264MVCEXT mvcext;
        CUVIDH264SVCEXT svcext;
    };
    } svcmvc;
} CUVIDH264PICPARAMS;


/********************************************************/
//! \struct CUVIDMPEG2PICPARAMS
//! MPEG-2 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/********************************************************/
/*!
 * \struct CUVIDMPEG2PICPARAMS
 * MPEG-2 Picture Parameters
 */
typedef struct _CUVIDMPEG2PICPARAMS
{
    int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
@@ -363,17 +325,21 @@ typedef struct _CUVIDMPEG2PICPARAMS
    unsigned char QuantMatrixInter[64];
} CUVIDMPEG2PICPARAMS;

////////////////////////////////////////////////////////////////////////////////////////////////
//
// MPEG-4 Picture Parameters
//

// MPEG-4 has VOP types instead of Picture types
#define I_VOP 0
#define P_VOP 1
#define B_VOP 2
#define S_VOP 3

/*******************************************************/
//! \struct CUVIDMPEG4PICPARAMS
//! MPEG-4 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/*******************************************************/
/*!
 * \struct CUVIDMPEG4PICPARAMS
 * MPEG-4 Picture Parameters
 */
typedef struct _CUVIDMPEG4PICPARAMS
{
    int ForwardRefIdx; // Picture index of forward reference (P/B-frames)
@@ -404,21 +370,20 @@ typedef struct _CUVIDMPEG4PICPARAMS
    int gmc_enabled;
} CUVIDMPEG4PICPARAMS;

/********************************************************/
//! \struct CUVIDVC1PICPARAMS
//! VC1 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/********************************************************/
/*!
 * \struct CUVIDVC1PICPARAMS
 * VC1 Picture Parameters
 */
typedef struct _CUVIDVC1PICPARAMS
{
    int ForwardRefIdx; /**< Picture index of forward reference (P/B-frames) */
    int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */
    int FrameWidth; /**< Actual frame width */
    int FrameHeight; /**< Actual frame height */
    int BackwardRefIdx; /**< Picture index of backward reference (B-frames) */
    int FrameWidth; /**< Actual frame width */
    int FrameHeight; /**< Actual frame height */
    // PICTURE
    int intra_pic_flag; /**< Set to 1 for I,BI frames */
    int ref_pic_flag; /**< Set to 1 for I,P frames */
    int progressive_fcm; /**< Progressive frame */
    int ref_pic_flag; /**< Set to 1 for I,P frames */
    int progressive_fcm; /**< Progressive frame */
    // SEQUENCE
    int profile;
    int postprocflag;
@@ -449,22 +414,20 @@ typedef struct _CUVIDVC1PICPARAMS
    int rangeredfrm; // range reduction state
} CUVIDVC1PICPARAMS;

/***********************************************************/
//! \struct CUVIDJPEGPICPARAMS
//! JPEG picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/***********************************************************/
/*!
 * \struct CUVIDJPEGPICPARAMS
 * JPEG Picture Parameters
 */
typedef struct _CUVIDJPEGPICPARAMS
{
    int Reserved;
} CUVIDJPEGPICPARAMS;


/*******************************************************/
//! \struct CUVIDHEVCPICPARAMS
//! HEVC picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/*******************************************************/
/*!
 * \struct CUVIDHEVCPICPARAMS
 * HEVC Picture Parameters
 */
typedef struct _CUVIDHEVCPICPARAMS
{
    // sps
@@ -531,8 +494,8 @@ typedef struct _CUVIDHEVCPICPARAMS
|
||||
|
||||
unsigned char deblocking_filter_override_enabled_flag;
|
||||
unsigned char pps_deblocking_filter_disabled_flag;
|
||||
signed char pps_beta_offset_div2;
|
||||
signed char pps_tc_offset_div2;
|
||||
signed char pps_beta_offset_div2;
|
||||
signed char pps_tc_offset_div2;
|
||||
unsigned char tiles_enabled_flag;
|
||||
unsigned char uniform_spacing_flag;
|
||||
unsigned char num_tile_columns_minus1;
|
||||
@@ -540,7 +503,7 @@ typedef struct _CUVIDHEVCPICPARAMS
|
||||
|
||||
unsigned short column_width_minus1[21];
|
||||
unsigned short row_height_minus1[21];
|
||||
unsigned int reserved3[15];
|
||||
unsigned int reserved3[15];
|
||||
|
||||
// RefPicSets
|
||||
int NumBitsForShortTermRPSInSlice;
|
||||
@@ -550,15 +513,15 @@ typedef struct _CUVIDHEVCPICPARAMS
|
||||
int NumPocStCurrAfter;
|
||||
int NumPocLtCurr;
|
||||
int CurrPicOrderCntVal;
|
||||
int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference)
|
||||
int PicOrderCntVal[16]; // [refpic]
|
||||
unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference
|
||||
int RefPicIdx[16]; // [refpic] Indices of valid reference pictures (-1 if unused for reference)
|
||||
int PicOrderCntVal[16]; // [refpic]
|
||||
unsigned char IsLongTerm[16]; // [refpic] 0=not a long-term reference, 1=long-term reference
|
||||
unsigned char RefPicSetStCurrBefore[8]; // [0..NumPocStCurrBefore-1] -> refpic (0..15)
|
||||
unsigned char RefPicSetStCurrAfter[8]; // [0..NumPocStCurrAfter-1] -> refpic (0..15)
|
||||
unsigned char RefPicSetLtCurr[8]; // [0..NumPocLtCurr-1] -> refpic (0..15)
|
||||
unsigned char RefPicSetInterLayer0[8];
|
||||
unsigned char RefPicSetInterLayer1[8];
|
||||
unsigned int reserved4[12];
|
||||
unsigned int reserved4[12];
|
||||
|
||||
// scaling lists (diag order)
|
||||
unsigned char ScalingList4x4[6][16]; // [matrixId][i]
|
||||
@@ -570,11 +533,10 @@ typedef struct _CUVIDHEVCPICPARAMS
|
||||
} CUVIDHEVCPICPARAMS;


/***********************************************************/
//! \struct CUVIDVP8PICPARAMS
//! VP8 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/***********************************************************/
/*!
 * \struct CUVIDVP8PICPARAMS
 * VP8 Picture Parameters
 */
typedef struct _CUVIDVP8PICPARAMS
{
    int width;
@@ -593,16 +555,15 @@ typedef struct _CUVIDVP8PICPARAMS
            unsigned char Reserved2Bits : 2;
        };
        unsigned char wFrameTagFlags;
    };
    } tagflags;
    unsigned char Reserved1[4];
    unsigned int Reserved2[3];
} CUVIDVP8PICPARAMS;

/***********************************************************/
//! \struct CUVIDVP9PICPARAMS
//! VP9 picture parameters
//! This structure is used in CUVIDPICPARAMS structure
/***********************************************************/
/*!
 * \struct CUVIDVP9PICPARAMS
 * VP9 Picture Parameters
 */
typedef struct _CUVIDVP9PICPARAMS
{
    unsigned int width;
@@ -648,7 +609,7 @@ typedef struct _CUVIDVP9PICPARAMS


    unsigned char segmentFeatureEnable[8][4];
    short segmentFeatureData[8][4];
    unsigned char mb_segment_tree_probs[7];
    unsigned char segment_pred_probs[3];
    unsigned char reservedSegment16Bits[2];
@@ -670,163 +631,136 @@ typedef struct _CUVIDVP9PICPARAMS
} CUVIDVP9PICPARAMS;


/******************************************************************************************/
//! \struct CUVIDPICPARAMS
//! Picture parameters for decoding
//! This structure is used in cuvidDecodePicture API
//! IN for cuvidDecodePicture
/******************************************************************************************/
/*!
 * \struct CUVIDPICPARAMS
 * Picture Parameters for Decoding
 */
typedef struct _CUVIDPICPARAMS
{
    int PicWidthInMbs;                      /**< IN: Coded frame size in macroblocks */
    int FrameHeightInMbs;                   /**< IN: Coded frame height in macroblocks */
    int CurrPicIdx;                         /**< IN: Output index of the current picture */
    int field_pic_flag;                     /**< IN: 0=frame picture, 1=field picture */
    int bottom_field_flag;                  /**< IN: 0=top field, 1=bottom field (ignored if field_pic_flag=0) */
    int second_field;                       /**< IN: Second field of a complementary field pair */
    int PicWidthInMbs;                      /**< Coded Frame Size */
    int FrameHeightInMbs;                   /**< Coded Frame Height */
    int CurrPicIdx;                         /**< Output index of the current picture */
    int field_pic_flag;                     /**< 0=frame picture, 1=field picture */
    int bottom_field_flag;                  /**< 0=top field, 1=bottom field (ignored if field_pic_flag=0) */
    int second_field;                       /**< Second field of a complementary field pair */
    // Bitstream data
    unsigned int nBitstreamDataLen;         /**< IN: Number of bytes in bitstream data buffer */
    const unsigned char *pBitstreamData;    /**< IN: Ptr to bitstream data for this picture (slice-layer) */
    unsigned int nNumSlices;                /**< IN: Number of slices in this picture */
    const unsigned int *pSliceDataOffsets;  /**< IN: nNumSlices entries, contains offset of each slice within
                                                     the bitstream data buffer */
    int ref_pic_flag;                       /**< IN: This picture is a reference picture */
    int intra_pic_flag;                     /**< IN: This picture is entirely intra coded */
    unsigned int Reserved[30];              /**< Reserved for future use */
    // IN: Codec-specific data
    unsigned int nBitstreamDataLen;         /**< Number of bytes in bitstream data buffer */
    const unsigned char *pBitstreamData;    /**< Ptr to bitstream data for this picture (slice-layer) */
    unsigned int nNumSlices;                /**< Number of slices in this picture */
    const unsigned int *pSliceDataOffsets;  /**< nNumSlices entries, contains offset of each slice within the bitstream data buffer */
    int ref_pic_flag;                       /**< This picture is a reference picture */
    int intra_pic_flag;                     /**< This picture is entirely intra coded */
    // Codec-specific data
    union {
        CUVIDMPEG2PICPARAMS mpeg2;          /**< Also used for MPEG-1 */
        CUVIDH264PICPARAMS  h264;
        CUVIDVC1PICPARAMS   vc1;
        CUVIDMPEG4PICPARAMS mpeg4;
        CUVIDJPEGPICPARAMS  jpeg;
        CUVIDHEVCPICPARAMS  hevc;
        CUVIDVP8PICPARAMS   vp8;
        CUVIDVP9PICPARAMS   vp9;
        unsigned int CodecReserved[1024];
    } CodecSpecific;
} CUVIDPICPARAMS;


/******************************************************/
//! \struct CUVIDPROCPARAMS
//! Picture parameters for postprocessing
//! This structure is used in cuvidMapVideoFrame API
/******************************************************/
/*!
 * \struct CUVIDPROCPARAMS
 * Picture Parameters for Postprocessing
 */
typedef struct _CUVIDPROCPARAMS
{
    int progressive_frame;   /**< IN: Input is progressive (deinterlace_mode will be ignored) */
    int second_field;        /**< IN: Output the second field (ignored if deinterlace mode is Weave) */
    int top_field_first;     /**< IN: Input frame is top field first (1st field is top, 2nd field is bottom) */
    int unpaired_field;      /**< IN: Input only contains one field (2nd field is invalid) */
    int progressive_frame;   /**< Input is progressive (deinterlace_mode will be ignored) */
    int second_field;        /**< Output the second field (ignored if deinterlace mode is Weave) */
    int top_field_first;     /**< Input frame is top field first (1st field is top, 2nd field is bottom) */
    int unpaired_field;      /**< Input only contains one field (2nd field is invalid) */
    // The fields below are used for raw YUV input
    unsigned int reserved_flags;         /**< Reserved for future use (set to zero) */
    unsigned int reserved_zero;          /**< Reserved (set to zero) */
    unsigned long long raw_input_dptr;   /**< IN: Input CUdeviceptr for raw YUV extensions */
    unsigned int raw_input_pitch;        /**< IN: pitch in bytes of raw YUV input (should be aligned appropriately) */
    unsigned int raw_input_format;       /**< IN: Input YUV format (cudaVideoCodec_enum) */
    unsigned long long raw_output_dptr;  /**< IN: Output CUdeviceptr for raw YUV extensions */
    unsigned int raw_output_pitch;       /**< IN: pitch in bytes of raw YUV output (should be aligned appropriately) */
    unsigned int Reserved1;              /**< Reserved for future use (set to zero) */
    CUstream output_stream;              /**< IN: stream object used by cuvidMapVideoFrame */
    unsigned int Reserved[46];           /**< Reserved for future use (set to zero) */
    void *Reserved2[2];                  /**< Reserved for future use (set to zero) */
    unsigned long long raw_input_dptr;   /**< Input CUdeviceptr for raw YUV extensions */
    unsigned int raw_input_pitch;        /**< pitch in bytes of raw YUV input (should be aligned appropriately) */
    unsigned int raw_input_format;       /**< Reserved for future use (set to zero) */
    unsigned long long raw_output_dptr;  /**< Reserved for future use (set to zero) */
    unsigned int raw_output_pitch;       /**< Reserved for future use (set to zero) */
    unsigned int Reserved[48];
    void *Reserved3[3];
} CUVIDPROCPARAMS;


/***********************************************************************************************************/
//! VIDEO_DECODER
//!
//! In order to minimize decode latencies, there should always be at least 2 pictures in the decode
//! queue at any time, in order to make sure that all decode engines are always busy.
//!
//! Overall data flow:
//!  - cuvidGetDecoderCaps(...)
//!  - cuvidCreateDecoder(...)
//!  - For each picture:
//!    + cuvidDecodePicture(N)
//!    + cuvidMapVideoFrame(N-4)
//!    + do some processing in cuda
//!    + cuvidUnmapVideoFrame(N-4)
//!    + cuvidDecodePicture(N+1)
//!    + cuvidMapVideoFrame(N-3)
//!    + ...
//!  - cuvidDestroyDecoder(...)
//!
//! NOTE:
//! - When the cuda context is created from a D3D device, the D3D device must also be created
//!   with the D3DCREATE_MULTITHREADED flag.
//! - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces)
//! - cuvidDecodePicture may block the calling thread if there are too many pictures pending
//!   in the decode queue
/***********************************************************************************************************/
/**
 *
 * In order to minimize decode latencies, there should always be at least 2 pictures in the decode
 * queue at any time, in order to make sure that all decode engines are always busy.
 *
 * Overall data flow:
 *  - cuvidCreateDecoder(...)
 *  For each picture:
 *  - cuvidDecodePicture(N)
 *  - cuvidMapVideoFrame(N-4)
 *  - do some processing in cuda
 *  - cuvidUnmapVideoFrame(N-4)
 *  - cuvidDecodePicture(N+1)
 *  - cuvidMapVideoFrame(N-3)
 *    ...
 *  - cuvidDestroyDecoder(...)
 *
 * NOTE:
 * - When the cuda context is created from a D3D device, the D3D device must also be created
 *   with the D3DCREATE_MULTITHREADED flag.
 * - There is a limit to how many pictures can be mapped simultaneously (ulNumOutputSurfaces)
 * - cuvidDecodePicture may block the calling thread if there are too many pictures pending
 *   in the decode queue
 */


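[Editor's note: the sketch below illustrates the pipelined data flow described above; it is not part of the diffed headers. It assumes the nvcuvid entry points are resolved (linked directly, or called through the CuvidFunctions table defined further down), uses the 32-bit cuvidMapVideoFrame variant (under __CUVID_DEVPTR64 the 64-bit variant applies), and omits error handling and the CUVIDPICPARAMS setup that the parser's pfnDecodePicture callback normally provides.]

/* Editor's sketch: keep the decode engines busy by mapping picture N-4
 * while picture N is being decoded. */
#define PIPELINE_DEPTH 4

static void decode_pipelined(CUvideodecoder dec, CUVIDPICPARAMS *pics, int npics)
{
    CUVIDPROCPARAMS vpp = { 0 };
    int i;

    vpp.progressive_frame = 1;                 /* assume progressive input here */

    for (i = 0; i < npics; i++) {
        cuvidDecodePicture(dec, &pics[i]);     /* kick off HW decode of picture i */

        if (i >= PIPELINE_DEPTH) {             /* map a picture decoded earlier */
            unsigned int devptr, pitch;
            cuvidMapVideoFrame(dec, pics[i - PIPELINE_DEPTH].CurrPicIdx,
                               &devptr, &pitch, &vpp);
            /* ... process the mapped surface at devptr/pitch in cuda ... */
            cuvidUnmapVideoFrame(dec, devptr); /* release the output surface */
        }
    }
}
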
/**********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidGetDecoderCaps(CUVIDDECODECAPS *pdc)
//! Queries decode capabilities of NVDEC-HW based on CodecType, ChromaFormat and BitDepthMinus8 parameters.
//! 1. Application fills IN parameters CodecType, ChromaFormat and BitDepthMinus8 of CUVIDDECODECAPS structure
//! 2. On calling cuvidGetDecoderCaps, driver fills OUT parameters if the IN parameters are supported
//!    If IN parameters passed to the driver are not supported by NVDEC-HW, then all OUT params are set to 0.
//! E.g. on GeForce GTX 960:
//!   App fills - eCodecType = cudaVideoCodec_H264; eChromaFormat = cudaVideoChromaFormat_420; nBitDepthMinus8 = 0;
//!   Given IN parameters are supported, hence driver fills: bIsSupported = 1; nMinWidth = 48; nMinHeight = 16;
//!   nMaxWidth = 4096; nMaxHeight = 4096; nMaxMBCount = 65536;
//!   CodedWidth*CodedHeight/256 must be less than or equal to nMaxMBCount
/**********************************************************************************************************************/
typedef CUresult CUDAAPI tcuvidGetDecoderCaps(CUVIDDECODECAPS *pdc);

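[Editor's note: a minimal sketch of the two-step query described above, not part of the diff. It assumes CUVIDDECODECAPS uses the field names quoted in the comment (the struct definition itself is outside this diff) and that CUDA_SUCCESS from the CUDA driver API signals success.]

/* Editor's sketch: check whether 1080p H.264 4:2:0 8-bit decoding is supported. */
static int h264_1080p_supported(void)
{
    CUVIDDECODECAPS caps = { 0 };

    caps.eCodecType      = cudaVideoCodec_H264;         /* step 1: fill IN params */
    caps.eChromaFormat   = cudaVideoChromaFormat_420;
    caps.nBitDepthMinus8 = 0;
    if (cuvidGetDecoderCaps(&caps) != CUDA_SUCCESS)     /* step 2: driver fills OUT params */
        return 0;
    return caps.bIsSupported &&
           1920 <= caps.nMaxWidth && 1080 <= caps.nMaxHeight &&
           1920 * 1080 / 256 <= caps.nMaxMBCount;       /* 8100 MBs vs nMaxMBCount (65536) */
}
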
/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci)
//! Create the decoder object based on pdci. A handle to the created decoder is returned
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci)
 * Create the decoder object
 */
typedef CUresult CUDAAPI tcuvidCreateDecoder(CUvideodecoder *phDecoder, CUVIDDECODECREATEINFO *pdci);
/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder)
//! Destroy the decoder object.
/********************************************************************************************************************/

/**
 * \fn CUresult CUDAAPI cuvidDestroyDecoder(CUvideodecoder hDecoder)
 * Destroy the decoder object
 */
typedef CUresult CUDAAPI tcuvidDestroyDecoder(CUvideodecoder hDecoder);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams)
//! Decode a single picture (field or frame)
//! Kicks off HW decoding
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams)
 * Decode a single picture (field or frame)
 */
typedef CUresult CUDAAPI tcuvidDecodePicture(CUvideodecoder hDecoder, CUVIDPICPARAMS *pPicParams);


#if !defined(__CUVID_DEVPTR64) || defined(__CUVID_INTERNAL)
/************************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr,
//!                                         unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
//! Post-process and map video frame corresponding to nPicIdx for use in cuda. Returns cuda device pointer and associated
//! pitch of the video frame
/************************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx, unsigned int *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
 * Post-process and map a video frame for use in cuda
 */
typedef CUresult CUDAAPI tcuvidMapVideoFrame(CUvideodecoder hDecoder, int nPicIdx,
                                             unsigned int *pDevPtr, unsigned int *pPitch,
                                             CUVIDPROCPARAMS *pVPP);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr)
//! Unmap a previously mapped video frame
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr)
 * Unmap a previously mapped video frame
 */
typedef CUresult CUDAAPI tcuvidUnmapVideoFrame(CUvideodecoder hDecoder, unsigned int DevPtr);
#endif

#if defined(_WIN64) || defined(__LP64__) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
/************************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr,
//!                                           unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
//! Post-process and map video frame corresponding to nPicIdx for use in cuda. Returns cuda device pointer and associated
//! pitch of the video frame
/************************************************************************************************************************/
#if defined(WIN64) || defined(_WIN64) || defined(__x86_64) || defined(AMD64) || defined(_M_AMD64)
/**
 * \fn CUresult CUDAAPI cuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr, unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);
 * map a video frame
 */
typedef CUresult CUDAAPI tcuvidMapVideoFrame64(CUvideodecoder hDecoder, int nPicIdx, unsigned long long *pDevPtr,
                                               unsigned int *pPitch, CUVIDPROCPARAMS *pVPP);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
//! Unmap a previously mapped video frame
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);
 * Unmap a previously mapped video frame
 */
typedef CUresult CUDAAPI tcuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsigned long long DevPtr);

#if defined(__CUVID_DEVPTR64) && !defined(__CUVID_INTERNAL)
@@ -836,48 +770,43 @@ typedef CUresult CUDAAPI tcuvidUnmapVideoFrame64(CUvideodecoder hDecoder, unsign
#endif


/**
 *
 * Context-locking: to facilitate multi-threaded implementations, the following 4 functions
 * provide a simple mutex-style host synchronization. If a non-NULL context is specified
 * in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given
 * context before making any cuda calls.
 * A multi-threaded application could create a lock associated with a context handle so that
 * multiple threads can safely share the same cuda context:
 *  - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context
 *    that can be passed to cuvidCtxLockCreate.
 *  - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section.
 *
 * NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video
 * decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls).
 */

/********************************************************************************************************************/
//!
//! Context-locking: to facilitate multi-threaded implementations, the following 4 functions
//! provide a simple mutex-style host synchronization. If a non-NULL context is specified
//! in CUVIDDECODECREATEINFO, the codec library will acquire the mutex associated with the given
//! context before making any cuda calls.
//! A multi-threaded application could create a lock associated with a context handle so that
//! multiple threads can safely share the same cuda context:
//!  - use cuCtxPopCurrent immediately after context creation in order to create a 'floating' context
//!    that can be passed to cuvidCtxLockCreate.
//!  - When using a floating context, all cuda calls should only be made within a cuvidCtxLock/cuvidCtxUnlock section.
//!
//! NOTE: This is a safer alternative to cuCtxPushCurrent and cuCtxPopCurrent, and is not related to video
//! decoder in any way (implemented as a critical section associated with cuCtx{Push|Pop}Current calls).
/********************************************************************************************************************/

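[Editor's note: a sketch of the floating-context pattern described above, not part of the diff. cuCtxCreate and cuCtxPopCurrent come from the CUDA driver API; error handling is omitted.]

/* Editor's sketch: create one floating context and share it between threads. */
static CUvideoctxlock create_shared_lock(CUdevice device)
{
    CUcontext      ctx;
    CUvideoctxlock lock;

    cuCtxCreate(&ctx, 0, device);    /* create the context ...          */
    cuCtxPopCurrent(NULL);           /* ... and immediately float it    */
    cuvidCtxLockCreate(&lock, ctx);  /* the lock now guards this context */
    return lock;
}

/* In every worker thread, bracket all cuda work with the lock. */
static void worker(CUvideoctxlock lock)
{
    cuvidCtxLock(lock, 0);
    /* ... cuMemAlloc / kernel launches / cuvidMapVideoFrame ... */
    cuvidCtxUnlock(lock, 0);
}
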
/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx)
//! This API is used to create CtxLock object
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx)
 */
typedef CUresult CUDAAPI tcuvidCtxLockCreate(CUvideoctxlock *pLock, CUcontext ctx);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck)
//! This API is used to free CtxLock object
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCtxLockDestroy(CUvideoctxlock lck)
 */
typedef CUresult CUDAAPI tcuvidCtxLockDestroy(CUvideoctxlock lck);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags)
//! This API is used to acquire ctxlock
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags)
 */
typedef CUresult CUDAAPI tcuvidCtxLock(CUvideoctxlock lck, unsigned int reserved_flags);

/********************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags)
//! This API is used to release ctxlock
/********************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags)
 */
typedef CUresult CUDAAPI tcuvidCtxUnlock(CUvideoctxlock lck, unsigned int reserved_flags);

/**********************************************************************************************/
/** @} */ /* End VIDEO_DECODER */

#if defined(__cplusplus)
}

@@ -66,9 +66,9 @@
        av_log(NULL, AV_LOG_TRACE, "Loaded lib: %s\n", path); \
    } while (0)

#define LOAD_SYMBOL(fun, tp, symbol) \
#define LOAD_SYMBOL(fun, symbol) \
    do { \
        if (!((f->fun) = (tp*)dlsym(f->lib, symbol))) { \
        if (!((f->fun) = dlsym(f->lib, symbol))) { \
            av_log(NULL, AV_LOG_ERROR, "Cannot load %s\n", symbol); \
            ret = AVERROR_UNKNOWN; \
            goto error; \
@@ -76,15 +76,6 @@
        av_log(NULL, AV_LOG_TRACE, "Loaded sym: %s\n", symbol); \
    } while (0)

#define LOAD_SYMBOL_OPT(fun, tp, symbol) \
    do { \
        if (!((f->fun) = (tp*)dlsym(f->lib, symbol))) { \
            av_log(NULL, AV_LOG_DEBUG, "Cannot load optional %s\n", symbol); \
        } else { \
            av_log(NULL, AV_LOG_TRACE, "Loaded sym: %s\n", symbol); \
        } \
    } while (0)

#define GENERIC_LOAD_FUNC_PREAMBLE(T, n, N) \
    T *f; \
    int ret; \
@@ -134,7 +125,6 @@ typedef struct CudaFunctions CudaFunctions;
#endif

typedef struct CuvidFunctions {
    tcuvidGetDecoderCaps *cuvidGetDecoderCaps;
    tcuvidCreateDecoder *cuvidCreateDecoder;
    tcuvidDestroyDecoder *cuvidDestroyDecoder;
    tcuvidDecodePicture *cuvidDecodePicture;
@@ -159,12 +149,9 @@ typedef struct CuvidFunctions {
    LIB_HANDLE lib;
} CuvidFunctions;

typedef NVENCSTATUS NVENCAPI tNvEncodeAPICreateInstance(NV_ENCODE_API_FUNCTION_LIST *functionList);
typedef NVENCSTATUS NVENCAPI tNvEncodeAPIGetMaxSupportedVersion(uint32_t* version);

typedef struct NvencFunctions {
    tNvEncodeAPICreateInstance *NvEncodeAPICreateInstance;
    tNvEncodeAPIGetMaxSupportedVersion *NvEncodeAPIGetMaxSupportedVersion;
    NVENCSTATUS (NVENCAPI *NvEncodeAPICreateInstance)(NV_ENCODE_API_FUNCTION_LIST *functionList);
    NVENCSTATUS (NVENCAPI *NvEncodeAPIGetMaxSupportedVersion)(uint32_t* version);

    LIB_HANDLE lib;
} NvencFunctions;
@@ -191,20 +178,20 @@ static inline int cuda_load_functions(CudaFunctions **functions)
{
    GENERIC_LOAD_FUNC_PREAMBLE(CudaFunctions, cuda, CUDA_LIBNAME);

    LOAD_SYMBOL(cuInit, tcuInit, "cuInit");
    LOAD_SYMBOL(cuDeviceGetCount, tcuDeviceGetCount, "cuDeviceGetCount");
    LOAD_SYMBOL(cuDeviceGet, tcuDeviceGet, "cuDeviceGet");
    LOAD_SYMBOL(cuDeviceGetName, tcuDeviceGetName, "cuDeviceGetName");
    LOAD_SYMBOL(cuDeviceComputeCapability, tcuDeviceComputeCapability, "cuDeviceComputeCapability");
    LOAD_SYMBOL(cuCtxCreate, tcuCtxCreate_v2, "cuCtxCreate_v2");
    LOAD_SYMBOL(cuCtxPushCurrent, tcuCtxPushCurrent_v2, "cuCtxPushCurrent_v2");
    LOAD_SYMBOL(cuCtxPopCurrent, tcuCtxPopCurrent_v2, "cuCtxPopCurrent_v2");
    LOAD_SYMBOL(cuCtxDestroy, tcuCtxDestroy_v2, "cuCtxDestroy_v2");
    LOAD_SYMBOL(cuMemAlloc, tcuMemAlloc_v2, "cuMemAlloc_v2");
    LOAD_SYMBOL(cuMemFree, tcuMemFree_v2, "cuMemFree_v2");
    LOAD_SYMBOL(cuMemcpy2D, tcuMemcpy2D_v2, "cuMemcpy2D_v2");
    LOAD_SYMBOL(cuGetErrorName, tcuGetErrorName, "cuGetErrorName");
    LOAD_SYMBOL(cuGetErrorString, tcuGetErrorString, "cuGetErrorString");
    LOAD_SYMBOL(cuInit, "cuInit");
    LOAD_SYMBOL(cuDeviceGetCount, "cuDeviceGetCount");
    LOAD_SYMBOL(cuDeviceGet, "cuDeviceGet");
    LOAD_SYMBOL(cuDeviceGetName, "cuDeviceGetName");
    LOAD_SYMBOL(cuDeviceComputeCapability, "cuDeviceComputeCapability");
    LOAD_SYMBOL(cuCtxCreate, "cuCtxCreate_v2");
    LOAD_SYMBOL(cuCtxPushCurrent, "cuCtxPushCurrent_v2");
    LOAD_SYMBOL(cuCtxPopCurrent, "cuCtxPopCurrent_v2");
    LOAD_SYMBOL(cuCtxDestroy, "cuCtxDestroy_v2");
    LOAD_SYMBOL(cuMemAlloc, "cuMemAlloc_v2");
    LOAD_SYMBOL(cuMemFree, "cuMemFree_v2");
    LOAD_SYMBOL(cuMemcpy2D, "cuMemcpy2D_v2");
    LOAD_SYMBOL(cuGetErrorName, "cuGetErrorName");
    LOAD_SYMBOL(cuGetErrorString, "cuGetErrorString");

    GENERIC_LOAD_FUNC_FINALE(cuda);
}
@@ -214,32 +201,31 @@ static inline int cuvid_load_functions(CuvidFunctions **functions)
{
    GENERIC_LOAD_FUNC_PREAMBLE(CuvidFunctions, cuvid, NVCUVID_LIBNAME);

    LOAD_SYMBOL_OPT(cuvidGetDecoderCaps, tcuvidGetDecoderCaps, "cuvidGetDecoderCaps");
    LOAD_SYMBOL(cuvidCreateDecoder, tcuvidCreateDecoder, "cuvidCreateDecoder");
    LOAD_SYMBOL(cuvidDestroyDecoder, tcuvidDestroyDecoder, "cuvidDestroyDecoder");
    LOAD_SYMBOL(cuvidDecodePicture, tcuvidDecodePicture, "cuvidDecodePicture");
    LOAD_SYMBOL(cuvidCreateDecoder, "cuvidCreateDecoder");
    LOAD_SYMBOL(cuvidDestroyDecoder, "cuvidDestroyDecoder");
    LOAD_SYMBOL(cuvidDecodePicture, "cuvidDecodePicture");
#ifdef __CUVID_DEVPTR64
    LOAD_SYMBOL(cuvidMapVideoFrame, tcuvidMapVideoFrame, "cuvidMapVideoFrame64");
    LOAD_SYMBOL(cuvidUnmapVideoFrame, tcuvidUnmapVideoFrame, "cuvidUnmapVideoFrame64");
    LOAD_SYMBOL(cuvidMapVideoFrame, "cuvidMapVideoFrame64");
    LOAD_SYMBOL(cuvidUnmapVideoFrame, "cuvidUnmapVideoFrame64");
#else
    LOAD_SYMBOL(cuvidMapVideoFrame, tcuvidMapVideoFrame, "cuvidMapVideoFrame");
    LOAD_SYMBOL(cuvidUnmapVideoFrame, tcuvidUnmapVideoFrame, "cuvidUnmapVideoFrame");
    LOAD_SYMBOL(cuvidMapVideoFrame, "cuvidMapVideoFrame");
    LOAD_SYMBOL(cuvidUnmapVideoFrame, "cuvidUnmapVideoFrame");
#endif
    LOAD_SYMBOL(cuvidCtxLockCreate, tcuvidCtxLockCreate, "cuvidCtxLockCreate");
    LOAD_SYMBOL(cuvidCtxLockDestroy, tcuvidCtxLockDestroy, "cuvidCtxLockDestroy");
    LOAD_SYMBOL(cuvidCtxLock, tcuvidCtxLock, "cuvidCtxLock");
    LOAD_SYMBOL(cuvidCtxUnlock, tcuvidCtxUnlock, "cuvidCtxUnlock");
    LOAD_SYMBOL(cuvidCtxLockCreate, "cuvidCtxLockCreate");
    LOAD_SYMBOL(cuvidCtxLockDestroy, "cuvidCtxLockDestroy");
    LOAD_SYMBOL(cuvidCtxLock, "cuvidCtxLock");
    LOAD_SYMBOL(cuvidCtxUnlock, "cuvidCtxUnlock");

    LOAD_SYMBOL(cuvidCreateVideoSource, tcuvidCreateVideoSource, "cuvidCreateVideoSource");
    LOAD_SYMBOL(cuvidCreateVideoSourceW, tcuvidCreateVideoSourceW, "cuvidCreateVideoSourceW");
    LOAD_SYMBOL(cuvidDestroyVideoSource, tcuvidDestroyVideoSource, "cuvidDestroyVideoSource");
    LOAD_SYMBOL(cuvidSetVideoSourceState, tcuvidSetVideoSourceState, "cuvidSetVideoSourceState");
    LOAD_SYMBOL(cuvidGetVideoSourceState, tcuvidGetVideoSourceState, "cuvidGetVideoSourceState");
    LOAD_SYMBOL(cuvidGetSourceVideoFormat, tcuvidGetSourceVideoFormat, "cuvidGetSourceVideoFormat");
    LOAD_SYMBOL(cuvidGetSourceAudioFormat, tcuvidGetSourceAudioFormat, "cuvidGetSourceAudioFormat");
    LOAD_SYMBOL(cuvidCreateVideoParser, tcuvidCreateVideoParser, "cuvidCreateVideoParser");
    LOAD_SYMBOL(cuvidParseVideoData, tcuvidParseVideoData, "cuvidParseVideoData");
    LOAD_SYMBOL(cuvidDestroyVideoParser, tcuvidDestroyVideoParser, "cuvidDestroyVideoParser");
    LOAD_SYMBOL(cuvidCreateVideoSource, "cuvidCreateVideoSource");
    LOAD_SYMBOL(cuvidCreateVideoSourceW, "cuvidCreateVideoSourceW");
    LOAD_SYMBOL(cuvidDestroyVideoSource, "cuvidDestroyVideoSource");
    LOAD_SYMBOL(cuvidSetVideoSourceState, "cuvidSetVideoSourceState");
    LOAD_SYMBOL(cuvidGetVideoSourceState, "cuvidGetVideoSourceState");
    LOAD_SYMBOL(cuvidGetSourceVideoFormat, "cuvidGetSourceVideoFormat");
    LOAD_SYMBOL(cuvidGetSourceAudioFormat, "cuvidGetSourceAudioFormat");
    LOAD_SYMBOL(cuvidCreateVideoParser, "cuvidCreateVideoParser");
    LOAD_SYMBOL(cuvidParseVideoData, "cuvidParseVideoData");
    LOAD_SYMBOL(cuvidDestroyVideoParser, "cuvidDestroyVideoParser");

    GENERIC_LOAD_FUNC_FINALE(cuvid);
}
@@ -248,8 +234,8 @@ static inline int nvenc_load_functions(NvencFunctions **functions)
{
    GENERIC_LOAD_FUNC_PREAMBLE(NvencFunctions, nvenc, NVENC_LIBNAME);

    LOAD_SYMBOL(NvEncodeAPICreateInstance, tNvEncodeAPICreateInstance, "NvEncodeAPICreateInstance");
    LOAD_SYMBOL(NvEncodeAPIGetMaxSupportedVersion, tNvEncodeAPIGetMaxSupportedVersion, "NvEncodeAPIGetMaxSupportedVersion");
    LOAD_SYMBOL(NvEncodeAPICreateInstance, "NvEncodeAPICreateInstance");
    LOAD_SYMBOL(NvEncodeAPIGetMaxSupportedVersion, "NvEncodeAPIGetMaxSupportedVersion");

    GENERIC_LOAD_FUNC_FINALE(nvenc);
}

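[Editor's note: a sketch of how these loaders are consumed, not part of the diff; the matching *_free_functions() cleanup helpers are assumed to exist elsewhere in this header.]

/* Editor's sketch: resolve both function tables, then reach every entry point
 * through them instead of linking the NVIDIA libraries at build time. */
static int load_nvidia_functions(CudaFunctions **cu, CuvidFunctions **cv)
{
    int ret;

    ret = cuda_load_functions(cu);
    if (ret < 0)
        return ret;                     /* driver library could not be loaded */
    ret = cuvid_load_functions(cv);
    if (ret < 0)
        return ret;

    (*cu)->cuInit(0);
    if (!(*cv)->cuvidGetDecoderCaps) {
        /* loaded with LOAD_SYMBOL_OPT: NULL on drivers predating the caps API */
    }
    return 0;
}
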
@@ -1,7 +1,7 @@
/*
 * This copyright notice applies to this header file only:
 *
 * Copyright (c) 2010-2017 NVIDIA Corporation
 * Copyright (c) 2010-2016 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -25,12 +25,12 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/********************************************************************************************************************/
//! \file nvcuvid.h
//! NVDECODE API provides video decoding interface to NVIDIA GPU devices.
//! \date 2015-2017
//! This file contains the interface constants, structure definitions and function prototypes.
/********************************************************************************************************************/
/**
 * \file nvcuvid.h
 * NvCuvid API provides Video Decoding interface to NVIDIA GPU devices.
 * \date 2015-2015
 * This file contains the interface constants, structure definitions and function prototypes.
 */

#if !defined(__NVCUVID_H__)
#define __NVCUVID_H__
@@ -41,87 +41,78 @@
extern "C" {
#endif /* __cplusplus */

/*********************************
** Initialization
*********************************/
CUresult CUDAAPI cuvidInit(unsigned int Flags);

/***********************************************/
//!
//! High-level helper APIs for video sources
//!
/***********************************************/
////////////////////////////////////////////////////////////////////////////////////////////////
//
// High-level helper APIs for video sources
//

typedef void *CUvideosource;
typedef void *CUvideoparser;
typedef long long CUvideotimestamp;

/**
 * \addtogroup VIDEO_PARSER Video Parser
 * @{
 */

/************************************************************************/
//! \enum cudaVideoState
//! Video source state enums
//! Used in cuvidSetVideoSourceState and cuvidGetVideoSourceState APIs
/************************************************************************/
/*!
 * \enum cudaVideoState
 * Video Source State
 */
typedef enum {
    cudaVideoState_Error   = -1,    /**< Error state (invalid source) */
    cudaVideoState_Stopped = 0,     /**< Source is stopped (or reached end-of-stream) */
    cudaVideoState_Started = 1      /**< Source is running and delivering data */
} cudaVideoState;

/************************************************************************/
//! \enum cudaAudioCodec
//! Audio compression enums
//! Used in CUAUDIOFORMAT structure
/************************************************************************/
/*!
 * \enum cudaAudioCodec
 * Audio compression
 */
typedef enum {
    cudaAudioCodec_MPEG1=0,     /**< MPEG-1 Audio */
    cudaAudioCodec_MPEG2,       /**< MPEG-2 Audio */
    cudaAudioCodec_MP3,         /**< MPEG-1 Layer III Audio */
    cudaAudioCodec_AC3,         /**< Dolby Digital (AC3) Audio */
    cudaAudioCodec_LPCM,        /**< PCM Audio */
    cudaAudioCodec_AAC,         /**< AAC Audio */
    cudaAudioCodec_LPCM         /**< PCM Audio */
} cudaAudioCodec;

/************************************************************************************************/
//! \ingroup STRUCTS
//! \struct CUVIDEOFORMAT
//! Video format
//! Used in cuvidGetSourceVideoFormat API
/************************************************************************************************/
/*!
 * \struct CUVIDEOFORMAT
 * Video format
 */
typedef struct
{
    cudaVideoCodec codec;                   /**< OUT: Compression format */
    cudaVideoCodec codec;                   /**< Compression format */
    /**
     * OUT: frame rate = numerator / denominator (for example: 30000/1001)
     * frame rate = numerator / denominator (for example: 30000/1001)
     */
    struct {
        /**< OUT: frame rate numerator   (0 = unspecified or variable frame rate) */
        unsigned int numerator;
        /**< OUT: frame rate denominator (0 = unspecified or variable frame rate) */
        unsigned int denominator;
        unsigned int numerator;             /**< frame rate numerator   (0 = unspecified or variable frame rate) */
        unsigned int denominator;           /**< frame rate denominator (0 = unspecified or variable frame rate) */
    } frame_rate;
    unsigned char progressive_sequence;     /**< OUT: 0=interlaced, 1=progressive */
    unsigned char bit_depth_luma_minus8;    /**< OUT: high bit depth luma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */
    unsigned char bit_depth_chroma_minus8;  /**< OUT: high bit depth chroma. E.g, 2 for 10-bitdepth, 4 for 12-bitdepth */
    unsigned char reserved1;                /**< Reserved for future use */
    unsigned int coded_width;               /**< OUT: coded frame width in pixels */
    unsigned int coded_height;              /**< OUT: coded frame height in pixels */
    unsigned char progressive_sequence;     /**< 0=interlaced, 1=progressive */
    unsigned char bit_depth_luma_minus8;    /**< high bit depth Luma */
    unsigned char bit_depth_chroma_minus8;  /**< high bit depth Chroma */
    unsigned int coded_width;               /**< coded frame width */
    unsigned int coded_height;              /**< coded frame height */
    /**
     * area of the frame that should be displayed
     * typical example:
     *   coded_width = 1920, coded_height = 1088
     *   display_area = { 0,0,1920,1080 }
     */
    struct {
        int left;                           /**< OUT: left position of display rect */
        int top;                            /**< OUT: top position of display rect */
        int right;                          /**< OUT: right position of display rect */
        int bottom;                         /**< OUT: bottom position of display rect */
        int left;                           /**< left position of display rect */
        int top;                            /**< top position of display rect */
        int right;                          /**< right position of display rect */
        int bottom;                         /**< bottom position of display rect */
    } display_area;
    cudaVideoChromaFormat chroma_format;    /**< OUT: Chroma format */
    unsigned int bitrate;                   /**< OUT: video bitrate (bps, 0=unknown) */
    cudaVideoChromaFormat chroma_format;    /**< Chroma format */
    unsigned int bitrate;                   /**< video bitrate (bps, 0=unknown) */
    /**
     * OUT: Display Aspect Ratio = x:y (4:3, 16:9, etc)
     * Display Aspect Ratio = x:y (4:3, 16:9, etc)
     */
    struct {
        int x;
@@ -129,223 +120,192 @@ typedef struct
    } display_aspect_ratio;
    /**
     * Video Signal Description
     * Refer section E.2.1 (VUI parameters semantics) of H264 spec file
     */
    struct {
        unsigned char video_format          : 3;    /**< OUT: 0-Component, 1-PAL, 2-NTSC, 3-SECAM, 4-MAC, 5-Unspecified */
        unsigned char video_full_range_flag : 1;    /**< OUT: indicates the black level and luma and chroma range */
        unsigned char reserved_zero_bits    : 4;    /**< Reserved bits */
        unsigned char color_primaries;              /**< OUT: chromaticity coordinates of source primaries */
        unsigned char transfer_characteristics;     /**< OUT: opto-electronic transfer characteristic of the source picture */
        unsigned char matrix_coefficients;          /**< OUT: used in deriving luma and chroma signals from RGB primaries */
        unsigned char video_format          : 3;
        unsigned char video_full_range_flag : 1;
        unsigned char reserved_zero_bits    : 4;
        unsigned char color_primaries;
        unsigned char transfer_characteristics;
        unsigned char matrix_coefficients;
    } video_signal_description;
    unsigned int seqhdr_data_length;        /**< OUT: Additional bytes following (CUVIDEOFORMATEX) */
    unsigned int seqhdr_data_length;        /**< Additional bytes following (CUVIDEOFORMATEX) */
} CUVIDEOFORMAT;

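[Editor's note: a sketch of consuming a CUVIDEOFORMAT, e.g. inside the parser's pfnSequenceCallback further down; not part of the diff. It illustrates the coded-size vs. display-area distinction from the comment above.]

/* Editor's sketch: derive output geometry and frame rate from the format. */
static int handle_format(const CUVIDEOFORMAT *fmt)
{
    /* crop to the display rect: 1920x1080 even when coded_height is 1088 */
    int out_w = fmt->display_area.right  - fmt->display_area.left;
    int out_h = fmt->display_area.bottom - fmt->display_area.top;
    double fps = 0.0;

    if (fmt->frame_rate.numerator && fmt->frame_rate.denominator)
        fps = (double)fmt->frame_rate.numerator /
              fmt->frame_rate.denominator;       /* e.g. 30000/1001 = 29.97 */

    /* ... use out_w/out_h/fps to configure the decoder ... */
    (void)out_w; (void)out_h; (void)fps;
    return fmt->progressive_sequence;            /* 0 means interlaced input */
}
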
/****************************************************************/
//! \ingroup STRUCTS
//! \struct CUVIDEOFORMATEX
//! Video format including raw sequence header information
//! Used in cuvidGetSourceVideoFormat API
/****************************************************************/
/*!
 * \struct CUVIDEOFORMATEX
 * Video format including raw sequence header information
 */
typedef struct
{
    CUVIDEOFORMAT format;                 /**< OUT: CUVIDEOFORMAT structure */
    unsigned char raw_seqhdr_data[1024];  /**< OUT: Sequence header data */
    CUVIDEOFORMAT format;
    unsigned char raw_seqhdr_data[1024];
} CUVIDEOFORMATEX;

/****************************************************************/
//! \ingroup STRUCTS
//! \struct CUAUDIOFORMAT
//! Audio formats
//! Used in cuvidGetSourceAudioFormat API
/****************************************************************/
/*!
 * \struct CUAUDIOFORMAT
 * Audio Formats
 */
typedef struct
{
    cudaAudioCodec codec;        /**< OUT: Compression format */
    unsigned int channels;       /**< OUT: number of audio channels */
    unsigned int samplespersec;  /**< OUT: sampling frequency */
    unsigned int bitrate;        /**< OUT: For uncompressed, can also be used to determine bits per sample */
    unsigned int reserved1;      /**< Reserved for future use */
    unsigned int reserved2;      /**< Reserved for future use */
    cudaAudioCodec codec;        /**< Compression format */
    unsigned int channels;       /**< number of audio channels */
    unsigned int samplespersec;  /**< sampling frequency */
    unsigned int bitrate;        /**< For uncompressed, can also be used to determine bits per sample */
} CUAUDIOFORMAT;


/***************************************************************/
//! \enum CUvideopacketflags
//! Data packet flags
//! Used in CUVIDSOURCEDATAPACKET structure
/***************************************************************/
/*!
 * \enum CUvideopacketflags
 * Data packet flags
 */
typedef enum {
    CUVID_PKT_ENDOFSTREAM   = 0x01,   /**< Set when this is the last packet for this stream */
    CUVID_PKT_TIMESTAMP     = 0x02,   /**< Timestamp is valid */
    CUVID_PKT_DISCONTINUITY = 0x04,   /**< Set when a discontinuity has to be signalled */
    CUVID_PKT_ENDOFPICTURE  = 0x08,   /**< Set when the packet contains exactly one frame */
    CUVID_PKT_DISCONTINUITY = 0x04    /**< Set when a discontinuity has to be signalled */
} CUvideopacketflags;

/*****************************************************************************/
//! \ingroup STRUCTS
//! \struct CUVIDSOURCEDATAPACKET
//! Data Packet
//! Used in cuvidParseVideoData API
//! IN for cuvidParseVideoData
/*****************************************************************************/
/*!
 * \struct CUVIDSOURCEDATAPACKET
 * Data Packet
 */
typedef struct _CUVIDSOURCEDATAPACKET
{
    tcu_ulong flags;               /**< IN: Combination of CUVID_PKT_XXX flags */
    tcu_ulong payload_size;        /**< IN: number of bytes in the payload (may be zero if EOS flag is set) */
    const unsigned char *payload;  /**< IN: Pointer to packet payload data (may be NULL if EOS flag is set) */
    CUvideotimestamp timestamp;    /**< IN: Presentation time stamp (10MHz clock), only valid if
                                            CUVID_PKT_TIMESTAMP flag is set */
    tcu_ulong flags;               /**< Combination of CUVID_PKT_XXX flags */
    tcu_ulong payload_size;        /**< number of bytes in the payload (may be zero if EOS flag is set) */
    const unsigned char *payload;  /**< Pointer to packet payload data (may be NULL if EOS flag is set) */
    CUvideotimestamp timestamp;    /**< Presentation timestamp (10MHz clock), only valid if CUVID_PKT_TIMESTAMP flag is set */
} CUVIDSOURCEDATAPACKET;

// Callback for packet delivery
typedef int (CUDAAPI *PFNVIDSOURCECALLBACK)(void *, CUVIDSOURCEDATAPACKET *);

/**************************************************************************************************************************/
//! \ingroup STRUCTS
//! \struct CUVIDSOURCEPARAMS
//! Describes parameters needed in cuvidCreateVideoSource API
//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported
//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed.
/**************************************************************************************************************************/
/*!
 * \struct CUVIDSOURCEPARAMS
 * Source Params
 */
typedef struct _CUVIDSOURCEPARAMS
{
    unsigned int ulClockRate;                  /**< IN: Time stamp units in Hz (0=default=10000000Hz) */
    unsigned int uReserved1[7];                /**< Reserved for future use - set to zero */
    void *pUserData;                           /**< IN: User private data passed in to the data handlers */
    PFNVIDSOURCECALLBACK pfnVideoDataHandler;  /**< IN: Called to deliver video packets */
    PFNVIDSOURCECALLBACK pfnAudioDataHandler;  /**< IN: Called to deliver audio packets. */
    void *pvReserved2[8];                      /**< Reserved for future use - set to NULL */
    unsigned int ulClockRate;                  /**< Timestamp units in Hz (0=default=10000000Hz) */
    void *pUserData;                           /**< Parameter passed in to the data handlers */
    PFNVIDSOURCECALLBACK pfnVideoDataHandler;  /**< Called to deliver video packets */
    PFNVIDSOURCECALLBACK pfnAudioDataHandler;  /**< Called to deliver audio packets */
} CUVIDSOURCEPARAMS;


/**********************************************/
//! \ingroup ENUMS
//! \enum CUvideosourceformat_flags
//! CUvideosourceformat_flags
//! Used in cuvidGetSourceVideoFormat API
/**********************************************/
/*!
 * \enum CUvideosourceformat_flags
 * CUvideosourceformat_flags
 */
typedef enum {
    CUVID_FMT_EXTFORMATINFO = 0x100   /**< Return extended format structure (CUVIDEOFORMATEX) */
} CUvideosourceformat_flags;

#if !defined(__APPLE__)
/**************************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams)
//! Create CUvideosource object. CUvideosource spawns demultiplexer thread that provides two callbacks:
//! pfnVideoDataHandler() and pfnAudioDataHandler()
//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported
//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed.
/**************************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams)
 * Create Video Source
 */
typedef CUresult CUDAAPI tcuvidCreateVideoSource(CUvideosource *pObj, const char *pszFileName, CUVIDSOURCEPARAMS *pParams);

/****************************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams)
//! Create video source object and initialize
/****************************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams)
 * Create Video Source
 */
typedef CUresult CUDAAPI tcuvidCreateVideoSourceW(CUvideosource *pObj, const wchar_t *pwszFileName, CUVIDSOURCEPARAMS *pParams);

/*********************************************************************/
//! \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj)
//! Destroy video source
/*********************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidDestroyVideoSource(CUvideosource obj)
 * Destroy Video Source
 */
typedef CUresult CUDAAPI tcuvidDestroyVideoSource(CUvideosource obj);

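[Editor's note: a sketch of the video source usage, not part of the diff. The return convention of PFNVIDSOURCECALLBACK (non-zero keeps the source running) is an assumption here, and the filename is purely illustrative.]

/* Editor's sketch: open a source whose demuxer thread pushes packets into the
 * handlers configured through CUVIDSOURCEPARAMS above. */
static int CUDAAPI on_video_packet(void *user, CUVIDSOURCEDATAPACKET *pkt)
{
    /* forward pkt to cuvidParseVideoData() here */
    return 1;   /* assumed: non-zero keeps the demuxer thread running */
}

static CUvideosource open_source(const char *filename)
{
    CUVIDSOURCEPARAMS sp = { 0 };
    CUvideosource src = NULL;

    sp.pfnVideoDataHandler = on_video_packet;
    sp.pfnAudioDataHandler = NULL;   /* no audio demuxing for most containers */
    if (cuvidCreateVideoSource(&src, filename, &sp) == CUDA_SUCCESS)
        cuvidSetVideoSourceState(src, cudaVideoState_Started);
    return src;
}
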
/******************************************************************************************/
//! \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state)
//! Set video source state
/******************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state)
 * Set Video Source state
 */
typedef CUresult CUDAAPI tcuvidSetVideoSourceState(CUvideosource obj, cudaVideoState state);

/******************************************************************************************/
//! \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj)
//! Get video source state
/******************************************************************************************/
/**
 * \fn cudaVideoState CUDAAPI cuvidGetVideoSourceState(CUvideosource obj)
 * Get Video Source state
 */
typedef cudaVideoState CUDAAPI tcuvidGetVideoSourceState(CUvideosource obj);

/****************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags)
//! Gets details of video stream in pvidfmt
/****************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags)
 * Get Video Source Format
 */
typedef CUresult CUDAAPI tcuvidGetSourceVideoFormat(CUvideosource obj, CUVIDEOFORMAT *pvidfmt, unsigned int flags);

/****************************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags)
//! Get audio source format
//! NVDECODE API is intended for HW accelerated video decoding so CUvideosource doesn't have audio demuxer for all supported
//! containers. It's recommended to clients to use their own or third party demuxer if audio support is needed.
/****************************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags)
 * Get Audio Source Format
 */
typedef CUresult CUDAAPI tcuvidGetSourceAudioFormat(CUvideosource obj, CUAUDIOFORMAT *paudfmt, unsigned int flags);

#endif
/**********************************************************************************/
//! \ingroup STRUCTS
//! \struct CUVIDPARSERDISPINFO
//! Used in cuvidParseVideoData API with PFNVIDDISPLAYCALLBACK pfnDisplayPicture
/**********************************************************************************/

/**
 * \struct CUVIDPARSERDISPINFO
 */
typedef struct _CUVIDPARSERDISPINFO
{
    int picture_index;           /**< OUT: Index of the current picture */
    int progressive_frame;       /**< OUT: 1 if progressive frame; 0 otherwise */
    int top_field_first;         /**< OUT: 1 if top field is displayed first; 0 otherwise */
    int repeat_first_field;      /**< OUT: Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling,
                                           -1=unpaired field) */
    CUvideotimestamp timestamp;  /**< OUT: Presentation time stamp */
    int picture_index;           /**< */
    int progressive_frame;       /**< */
    int top_field_first;         /**< */
    int repeat_first_field;      /**< Number of additional fields (1=ivtc, 2=frame doubling, 4=frame tripling, -1=unpaired field) */
    CUvideotimestamp timestamp;  /**< */
} CUVIDPARSERDISPINFO;

/***********************************************************************************************************************/
//! Parser callbacks
//! The parser will call these synchronously from within cuvidParseVideoData(), whenever a picture is ready to
//! be decoded and/or displayed. First argument in functions is "void *pUserData" member of structure CUVIDSOURCEPARAMS
/***********************************************************************************************************************/
//
// Parser callbacks
// The parser will call these synchronously from within cuvidParseVideoData(), whenever a picture is ready to
// be decoded and/or displayed.
//
typedef int (CUDAAPI *PFNVIDSEQUENCECALLBACK)(void *, CUVIDEOFORMAT *);
typedef int (CUDAAPI *PFNVIDDECODECALLBACK)(void *, CUVIDPICPARAMS *);
typedef int (CUDAAPI *PFNVIDDISPLAYCALLBACK)(void *, CUVIDPARSERDISPINFO *);

/**************************************/
|
||||
//! \ingroup STRUCTS
|
||||
//! \struct CUVIDPARSERPARAMS
|
||||
//! Used in cuvidCreateVideoParser API
|
||||
/**************************************/
|
||||
/**
|
||||
* \struct CUVIDPARSERPARAMS
|
||||
*/
|
||||
typedef struct _CUVIDPARSERPARAMS
|
||||
{
|
||||
cudaVideoCodec CodecType; /**< IN: cudaVideoCodec_XXX */
|
||||
unsigned int ulMaxNumDecodeSurfaces; /**< IN: Max # of decode surfaces (parser will cycle through these) */
|
||||
unsigned int ulClockRate; /**< IN: Timestamp units in Hz (0=default=10000000Hz) */
|
||||
unsigned int ulErrorThreshold; /**< IN: % Error threshold (0-100) for calling pfnDecodePicture (100=always
|
||||
IN: call pfnDecodePicture even if picture bitstream is fully corrupted) */
|
||||
unsigned int ulMaxDisplayDelay; /**< IN: Max display queue delay (improves pipelining of decode with display)
|
||||
0=no delay (recommended values: 2..4) */
|
||||
unsigned int uReserved1[5]; /**< IN: Reserved for future use - set to 0 */
|
||||
void *pUserData; /**< IN: User data for callbacks */
|
||||
PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< IN: Called before decoding frames and/or whenever there is a fmt change */
|
||||
PFNVIDDECODECALLBACK pfnDecodePicture; /**< IN: Called when a picture is ready to be decoded (decode order) */
|
||||
PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< IN: Called whenever a picture is ready to be displayed (display order) */
|
||||
void *pvReserved2[7]; /**< Reserved for future use - set to NULL */
|
||||
CUVIDEOFORMATEX *pExtVideoInfo; /**< IN: [Optional] sequence header data from system layer */
|
||||
cudaVideoCodec CodecType; /**< cudaVideoCodec_XXX */
|
||||
unsigned int ulMaxNumDecodeSurfaces; /**< Max # of decode surfaces (parser will cycle through these) */
|
||||
unsigned int ulClockRate; /**< Timestamp units in Hz (0=default=10000000Hz) */
|
||||
unsigned int ulErrorThreshold; /**< % Error threshold (0-100) for calling pfnDecodePicture (100=always call pfnDecodePicture even if picture bitstream is fully corrupted) */
|
||||
unsigned int ulMaxDisplayDelay; /**< Max display queue delay (improves pipelining of decode with display) - 0=no delay (recommended values: 2..4) */
|
||||
unsigned int uReserved1[5]; /**< Reserved for future use - set to 0 */
|
||||
void *pUserData; /**< User data for callbacks */
|
||||
PFNVIDSEQUENCECALLBACK pfnSequenceCallback; /**< Called before decoding frames and/or whenever there is a format change */
|
||||
PFNVIDDECODECALLBACK pfnDecodePicture; /**< Called when a picture is ready to be decoded (decode order) */
|
||||
PFNVIDDISPLAYCALLBACK pfnDisplayPicture; /**< Called whenever a picture is ready to be displayed (display order) */
|
||||
void *pvReserved2[7]; /**< Reserved for future use - set to NULL */
|
||||
CUVIDEOFORMATEX *pExtVideoInfo; /**< [Optional] sequence header data from system layer */
|
||||
} CUVIDPARSERPARAMS;

/************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams)
//! Create video parser object and initialize
/************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams)
 */
typedef CUresult CUDAAPI tcuvidCreateVideoParser(CUvideoparser *pObj, CUVIDPARSERPARAMS *pParams);

/************************************************************************************************/
//! \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket)
//! Parse the video data from source data packet in pPacket
//! Extracts parameter sets like SPS, PPS, bitstream etc. from pPacket and
//! calls back pfnDecodePicture with CUVIDPICPARAMS data for kicking off HW decoding
/************************************************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket)
 */
typedef CUresult CUDAAPI tcuvidParseVideoData(CUvideoparser obj, CUVIDSOURCEDATAPACKET *pPacket);

/*******************************************************************/
//! \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj)
/*******************************************************************/
/**
 * \fn CUresult CUDAAPI cuvidDestroyVideoParser(CUvideoparser obj)
 */
typedef CUresult CUDAAPI tcuvidDestroyVideoParser(CUvideoparser obj);
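/*
 * Editorial sketch (not part of the original header): the typedefs above
 * suggest the usual create/parse/destroy flow. This is illustrative only -
 * it assumes the cuvid entry points have been resolved from the driver
 * library under their usual names (cuvidCreateVideoParser etc.), and that
 * CUVIDSOURCEDATAPACKET exposes payload/payload_size members as in the full
 * header. Callback bodies are placeholders.
 */
static int CUDAAPI handle_sequence(void *user, CUVIDEOFORMAT *fmt)      { return 1; }
static int CUDAAPI handle_decode  (void *user, CUVIDPICPARAMS *pic)     { return 1; }
static int CUDAAPI handle_display (void *user, CUVIDPARSERDISPINFO *di) { return 1; }

static CUresult feed_parser(const unsigned char *data, unsigned long size)
{
    CUvideoparser parser = NULL;
    CUVIDPARSERPARAMS params = { 0 };
    CUVIDSOURCEDATAPACKET pkt = { 0 };
    CUresult err;

    params.CodecType              = cudaVideoCodec_H264;
    params.ulMaxNumDecodeSurfaces = 20;
    params.ulMaxDisplayDelay      = 4;  /* recommended range is 2..4 */
    params.pfnSequenceCallback    = handle_sequence;
    params.pfnDecodePicture       = handle_decode;
    params.pfnDisplayPicture      = handle_display;

    err = cuvidCreateVideoParser(&parser, &params);
    if (err != CUDA_SUCCESS)
        return err;

    pkt.payload      = data;
    pkt.payload_size = size;
    err = cuvidParseVideoData(parser, &pkt); /* callbacks fire synchronously here */

    cuvidDestroyVideoParser(parser);
    return err;
}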

/**********************************************************************************************/
/** @} */ /* END VIDEO_PARSER */
////////////////////////////////////////////////////////////////////////////////////////////////

#if defined(__cplusplus)
}
@@ -1,36 +0,0 @@
#!/bin/sh

# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

set -e

OUT="$1"
IN="$2"
NAME="$(basename "$IN" | sed 's/\..*//')"

printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
while read LINE
do
    printf "\n\t\"%s\\\n\"" "$(printf "%s" "$LINE" | sed -e 's/\r//g' -e 's/["\\]/\\&/g')" >> "$OUT"
done < "$IN"
printf ";\n" >> "$OUT"

exit 0
@@ -1,7 +1,7 @@
/*
 * This copyright notice applies to this header file only:
 *
 * Copyright (c) 2010-2017 NVIDIA Corporation
 * Copyright (c) 2010-2015 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
@@ -27,10 +27,8 @@

/**
 * \file nvEncodeAPI.h
 * NVIDIA GPUs - beginning with the Kepler generation - contain a hardware-based encoder
 * (referred to as NVENC) which provides fully-accelerated hardware-based video encoding.
 * NvEncodeAPI provides the interface for NVIDIA video encoder (NVENC).
 * \date 2011-2017
 * NvEncodeAPI provides a NVENC Video Encoding interface to NVIDIA GPU devices based on the Kepler architecture.
 * \date 2011-2016
 * This file contains the interface constants, structure definitions and function prototypes.
 */

@@ -113,7 +111,7 @@ typedef void* NV_ENC_INPUT_PTR; /**< NVENCODE API input buffer
typedef void* NV_ENC_OUTPUT_PTR; /**< NVENCODE API output buffer*/
typedef void* NV_ENC_REGISTERED_PTR; /**< A Resource that has been registered with NVENCODE API*/

#define NVENCAPI_MAJOR_VERSION 8
#define NVENCAPI_MAJOR_VERSION 7
#define NVENCAPI_MINOR_VERSION 0

#define NVENCAPI_VERSION (NVENCAPI_MAJOR_VERSION | (NVENCAPI_MINOR_VERSION << 24))
@@ -257,16 +255,13 @@ typedef enum _NV_ENC_PARAMS_RC_MODE
    NV_ENC_PARAMS_RC_CONSTQP              = 0x0,  /**< Constant QP mode */
    NV_ENC_PARAMS_RC_VBR                  = 0x1,  /**< Variable bitrate mode */
    NV_ENC_PARAMS_RC_CBR                  = 0x2,  /**< Constant bitrate mode */
    NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ      = 0x8,  /**< low-delay CBR, high quality */
    NV_ENC_PARAMS_RC_CBR_HQ               = 0x10, /**< CBR, high quality (slower) */
    NV_ENC_PARAMS_RC_VBR_HQ               = 0x20  /**< VBR, high quality (slower) */
    NV_ENC_PARAMS_RC_VBR_MINQP            = 0x4,  /**< Variable bitrate mode with MinQP */
    NV_ENC_PARAMS_RC_2_PASS_QUALITY       = 0x8,  /**< Multi pass encoding optimized for image quality and works only with low latency mode */
    NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP = 0x10, /**< Multi pass encoding optimized for maintaining frame size and works only with low latency mode */
    NV_ENC_PARAMS_RC_2_PASS_VBR           = 0x20  /**< Multi pass VBR */
} NV_ENC_PARAMS_RC_MODE;

#define NV_ENC_PARAMS_RC_VBR_MINQP            (NV_ENC_PARAMS_RC_MODE)0x4       /**< Deprecated */
#define NV_ENC_PARAMS_RC_2_PASS_QUALITY       NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ /**< Deprecated */
#define NV_ENC_PARAMS_RC_2_PASS_FRAMESIZE_CAP NV_ENC_PARAMS_RC_CBR_HQ          /**< Deprecated */
#define NV_ENC_PARAMS_RC_2_PASS_VBR           NV_ENC_PARAMS_RC_VBR_HQ          /**< Deprecated */
#define NV_ENC_PARAMS_RC_CBR2                 NV_ENC_PARAMS_RC_CBR             /**< Deprecated */

/**
 * Input picture structure
@@ -318,26 +313,11 @@ typedef enum _NV_ENC_BUFFER_FORMAT
    NV_ENC_BUFFER_FORMAT_YUV444       = 0x00001000, /**< Planar YUV [Y plane followed by U and V planes] */
    NV_ENC_BUFFER_FORMAT_YUV420_10BIT = 0x00010000, /**< 10 bit Semi-Planar YUV [Y plane followed by interleaved UV plane]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
    NV_ENC_BUFFER_FORMAT_YUV444_10BIT = 0x00100000, /**< 10 bit Planar YUV444 [Y plane followed by U and V planes]. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
    NV_ENC_BUFFER_FORMAT_ARGB         = 0x01000000, /**< 8 bit Packed A8R8G8B8. This is a word-ordered format
                                                         where a pixel is represented by a 32-bit word with B
                                                         in the lowest 8 bits, G in the next 8 bits, R in the
                                                         8 bits after that and A in the highest 8 bits. */
    NV_ENC_BUFFER_FORMAT_ARGB10       = 0x02000000, /**< 10 bit Packed A2R10G10B10. This is a word-ordered format
                                                         where a pixel is represented by a 32-bit word with B
                                                         in the lowest 10 bits, G in the next 10 bits, R in the
                                                         10 bits after that and A in the highest 2 bits. */
    NV_ENC_BUFFER_FORMAT_AYUV         = 0x04000000, /**< 8 bit Packed A8Y8U8V8. This is a word-ordered format
                                                         where a pixel is represented by a 32-bit word with V
                                                         in the lowest 8 bits, U in the next 8 bits, Y in the
                                                         8 bits after that and A in the highest 8 bits. */
    NV_ENC_BUFFER_FORMAT_ABGR         = 0x10000000, /**< 8 bit Packed A8B8G8R8. This is a word-ordered format
                                                         where a pixel is represented by a 32-bit word with R
                                                         in the lowest 8 bits, G in the next 8 bits, B in the
                                                         8 bits after that and A in the highest 8 bits. */
    NV_ENC_BUFFER_FORMAT_ABGR10       = 0x20000000, /**< 10 bit Packed A2B10G10R10. This is a word-ordered format
                                                         where a pixel is represented by a 32-bit word with R
                                                         in the lowest 10 bits, G in the next 10 bits, B in the
                                                         10 bits after that and A in the highest 2 bits. */
    NV_ENC_BUFFER_FORMAT_ARGB         = 0x01000000, /**< 8 bit Packed A8R8G8B8 */
    NV_ENC_BUFFER_FORMAT_ARGB10       = 0x02000000, /**< 10 bit Packed A2R10G10B10. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
    NV_ENC_BUFFER_FORMAT_AYUV         = 0x04000000, /**< 8 bit Packed A8Y8U8V8 */
    NV_ENC_BUFFER_FORMAT_ABGR         = 0x10000000, /**< 8 bit Packed A8B8G8R8 */
    NV_ENC_BUFFER_FORMAT_ABGR10       = 0x20000000, /**< 10 bit Packed A2B10G10R10. Each pixel of size 2 bytes. Most Significant 10 bits contain pixel data. */
} NV_ENC_BUFFER_FORMAT;
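/*
 * Editorial note (not part of the original header): "word-ordered" above
 * means the component named last sits in the lowest byte of the 32-bit
 * word. A small illustrative helper, assuming nothing beyond <stdint.h>:
 */
#include <stdint.h>
static uint32_t pack_argb(uint8_t a, uint8_t r, uint8_t g, uint8_t b)
{
    /* B in the lowest 8 bits, then G, then R, then A in the highest 8 bits */
    return ((uint32_t)a << 24) | ((uint32_t)r << 16) |
           ((uint32_t)g <<  8) |  (uint32_t)b;
}
/* pack_argb(0xFF, 0x10, 0x20, 0x30) == 0xFF102030 */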

#define NV_ENC_BUFFER_FORMAT_NV12_PL NV_ENC_BUFFER_FORMAT_NV12
@@ -649,7 +629,6 @@ typedef enum _NV_ENC_INPUT_RESOURCE_TYPE
    NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX       = 0x0, /**< input resource type is a directx9 surface*/
    NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR = 0x1, /**< input resource type is a cuda device pointer surface*/
    NV_ENC_INPUT_RESOURCE_TYPE_CUDAARRAY     = 0x2, /**< input resource type is a cuda array surface */
    NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX    = 0x3  /**< input resource type is an OpenGL texture */
} NV_ENC_INPUT_RESOURCE_TYPE;

/**
@@ -659,8 +638,6 @@ typedef enum _NV_ENC_DEVICE_TYPE
{
    NV_ENC_DEVICE_TYPE_DIRECTX = 0x0, /**< encode device type is a directx9 device */
    NV_ENC_DEVICE_TYPE_CUDA    = 0x1, /**< encode device type is a cuda device */
    NV_ENC_DEVICE_TYPE_OPENGL  = 0x2  /**< encode device type is an OpenGL device.
                                           Use of this device type is supported only on Linux */
} NV_ENC_DEVICE_TYPE;

/**
@@ -919,8 +896,7 @@ typedef enum _NV_ENC_CAPS
    /**
     * Indicates HW support for MEOnly Mode.
     * \n 0 : MEOnly Mode not supported.
     * \n 1 : MEOnly Mode supported for I and P frames.
     * \n 2 : MEOnly Mode supported for I, P and B frames.
     * \n 1 : MEOnly Mode supported.
     */
    NV_ENC_CAPS_SUPPORT_MEONLY_MODE,
@@ -943,17 +919,6 @@ typedef enum _NV_ENC_CAPS
     * \n 1 : 10 bit encoding supported.
     */
    NV_ENC_CAPS_SUPPORT_10BIT_ENCODE,
    /**
     * Maximum number of Long Term Reference frames supported
     */
    NV_ENC_CAPS_NUM_MAX_LTR_FRAMES,

    /**
     * Indicates HW support for Weighted Prediction.
     * \n 0 : Weighted Prediction not supported.
     * \n 1 : Weighted Prediction supported.
     */
    NV_ENC_CAPS_SUPPORT_WEIGHTED_PREDICTION,

    /**
     * Reserved - Not to be used by clients.
@@ -995,7 +960,7 @@ typedef struct _NV_ENC_CREATE_INPUT_BUFFER
    uint32_t version;               /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_INPUT_BUFFER_VER */
    uint32_t width;                 /**< [in]: Input buffer width */
    uint32_t height;                /**< [in]: Input buffer height */
    NV_ENC_MEMORY_HEAP memoryHeap;  /**< [in]: Deprecated. Do not use */
    NV_ENC_MEMORY_HEAP memoryHeap;  /**< [in]: Deprecated. Will be removed in sdk 8.0 */
    NV_ENC_BUFFER_FORMAT bufferFmt; /**< [in]: Input buffer format */
    uint32_t reserved;              /**< [in]: Reserved and must be set to 0 */
    NV_ENC_INPUT_PTR inputBuffer;   /**< [out]: Pointer to input buffer */
@@ -1013,8 +978,8 @@ typedef struct _NV_ENC_CREATE_INPUT_BUFFER
typedef struct _NV_ENC_CREATE_BITSTREAM_BUFFER
{
    uint32_t version;                  /**< [in]: Struct version. Must be set to ::NV_ENC_CREATE_BITSTREAM_BUFFER_VER */
    uint32_t size;                     /**< [in]: Deprecated. Do not use */
    NV_ENC_MEMORY_HEAP memoryHeap;     /**< [in]: Deprecated. Do not use */
    uint32_t size;                     /**< [in]: Size of the bitstream buffer to be created */
    NV_ENC_MEMORY_HEAP memoryHeap;     /**< [in]: Deprecated. Will be removed in sdk 8.0 */
    uint32_t reserved;                 /**< [in]: Reserved and must be set to 0 */
    NV_ENC_OUTPUT_PTR bitstreamBuffer; /**< [out]: Pointer to the output bitstream buffer */
    void* bitstreamBufferPtr;          /**< [out]: Reserved and should not be used */
@@ -1108,15 +1073,14 @@ typedef struct _NV_ENC_QP
    uint32_t zeroReorderDelay :1; /**< [in]: Set this to 1 to indicate zero latency operation (no reordering delay, num_reorder_frames=0) */
    uint32_t enableNonRefP :1; /**< [in]: Set this to 1 to enable automatic insertion of non-reference P-frames (no effect if enablePTD=0) */
    uint32_t strictGOPTarget :1; /**< [in]: Set this to 1 to minimize GOP-to-GOP rate fluctuations */
    uint32_t aqStrength :4; /**< [in]: When AQ (Spatial) is enabled (i.e. NV_ENC_RC_PARAMS::enableAQ is set), this field is used to specify AQ strength. AQ strength scale is from 1 (low) - 15 (aggressive). If not set, strength is autoselected by driver. */
    uint32_t aqStrength :4; /**< [in]: When AQ (Spatial) is enabled (i.e. NV_ENC_RC_PARAMS::enableAQ is set), this field is used to specify AQ strength. AQ strength scale is from 1 (low) - 15 (aggressive). If not set, strength is autoselected by driver. Currently supported only with h264 */
    uint32_t reservedBitFields :16; /**< [in]: Reserved bitfields and must be set to 0 */
    NV_ENC_QP minQP; /**< [in]: Specifies the minimum QP used for rate control. Client must set NV_ENC_CONFIG::enableMinQP to 1. */
    NV_ENC_QP maxQP; /**< [in]: Specifies the maximum QP used for rate control. Client must set NV_ENC_CONFIG::enableMaxQP to 1. */
    NV_ENC_QP initialRCQP; /**< [in]: Specifies the initial QP used for rate control. Client must set NV_ENC_CONFIG::enableInitialRCQP to 1. */
    uint32_t temporallayerIdxMask; /**< [in]: Specifies the temporal layers (as a bitmask) whose QPs have changed. Valid max bitmask is [2^NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS - 1] */
    uint8_t temporalLayerQP[8]; /**< [in]: Specifies the temporal layer QPs used for rate control. Temporal layer index is used as the array index */
    uint8_t targetQuality; /**< [in]: Target CQ (Constant Quality) level for VBR mode (range 0-51 with 0-automatic) */
    uint8_t targetQualityLSB; /**< [in]: Fractional part of target quality (as 8.8 fixed point format) */
    uint16_t targetQuality; /**< [in]: Target CQ (Constant Quality) level for VBR mode (range 0-51 with 0-automatic) */
    uint16_t lookaheadDepth; /**< [in]: Maximum depth of lookahead with range 0-32 (only used if enableLookahead=1) */
    uint32_t reserved[9];
} NV_ENC_RC_PARAMS;
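/*
 * Editorial sketch (not part of the original header): the new layout splits
 * the old 16-bit targetQuality into an 8-bit integer part plus an 8.8
 * fixed-point fraction. A hypothetical helper showing the conversion:
 */
#include <math.h>
#include <stdint.h>
static void set_target_quality(uint8_t *quality, uint8_t *quality_lsb, double cq)
{
    *quality     = (uint8_t)cq;                         /* integer CQ level, 0-51 */
    *quality_lsb = (uint8_t)((cq - floor(cq)) * 256.0); /* fraction scaled by 2^8 */
}
/* cq = 23.5 -> targetQuality = 23, targetQualityLSB = 128 */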
@@ -1153,14 +1117,13 @@ typedef NV_ENC_CONFIG_H264_VUI_PARAMETERS NV_ENC_CONFIG_HEVC_VUI_PARAMETERS;
/**
 * \struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
 * External motion vector hint counts per block type.
 * H264 supports multiple hints while HEVC supports one hint for each valid candidate.
 */
typedef struct _NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
{
    uint32_t numCandsPerBlk16x16 : 4; /**< [in]: Supported for H264, HEVC. Specifies the number of candidates per 16x16 block. */
    uint32_t numCandsPerBlk16x8  : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 16x8 block. */
    uint32_t numCandsPerBlk8x16  : 4; /**< [in]: Supported for H264 only. Specifies the number of candidates per 8x16 block. */
    uint32_t numCandsPerBlk8x8   : 4; /**< [in]: Supported for H264, HEVC. Specifies the number of candidates per 8x8 block. */
    uint32_t numCandsPerBlk16x16 : 4; /**< [in]: Specifies the number of candidates per 16x16 block. */
    uint32_t numCandsPerBlk16x8  : 4; /**< [in]: Specifies the number of candidates per 16x8 block. */
    uint32_t numCandsPerBlk8x16  : 4; /**< [in]: Specifies the number of candidates per 8x16 block. */
    uint32_t numCandsPerBlk8x8   : 4; /**< [in]: Specifies the number of candidates per 8x8 block. */
    uint32_t reserved            : 16; /**< [in]: Reserved for padding. */
    uint32_t reserved1[3];             /**< [in]: Reserved for future use. */
} NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE;
@@ -1193,8 +1156,7 @@ typedef struct _NV_ENC_CONFIG_H264
    uint32_t hierarchicalPFrames :1; /**< [in]: Set to 1 to enable hierarchical PFrames */
    uint32_t hierarchicalBFrames :1; /**< [in]: Set to 1 to enable hierarchical BFrames */
    uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set to 1 to write SEI buffering period syntax in the bitstream */
    uint32_t outputPictureTimingSEI :1; /**< [in]: Set to 1 to write SEI picture timing syntax in the bitstream. When set for following rateControlMode : NV_ENC_PARAMS_RC_CBR, NV_ENC_PARAMS_RC_CBR_LOWDELAY_HQ,
                                             NV_ENC_PARAMS_RC_CBR_HQ, filler data is inserted if needed to achieve hrd bitrate */
    uint32_t outputPictureTimingSEI :1; /**< [in]: Set to 1 to write SEI picture timing syntax in the bitstream */
    uint32_t outputAUD :1; /**< [in]: Set to 1 to write access unit delimiter syntax in bitstream */
    uint32_t disableSPSPPS :1; /**< [in]: Set to 1 to disable writing of Sequence and Picture parameter info in bitstream */
    uint32_t outputFramePackingSEI :1; /**< [in]: Set to 1 to enable writing of frame packing arrangement SEI messages to bitstream */
@@ -1204,13 +1166,7 @@ typedef struct _NV_ENC_CONFIG_H264
                                Check support for constrained encoding using ::NV_ENC_CAPS_SUPPORT_CONSTRAINED_ENCODING caps. */
    uint32_t repeatSPSPPS :1; /**< [in]: Set to 1 to enable writing of Sequence and Picture parameter for every IDR frame */
    uint32_t enableVFR :1; /**< [in]: Set to 1 to enable variable frame rate. */
    uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode.
                                LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
                                Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in future.
                                LTR Per Picture mode: In this mode, client can control whether the current picture should be marked as LTR. Enable this mode by setting
                                ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
                                for using LTR.
                                Note that LTRs are not supported if encoding session is configured with B-frames */
    uint32_t enableLTR :1; /**< [in]: Currently this feature is not available and must be set to 0. Set to 1 to enable LTR support and auto-mark the first */
    uint32_t qpPrimeYZeroTransformBypassFlag :1; /**< [in]: To enable lossless encode set this to 1, set QP to 0 and RC_mode to NV_ENC_PARAMS_RC_CONSTQP and profile to HIGH_444_PREDICTIVE_PROFILE.
                                                      Check support for lossless encoding using ::NV_ENC_CAPS_SUPPORT_LOSSLESS_ENCODE caps. */
    uint32_t useConstrainedIntraPred :1; /**< [in]: Set 1 to enable constrained intra prediction. */
@@ -1220,8 +1176,8 @@ typedef struct _NV_ENC_CONFIG_H264
    uint32_t separateColourPlaneFlag; /**< [in]: Set to 1 to enable 4:4:4 separate colour planes */
    uint32_t disableDeblockingFilterIDC; /**< [in]: Specifies the deblocking filter mode. Permissible value range: [0,2] */
    uint32_t numTemporalLayers; /**< [in]: Specifies max temporal layers to be used for hierarchical coding. Valid value range is [1,::NV_ENC_CAPS_NUM_MAX_TEMPORAL_LAYERS] */
    uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */
    uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */
    uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header. Currently reserved and must be set to 0. */
    uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header. Currently reserved and must be set to 0. */
    NV_ENC_H264_ADAPTIVE_TRANSFORM_MODE adaptiveTransformMode; /**< [in]: Specifies the AdaptiveTransform Mode. Check support for AdaptiveTransform mode using ::NV_ENC_CAPS_SUPPORT_ADAPTIVE_TRANSFORM caps. */
    NV_ENC_H264_FMO_MODE fmoMode; /**< [in]: Specifies the FMO Mode. Check support for FMO using ::NV_ENC_CAPS_SUPPORT_FMO caps. */
    NV_ENC_H264_BDIRECT_MODE bdirectMode; /**< [in]: Specifies the BDirect mode. Check support for BDirect mode using ::NV_ENC_CAPS_SUPPORT_BDIRECT_MODE caps.*/
@@ -1244,13 +1200,13 @@ typedef struct _NV_ENC_CONFIG_H264
                                 sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
                                 sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
    NV_ENC_CONFIG_H264_VUI_PARAMETERS h264VUIParameters; /**< [in]: Specifies the H264 video usability info parameters */
    uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames. This parameter has different meaning in two LTR modes.
                                In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
                                In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB. */
    uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_H264::enableLTR for description of the two modes.
                                Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged from using "LTR Trust" mode as this mode may
                                be deprecated in future releases.
                                Set to 0 when using "LTR Per Picture" mode of LTR operation. */
    uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames used.
                                If ltrTrustMode=1, encoder will mark first numLTRFrames base layer reference frames within each IDR interval as LTR.
                                If ltrMarkFrame=1, ltrNumFrames specifies maximum number of ltr frames in DPB.
                                If ltrNumFrames value is more than DPB size (maxNumRefFrames) encoder will take decision on its own. */
    uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode.
                                Set to 0 to disallow encoding using LTR frames until later specified.
                                Set to 1 to allow encoding using LTR frames unless later invalidated.*/
    uint32_t chromaFormatIDC; /**< [in]: Specifies the chroma format. Should be set to 1 for yuv420 input, 3 for yuv444 input.
                                   Check support for YUV444 encoding using ::NV_ENC_CAPS_SUPPORT_YUV444_ENCODE caps.*/
    uint32_t maxTemporalLayers; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */
@@ -1274,13 +1230,7 @@ typedef struct _NV_ENC_CONFIG_HEVC
    uint32_t outputBufferingPeriodSEI :1; /**< [in]: Set 1 to write SEI buffering period syntax in the bitstream */
    uint32_t outputPictureTimingSEI :1; /**< [in]: Set 1 to write SEI picture timing syntax in the bitstream */
    uint32_t outputAUD :1; /**< [in]: Set 1 to write Access Unit Delimiter syntax. */
    uint32_t enableLTR :1; /**< [in]: Set to 1 to enable LTR (Long Term Reference) frame support. LTR can be used in two modes: "LTR Trust" mode and "LTR Per Picture" mode.
                                LTR Trust mode: In this mode, ltrNumFrames pictures after IDR are automatically marked as LTR. This mode is enabled by setting ltrTrustMode = 1.
                                Use of LTR Trust mode is strongly discouraged as this mode may be deprecated in future releases.
                                LTR Per Picture mode: In this mode, client can control whether the current picture should be marked as LTR. Enable this mode by setting
                                ltrTrustMode = 0 and ltrMarkFrame = 1 for the picture to be marked as LTR. This is the preferred mode
                                for using LTR.
                                Note that LTRs are not supported if encoding session is configured with B-frames */
    uint32_t enableLTR :1; /**< [in]: Set 1 to enable use of long term reference pictures for inter prediction. */
    uint32_t disableSPSPPS :1; /**< [in]: Set 1 to disable VPS,SPS and PPS signalling in the bitstream. */
    uint32_t repeatSPSPPS :1; /**< [in]: Set 1 to output VPS,SPS and PPS for every IDR frame.*/
    uint32_t enableIntraRefresh :1; /**< [in]: Set 1 to enable gradual decoder refresh or intra refresh. If the GOP structure uses B frames this will be ignored */
@@ -1292,12 +1242,13 @@ typedef struct _NV_ENC_CONFIG_HEVC
                                 Will be disabled if NV_ENC_CONFIG::gopLength is not set to NVENC_INFINITE_GOPLENGTH. */
    uint32_t intraRefreshCnt; /**< [in]: Specifies the length of intra refresh in number of frames for periodic intra refresh. This value should be smaller than intraRefreshPeriod */
    uint32_t maxNumRefFramesInDPB; /**< [in]: Specifies the maximum number of reference frames in the DPB.*/
    uint32_t ltrNumFrames; /**< [in]: This parameter has different meaning in two LTR modes.
                                In "LTR Trust" mode (ltrTrustMode = 1), encoder will mark the first ltrNumFrames base layer reference frames within each IDR interval as LTR.
                                In "LTR Per Picture" mode (ltrTrustMode = 0 and ltrMarkFrame = 1), ltrNumFrames specifies maximum number of LTR frames in DPB. */
    uint32_t vpsId; /**< [in]: Specifies the VPS id of the video parameter set */
    uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header */
    uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header */
    uint32_t ltrNumFrames; /**< [in]: Specifies the number of LTR frames used.
                                If ltrTrustMode=1, encoder will mark first numLTRFrames base layer reference frames within each IDR interval as LTR.
                                If ltrMarkFrame=1, ltrNumFrames specifies maximum number of ltr frames in DPB.
                                If ltrNumFrames value is more than DPB size (maxNumRefFramesInDPB) encoder will take decision on its own. */
    uint32_t vpsId; /**< [in]: Specifies the VPS id of the video parameter set. Currently reserved and must be set to 0. */
    uint32_t spsId; /**< [in]: Specifies the SPS id of the sequence header. Currently reserved and must be set to 0. */
    uint32_t ppsId; /**< [in]: Specifies the PPS id of the picture header. Currently reserved and must be set to 0. */
    uint32_t sliceMode; /**< [in]: This parameter in conjunction with sliceModeData specifies the way in which the picture is divided into slices
                             sliceMode = 0 CTU based slices, sliceMode = 1 Byte based slices, sliceMode = 2 CTU row based slices, sliceMode = 3, numSlices in Picture
                             When sliceMode == 0 and sliceModeData == 0 whole picture will be coded with one slice */
@@ -1308,55 +1259,22 @@ typedef struct _NV_ENC_CONFIG_HEVC
                             sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
    uint32_t maxTemporalLayersMinus1; /**< [in]: Specifies the max temporal layer used for hierarchical coding. */
    NV_ENC_CONFIG_HEVC_VUI_PARAMETERS hevcVUIParameters; /**< [in]: Specifies the HEVC video usability info parameters */
    uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode. See comments near NV_ENC_CONFIG_HEVC::enableLTR for description of the two modes.
                                Set to 1 to use "LTR Trust" mode of LTR operation. Clients are discouraged from using "LTR Trust" mode as this mode may
                                be deprecated in future releases.
                                Set to 0 when using "LTR Per Picture" mode of LTR operation. */
    uint32_t ltrTrustMode; /**< [in]: Specifies the LTR operating mode.
                                Set to 0 to disallow encoding using LTR frames until later specified.
                                Set to 1 to allow encoding using LTR frames unless later invalidated.*/
    uint32_t reserved1[217]; /**< [in]: Reserved and must be set to 0.*/
    void* reserved2[64]; /**< [in]: Reserved and must be set to NULL */
} NV_ENC_CONFIG_HEVC;

/**
 * \struct _NV_ENC_CONFIG_H264_MEONLY
 * H264 encoder configuration parameters for ME only Mode
 *
 */
typedef struct _NV_ENC_CONFIG_H264_MEONLY
{
    uint32_t disablePartition16x16 :1; /**< [in]: Disable MotionEstimation on 16x16 blocks*/
    uint32_t disablePartition8x16 :1;  /**< [in]: Disable MotionEstimation on 8x16 blocks*/
    uint32_t disablePartition16x8 :1;  /**< [in]: Disable MotionEstimation on 16x8 blocks*/
    uint32_t disablePartition8x8 :1;   /**< [in]: Disable MotionEstimation on 8x8 blocks*/
    uint32_t disableIntraSearch :1;    /**< [in]: Disable Intra search during MotionEstimation*/
    uint32_t bStereoEnable :1;         /**< [in]: Enable Stereo Mode for Motion Estimation where each view is independently executed*/
    uint32_t reserved :26;             /**< [in]: Reserved and must be set to 0 */
    uint32_t reserved1 [255];          /**< [in]: Reserved and must be set to 0 */
    void* reserved2[64];               /**< [in]: Reserved and must be set to NULL */
} NV_ENC_CONFIG_H264_MEONLY;


/**
 * \struct _NV_ENC_CONFIG_HEVC_MEONLY
 * HEVC encoder configuration parameters for ME only Mode
 *
 */
typedef struct _NV_ENC_CONFIG_HEVC_MEONLY
{
    uint32_t reserved [256]; /**< [in]: Reserved and must be set to 0 */
    void* reserved1[64];     /**< [in]: Reserved and must be set to NULL */
} NV_ENC_CONFIG_HEVC_MEONLY;

/**
 * \struct _NV_ENC_CODEC_CONFIG
 * Codec-specific encoder configuration parameters to be set during initialization.
 */
typedef union _NV_ENC_CODEC_CONFIG
{
    NV_ENC_CONFIG_H264 h264Config;              /**< [in]: Specifies the H.264-specific encoder configuration. */
    NV_ENC_CONFIG_HEVC hevcConfig;              /**< [in]: Specifies the HEVC-specific encoder configuration. */
    NV_ENC_CONFIG_H264_MEONLY h264MeOnlyConfig; /**< [in]: Specifies the H.264-specific ME only encoder configuration. */
    NV_ENC_CONFIG_HEVC_MEONLY hevcMeOnlyConfig; /**< [in]: Specifies the HEVC-specific ME only encoder configuration. */
    uint32_t reserved[320];                     /**< [in]: Reserved and must be set to 0 */
    NV_ENC_CONFIG_H264 h264Config;              /**< [in]: Specifies the H.264-specific encoder configuration. */
    NV_ENC_CONFIG_HEVC hevcConfig;              /**< [in]: Specifies the HEVC-specific encoder configuration. */
    uint32_t reserved[256];                     /**< [in]: Reserved and must be set to 0 */
} NV_ENC_CODEC_CONFIG;

@@ -1407,8 +1325,7 @@ typedef struct _NV_ENC_INITIALIZE_PARAMS
    uint32_t enableExternalMEHints :1; /**< [in]: Set to 1 to enable external ME hints for the current frame. For NV_ENC_INITIALIZE_PARAMS::enablePTD=1 with B frames, programming L1 hints is optional for B frames since Client doesn't know internal GOP structure.
                                            NV_ENC_PIC_PARAMS::meHintRefPicDist should preferably be set with enablePTD=1. */
    uint32_t enableMEOnlyMode :1; /**< [in]: Set to 1 to enable ME Only Mode. */
    uint32_t enableWeightedPrediction :1; /**< [in]: Set this to 1 to enable weighted prediction. Not supported if encode session is configured for B-Frames ('frameIntervalP' in NV_ENC_CONFIG is greater than 1). */
    uint32_t reservedBitFields :27; /**< [in]: Reserved bitfields and must be set to 0 */
    uint32_t reservedBitFields :28; /**< [in]: Reserved bitfields and must be set to 0 */
    uint32_t privDataSize; /**< [in]: Reserved private data buffer size and must be set to 0 */
    void* privData; /**< [in]: Reserved private data buffer and must be set to NULL */
    NV_ENC_CONFIG* encodeConfig; /**< [in]: Specifies the advanced codec specific structure. If client has sent a valid codec config structure, it will override parameters set by the NV_ENC_INITIALIZE_PARAMS::presetGUID parameter. If set to NULL the NvEncodeAPI interface will use the NV_ENC_INITIALIZE_PARAMS::presetGUID to set the codec specific parameters.
@@ -1511,8 +1428,8 @@ typedef struct _NV_ENC_PIC_PARAMS_H264
                                 sliceMode = 2, sliceModeData specifies # of MB rows in each slice (except last slice)
                                 sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
    uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
    uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
    uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
    uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices when encoding this frame. */
    uint32_t ltrUsageMode; /**< [in]: Specifies additional usage constraints for encoding using LTR frames from this point further. 0: no constraints, 1: no short term refs older than current, no previous LTR frames.*/
    uint32_t reserved [243]; /**< [in]: Reserved and must be set to 0. */
    void* reserved2[62]; /**< [in]: Reserved and must be set to NULL. */
} NV_ENC_PIC_PARAMS_H264;
@@ -1551,8 +1468,8 @@ typedef struct _NV_ENC_PIC_PARAMS_HEVC
                                 sliceMode = 2, sliceModeData specifies # of CTU rows in each slice (except last slice)
                                 sliceMode = 3, sliceModeData specifies number of slices in the picture. Driver will divide picture into slices optimally */
    uint32_t ltrMarkFrameIdx; /**< [in]: Specifies the long term reference frame index to use for marking this frame as LTR.*/
    uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices to use when encoding this frame. */
    uint32_t ltrUsageMode; /**< [in]: Not supported. Reserved for future use and must be set to 0. */
    uint32_t ltrUseFrameBitmap; /**< [in]: Specifies the associated bitmap of LTR frame indices when encoding this frame. */
    uint32_t ltrUsageMode; /**< [in]: Specifies additional usage constraints for encoding using LTR frames from this point further. 0: no constraints, 1: no short term refs older than current, no previous LTR frames.*/
    uint32_t seiPayloadArrayCnt; /**< [in]: Specifies the number of elements allocated in seiPayloadArray array. */
    uint32_t reserved; /**< [in]: Reserved and must be set to 0. */
    NV_ENC_SEI_PAYLOAD* seiPayloadArray; /**< [in]: Array of SEI payloads which will be inserted for this frame. */
@@ -1567,7 +1484,7 @@ typedef struct _NV_ENC_PIC_PARAMS_HEVC
typedef union _NV_ENC_CODEC_PIC_PARAMS
{
    NV_ENC_PIC_PARAMS_H264 h264PicParams; /**< [in]: H264 encode picture params. */
    NV_ENC_PIC_PARAMS_HEVC hevcPicParams; /**< [in]: HEVC encode picture params. */
    NV_ENC_PIC_PARAMS_HEVC hevcPicParams; /**< [in]: HEVC encode picture params. Currently unsupported and must not be used. */
    uint32_t reserved[256];               /**< [in]: Reserved and must be set to 0. */
} NV_ENC_CODEC_PIC_PARAMS;

@@ -1594,7 +1511,7 @@ typedef struct _NV_ENC_PIC_PARAMS
    NV_ENC_CODEC_PIC_PARAMS codecPicParams; /**< [in]: Specifies the codec specific per-picture encoding parameters. */
    NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE meHintCountsPerBlock[2]; /**< [in]: Specifies the number of hint candidates per block per direction for the current frame. meHintCountsPerBlock[0] is for L0 predictors and meHintCountsPerBlock[1] is for L1 predictors.
                                                                              The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
    NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
    NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks multiplied by the total number of candidates per macroblock.
                                                  The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
                                                  + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1) */
    uint32_t reserved1[6]; /**< [in]: Reserved and must be set to 0 */
@@ -1615,7 +1532,6 @@ typedef struct _NV_ENC_PIC_PARAMS
/**
 * \struct _NV_ENC_MEONLY_PARAMS
 * MEOnly parameters that need to be sent on a per motion estimation basis.
 * NV_ENC_MEONLY_PARAMS::meExternalHints is supported for H264 only.
 */
typedef struct _NV_ENC_MEONLY_PARAMS
{
@@ -1629,20 +1545,12 @@ typedef struct _NV_ENC_MEONLY_PARAMS
    void* completionEvent; /**< [in]: Specifies an event to be signalled on completion of motion estimation
                                of this Frame [only if operating in Asynchronous mode].
                                Each output buffer should be associated with a distinct event pointer. */
    uint32_t viewID; /**< [in]: Specifies the left or right viewID if NV_ENC_CONFIG_H264_MEONLY::bStereoEnable is set.
                          viewID can be 0,1 if bStereoEnable is set, 0 otherwise. */
    NVENC_EXTERNAL_ME_HINT_COUNTS_PER_BLOCKTYPE
        meHintCountsPerBlock[2]; /**< [in]: Specifies the number of hint candidates per block for the current frame. meHintCountsPerBlock[0] is for L0 predictors.
                                      The candidate count in NV_ENC_PIC_PARAMS::meHintCountsPerBlock[lx] must never exceed NV_ENC_INITIALIZE_PARAMS::maxMEHintCountsPerBlock[lx] provided during encoder initialization. */
    NVENC_EXTERNAL_ME_HINT *meExternalHints; /**< [in]: Specifies the pointer to ME external hints for the current frame. The size of ME hint buffer should be equal to number of macroblocks * the total number of candidates per macroblock.
                                                  The total number of candidates per MB per direction = 1*meHintCountsPerBlock[Lx].numCandsPerBlk16x16 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk16x8 + 2*meHintCountsPerBlock[Lx].numCandsPerBlk8x16
                                                  + 4*meHintCountsPerBlock[Lx].numCandsPerBlk8x8. For frames using bidirectional ME, the total number of candidates for a single macroblock is the sum of the total number of candidates per MB for each direction (L0 and L1) */
    uint32_t reserved1[243]; /**< [in]: Reserved and must be set to 0 */
    void* reserved2[59]; /**< [in]: Reserved and must be set to NULL */
    uint32_t reserved1[252]; /**< [in]: Reserved and must be set to 0 */
    void* reserved2[60]; /**< [in]: Reserved and must be set to NULL */
} NV_ENC_MEONLY_PARAMS;

/** NV_ENC_MEONLY_PARAMS struct version*/
#define NV_ENC_MEONLY_PARAMS_VER NVENCAPI_STRUCT_VERSION(3)
#define NV_ENC_MEONLY_PARAMS_VER NVENCAPI_STRUCT_VERSION(2)

/**
@@ -1719,17 +1627,6 @@ typedef struct _NV_ENC_MAP_INPUT_RESOURCE
/** Macro for constructing the version field of ::_NV_ENC_MAP_INPUT_RESOURCE */
#define NV_ENC_MAP_INPUT_RESOURCE_VER NVENCAPI_STRUCT_VERSION(4)

/**
 * \struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX
 * NV_ENC_REGISTER_RESOURCE::resourceToRegister must be a pointer to a variable of this type,
 * when NV_ENC_REGISTER_RESOURCE::resourceType is NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX
 */
typedef struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX
{
    uint32_t texture; /**< [in]: The name of the texture to be used. */
    uint32_t target;  /**< [in]: Accepted values are GL_TEXTURE_RECTANGLE and GL_TEXTURE_2D. */
} NV_ENC_INPUT_RESOURCE_OPENGL_TEX;
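/*
 * Editorial sketch (not part of the original header): registering an OpenGL
 * texture per the comment above. The registeredResource output field and the
 * nvEncRegisterResource entry in the NV_ENCODE_API_FUNCTION_LIST table are
 * taken from the full header and should be treated as assumptions here;
 * tex_name, encoder and nvenc_funcs are placeholders, the dimensions are
 * illustrative, and error handling is omitted.
 */
NV_ENC_INPUT_RESOURCE_OPENGL_TEX gl_tex = {
    .texture = tex_name,      /* a valid GL texture object */
    .target  = GL_TEXTURE_2D, /* or GL_TEXTURE_RECTANGLE */
};
NV_ENC_REGISTER_RESOURCE reg = {
    .version            = NV_ENC_REGISTER_RESOURCE_VER,
    .resourceType       = NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX,
    .width              = 1920,
    .height             = 1080,
    .pitch              = 1920,
    .resourceToRegister = &gl_tex,
};
nvenc_funcs.nvEncRegisterResource(encoder, &reg);
/* reg.registeredResource can then be passed to NvEncMapInputResource. */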

/**
 * \struct _NV_ENC_REGISTER_RESOURCE
 * Register a resource for future use with the Nvidia Video Encoder Interface.
@@ -1737,11 +1634,7 @@ typedef struct _NV_ENC_INPUT_RESOURCE_OPENGL_TEX
typedef struct _NV_ENC_REGISTER_RESOURCE
{
    uint32_t version; /**< [in]: Struct version. Must be set to ::NV_ENC_REGISTER_RESOURCE_VER. */
    NV_ENC_INPUT_RESOURCE_TYPE resourceType; /**< [in]: Specifies the type of resource to be registered.
                                                  Supported values are
                                                  ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX,
                                                  ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR,
                                                  ::NV_ENC_INPUT_RESOURCE_TYPE_OPENGL_TEX */
    NV_ENC_INPUT_RESOURCE_TYPE resourceType; /**< [in]: Specifies the type of resource to be registered. Supported values are ::NV_ENC_INPUT_RESOURCE_TYPE_DIRECTX, ::NV_ENC_INPUT_RESOURCE_TYPE_CUDADEVICEPTR. */
    uint32_t width;  /**< [in]: Input buffer Width. */
    uint32_t height; /**< [in]: Input buffer Height. */
    uint32_t pitch;  /**< [in]: Input buffer Pitch. */
@@ -2841,7 +2734,7 @@ NVENCSTATUS NVENCAPI NvEncUnregisterAsyncEvent (void* encoder,
 * mapped resource is returned in the field NV_ENC_MAP_INPUT_RESOURCE::outputResourcePtr.
 * The NvEncodeAPI interface also returns the buffer format of the mapped resource
 * in the field NV_ENC_MAP_INPUT_RESOURCE::outbufferFmt.
 * This function provides synchronization guarantee that any graphics or compute
 * This function provides synchronization guarantee that any direct3d or cuda
 * work submitted on the input buffer is completed before the buffer is used for encoding.
 * The client should not access any input buffer while it is mapped by the encoder.
 *
@@ -2972,9 +2865,7 @@ NVENCSTATUS NVENCAPI NvEncInvalidateRefFrames(void* encoder, uint64_t invalidRef
 * Opens an encoding session and returns a pointer to the encoder interface in
 * the \p **encoder parameter. The client should start encoding process by calling
 * this API first.
 * The client must pass a pointer to IDirect3DDevice9 device or CUDA context in the \p *device parameter.
 * For the OpenGL interface, \p device must be NULL. An OpenGL context must be current when
 * calling all NvEncodeAPI functions.
 * The client must pass a pointer to IDirect3DDevice9/CUDA interface in the \p *device parameter.
 * If the creation of encoder session fails, the client must call ::NvEncDestroyEncoder API
 * before exiting.
*
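/*
 * Editorial sketch (not part of the original header): opening a session on a
 * CUDA device as described above. The NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS
 * field names and the function-list pointers are taken from the full header
 * and should be treated as assumptions here; cu_ctx is an existing CUcontext.
 */
NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS params = {
    .version    = NV_ENC_OPEN_ENCODE_SESSION_EX_PARAMS_VER,
    .deviceType = NV_ENC_DEVICE_TYPE_CUDA,
    .device     = cu_ctx,          /* NULL for the OpenGL path */
    .apiVersion = NVENCAPI_VERSION,
};
void *encoder = NULL;
NVENCSTATUS status = nvenc_funcs.nvEncOpenEncodeSessionEx(&params, &encoder);
if (status != NV_ENC_SUCCESS && encoder)
    nvenc_funcs.nvEncDestroyEncoder(encoder); /* required on failure, see above */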
@@ -3001,6 +2892,8 @@ NVENCSTATUS NVENCAPI NvEncOpenEncodeSessionEx (NV_ENC_OPEN_ENC
 *
 * Registers a resource with the Nvidia Video Encoder Interface for book keeping.
 * The client is expected to pass the registered resource handle as well, while calling ::NvEncMapInputResource API.
 * This API is not implemented for the DirectX Interface.
 * DirectX based clients need not change their implementation.
 *
 * \param [in] encoder
 *   Pointer to the NVEncodeAPI interface.
@@ -3032,6 +2925,8 @@ NVENCSTATUS NVENCAPI NvEncRegisterResource (void* encoder,
 * Unregisters a resource previously registered with the Nvidia Video Encoder Interface.
 * The client is expected to unregister any resource that it has registered with the
 * Nvidia Video Encoder Interface before destroying the resource.
 * This API is not implemented for the DirectX Interface.
 * DirectX based clients need not change their implementation.
 *
 * \param [in] encoder
 *   Pointer to the NVEncodeAPI interface.

10
compat/plan9/head
Executable file
@@ -0,0 +1,10 @@
#!/bin/sh

n=10

case "$1" in
    -n)  n=$2;      shift 2 ;;
    -n*) n=${1#-n}; shift   ;;
esac

exec sed ${n}q "$@"
@@ -1,7 +1,4 @@
/*
 * NewTek NDI common code
 * Copyright (c) 2017 Maksym Veremeyenko
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
@@ -19,12 +16,19 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVDEVICE_LIBNDI_NEWTEK_COMMON_H
#define AVDEVICE_LIBNDI_NEWTEK_COMMON_H

#include <Processing.NDI.Lib.h>

#define NDI_TIME_BASE 10000000
#define NDI_TIME_BASE_Q (AVRational){1, NDI_TIME_BASE}
int plan9_main(int argc, char **argv);

#undef main
int main(int argc, char **argv)
{
    /* The setfcr() function in lib9 is broken, must use asm. */
#ifdef __i386
    short fcr;
    __asm__ volatile ("fstcw %0 \n"
                      "or $63, %0 \n"
                      "fldcw %0 \n"
                      : "=m"(fcr));
#endif

    return plan9_main(argc, argv);
}
2
compat/plan9/printf
Executable file
@@ -0,0 +1,2 @@
#!/bin/sh
exec awk "BEGIN { for (i = 2; i < ARGC; i++) printf \"$1\", ARGV[i] }" "$@"
@@ -25,9 +25,9 @@
#include "libavutil/avstring.h"
#include "libavutil/mathematics.h"

static const char *check_nan_suffix(const char *s)
static char *check_nan_suffix(char *s)
{
    const char *start = s;
    char *start = s;

    if (*s++ != '(')
        return start;
@@ -44,7 +44,7 @@ double strtod(const char *, char **);

double avpriv_strtod(const char *nptr, char **endptr)
{
    const char *end;
    char *end;
    double res;

    /* Skip leading spaces */
@@ -81,13 +81,13 @@ double avpriv_strtod(const char *nptr, char **endptr)
        !av_strncasecmp(nptr, "+0x", 3)) {
        /* FIXME this doesn't handle exponents, non-integers (float/double)
         * and numbers too large for long long */
        res = strtoll(nptr, (char **)&end, 16);
        res = strtoll(nptr, &end, 16);
    } else {
        res = strtod(nptr, (char **)&end);
        res = strtod(nptr, &end);
    }

    if (endptr)
        *endptr = (char *)end;
        *endptr = end;

    return res;
}
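/*
 * Editorial note (not part of the original source): illustrative calls,
 * assuming the declarations above are in scope. avpriv_strtod() accepts
 * hexadecimal integers and "nan(...)" spellings that some C library
 * strtod() implementations reject; the exact results below follow from
 * the code above.
 */
char *tail;
double v;
v = avpriv_strtod("0x1A", &tail);       /* hex branch via strtoll(): v == 26.0 */
v = avpriv_strtod("nan(chars)", &tail); /* NaN; check_nan_suffix() skips "(chars)" */
v = avpriv_strtod("  -42.5", &tail);    /* leading spaces are skipped first */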

@@ -21,7 +21,7 @@

#ifdef _WIN32
#include <windows.h>
#if (_WIN32_WINNT < 0x0602) || HAVE_WINRT
#if _WIN32_WINNT < 0x0602
#include "libavutil/wchar_filename.h"
#endif
/**
@@ -71,17 +71,7 @@ exit:
#ifndef LOAD_LIBRARY_SEARCH_SYSTEM32
# define LOAD_LIBRARY_SEARCH_SYSTEM32 0x00000800
#endif
#if HAVE_WINRT
    wchar_t *name_w = NULL;
    int ret;
    if (utf8towchar(name, &name_w))
        return NULL;
    ret = LoadPackagedLibrary(name_w, 0);
    av_free(name_w);
    return ret;
#else
    return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32);
#endif
}
#define dlopen(name, flags) win32_dlopen(name)
#define dlclose FreeLibrary

@@ -77,7 +77,7 @@ typedef struct pthread_cond_t {

static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
{
    pthread_t *h = (pthread_t*)arg;
    pthread_t *h = arg;
    h->ret = h->func(h->arg);
    return 0;
}
@@ -270,7 +270,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_
    }

    /* non native condition variables */
    win32_cond = (win32_cond_t*)av_mallocz(sizeof(win32_cond_t));
    win32_cond = av_mallocz(sizeof(win32_cond_t));
    if (!win32_cond)
        return ENOMEM;
    cond->Ptr = win32_cond;
@@ -288,7 +288,7 @@ static av_unused int pthread_cond_init(pthread_cond_t *cond, const void *unused_

static av_unused int pthread_cond_destroy(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = (win32_cond_t*)cond->Ptr;
    win32_cond_t *win32_cond = cond->Ptr;
    /* native condition variables do not destroy */
    if (cond_init)
        return 0;
@@ -305,7 +305,7 @@ static av_unused int pthread_cond_destroy(pthread_cond_t *cond)

static av_unused int pthread_cond_broadcast(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = (win32_cond_t*)cond->Ptr;
    win32_cond_t *win32_cond = cond->Ptr;
    int have_waiter;

    if (cond_broadcast) {
@@ -337,7 +337,7 @@ static av_unused int pthread_cond_broadcast(pthread_cond_t *cond)

static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    win32_cond_t *win32_cond = (win32_cond_t*)cond->Ptr;
    win32_cond_t *win32_cond = cond->Ptr;
    int last_waiter;
    if (cond_wait) {
        cond_wait(cond, mutex, INFINITE);
@@ -369,7 +369,7 @@ static av_unused int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mu

static av_unused int pthread_cond_signal(pthread_cond_t *cond)
{
    win32_cond_t *win32_cond = (win32_cond_t*)cond->Ptr;
    win32_cond_t *win32_cond = cond->Ptr;
    int have_waiter;
    if (cond_signal) {
        cond_signal(cond);
@@ -397,20 +397,20 @@ static av_unused int pthread_cond_signal(pthread_cond_t *cond)
static av_unused void w32thread_init(void)
{
#if _WIN32_WINNT < 0x0600
    HMODULE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
    HANDLE kernel_dll = GetModuleHandle(TEXT("kernel32.dll"));
    /* if one is available, then they should all be available */
    cond_init = (void (WINAPI*)(pthread_cond_t *))
        GetProcAddress(kernel_dll, "InitializeConditionVariable");
    cond_broadcast = (void (WINAPI*)(pthread_cond_t *))
        GetProcAddress(kernel_dll, "WakeAllConditionVariable");
    cond_signal = (void (WINAPI*)(pthread_cond_t *))
        GetProcAddress(kernel_dll, "WakeConditionVariable");
    cond_wait = (BOOL (WINAPI*)(pthread_cond_t *, pthread_mutex_t *, DWORD))
        GetProcAddress(kernel_dll, "SleepConditionVariableCS");
    initonce_begin = (BOOL (WINAPI*)(pthread_once_t *, DWORD, BOOL *, void **))
        GetProcAddress(kernel_dll, "InitOnceBeginInitialize");
    initonce_complete = (BOOL (WINAPI*)(pthread_once_t *, DWORD, void *))
        GetProcAddress(kernel_dll, "InitOnceComplete");
    cond_init =
        (void*)GetProcAddress(kernel_dll, "InitializeConditionVariable");
    cond_broadcast =
        (void*)GetProcAddress(kernel_dll, "WakeAllConditionVariable");
    cond_signal =
        (void*)GetProcAddress(kernel_dll, "WakeConditionVariable");
    cond_wait =
        (void*)GetProcAddress(kernel_dll, "SleepConditionVariableCS");
    initonce_begin =
        (void*)GetProcAddress(kernel_dll, "InitOnceBeginInitialize");
    initonce_complete =
        (void*)GetProcAddress(kernel_dll, "InitOnceComplete");
#endif

}

120
doc/APIchanges
@@ -15,122 +15,6 @@ libavutil: 2015-08-28
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
-------- 8< --------- FFmpeg 3.4 was cut here -------- 8< ---------
|
||||
|
||||
2017-09-28 - b6cf66ae1c - lavc 57.106.104 - avcodec.h
|
||||
Add AV_PKT_DATA_A53_CC packet side data, to export closed captions
|
||||
|
||||
2017-09-27 - 7aa6b8a68f - lavu 55.77.101 / lavu 55.31.1 - frame.h
|
||||
Allow passing the value of 0 (meaning "automatic") as the required alignment
|
||||
to av_frame_get_buffer().
|
||||
|
||||
2017-09-27 - 522f877086 - lavu 55.77.100 / lavu 55.31.0 - cpu.h
|
||||
Add av_cpu_max_align() for querying maximum required data alignment.
|
||||
|
||||
2017-09-26 - b1cf151c4d - lavc 57.106.102 - avcodec.h
|
||||
Deprecate AVCodecContext.refcounted_frames. This was useful for deprecated
|
||||
API only (avcodec_decode_video2/avcodec_decode_audio4). The new decode APIs
|
||||
(avcodec_send_packet/avcodec_receive_frame) always work with reference
|
||||
counted frames.
|
||||
|
||||
2017-09-21 - 6f15f1cdc8 - lavu 55.76.100 / 56.6.0 - pixdesc.h
|
||||
Add av_color_range_from_name(), av_color_primaries_from_name(),
|
||||
av_color_transfer_from_name(), av_color_space_from_name(), and
|
||||
av_chroma_location_from_name().
|
||||
|
||||
2017-09-13 - 82342cead1 - lavc 57.106.100 - avcodec.h
|
||||
Add AV_PKT_FLAG_TRUSTED.
|
||||
|
||||
2017-09-13 - 9cb23cd9fe - lavu 55.75.100 - hwcontext.h hwcontext_drm.h
|
||||
Add AV_HWDEVICE_TYPE_DRM and implementation.
|
||||
|
||||
2017-09-08 - 5ba2aef6ec - lavfi 6.103.100 - buffersrc.h
|
||||
Add av_buffersrc_close().
|
||||
|
||||
2017-09-04 - 6cadbb16e9 - lavc 57.105.100 - avcodec.h
|
||||
Add AV_HWACCEL_CODEC_CAP_EXPERIMENTAL, replacing the deprecated
|
||||
HWACCEL_CODEC_CAP_EXPERIMENTAL flag.
|
||||
|
||||
2017-09-01 - 5d76674756 - lavf 57.81.100 - avio.h
|
||||
Add avio_read_partial().
|
||||
|
||||
2017-09-01 - xxxxxxx - lavf 57.80.100 / 57.11.0 - avio.h
|
||||
Add avio_context_free(). From now on it must be used for freeing AVIOContext.
|
||||
|
||||
2017-08-08 - 1460408703 - lavu 55.74.100 - pixdesc.h
|
||||
Add AV_PIX_FMT_FLAG_FLOAT pixel format flag.
|
||||
|
||||
2017-08-08 - 463b81de2b - lavu 55.72.100 - imgutils.h
|
||||
Add av_image_fill_black().
|
||||
|
||||
2017-08-08 - caa12027ba - lavu 55.71.100 - frame.h
|
||||
Add av_frame_apply_cropping().
|
||||

2017-07-25 - 24de4fddca - lavu 55.69.100 - frame.h
Add AV_FRAME_DATA_ICC_PROFILE side data type.

2017-06-27 - 70143a3954 - lavc 57.100.100 - avcodec.h
DXVA2 and D3D11 hardware accelerated decoding now supports the new hwaccel API,
which can create the decoder context and allocate hardware frame automatically.
See AVCodecContext.hw_device_ctx and AVCodecContext.hw_frames_ctx. For D3D11,
the new AV_PIX_FMT_D3D11 pixfmt must be used with the new API.

2017-06-27 - 3303511f33 - lavu 56.67.100 - hwcontext.h
Add AV_HWDEVICE_TYPE_D3D11VA and AV_PIX_FMT_D3D11.

2017-06-24 - 09891c5391 - lavf 57.75.100 - avio.h
Add AVIO_DATA_MARKER_FLUSH_POINT to signal preferred flush points to aviobuf.

2017-06-14 - d59c6a3aeb - lavu 55.66.100 - hwcontext.h
av_hwframe_ctx_create_derived() now takes some AV_HWFRAME_MAP_* combination
as its flags argument (which was previously unused).

2017-06-14 - 49ae8a5e87 - lavc 57.99.100 - avcodec.h
Add AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH.

2017-06-14 - 0b1794a43e - lavu 55.65.100 - hwcontext.h
Add AV_HWDEVICE_TYPE_NONE, av_hwdevice_find_type_by_name(),
av_hwdevice_get_type_name() and av_hwdevice_iterate_types().

2017-06-14 - b22172f6f3 - lavu 55.64.100 - hwcontext.h
Add av_hwdevice_ctx_create_derived().

2017-05-15 - 532b23f079 - lavc 57.96.100 - avcodec.h
VideoToolbox hardware-accelerated decoding now supports the new hwaccel API,
which can create the decoder context and allocate hardware frames automatically.
See AVCodecContext.hw_device_ctx and AVCodecContext.hw_frames_ctx.

2017-05-15 - 532b23f079 - lavu 57.63.100 - hwcontext.h
Add AV_HWDEVICE_TYPE_VIDEOTOOLBOX and implementation.

2017-05-08 - f089e02fa2 - lavc 57.95.100 / 57.31.0 - avcodec.h
Add AVCodecContext.apply_cropping to control whether cropping
is handled by libavcodec or the caller.

2017-05-08 - a47bd5d77e - lavu 55.62.100 / 55.30.0 - frame.h
Add AVFrame.crop_left/right/top/bottom fields for attaching cropping
information to video frames.

2017-xx-xx - xxxxxxxxxx
Change av_sha_update(), av_sha512_update() and av_md5_sum()/av_md5_update() length
parameter type to size_t at next major bump.

2017-05-05 - c0f17a905f - lavc 57.94.100 - avcodec.h
The cuvid decoders now support AVCodecContext.hw_device_ctx, which removes
the requirement to set an incomplete AVCodecContext.hw_frames_ctx only to
set the Cuda device handle.
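
A short sketch of what that enables (assumed usage, not changelog text):

    /* Hand the cuvid decoder its CUDA device directly. */
    AVBufferRef *device = NULL;
    if (av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_CUDA, NULL, NULL, 0) == 0)
        avctx->hw_device_ctx = av_buffer_ref(device);   /* avctx: the decoder context */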

2017-04-11 - 8378466507 - lavu 55.61.100 - avstring.h
Add av_strireplace().

2016-04-06 - 157e57a181 - lavc 57.92.100 - avcodec.h
Add AV_PKT_DATA_CONTENT_LIGHT_LEVEL packet side data.

2016-04-06 - b378f5bd64 - lavu 55.60.100 - mastering_display_metadata.h
Add AV_FRAME_DATA_CONTENT_LIGHT_LEVEL value, av_content_light_metadata_alloc()
and av_content_light_metadata_create_side_data() API, and AVContentLightMetadata
type to export content light level video properties.

2017-03-31 - 9033e8723c - lavu 55.57.100 - spherical.h
Add av_spherical_projection_name().
Add av_spherical_from_name().

@@ -742,7 +626,7 @@ API changes, most recent first:
Add av_opt_get_dict_val/set_dict_val with AV_OPT_TYPE_DICT to support
dictionary types being set as options.

2014-08-13 - afbd4b7e09 - lavf 56.01.0 - avformat.h
2014-08-13 - afbd4b8 - lavf 56.01.0 - avformat.h
Add AVFormatContext.event_flags and AVStream.event_flags for signaling to
the user when events happen in the file/stream.

@@ -759,7 +643,7 @@ API changes, most recent first:
2014-08-08 - 5c3c671 - lavf 55.53.100 - avio.h
Add avio_feof() and deprecate url_feof().

2014-08-07 - bb789016d4 - lsws 2.1.3 - swscale.h
2014-08-07 - bb78903 - lsws 2.1.3 - swscale.h
sws_getContext is not going to be removed in the future.

2014-08-07 - a561662 / ad1ee5f - lavc 55.73.101 / 55.57.3 - avcodec.h

@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
# could be handy for archiving the generated documentation or if some version
# control system is used.

PROJECT_NUMBER = 3.4.14
PROJECT_NUMBER = 3.3

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a

42 doc/Makefile
@@ -24,7 +24,6 @@ HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMP
doc/fate.html \
doc/general.html \
doc/git-howto.html \
doc/mailing-list-faq.html \
doc/nut.html \
doc/platform.html \

@@ -37,6 +36,33 @@ DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
DOCS = $(DOCS-yes)

DOC_EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
DOC_EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
DOC_EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
DOC_EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
DOC_EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
DOC_EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)

DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES_G := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
PROGS += $(DOC_EXAMPLES)

all-$(CONFIG_DOC): doc

doc: documentation
@@ -44,6 +70,8 @@ doc: documentation
apidoc: doc/doxy/html
documentation: $(DOCS)

examples: $(DOC_EXAMPLES)

TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d)

doc/%.txt: TAG = TXT
@@ -96,9 +124,11 @@ doc/%.3: doc/%.pod $(GENTEXI)
    $(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@

$(DOCS) doc/doxy/html: | doc/
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples

DOXY_INPUT = $(INSTHEADERS)
DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak
DOXY_INPUT = $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c)
DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) config.mak

doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
@@ -144,7 +174,11 @@ clean:: docclean
distclean:: docclean
    $(RM) doc/config.texi

docclean::
examplesclean:
    $(RM) $(ALL_DOC_EXAMPLES) $(ALL_DOC_EXAMPLES_G)
    $(RM) $(CLEANSUFFIXES:%=doc/examples/%)

docclean: examplesclean
    $(RM) $(CLEANSUFFIXES:%=doc/%)
    $(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi
    $(RM) -r doc/doxy/html

@@ -3,9 +3,9 @@
The FFmpeg developers.

For details about the authorship, see the Git history of the project
(https://git.ffmpeg.org/ffmpeg), e.g. by typing the command
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
@command{git log} in the FFmpeg source directory, or browsing the
online repository at @url{https://git.ffmpeg.org/ffmpeg}.
online repository at @url{http://source.ffmpeg.org}.

Maintainers for the specific components are listed in the file
@file{MAINTAINERS} in the source code tree.

@@ -220,30 +220,19 @@ ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi

@section noise

Damages the contents of packets or simply drops them without damaging the
container. Can be used for fuzzing or testing error resilience/concealment.
Damages the contents of packets without damaging the container. Can be
used for fuzzing or testing error resilience/concealment.

Parameters:
@table @option
@item amount
A numeral string, whose value is related to how often output bytes will
be modified. Therefore, values below or equal to 0 are forbidden, and
the lower the more frequent bytes will be modified, with 1 meaning
every byte is modified.
@item dropamount
A numeral string, whose value is related to how often packets will be dropped.
Therefore, values below or equal to 0 are forbidden, and the lower the more
frequent packets will be dropped, with 1 meaning every packet is dropped.
@end table

The following example applies the modification to every byte but does not drop
any packets.
@example
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
@end example
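
A complementary invocation (hypothetical; it assumes the named-option syntax
accepted by the 3.4-side version of this filter) that also drops roughly one
packet in ten:
@example
ffmpeg -i INPUT -c copy -bsf noise=dropamount=10 output.mkv
@end example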

@section null
This bitstream filter passes the packets through unchanged.
applies the modification to every byte.

@section remove_extra

@@ -281,13 +270,4 @@ Merge VP9 invisible (alt-ref) frames back into VP9 superframes. This
fixes merging of split/segmented VP9 streams where the alt-ref frame
was split from its visible counterpart.

@section vp9_superframe_split

Split VP9 superframes into single frames.

@section vp9_raw_reorder

Given a VP9 stream with correct timestamps but possibly out of order,
insert additional show-existing-frame packets to correct the ordering.

@c man end BITSTREAM FILTERS

2 doc/bootstrap.min.css vendored
File diff suppressed because one or more lines are too long
@@ -45,9 +45,6 @@ libswscale/swscale-test
config
Reconfigure the project with the current configuration.

tools/target_dec_<decoder>_fuzzer
Build fuzzer to fuzz the specified decoder.

Useful standard make commands:
make -t <target>

@@ -1258,7 +1258,7 @@ Interlaced video, top coded first, bottom displayed first
Interlaced video, bottom coded first, top displayed first
@end table

@item skip_alpha @var{bool} (@emph{decoding,video})
@item skip_alpha @var{integer} (@emph{decoding,video})
Set to 1 to disable processing alpha (transparency). This works like the
@samp{gray} flag in the @option{flags} option which skips chroma information
instead of alpha. Default is 0.
@@ -1279,16 +1279,6 @@ ffprobe -dump_separator "
Maximum number of pixels per image. This value can be used to avoid out of
memory failures due to large images.

@item apply_cropping @var{bool} (@emph{decoding,video})
Enable cropping if cropping parameters are multiples of the required
alignment for the left and top parameters. If the alignment is not met the
cropping will be partially applied to maintain alignment.
Default is 1 (enabled).
Note: The required alignment depends on if @code{AV_CODEC_FLAG_UNALIGNED} is set and the
CPU. @code{AV_CODEC_FLAG_UNALIGNED} cannot be changed from the command line. Also hardware
decoders will not apply left/top Cropping.
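
A hypothetical invocation that disables the behaviour and hands uncropped
frames to the caller:
@example
ffmpeg -apply_cropping 0 -i INPUT -f null -
@end example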

@end table

@c man end CODEC OPTIONS

@@ -109,7 +109,7 @@ correctly by using lavc's old buggy lpc logic for decoding.

@section ffwavesynth

Internal wave synthesizer.
Internal wave synthetizer.

This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
@@ -275,7 +275,7 @@ Y offset of generated bitmaps, default is 0.
Chops leading and trailing spaces and removes empty lines from the generated
text. This option is useful for teletext based subtitles where empty spaces may
be present at the start or at the end of the lines or empty lines may be
present between the subtitle lines because of double-sized teletext characters.
present between the subtitle lines because of double-sized teletext charactes.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in

@@ -300,24 +300,6 @@ used to end the output video at the length of the shortest input file,
which in this case is @file{input.mp4} as the GIF in this example loops
infinitely.

@section hls

HLS demuxer

It accepts the following options:

@table @option
@item live_start_index
segment index to start live streams at (negative values are from the end).

@item allowed_extensions
',' separated list of file extensions that hls is allowed to access.

@item max_reload
Maximum number of times a insufficient list is attempted to be reloaded.
Default value is 1000.
@end table

@section image2

Image file demuxer.

@@ -131,11 +131,6 @@ designated struct initializers (@samp{struct s x = @{ .i = 17 @};});

@item
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).

@item
Implementation defined behavior for signed integers is assumed to match the
expected behavior for two's complement. Non representable values in integer
casts are binary truncated. Shift right of signed values uses sign extension.
@end itemize

These features are supported by all compilers we care about, so we will not
@@ -743,25 +738,6 @@ In case you need finer control over how valgrind is invoked, use the
@code{--target-exec='valgrind <your_custom_valgrind_options>} option in
your configure line instead.

@anchor{Maintenance}
@chapter Maintenance process

@anchor{MAINTAINERS}
@section MAINTAINERS

The developers maintaining each part of the codebase are listed in @file{MAINTAINERS}.
Being listed in @file{MAINTAINERS}, gives one the right to have git write access to
the specific repository.

@anchor{Becoming a maintainer}
@section Becoming a maintainer

People add themselves to @file{MAINTAINERS} by sending a patch like any other code
change. These get reviewed by the community like any other patch. It is expected
that, if someone has an objection to a new maintainer, she is willing to object
in public with her full name and is willing to take over maintainership for the area.

@anchor{Release process}
@section Release process

@@ -92,12 +92,12 @@ using the value "enable", which is mainly useful for debugging or disabled using

@item aac_is
Sets intensity stereo coding tool usage. By default, it's enabled and will
automatically toggle IS for similar pairs of stereo bands if it's beneficial.
automatically toggle IS for similar pairs of stereo bands if it's benefitial.
Can be disabled for debugging by setting the value to "disable".

@item aac_pns
Uses perceptual noise substitution to replace low entropy high frequency bands
with imperceptible white noise during the decoding process. By default, it's
with imperceivable white noise during the decoding process. By default, it's
enabled, but can be disabled for debugging purposes by using "disable".

@item aac_tns
@@ -599,7 +599,7 @@ Channel mode
@item auto
The mode is chosen automatically for each frame
@item indep
Channels are independently coded
Chanels are independently coded
@item left_side
@item right_side
@item mid_side
@@ -1666,7 +1666,7 @@ option to 2.
Enable frame parallel decodability features.
@item aq-mode
Set adaptive quantization mode (0: off (default), 1: variance 2: complexity, 3:
cyclic refresh, 4: equator360).
cyclic refresh).
@item colorspace @emph{color-space}
Set input color space. The VP9 bitstream supports signaling the following
colorspaces:
@@ -1679,8 +1679,6 @@ colorspaces:
@item @samp{smpte240m} @emph{smpte240}
@item @samp{bt2020_ncl} @emph{bt2020}
@end table
@item row-mt @var{boolean}
Enable row based multi-threading.
@end table

@end table
@@ -1793,7 +1791,7 @@ the documentation of the undocumented generic options, see
@ref{codec-options,,the Codec Options chapter}.

To get a more accurate and extensive documentation of the libx264
options, invoke the command @command{x264 --fullhelp} or consult
options, invoke the command @command{x264 --full-help} or consult
the libx264 documentation.

@table @option
@@ -2106,7 +2104,7 @@ is kept undocumented for some reason.

For example to specify libx264 encoding options with @command{ffmpeg}:
@example
ffmpeg -i foo.mpg -c:v libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
@end example

@item a53cc @var{boolean}
@@ -2381,7 +2379,6 @@ Select the ProRes profile to encode
@item standard
@item hq
@item 4444
@item 4444xq
@end table

@item quant_mat @var{integer}
@@ -2528,117 +2525,6 @@ encoder use CAVLC instead of CABAC.
dia size for the iterative motion estimation
@end table

@section VAAPI encoders

Wrappers for hardware encoders accessible via VAAPI.

These encoders only accept input in VAAPI hardware surfaces. If you have input
in software frames, use the @option{hwupload} filter to upload them to the GPU.

The following standard libavcodec options are used:
@itemize
@item
@option{g} / @option{gop_size}
@item
@option{bf} / @option{max_b_frames}
@item
@option{profile}
@item
@option{level}
@item
@option{b} / @option{bit_rate}
@item
@option{maxrate} / @option{rc_max_rate}
@item
@option{bufsize} / @option{rc_buffer_size}
@item
@option{rc_init_occupancy} / @option{rc_initial_buffer_occupancy}
@item
@option{compression_level}

Speed / quality tradeoff: higher values are faster / worse quality.
@item
@option{q} / @option{global_quality}

Size / quality tradeoff: higher values are smaller / worse quality.
@item
@option{qmin}
(only: @option{qmax} is not supported)
@item
@option{i_qfactor} / @option{i_quant_factor}
@item
@option{i_qoffset} / @option{i_quant_offset}
@item
@option{b_qfactor} / @option{b_quant_factor}
@item
@option{b_qoffset} / @option{b_quant_offset}
@end itemize
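
Put together, a typical software-input invocation looks something like this
(a sketch; the DRM render node path is an assumption about the local setup):
@example
ffmpeg -vaapi_device /dev/dri/renderD128 -i INPUT \
    -vf 'format=nv12,hwupload' -c:v h264_vaapi -qp 25 OUTPUT
@end example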

@table @option

@item h264_vaapi
@option{profile} sets the value of @emph{profile_idc} and the @emph{constraint_set*_flag}s.
@option{level} sets the value of @emph{level_idc}.

@table @option
@item low_power
Use low-power encoding mode.
@item coder
Set entropy encoder (default is @emph{cabac}). Possible values:

@table @samp
@item ac
@item cabac
Use CABAC.

@item vlc
@item cavlc
Use CAVLC.
@end table
@end table

@item hevc_vaapi
@option{profile} and @option{level} set the values of
@emph{general_profile_idc} and @emph{general_level_idc} respectively.

@item mjpeg_vaapi
Always encodes using the standard quantisation and huffman tables -
@option{global_quality} scales the standard quantisation table (range 1-100).

@item mpeg2_vaapi
@option{profile} and @option{level} set the value of @emph{profile_and_level_indication}.

No rate control is supported.

@item vp8_vaapi
B-frames are not supported.

@option{global_quality} sets the @emph{q_idx} used for non-key frames (range 0-127).

@table @option
@item loop_filter_level
@item loop_filter_sharpness
Manually set the loop filter parameters.
@end table

@item vp9_vaapi
@option{global_quality} sets the @emph{q_idx} used for P-frames (range 0-255).

@table @option
@item loop_filter_level
@item loop_filter_sharpness
Manually set the loop filter parameters.
@end table

B-frames are supported, but the output stream is always in encode order rather than display
order. If B-frames are enabled, it may be necessary to use the @option{vp9_raw_reorder}
bitstream filter to modify the output stream to display frames in the correct order.

Only normal frames are produced - the @option{vp9_superframe} bitstream filter may be
required to produce a stream usable with all decoders.
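
A hypothetical command combining B-frame encoding with both bitstream
filters mentioned above:
@example
ffmpeg -vaapi_device /dev/dri/renderD128 -i INPUT -vf 'format=nv12,hwupload' \
    -c:v vp9_vaapi -bf 2 -bsf:v vp9_raw_reorder,vp9_superframe OUTPUT.webm
@end example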

@end table

@section vc2

SMPTE VC-2 (previously BBC Dirac Pro). This codec was primarily aimed at

1 doc/examples/.gitignore vendored
@@ -10,7 +10,6 @@
/filtering_audio
/filtering_video
/http_multiclient
/hw_decode
/metadata
/muxing
/pc-uninstalled

@@ -1,62 +1,49 @@
EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
EXAMPLES-$(CONFIG_HW_DECODE_EXAMPLE) += hw_decode
EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS= libavdevice \
             libavformat \
             libavfilter \
             libavcodec \
             libswresample \
             libswscale \
             libavutil \

EXAMPLES := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
EXAMPLES_G := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
ALL_EXAMPLES := $(EXAMPLES) $(EXAMPLES-:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_EXAMPLES_G := $(EXAMPLES_G) $(EXAMPLES-:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
PROGS += $(EXAMPLES)
CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLE_MAKEFILE := $(SRC_PATH)/doc/examples/Makefile
EXAMPLES_FILES := $(wildcard $(SRC_PATH)/doc/examples/*.c) $(SRC_PATH)/doc/examples/README $(EXAMPLE_MAKEFILE)
EXAMPLES= avio_dir_cmd \
          avio_reading \
          decode_audio \
          decode_video \
          demuxing_decoding \
          encode_audio \
          encode_video \
          extract_mvs \
          filtering_video \
          filtering_audio \
          http_multiclient \
          metadata \
          muxing \
          remuxing \
          resampling_audio \
          scaling_video \
          transcode_aac \
          transcoding \

$(foreach P,$(EXAMPLES),$(eval OBJS-$(P:%$(PROGSSUF)$(EXESUF)=%) = $(P:%$(PROGSSUF)$(EXESUF)=%).o))
$(EXAMPLES_G): %$(PROGSSUF)_g$(EXESUF): %.o
OBJS=$(addsuffix .o,$(EXAMPLES))

examples: $(EXAMPLES)
# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
encode_audio: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm

$(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples
.phony: all clean-test clean

DOXY_INPUT += $(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.c)
all: $(OBJS) $(EXAMPLES)

install: install-examples
clean-test:
    $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg

install-examples: $(EXAMPLES_FILES)
    $(Q)mkdir -p "$(DATADIR)/examples"
    $(INSTALL) -m 644 $(EXAMPLES_FILES) "$(DATADIR)/examples"
    $(INSTALL) -m 644 $(EXAMPLE_MAKEFILE:%=%.example) "$(DATADIR)/examples/Makefile"

uninstall: uninstall-examples

uninstall-examples:
    $(RM) -r "$(DATADIR)/examples"

examplesclean:
    $(RM) $(ALL_EXAMPLES) $(ALL_EXAMPLES_G)
    $(RM) $(CLEANSUFFIXES:%=doc/examples/%)

docclean:: examplesclean

-include $(wildcard $(EXAMPLES:%$(PROGSSUF)$(EXESUF)=%.d))

.PHONY: examples
clean: clean-test
    $(RM) $(EXAMPLES) $(OBJS)

@@ -1,50 +0,0 @@
# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS= libavdevice \
             libavformat \
             libavfilter \
             libavcodec \
             libswresample \
             libswscale \
             libavutil \

CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

EXAMPLES= avio_dir_cmd \
          avio_reading \
          decode_audio \
          decode_video \
          demuxing_decoding \
          encode_audio \
          encode_video \
          extract_mvs \
          filtering_video \
          filtering_audio \
          http_multiclient \
          hw_decode \
          metadata \
          muxing \
          remuxing \
          resampling_audio \
          scaling_video \
          transcode_aac \
          transcoding \

OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
encode_audio: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm

.phony: all clean-test clean

all: $(OBJS) $(EXAMPLES)

clean-test:
    $(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg

clean: clean-test
    $(RM) $(EXAMPLES) $(OBJS)
@@ -39,52 +39,15 @@
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
                   FILE *outfile)
{
    int i, ch;
    int ret, data_size;

    /* send the packet with the compressed data to the decoder */
    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error submitting the packet to the decoder\n");
        exit(1);
    }

    /* read all the output frames (in general there may be any number of them */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }
        data_size = av_get_bytes_per_sample(dec_ctx->sample_fmt);
        if (data_size < 0) {
            /* This should not occur, checking just for paranoia */
            fprintf(stderr, "Failed to calculate data size\n");
            exit(1);
        }
        for (i = 0; i < frame->nb_samples; i++)
            for (ch = 0; ch < dec_ctx->channels; ch++)
                fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
    }
}

int main(int argc, char **argv)
{
    const char *outfilename, *filename;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    AVCodecParserContext *parser = NULL;
    int len, ret;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t data_size;
    AVPacket *pkt;
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;

    if (argc <= 2) {
@@ -97,7 +60,7 @@ int main(int argc, char **argv)
    /* register all the codecs */
    avcodec_register_all();

    pkt = av_packet_alloc();
    av_init_packet(&avpkt);

    /* find the MPEG audio decoder */
    codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
@@ -106,12 +69,6 @@ int main(int argc, char **argv)
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "Parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
@@ -136,10 +93,13 @@ int main(int argc, char **argv)
    }

    /* decode until eof */
    data      = inbuf;
    data_size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

    while (avpkt.size > 0) {
        int i, ch;
        int got_frame = 0;

    while (data_size > 0) {
        if (!decoded_frame) {
            if (!(decoded_frame = av_frame_alloc())) {
                fprintf(stderr, "Could not allocate audio frame\n");
@@ -147,41 +107,46 @@ int main(int argc, char **argv)
            }
        }

        ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                               data, data_size,
                               AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        if (ret < 0) {
            fprintf(stderr, "Error while parsing\n");
        len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
        if (len < 0) {
            fprintf(stderr, "Error while decoding\n");
            exit(1);
        }
        data      += ret;
        data_size -= ret;

        if (pkt->size)
            decode(c, pkt, decoded_frame, outfile);

        if (data_size < AUDIO_REFILL_THRESH) {
            memmove(inbuf, data, data_size);
            data = inbuf;
            len = fread(data + data_size, 1,
                        AUDIO_INBUF_SIZE - data_size, f);
        if (got_frame) {
            /* if a frame has been decoded, output it */
            int data_size = av_get_bytes_per_sample(c->sample_fmt);
            if (data_size < 0) {
                /* This should not occur, checking just for paranoia */
                fprintf(stderr, "Failed to calculate data size\n");
                exit(1);
            }
            for (i=0; i<decoded_frame->nb_samples; i++)
                for (ch=0; ch<c->channels; ch++)
                    fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
        }
        avpkt.size -= len;
        avpkt.data += len;
        avpkt.dts =
        avpkt.pts = AV_NOPTS_VALUE;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                data_size += len;
                avpkt.size += len;
        }
    }

    /* flush the decoder */
    pkt->data = NULL;
    pkt->size = 0;
    decode(c, pkt, decoded_frame, outfile);

    fclose(outfile);
    fclose(f);

    avcodec_free_context(&c);
    av_parser_close(parser);
    av_frame_free(&decoded_frame);
    av_packet_free(&pkt);

    return 0;
}

@@ -48,51 +48,44 @@ static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
    fclose(f);
}

static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
                   const char *filename)
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                              AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
    int len, got_frame;
    char buf[1024];
    int ret;

    ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        exit(1);
    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
        fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
        return len;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            exit(1);
        }

        printf("saving frame %3d\n", dec_ctx->frame_number);
    if (got_frame) {
        printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
        fflush(stdout);

        /* the picture is allocated by the decoder. no need to
           free it */
        snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
        /* the picture is allocated by the decoder, no need to free it */
        snprintf(buf, sizeof(buf), "%s-%d", outfilename, *frame_count);
        pgm_save(frame->data[0], frame->linesize[0],
                 frame->width, frame->height, buf);
        (*frame_count)++;
    }
    if (pkt->data) {
        pkt->size -= len;
        pkt->data += len;
    }
    return 0;
}

int main(int argc, char **argv)
{
    const char *filename, *outfilename;
    const AVCodec *codec;
    AVCodecParserContext *parser;
    AVCodecContext *c= NULL;
    int frame_count;
    FILE *f;
    AVFrame *frame;
    uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    uint8_t *data;
    size_t data_size;
    int ret;
    AVPacket *pkt;
    AVPacket avpkt;

    if (argc <= 2) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
@@ -103,9 +96,7 @@ int main(int argc, char **argv)

    avcodec_register_all();

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);
    av_init_packet(&avpkt);

    /* set end of buffer to 0 (this ensures that no overreading happens for damaged MPEG streams) */
    memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
@@ -117,18 +108,15 @@ int main(int argc, char **argv)
        exit(1);
    }

    parser = av_parser_init(codec->id);
    if (!parser) {
        fprintf(stderr, "parser not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }

    if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
        c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames

    /* For some codecs, such as msmpeg4 and mpeg4, width and height
       MUST be initialized there because this information is not
       available in the bitstream. */
@@ -151,38 +139,44 @@ int main(int argc, char **argv)
        exit(1);
    }

    while (!feof(f)) {
        /* read raw data from the input file */
        data_size = fread(inbuf, 1, INBUF_SIZE, f);
        if (!data_size)
    frame_count = 0;
    for (;;) {
        avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
        if (avpkt.size == 0)
            break;

        /* use the parser to split the data into frames */
        data = inbuf;
        while (data_size > 0) {
            ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
                                   data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (ret < 0) {
                fprintf(stderr, "Error while parsing\n");
                exit(1);
            }
            data      += ret;
            data_size -= ret;
        /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
           and this is the only method to use them because you cannot
           know the compressed data size before analysing it.

            if (pkt->size)
                decode(c, frame, pkt, outfilename);
        }
           BUT some other codecs (msmpeg4, mpeg4) are inherently frame
           based, so you must call them with all the data for one
           frame exactly. You must also initialize 'width' and
           'height' before initializing them. */

        /* NOTE2: some codecs allow the raw parameters (frame size,
           sample rate) to be changed at any frame. We handle this, so
           you should also take care of it */

        /* here, we use a stream based decoder (mpeg1video), so we
           feed decoder and see if it could decode a frame */
        avpkt.data = inbuf;
        while (avpkt.size > 0)
            if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                exit(1);
    }

    /* flush the decoder */
    decode(c, frame, NULL, outfilename);
    /* Some codecs, such as MPEG, transmit the I- and P-frame with a
       latency of one frame. You must do the following to have a
       chance to get the last frame of the video. */
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

    fclose(f);

    av_parser_close(parser);
    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}

@@ -92,42 +92,14 @@ static int select_channel_layout(const AVCodec *codec)
    return best_ch_layout;
}

static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *output)
{
    int ret;

    /* send the frame for encoding */
    ret = avcodec_send_frame(ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending the frame to the encoder\n");
        exit(1);
    }

    /* read all the available output packets (in general there may be any
     * number of them */
    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }

        fwrite(pkt->data, 1, pkt->size, output);
        av_packet_unref(pkt);
    }
}

int main(int argc, char **argv)
{
    const char *filename;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    AVFrame *frame;
    AVPacket *pkt;
    int i, j, k, ret;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    FILE *f;
    uint16_t *samples;
    float t, tincr;
@@ -182,13 +154,6 @@ int main(int argc, char **argv)
        exit(1);
    }

    /* packet for holding encoded output */
    pkt = av_packet_alloc();
    if (!pkt) {
        fprintf(stderr, "could not allocate the packet\n");
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
@@ -211,6 +176,10 @@ int main(int argc, char **argv)
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        /* make sure the frame is writable -- makes a copy if the encoder
         * kept a reference internally */
        ret = av_frame_make_writable(frame);
@@ -225,16 +194,34 @@ int main(int argc, char **argv)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        encode(c, frame, pkt, f);
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);
    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }
    fclose(f);

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&c);

    return 0;

@@ -36,45 +36,15 @@
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>

static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   FILE *outfile)
{
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending a frame for encoding\n");
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
        fwrite(pkt->data, 1, pkt->size, outfile);
        av_packet_unref(pkt);
    }
}

int main(int argc, char **argv)
{
    const char *filename, *codec_name;
    const AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket *pkt;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    if (argc <= 2) {
@@ -89,7 +59,7 @@ int main(int argc, char **argv)
    /* find the mpeg1video encoder */
    codec = avcodec_find_encoder_by_name(codec_name);
    if (!codec) {
        fprintf(stderr, "Codec '%s' not found\n", codec_name);
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

@@ -99,10 +69,6 @@ int main(int argc, char **argv)
        exit(1);
    }

    pkt = av_packet_alloc();
    if (!pkt)
        exit(1);

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
@@ -126,9 +92,8 @@ int main(int argc, char **argv)
        av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

@@ -155,6 +120,10 @@ int main(int argc, char **argv)

    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);

        /* make sure the frame data is writable */
@@ -181,11 +150,35 @@ int main(int argc, char **argv)
        frame->pts = i;

        /* encode the image */
        encode(c, frame, pkt, f);
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }

    /* flush the encoder */
    encode(c, NULL, pkt, f);
    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }

    /* add sequence end code to have a real MPEG file */
    fwrite(endcode, 1, sizeof(endcode), f);
@@ -193,7 +186,6 @@ int main(int argc, char **argv)

    avcodec_free_context(&c);
    av_frame_free(&frame);
    av_packet_free(&pkt);

    return 0;
}

@@ -31,26 +31,23 @@ static const char *src_filename = NULL;

static int video_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;

static int decode_packet(const AVPacket *pkt)
static int decode_packet(int *got_frame, int cached)
{
    int ret = avcodec_send_packet(video_dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error while sending a packet to the decoder: %s\n", av_err2str(ret));
        return ret;
    }
    int decoded = pkt.size;

    while (ret >= 0) {
        ret = avcodec_receive_frame(video_dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;
        } else if (ret < 0) {
            fprintf(stderr, "Error while receiving a frame from the decoder: %s\n", av_err2str(ret));
    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (ret >= 0) {
        if (*got_frame) {
            int i;
            AVFrameSideData *sd;

@@ -61,16 +58,15 @@ static int decode_packet(const AVPacket *pkt)
            for (i = 0; i < sd->size / sizeof(*mvs); i++) {
                const AVMotionVector *mv = &mvs[i];
                printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
                       video_frame_count, mv->source,
                       mv->w, mv->h, mv->src_x, mv->src_y,
                       mv->dst_x, mv->dst_y, mv->flags);
                    video_frame_count, mv->source,
                    mv->w, mv->h, mv->src_x, mv->src_y,
                    mv->dst_x, mv->dst_y, mv->flags);
            }
        }
        av_frame_unref(frame);
    }
}

    return 0;
    return decoded;
}

static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
@@ -160,17 +155,30 @@ int main(int argc, char **argv)
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(&pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_packet_unref(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
decode_packet(NULL);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
|
||||
@@ -201,7 +201,7 @@ end:
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
|
||||
@@ -253,7 +253,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
|
||||
if (ret >= 0) {
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
|
||||
@@ -1,266 +0,0 @@
/*
 * Copyright (c) 2017 Jun Zhao
 * Copyright (c) 2017 Kaixuan Liu
 *
 * HW Acceleration API (video decoding) decode sample
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * HW-Accelerated decoding example.
 *
 * @example hw_decode.c
 * This example shows how to do HW-accelerated decoding with output
 * frames from the HW video surfaces.
 */

#include <stdio.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>

static AVBufferRef *hw_device_ctx = NULL;
static enum AVPixelFormat hw_pix_fmt;
static FILE *output_file = NULL;

static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
{
    enum AVPixelFormat fmt;

    switch (type) {
    case AV_HWDEVICE_TYPE_VAAPI:
        fmt = AV_PIX_FMT_VAAPI;
        break;
    case AV_HWDEVICE_TYPE_DXVA2:
        fmt = AV_PIX_FMT_DXVA2_VLD;
        break;
    case AV_HWDEVICE_TYPE_D3D11VA:
        fmt = AV_PIX_FMT_D3D11;
        break;
    case AV_HWDEVICE_TYPE_VDPAU:
        fmt = AV_PIX_FMT_VDPAU;
        break;
    case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
        fmt = AV_PIX_FMT_VIDEOTOOLBOX;
        break;
    default:
        fmt = AV_PIX_FMT_NONE;
        break;
    }

    return fmt;
}

static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
{
    int err = 0;

    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
                                      NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create specified HW device.\n");
        return err;
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    return err;
}

static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != -1; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }

    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

static int decode_write(AVCodecContext *avctx, AVPacket *packet)
{
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVFrame *tmp_frame = NULL;
    uint8_t *buffer = NULL;
    int size;
    int ret = 0;

    ret = avcodec_send_packet(avctx, packet);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (ret >= 0) {
        if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
            fprintf(stderr, "Can not alloc frame\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avcodec_receive_frame(avctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            av_frame_free(&sw_frame);
            return 0;
        } else if (ret < 0) {
            fprintf(stderr, "Error while decoding\n");
            goto fail;
        }

        if (frame->format == hw_pix_fmt) {
            /* retrieve data from GPU to CPU */
            if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
                fprintf(stderr, "Error transferring the data to system memory\n");
                goto fail;
            }
            tmp_frame = sw_frame;
        } else
            tmp_frame = frame;

        size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
                                        tmp_frame->height, 1);
        buffer = av_malloc(size);
        if (!buffer) {
            fprintf(stderr, "Can not alloc buffer\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t * const *)tmp_frame->data,
                                      (const int *)tmp_frame->linesize, tmp_frame->format,
                                      tmp_frame->width, tmp_frame->height, 1);
        if (ret < 0) {
            fprintf(stderr, "Can not copy image to buffer\n");
            goto fail;
        }

        if ((ret = fwrite(buffer, 1, size, output_file)) < 0) {
            fprintf(stderr, "Failed to dump raw data.\n");
            goto fail;
        }

    fail:
        av_frame_free(&frame);
        av_frame_free(&sw_frame);
        if (buffer)
            av_freep(&buffer);
        if (ret < 0)
            return ret;
    }

    return 0;
}

int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *input_ctx = NULL;
|
||||
int video_stream, ret;
|
||||
AVStream *video = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVPacket packet;
|
||||
enum AVHWDeviceType type;
|
||||
|
||||
if (argc < 4) {
|
||||
fprintf(stderr, "Usage: %s <vaapi|vdpau|dxva2|d3d11va> <input file> <output file>\n", argv[0]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
|
||||
type = av_hwdevice_find_type_by_name(argv[1]);
|
||||
hw_pix_fmt = find_fmt_by_hw_type(type);
|
||||
if (hw_pix_fmt == -1) {
|
||||
fprintf(stderr, "Cannot support '%s' in this example.\n", argv[1]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(input_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Cannot find input stream information.\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* find the video stream information */
|
||||
ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot find a video stream in the input file\n");
|
||||
return -1;
|
||||
}
|
||||
video_stream = ret;
|
||||
|
||||
if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
video = input_ctx->streams[video_stream];
|
||||
if (avcodec_parameters_to_context(decoder_ctx, video->codecpar) < 0)
|
||||
return -1;
|
||||
|
||||
decoder_ctx->get_format = get_hw_format;
|
||||
av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
if (hw_decoder_init(decoder_ctx, type) < 0)
|
||||
return -1;
|
||||
|
||||
if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to open codec for stream #%u\n", video_stream);
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the file to dump raw data */
|
||||
output_file = fopen(argv[3], "w+");
|
||||
|
||||
/* actual decoding and dump the raw data */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(input_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == packet.stream_index)
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
packet.data = NULL;
|
||||
packet.size = 0;
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
av_packet_unref(&packet);
|
||||
|
||||
if (output_file)
|
||||
fclose(output_file);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avformat_close_input(&input_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
|
||||
return 0;
|
||||
}
|
||||
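The excerpt above relies on the get_hw_format callback and the hw_decoder_init
helper, which the diff does not show; a minimal sketch of what they look like
in the full example (assuming the global hw_pix_fmt and hw_device_ctx used
throughout):

static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    /* pick the hardware pixel format selected in main() */
    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }

    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
{
    int err;

    /* open the hardware device and attach it to the decoder context */
    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create specified HW device.\n");
        return err;
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);

    return err;
}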
@@ -26,55 +26,185 @@
 *
 * @example qsvdec.c
 * This example shows how to do QSV-accelerated H.264 decoding with output
 * frames in the GPU video surfaces.
 * frames in the VA-API video surfaces.
 */

#include "config.h"

#include <stdio.h>

#include <mfx/mfxvideo.h>

#include <va/va.h>
#include <va/va_x11.h>
#include <X11/Xlib.h>

#include "libavformat/avformat.h"
#include "libavformat/avio.h"

#include "libavcodec/avcodec.h"
#include "libavcodec/qsv.h"

#include "libavutil/buffer.h"
#include "libavutil/error.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_qsv.h"
#include "libavutil/mem.h"

typedef struct DecodeContext {
    AVBufferRef *hw_device_ref;
    mfxSession mfx_session;
    VADisplay va_dpy;

    VASurfaceID *surfaces;
    mfxMemId *surface_ids;
    int *surface_used;
    int nb_surfaces;

    mfxFrameInfo frame_info;
} DecodeContext;

static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
                             mfxFrameAllocResponse *resp)
{
    DecodeContext *decode = pthis;
    int err, i;

    if (decode->surfaces) {
        fprintf(stderr, "Multiple allocation requests.\n");
        return MFX_ERR_MEMORY_ALLOC;
    }
    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
        fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
        return MFX_ERR_UNSUPPORTED;
    }
    if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
        req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
        req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
        fprintf(stderr, "Unsupported surface properties.\n");
        return MFX_ERR_UNSUPPORTED;
    }

    decode->surfaces     = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
    decode->surface_ids  = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
    decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
    if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
        goto fail;

    err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
                           req->Info.Width, req->Info.Height,
                           decode->surfaces, req->NumFrameSuggested,
                           NULL, 0);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Error allocating VA surfaces\n");
        goto fail;
    }
    decode->nb_surfaces = req->NumFrameSuggested;

    for (i = 0; i < decode->nb_surfaces; i++)
        decode->surface_ids[i] = &decode->surfaces[i];

    resp->mids           = decode->surface_ids;
    resp->NumFrameActual = decode->nb_surfaces;

    decode->frame_info = req->Info;

    return MFX_ERR_NONE;
fail:
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);

    return MFX_ERR_MEMORY_ALLOC;
}

static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
    return MFX_ERR_NONE;
}

static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
    return MFX_ERR_UNSUPPORTED;
}

static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
    *hdl = mid;
    return MFX_ERR_NONE;
}

static void free_surfaces(DecodeContext *decode)
{
    if (decode->surfaces)
        vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
    av_freep(&decode->surfaces);
    av_freep(&decode->surface_ids);
    av_freep(&decode->surface_used);
    decode->nb_surfaces = 0;
}

static void free_buffer(void *opaque, uint8_t *data)
{
    int *used = opaque;
    *used = 0;
    av_freep(&data);
}

static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    DecodeContext *decode = avctx->opaque;

    mfxFrameSurface1 *surf;
    AVBufferRef *surf_buf;
    int idx;

    for (idx = 0; idx < decode->nb_surfaces; idx++) {
        if (!decode->surface_used[idx])
            break;
    }
    if (idx == decode->nb_surfaces) {
        fprintf(stderr, "No free surfaces\n");
        return AVERROR(ENOMEM);
    }

    surf = av_mallocz(sizeof(*surf));
    if (!surf)
        return AVERROR(ENOMEM);
    surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
                                &decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
    if (!surf_buf) {
        av_freep(&surf);
        return AVERROR(ENOMEM);
    }

    surf->Info       = decode->frame_info;
    surf->Data.MemId = &decode->surfaces[idx];

    frame->buf[0]  = surf_buf;
    frame->data[3] = (uint8_t*)surf;

    decode->surface_used[idx] = 1;

    return 0;
}

static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
    while (*pix_fmts != AV_PIX_FMT_NONE) {
        if (*pix_fmts == AV_PIX_FMT_QSV) {
            DecodeContext *decode = avctx->opaque;
            AVHWFramesContext *frames_ctx;
            AVQSVFramesContext *frames_hwctx;
            int ret;
            if (!avctx->hwaccel_context) {
                DecodeContext *decode = avctx->opaque;
                AVQSVContext *qsv = av_qsv_alloc_context();
                if (!qsv)
                    return AV_PIX_FMT_NONE;

                /* create a pool of surfaces to be used by the decoder */
                avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
                if (!avctx->hw_frames_ctx)
                    return AV_PIX_FMT_NONE;
                frames_ctx   = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
                frames_hwctx = frames_ctx->hwctx;
                qsv->session   = decode->mfx_session;
                qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;

                frames_ctx->format            = AV_PIX_FMT_QSV;
                frames_ctx->sw_format         = avctx->sw_pix_fmt;
                frames_ctx->width             = FFALIGN(avctx->coded_width,  32);
                frames_ctx->height            = FFALIGN(avctx->coded_height, 32);
                frames_ctx->initial_pool_size = 32;

                frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;

                ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
                if (ret < 0)
                    return AV_PIX_FMT_NONE;
                avctx->hwaccel_context = qsv;
            }

            return AV_PIX_FMT_QSV;
        }
@@ -88,47 +218,86 @@ static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
}

static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
                         AVFrame *frame, AVFrame *sw_frame,
                         AVPacket *pkt, AVIOContext *output_ctx)
                         AVFrame *frame, AVPacket *pkt,
                         AVIOContext *output_ctx)
{
    int ret = 0;
    int got_frame = 1;

    ret = avcodec_send_packet(decoder_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error during decoding\n");
        return ret;
    }

    while (ret >= 0) {
        int i, j;

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
    while (pkt->size > 0 || (!pkt->data && got_frame)) {
        ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }

        pkt->data += ret;
        pkt->size -= ret;

        /* A real program would do something useful with the decoded frame here.
         * We just retrieve the raw data and write it to a file, which is rather
         * useless but pedagogic. */
        ret = av_hwframe_transfer_data(sw_frame, frame, 0);
        if (ret < 0) {
            fprintf(stderr, "Error transferring the data to system memory\n");
            goto fail;
        }
        if (got_frame) {
            mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
            VASurfaceID    surface = *(VASurfaceID*)surf->Data.MemId;

        for (i = 0; i < FF_ARRAY_ELEMS(sw_frame->data) && sw_frame->data[i]; i++)
            for (j = 0; j < (sw_frame->height >> (i > 0)); j++)
                avio_write(output_ctx, sw_frame->data[i] + j * sw_frame->linesize[i], sw_frame->width);
            VAImageFormat img_fmt = {
                .fourcc         = VA_FOURCC_NV12,
                .byte_order     = VA_LSB_FIRST,
                .bits_per_pixel = 8,
                .depth          = 8,
            };

            VAImage img;

            VAStatus err;
            uint8_t *data;
            int i, j;

            img.buf      = VA_INVALID_ID;
            img.image_id = VA_INVALID_ID;

            err = vaCreateImage(decode->va_dpy, &img_fmt,
                                frame->width, frame->height, &img);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error creating an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaGetImage(decode->va_dpy, surface, 0, 0,
                             frame->width, frame->height,
                             img.image_id);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error getting an image: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
            if (err != VA_STATUS_SUCCESS) {
                fprintf(stderr, "Error mapping the image buffer: %s\n",
                        vaErrorStr(err));
                ret = AVERROR_UNKNOWN;
                goto fail;
            }

            for (i = 0; i < img.num_planes; i++)
                for (j = 0; j < (img.height >> (i > 0)); j++)
                    avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);

fail:
            av_frame_unref(sw_frame);
            av_frame_unref(frame);
            if (img.buf != VA_INVALID_ID)
                vaUnmapBuffer(decode->va_dpy, img.buf);
            if (img.image_id != VA_INVALID_ID)
                vaDestroyImage(decode->va_dpy, img.image_id);
            av_frame_unref(frame);

            if (ret < 0)
                return ret;
            if (ret < 0)
                return ret;
        }
    }

    return 0;
@@ -142,13 +311,28 @@ int main(int argc, char **argv)
    const AVCodec *decoder;

    AVPacket pkt = { 0 };
    AVFrame *frame = NULL, *sw_frame = NULL;
    AVFrame *frame = NULL;

    DecodeContext decode = { NULL };

    Display *dpy = NULL;
    int va_ver_major, va_ver_minor;

    mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
    mfxVersion mfx_ver = { { 1, 1 } };

    mfxFrameAllocator frame_allocator = {
        .pthis  = &decode,
        .Alloc  = frame_alloc,
        .Lock   = frame_lock,
        .Unlock = frame_unlock,
        .GetHDL = frame_get_hdl,
        .Free   = frame_free,
    };

    AVIOContext *output_ctx = NULL;

    int ret, i;
    int ret, i, err;

    av_register_all();

@@ -178,13 +362,34 @@ int main(int argc, char **argv)
        goto finish;
    }

    /* open the hardware device */
    ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
                                 "auto", NULL, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot open the hardware device\n");
    /* initialize VA-API */
    dpy = XOpenDisplay(NULL);
    if (!dpy) {
        fprintf(stderr, "Cannot open the X display\n");
        goto finish;
    }
    decode.va_dpy = vaGetDisplay(dpy);
    if (!decode.va_dpy) {
        fprintf(stderr, "Cannot open the VA display\n");
        goto finish;
    }

    err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
    if (err != VA_STATUS_SUCCESS) {
        fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
        goto finish;
    }
    fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);

    /* initialize an MFX session */
    err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
    if (err != MFX_ERR_NONE) {
        fprintf(stderr, "Error initializing an MFX session\n");
        goto finish;
    }

    MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
    MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);

    /* initialize the decoder */
    decoder = avcodec_find_decoder_by_name("h264_qsv");
@@ -213,6 +418,7 @@ int main(int argc, char **argv)
    decoder_ctx->refcounted_frames = 1;

    decoder_ctx->opaque      = &decode;
    decoder_ctx->get_buffer2 = get_buffer;
    decoder_ctx->get_format  = get_format;

    ret = avcodec_open2(decoder_ctx, NULL, NULL);
@@ -228,9 +434,8 @@ int main(int argc, char **argv)
        goto finish;
    }

    frame = av_frame_alloc();
    sw_frame = av_frame_alloc();
    if (!frame || !sw_frame) {
    frame = av_frame_alloc();
    if (!frame) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }
@@ -242,7 +447,7 @@ int main(int argc, char **argv)
            break;

        if (pkt.stream_index == video_st->index)
            ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
            ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

        av_packet_unref(&pkt);
    }
@@ -250,7 +455,7 @@ int main(int argc, char **argv)
    /* flush the decoder */
    pkt.data = NULL;
    pkt.size = 0;
    ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
    ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);

finish:
    if (ret < 0) {
@@ -262,11 +467,19 @@ finish:
    avformat_close_input(&input_ctx);

    av_frame_free(&frame);
    av_frame_free(&sw_frame);

    if (decoder_ctx)
        av_freep(&decoder_ctx->hwaccel_context);
    avcodec_free_context(&decoder_ctx);

    av_buffer_unref(&decode.hw_device_ref);
    free_surfaces(&decode);

    if (decode.mfx_session)
        MFXClose(decode.mfx_session);
    if (decode.va_dpy)
        vaTerminate(decode.va_dpy);
    if (dpy)
        XCloseDisplay(dpy);

    avio_close(output_ctx);

@@ -173,9 +173,6 @@ static int open_output_file(const char *filename)
            enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
        }

        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

        /* Third parameter can be used to pass settings to encoder */
        ret = avcodec_open2(enc_ctx, encoder, NULL);
        if (ret < 0) {
@@ -187,6 +184,8 @@ static int open_output_file(const char *filename)
            av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
            return ret;
        }
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

        out_stream->time_base = enc_ctx->time_base;
        stream_ctx[i].enc_ctx = enc_ctx;
@@ -559,7 +558,7 @@ int main(int argc, char **argv)
        }

        if (got_frame) {
            frame->pts = frame->best_effort_timestamp;
            frame->pts = av_frame_get_best_effort_timestamp(frame);
            ret = filter_encode_write_frame(frame, stream_index);
            av_frame_free(&frame);
            if (ret < 0)
@@ -385,7 +385,7 @@ mkfifo intermediate2.mpg
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
cat intermediate1.mpg intermediate2.mpg |\
ffmpeg -f mpeg -i - -c:v mpeg4 -c:a libmp3lame output.avi
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
@end example

@subsection Concatenating using raw audio and video
@@ -407,13 +407,13 @@ mkfifo temp2.a
mkfifo temp2.v
mkfifo all.a
mkfifo all.v
ffmpeg -i input1.flv -vn -f u16le -c:a pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
ffmpeg -i input2.flv -vn -f u16le -c:a pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
@{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; @} &
cat temp1.a temp2.a > all.a &
cat temp1.v temp2.v > all.v &
ffmpeg -f u16le -c:a pcm_s16le -ac 2 -ar 44100 -i all.a \
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
   -f yuv4mpegpipe -i all.v \
   -y output.flv
rm temp[12].[av] all.[av]
@@ -6,7 +6,6 @@ workdir=     # directory in which to do all the work
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
comment=     # optional description
build_only=  # set to "yes" for a compile-only instance that skips tests
ignore_tests=

# the following are optional and map to configure options
arch=
@@ -27,7 +26,5 @@ extra_conf=  # extra configure options not covered above

#make=       # name of GNU make if not 'make'
makeopts=    # extra options passed to 'make'
#makeopts_fate= # extra options passed to 'make' when running tests,
                # defaulting to makeopts above if this is not set
#tar=        # command to create a tar archive from its arguments on stdout,
             # defaults to 'tar c'
135
doc/ffmpeg.texi
@@ -715,67 +715,6 @@ would be more efficient.
When doing stream copy, copy also non-key frames found at the
beginning.

@item -init_hw_device @var{type}[=@var{name}][:@var{device}[,@var{key=value}...]]
Initialise a new hardware device of type @var{type} called @var{name}, using the
given device parameters.
If no name is specified it will receive a default name of the form "@var{type}%d".

The meaning of @var{device} and the following arguments depends on the
device type:
@table @option

@item cuda
@var{device} is the number of the CUDA device.

@item dxva2
@var{device} is the number of the Direct3D 9 display adapter.

@item vaapi
@var{device} is either an X11 display name or a DRM render node.
If not specified, it will attempt to open the default X11 display (@emph{$DISPLAY})
and then the first DRM render node (@emph{/dev/dri/renderD128}).

@item vdpau
@var{device} is an X11 display name.
If not specified, it will attempt to open the default X11 display (@emph{$DISPLAY}).

@item qsv
@var{device} selects a value in @samp{MFX_IMPL_*}. Allowed values are:
@table @option
@item auto
@item sw
@item hw
@item auto_any
@item hw_any
@item hw2
@item hw3
@item hw4
@end table
If not specified, @samp{auto_any} is used.
(Note that it may be easier to achieve the desired result for QSV by creating the
platform-appropriate subdevice (@samp{dxva2} or @samp{vaapi}) and then deriving a
QSV device from that.)

@end table

@item -init_hw_device @var{type}[=@var{name}]@@@var{source}
Initialise a new hardware device of type @var{type} called @var{name},
deriving it from the existing device with the name @var{source}.

@item -init_hw_device list
List all hardware device types supported in this build of ffmpeg.

@item -filter_hw_device @var{name}
Pass the hardware device called @var{name} to all filters in any filter graph.
This can be used to set the device to upload to with the @code{hwupload} filter,
or the device to map to with the @code{hwmap} filter. Other filters may also
make use of this parameter when they require a hardware device. Note that this
is typically only required when the input is not already in hardware frames -
when it is, filters will derive the device they require from the context of the
frames they receive as input.

This is a global setting, so all filters will receive the same device.
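For programs using the libraries directly, @option{-init_hw_device} roughly
corresponds to @code{av_hwdevice_ctx_create()} (the same call used by the
hw_decode example above); a minimal sketch, where the device type and render
node are purely illustrative:

@example
#include <libavutil/hwcontext.h>

AVBufferRef *hw_device_ctx = NULL;
/* roughly equivalent to "-init_hw_device vaapi:/dev/dri/renderD128" */
int err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
                                 "/dev/dri/renderD128", NULL, 0);
if (err < 0)
    return err;
/* ... attach to a codec context, e.g. decoder_ctx->hw_device_ctx ... */
av_buffer_unref(&hw_device_ctx);
@end example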
@item -hwaccel[:@var{stream_specifier}] @var{hwaccel} (@emph{input,per-stream})
Use hardware acceleration to decode the matching stream(s). The allowed values
of @var{hwaccel} are:
@@ -795,9 +734,6 @@ Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
@item dxva2
Use DXVA2 (DirectX Video Acceleration) hardware acceleration.

@item vaapi
Use VAAPI (Video Acceleration API) hardware acceleration.

@item qsv
Use the Intel QuickSync Video acceleration for video transcoding.

@@ -821,11 +757,33 @@ useful for testing.
@item -hwaccel_device[:@var{stream_specifier}] @var{hwaccel_device} (@emph{input,per-stream})
Select a device to use for hardware acceleration.

This option only makes sense when the @option{-hwaccel} option is also specified.
It can either refer to an existing device created with @option{-init_hw_device}
by name, or it can create a new device as if
@samp{-init_hw_device} @var{type}:@var{hwaccel_device}
were called immediately before.
This option only makes sense when the @option{-hwaccel} option is also
specified. Its exact meaning depends on the specific hardware acceleration
method chosen.

@table @option
@item vdpau
For VDPAU, this option specifies the X11 display/screen to use. If this option
is not specified, the value of the @var{DISPLAY} environment variable is used

@item dxva2
For DXVA2, this option should contain the number of the display adapter to use.
If this option is not specified, the default adapter is used.

@item qsv
For QSV, this option corresponds to the values of MFX_IMPL_* . Allowed values
are:
@table @option
@item auto
@item sw
@item hw
@item auto_any
@item hw_any
@item hw2
@item hw3
@item hw4
@end table
@end table

@item -hwaccels
List all hardware acceleration methods supported in this build of ffmpeg.
@@ -935,7 +893,7 @@ It disables matching streams from already created mappings.
A trailing @code{?} after the stream index will allow the map to be
optional: if the map matches no streams the map will be ignored instead
of failing. Note the map will still fail if an invalid input file index
is used; such as if the map refers to a non-existent input.
is used; such as if the map refers to a non-existant input.

An alternative @var{[linklabel]} form will map outputs from complex filter
graphs (see the @option{-filter_complex} option) to the output file.
@@ -996,7 +954,7 @@ such streams is attempted.
Allow input streams with unknown type to be copied instead of failing if copying
such streams is attempted.

@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][?][:@var{output_file_id}.@var{stream_specifier}]
@item -map_channel [@var{input_file_id}.@var{stream_specifier}.@var{channel_id}|-1][:@var{output_file_id}.@var{stream_specifier}]
Map an audio channel from a given input to an output. If
@var{output_file_id}.@var{stream_specifier} is not set, the audio channel will
be mapped on all the audio streams.
@@ -1005,10 +963,6 @@ Using "-1" instead of
@var{input_file_id}.@var{stream_specifier}.@var{channel_id} will map a muted
channel.

A trailing @code{?} will allow the map_channel to be
optional: if the map_channel matches no channel the map_channel will be ignored instead
of failing.

For example, assuming @var{INPUT} is a stereo audio file, you can switch the
two audio channels with the following command:
@example
@@ -1056,13 +1010,6 @@ video stream), you can use the following command:
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
@end example

To map the first two audio channels from the first input, and using the
trailing @code{?}, ignore the audio channel mapping if the first input is
mono instead of stereo:
@example
ffmpeg -i INPUT -map_channel 0.0.0 -map_channel 0.0.1? OUTPUT
@end example

@item -map_metadata[:@var{metadata_spec_out}] @var{infile}[:@var{metadata_spec_in}] (@emph{output,per-metadata})
Set metadata information of the next output file from @var{infile}. Note that
those are file indices (zero-based), not filenames.
@@ -1229,30 +1176,6 @@ Try to make the choice automatically, in order to generate a sane output.

Default value is -1.

@item -enc_time_base[:@var{stream_specifier}] @var{timebase} (@emph{output,per-stream})
Set the encoder timebase. @var{timebase} is a floating point number,
and can assume one of the following values:

@table @option
@item 0
Assign a default value according to the media type.

For video - use 1/framerate, for audio - use 1/samplerate.

@item -1
Use the input stream timebase when possible.

If an input stream is not available, the default timebase will be used.

@item >0
Use the provided number as the timebase.

This field can be provided as a ratio of two integers (e.g. 1:24, 1:48000)
or as a floating point number (e.g. 0.04166, 2.0833e-5)
@end table

Default value is 0.

@item -shortest (@emph{output})
Finish encoding when the shortest input stream ends.
@item -dts_delta_threshold
@@ -471,7 +471,7 @@ Perform no escaping.
@end table

@item print_section, p
Print the section name at the beginning of each line if the value is
Print the section name at the begin of each line if the value is
@code{1}, disable it with value set to @code{0}. Default value is
@code{1}.

@@ -120,11 +120,6 @@
      <xsd:attribute name="interlaced_frame" type="xsd:int" />
      <xsd:attribute name="top_field_first" type="xsd:int" />
      <xsd:attribute name="repeat_pict" type="xsd:int" />
      <xsd:attribute name="color_range" type="xsd:string"/>
      <xsd:attribute name="color_space" type="xsd:string"/>
      <xsd:attribute name="color_primaries" type="xsd:string"/>
      <xsd:attribute name="color_transfer" type="xsd:string"/>
      <xsd:attribute name="chroma_location" type="xsd:string"/>
    </xsd:complexType>

    <xsd:complexType name="logsType">
@@ -327,7 +327,7 @@ Name of options and sections are case-insensitive.
An ACL (Access Control List) specifies the addresses which are allowed
to access a given stream, or to write a given feed.

It accepts the following forms
It accepts the folling forms
@itemize
@item
Allow/deny access to @var{address}.
@@ -416,7 +416,7 @@ deprecated.
@item NoDefaults
Control whether default codec options are used for all streams or not.
Each stream may overwrite this setting for its own. Default is @var{UseDefaults}.
The last occurrence overrides the previous if multiple definitions exist.
The lastest occurrence overrides previous if multiple definitions.
@end table

@section Feed section

@@ -153,7 +153,7 @@ Show channel names and standard channel layouts.
Show recognized color names.

@item -sources @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
Show autodetected sources of the input device.
Show autodetected sources of the intput device.
Some devices may provide system-dependent source names that cannot be autodetected.
The returned list cannot be assumed to be always complete.
@example
@@ -5,7 +5,7 @@ This document explains guidelines that should be observed (or ignored with
good reason) when writing filters for libavfilter.

In this document, the word “frame” indicates either a video frame or a group
of audio samples, as stored in an AVFrame structure.
of audio samples, as stored in an AVFilterBuffer structure.


Format negotiation
@@ -35,31 +35,32 @@ Format negotiation
    to set the formats supported on another.


Frame references ownership and permissions
==========================================
Buffer references ownership and permissions
===========================================

  Principle
  ---------

    Audio and video data are voluminous; the frame and frame reference
    Audio and video data are voluminous; the buffer and buffer reference
    mechanism is intended to avoid, as much as possible, expensive copies of
    that data while still allowing the filters to produce correct results.

    The data is stored in buffers represented by AVFrame structures.
    Several references can point to the same frame buffer; the buffer is
    automatically deallocated once all corresponding references have been
    destroyed.
    The data is stored in buffers represented by AVFilterBuffer structures.
    They must not be accessed directly, but through references stored in
    AVFilterBufferRef structures. Several references can point to the
    same buffer; the buffer is automatically deallocated once all
    corresponding references have been destroyed.

    The characteristics of the data (resolution, sample rate, etc.) are
    stored in the reference; different references for the same buffer can
    show different characteristics. In particular, a video reference can
    point to only a part of a video buffer.

    A reference is usually obtained as input to the filter_frame method or
    requested using the ff_get_video_buffer or ff_get_audio_buffer
    functions. A new reference on an existing buffer can be created with
    av_frame_ref(). A reference is destroyed using
    the av_frame_free() function.
    A reference is usually obtained as input to the start_frame or
    filter_frame method or requested using the ff_get_video_buffer or
    ff_get_audio_buffer functions. A new reference on an existing buffer can
    be created with the avfilter_ref_buffer. A reference is destroyed using
    the avfilter_unref_bufferp function.
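    For instance, with the current API, code that wants to keep its own
    reference to a frame might do (a sketch only; error paths abbreviated,
    ret is an int):

      AVFrame *kept = av_frame_alloc();
      if (!kept)
          return AVERROR(ENOMEM);
      ret = av_frame_ref(kept, frame); /* new reference, owned by this code */
      if (ret < 0) {
          av_frame_free(&kept);
          return ret;
      }
      /* ... later ... */
      av_frame_free(&kept);            /* destroys this reference */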

  Reference ownership
  -------------------
@@ -72,13 +73,17 @@ Frame references ownership and permissions

    Here are the (fairly obvious) rules for reference ownership:

    * A reference received by the filter_frame method belongs to the
      corresponding filter.
    * A reference received by the filter_frame method (or its start_frame
      deprecated version) belongs to the corresponding filter.

    * A reference passed to ff_filter_frame is given away and must no longer
      be used.
      Special exception: for video references: the reference may be used
      internally for automatic copying and must not be destroyed before
      end_frame; it can be given away to ff_start_frame.

    * A reference created with av_frame_ref() belongs to the code that
    * A reference passed to ff_filter_frame (or the deprecated
      ff_start_frame) is given away and must no longer be used.

    * A reference created with avfilter_ref_buffer belongs to the code that
      created it.

    * A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
@@ -90,32 +95,89 @@ Frame references ownership and permissions
  Link reference fields
  ---------------------

    The AVFilterLink structure has a few AVFrame fields.

    partial_buf is used by libavfilter internally and must not be accessed
    by filters.

    fifo contains frames queued in the filter's input. They belong to the
    framework until they are taken by the filter.
    The AVFilterLink structure has a few AVFilterBufferRef fields. The
    cur_buf and out_buf were used with the deprecated
    start_frame/draw_slice/end_frame API and should no longer be used.
    src_buf and partial_buf are used by libavfilter internally
    and must not be accessed by filters.

  Reference permissions
  ---------------------

    Since the same frame data can be shared by several frames, modifying may
    have unintended consequences. A frame is considered writable if only one
    reference to it exists. The code owning that reference is then allowed
    to modify the data.
    The AVFilterBufferRef structure has a perms field that describes what
    the code that owns the reference is allowed to do to the buffer data.
    Different references for the same buffer can have different permissions.

    A filter can check if a frame is writable by using the
    av_frame_is_writable() function.
    For video filters that implement the deprecated
    start_frame/draw_slice/end_frame API, the permissions only apply to the
    parts of the buffer that have already been covered by the draw_slice
    method.

    A filter can ensure that a frame is writable at some point of the code
    by using the ff_inlink_make_frame_writable() function. It will duplicate
    the frame if needed.
    The value is a binary OR of the following constants:

    A filter can ensure that the frame passed to the filter_frame() callback
    is writable by setting the needs_writable flag on the corresponding
    input pad. It does not apply to the activate() callback.
    * AV_PERM_READ: the owner can read the buffer data; this is essentially
      always true and is there for self-documentation.

    * AV_PERM_WRITE: the owner can modify the buffer data.

    * AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
      will not be modified by previous filters.

    * AV_PERM_REUSE: the owner can output the buffer several times, without
      modifying the data in between.

    * AV_PERM_REUSE2: the owner can output the buffer several times and
      modify the data in between (useless without the WRITE permissions).

    * AV_PERM_ALIGN: the owner can access the data using fast operations
      that require data alignment.

    The READ, WRITE and PRESERVE permissions are about sharing the same
    buffer between several filters to avoid expensive copies without them
    doing conflicting changes on the data.

    The REUSE and REUSE2 permissions are about special memory for direct
    rendering. For example a buffer directly allocated in video memory must
    not be modified once it is displayed on screen, or it will cause tearing;
    it will therefore not have the REUSE2 permission.

    The ALIGN permission is about extracting part of the buffer, for
    copy-less padding or cropping for example.


    References received on input pads are guaranteed to have all the
    permissions stated in the min_perms field and none of the permissions
    stated in the rej_perms.

    References obtained by ff_get_video_buffer and ff_get_audio_buffer are
    guaranteed to have at least all the permissions requested as argument.

    References created by avfilter_ref_buffer have the same permissions as
    the original reference minus the ones explicitly masked; the mask is
    usually ~0 to keep the same permissions.

    Filters should remove permissions on reference they give to output
    whenever necessary. It can be automatically done by setting the
    rej_perms field on the output pad.

    Here are a few guidelines corresponding to common situations:

    * Filters that modify and forward their frame (like drawtext) need the
      WRITE permission.

    * Filters that read their input to produce a new frame on output (like
      scale) need the READ permission on input and must request a buffer
      with the WRITE permission.

    * Filters that intend to keep a reference after the filtering process
      is finished (after filter_frame returns) must have the PRESERVE
      permission on it and remove the WRITE permission if they create a new
      reference to give it away.

    * Filters that intend to modify a reference they have kept after the end
      of the filtering process need the REUSE2 permission and must remove
      the PRESERVE permission if they create a new reference to give it
      away.

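    As a concrete illustration of the current API described above, a
    filter_frame callback that modifies its input in place might look like
    this (a sketch only; the actual processing is elided):

      static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
      {
          AVFilterLink *outlink = inlink->dst->outputs[0];
          int ret;

          /* duplicates the frame if other references share its data */
          ret = ff_inlink_make_frame_writable(inlink, &frame);
          if (ret < 0) {
              av_frame_free(&frame);
              return ret;
          }

          /* ... modify frame->data in place ... */

          return ff_filter_frame(outlink, frame);
      }
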
Frame scheduling
@@ -127,100 +189,11 @@ Frame scheduling
    Simple filters that output one frame for each input frame should not have
    to worry about it.

    There are two designs for filters: one using the filter_frame() and
    request_frame() callbacks and the other using the activate() callback.

    The design using filter_frame() and request_frame() is legacy, but it is
    suitable for filters that have a single input and process one frame at a
    time. New filters with several inputs, that treat several frames at a time
    or that require a special treatment at EOF should probably use the design
    using activate().

  activate
  --------

    This method is called when something must be done in a filter; the
    definition of that "something" depends on the semantics of the filter.

    The callback must examine the status of the filter's links and proceed
    accordingly.

    The status of output links is stored in the frame_wanted_out, status_in
    and status_out fields and tested by the ff_outlink_frame_wanted()
    function. If this function returns true, then the processing requires a
    frame on this link and the filter is expected to make efforts in that
    direction.

    The status of input links is stored by the status_in, fifo and
    status_out fields; they must not be accessed directly. The fifo field
    contains the frames that are queued in the input for processing by the
    filter. The status_in and status_out fields contain the queued status
    (EOF or error) of the link; status_in is a status change that must be
    taken into account after all frames in fifo have been processed;
    status_out is the status that has been taken into account; it is final
    when it is not 0.

    The typical task of an activate callback is to first check the backward
    status of output links, and if relevant forward it to the corresponding
    input. Then, if relevant, for each input link: test the availability of
    frames in fifo and process them; if no frame is available, test and
    acknowledge a change of status using ff_inlink_acknowledge_status(); and
    forward the result (frame or status change) to the corresponding input.
    If nothing is possible, test the status of outputs and forward it to the
    corresponding input(s). If still not possible, return FFERROR_NOT_READY.

    If the filter stores internally one or a few frames for some input, it
    can consider them to be part of the FIFO and delay acknowledging a
    status change accordingly.

    Example code:

      ret = ff_outlink_get_status(outlink);
      if (ret) {
          ff_inlink_set_status(inlink, ret);
          return 0;
      }
      if (priv->next_frame) {
          /* use it */
          return 0;
      }
      ret = ff_inlink_consume_frame(inlink, &frame);
      if (ret < 0)
          return ret;
      if (ret) {
          /* use it */
          return 0;
      }
      ret = ff_inlink_acknowledge_status(inlink, &status, &pts);
      if (ret) {
          /* flush */
          ff_outlink_set_status(outlink, status, pts);
          return 0;
      }
      if (ff_outlink_frame_wanted(outlink)) {
          ff_inlink_request_frame(inlink);
          return 0;
      }
      return FFERROR_NOT_READY;

    The exact code depends on how similar the /* use it */ blocks are and
    how related they are to the /* flush */ block, and needs to apply these
    operations to the correct inlink or outlink if there are several.

    Macros are available to factor that when no extra processing is needed:

      FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
      FF_FILTER_FORWARD_STATUS_ALL(outlink, filter);
      FF_FILTER_FORWARD_STATUS(inlink, outlink);
      FF_FILTER_FORWARD_STATUS_ALL(inlink, filter);
      FF_FILTER_FORWARD_WANTED(outlink, inlink);

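    For the common single-input/single-output case, the same logic written
    with the factoring macros might look like this (a sketch only, for a
    pass-through filter):

      static int activate(AVFilterContext *ctx)
      {
          AVFilterLink *inlink  = ctx->inputs[0];
          AVFilterLink *outlink = ctx->outputs[0];
          AVFrame *frame;
          int ret;

          /* forward the backward status of the output to the input */
          FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

          ret = ff_inlink_consume_frame(inlink, &frame);
          if (ret < 0)
              return ret;
          if (ret)
              return ff_filter_frame(outlink, frame);

          /* no frame available: forward status changes and requests */
          FF_FILTER_FORWARD_STATUS(inlink, outlink);
          FF_FILTER_FORWARD_WANTED(outlink, inlink);

          return FFERROR_NOT_READY;
      }
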
  filter_frame
  ------------

    For filters that do not use the activate() callback, this method is
    called when a frame is pushed to the filter's input. It can be called at
    any time except in a reentrant way.
    This method is called when a frame is pushed to the filter's input. It
    can be called at any time except in a reentrant way.

    If the input frame is enough to produce output, then the filter should
    push the output frames on the output link immediately.
@@ -249,10 +222,9 @@ Frame scheduling
  request_frame
  -------------

    For filters that do not use the activate() callback, this method is
    called when a frame is wanted on an output.
    This method is called when a frame is wanted on an output.

    For a source, it should directly call filter_frame on the corresponding
    For an input, it should directly call filter_frame on the corresponding
    output.

    For a filter, if there are queued frames already ready, one of these
@@ -282,7 +254,16 @@ Frame scheduling
        }
        return 0;

    Note that, except for filters that can have queued frames and sources,
    request_frame does not push frames: it requests them to its input, and
    as a reaction, the filter_frame method possibly will be called and do
    the work.
    Note that, except for filters that can have queued frames, request_frame
    does not push frames: it requests them to its input, and as a reaction,
    the filter_frame method possibly will be called and do the work.


Legacy API
==========

    Until libavfilter 3.23, the filter_frame method was split:

    - for video filters, it was made of start_frame, draw_slice (that could be
      called several times on distinct parts of the frame) and end_frame;

    - for audio filters, it was called filter_samples.
1424
doc/filters.texi
File diff suppressed because it is too large
@@ -182,10 +182,9 @@ Default is 0.
Correct single timestamp overflows if set to 1. Default is 1.

@item flush_packets @var{integer} (@emph{output})
Flush the underlying I/O stream after each packet. Default is -1 (auto), which
means that the underlying protocol will decide, 1 enables it, and has the
effect of reducing the latency, 0 disables it and may increase IO throughput in
some cases.
Flush the underlying I/O stream after each packet. Default 1 enables it, and
has the effect of reducing the latency; 0 disables it and may slightly
increase performance in some cases.
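For programs using the libraries directly, @option{flush_packets} is exposed
as an @code{AVOption} on the output @code{AVFormatContext}; a minimal sketch,
assuming @code{ofmt_ctx} is an already-allocated output context:

@example
#include <libavformat/avformat.h>
#include <libavutil/opt.h>

/* same effect as setting "-flush_packets 1" on the command line */
av_opt_set_int(ofmt_ctx, "flush_packets", 1, 0);
@end example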
@item output_ts_offset @var{offset} (@emph{output})
Set the output time offset.
@@ -101,14 +101,6 @@ Go to @url{http://www.wavpack.com/} and follow the instructions for
installing the library. Then pass @code{--enable-libwavpack} to configure to
enable it.

@section libxavs

FFmpeg can make use of the libxavs library for Xavs encoding.

Go to @url{http://xavs.sf.net/} and follow the instructions for
installing the library. Then pass @code{--enable-libxavs} to configure to
enable it.

@section OpenH264

FFmpeg can make use of the OpenH264 library for H.264 encoding and decoding.
@@ -196,7 +188,7 @@ For Linux and OS X, the supported AviSynth variant is

@float NOTE
There is currently a regression in AviSynth+'s @code{capi.h} header as of
October 2016, which interferes with the ability for builds of FFmpeg to use
October 2016, which interferes with the ability for builds of Libav to use
MSVC-built binaries of AviSynth. Until this is resolved, you can make sure
a known good version is installed by checking out a version from before
the regression occurred:
@@ -341,7 +333,6 @@ library:
@item FunCom ISS @tab @tab X
@tab Audio format used in various games from FunCom like The Longest Journey.
@item G.723.1 @tab X @tab X
@item G.726 @tab @tab X @tab Both left- and right-justified.
@item G.729 BIT @tab X @tab X
@item G.729 raw @tab @tab X
@item GENH @tab @tab X
@@ -521,7 +512,6 @@ library:
@tab Multimedia format used by many games.
@item SMJPEG @tab X @tab X
@tab Used in certain Loki game ports.
@item SMPTE 337M encapsulation @tab @tab X
@item Smush @tab @tab X
@tab Multimedia format used in some LucasArts games.
@item Sony OpenMG (OMA) @tab X @tab X
@@ -530,7 +520,7 @@ library:
@item Sony Wave64 (W64) @tab X @tab X
@item SoX native format @tab X @tab X
@item SUN AU format @tab X @tab X
@item SUP raw PGS subtitles @tab X @tab X
@item SUP raw PGS subtitles @tab @tab X
@item SVAG @tab @tab X
@tab Audio format used in Konami PS2 games.
@item TDSC @tab @tab X
@@ -593,8 +583,6 @@ following image formats are supported:
@tab Digital Picture Exchange
@item EXR @tab @tab X
@tab OpenEXR
@item FITS @tab X @tab X
@tab Flexible Image Transport System
@item JPEG @tab X @tab X
@tab Progressive JPEG is not supported.
@item JPEG 2000 @tab X @tab X
@@ -715,7 +703,7 @@ following image formats are supported:
@item DFA @tab @tab X
@tab Codec used in Chronomaster game.
@item Dirac @tab E @tab X
@tab supported though the native vc2 (Dirac Pro) encoder
@tab supported through external library libschroedinger
@item Deluxe Paint Animation @tab @tab X
@item DNxHD @tab X @tab X
@tab aka SMPTE VC3
@@ -793,7 +781,6 @@ following image formats are supported:
@tab Used in LucasArts games / SMUSH animations.
@item lossless MJPEG @tab X @tab X
@item MagicYUV Video @tab @tab X
@item Mandsoft Screen Capture Codec @tab @tab X
@item Microsoft ATC Screen @tab @tab X
@tab Also known as Microsoft Screen 3.
@item Microsoft Expression Encoder Screen @tab @tab X
@@ -861,7 +848,6 @@ following image formats are supported:
@tab used in some games by Entertainment Software Partners
@item ScreenPressor @tab @tab X
@item Screenpresso @tab @tab X
@item Screen Recorder Gold Codec @tab @tab X
@item Sierra VMD video @tab @tab X
@tab Used in Sierra VMD files.
@item Silicon Graphics Motion Video Compressor 1 (MVC1) @tab @tab X
@@ -1005,7 +991,6 @@ following image formats are supported:
@tab All versions except 5.1 are supported.
@item DCA (DTS Coherent Acoustics) @tab X @tab X
@tab supported extensions: XCh, XXCH, X96, XBR, XLL, LBR (partially)
@item Dolby E @tab @tab X
@item DPCM id RoQ @tab X @tab X
@tab Used in Quake III, Jedi Knight 2 and other computer games.
@item DPCM Interplay @tab @tab X
@@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it.
@section Cloning the source tree

@example
git clone https://git.ffmpeg.org/ffmpeg.git <target>
git clone git://source.ffmpeg.org/ffmpeg <target>
@end example

This will put the FFmpeg sources into the directory @var{<target>}.
@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions.
git add [-i|-p|-A] <filenames/dirnames>
@end example

Make sure you have told Git your name, email address and GPG key
Make sure you have told Git your name and email address

@example
git config --global user.name "My Name"
git config --global user.email my@@email.invalid
git config --global user.signingkey ABCDEF0123245
@end example

Enable signing all commits or use -S

@example
git config --global commit.gpgsign true
@end example

Use @option{--global} to set the global configuration for all your Git checkouts.
@@ -400,19 +393,6 @@ git checkout -b svn_23456 $SHA1
where @var{$SHA1} is the commit hash from the @command{git log} output.


@chapter gpg key generation

If you have no gpg key yet, we recommend that you create a ed25519 based key as it
is small, fast and secure. Especially it results in small signatures in git.

@example
gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com"
@end example

When generating a key, make sure the email specified matches the email used in git as some sites like
github consider mismatches a reason to declare such commits unverified. After generating a key you
can add it to the MAINTAINER file and upload it to a keyserver.

@chapter Pre-push checklist

Once you have a set of commits that you feel are ready for pushing,
232
doc/indevs.texi
@@ -68,6 +68,7 @@ Set the number of channels. Default is 2.
|
||||
AVFoundation input device.
|
||||
|
||||
AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
|
||||
The older QTKit framework has been marked deprecated since OSX version 10.7.
|
||||
|
||||
The input filename has to be given in the following syntax:
|
||||
@example
|
||||
@@ -214,9 +215,8 @@ need to configure with the appropriate @code{--extra-cflags}
|
||||
and @code{--extra-ldflags}.
|
||||
On Windows, you need to run the IDL files through @command{widl}.
|
||||
|
||||
DeckLink is very picky about the formats it supports. Pixel format of the
|
||||
input can be set with @option{raw_format}.
|
||||
Framerate and video size must be determined for your device with
|
||||
DeckLink is very picky about the formats it supports. Pixel format is
|
||||
uyvy422 or v210, framerate and video size must be determined for your device with
|
||||
@command{-list_formats 1}. Audio sample rate is always 48 kHz and the number
|
||||
of channels can be 2, 8 or 16. Note that all audio channels are bundled in one single
|
||||
audio track.
|
||||
@@ -240,41 +240,18 @@ Note that there is a FourCC @option{'pal '} that can also be used
|
||||
as @option{pal} (3 letters).
|
||||
|
||||
@item bm_v210
|
||||
This is a deprecated option, you can use @option{raw_format} instead.
|
||||
If set to @samp{1}, video is captured in 10 bit v210 instead
|
||||
of uyvy422. Not all Blackmagic devices support this option.
|
||||
|
||||
@item raw_format
|
||||
Set the pixel format of the captured video.
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item uyvy422
|
||||
|
||||
@item yuv422p10
|
||||
|
||||
@item argb
|
||||
|
||||
@item bgra
|
||||
|
||||
@item rgb10
|
||||
|
||||
@end table
|
||||
|
||||
@item teletext_lines
|
||||
If set to nonzero, an additional teletext stream will be captured from the
|
||||
vertical ancillary data. Both SD PAL (576i) and HD (1080i or 1080p)
|
||||
sources are supported. In case of HD sources, OP47 packets are decoded.
|
||||
|
||||
This option is a bitmask of the SD PAL VBI lines captured, specifically lines 6
|
||||
to 22, and lines 318 to 335. Line 6 is the LSB in the mask. Selected lines
|
||||
which do not contain teletext information will be ignored. You can use the
|
||||
special @option{all} constant to select all possible lines, or
|
||||
@option{standard} to skip lines 6, 318 and 319, which are not compatible with
|
||||
all receivers.
|
||||
|
||||
For SD sources, ffmpeg needs to be compiled with @code{--enable-libzvbi}. For
|
||||
HD sources, on older (pre-4K) DeckLink card models you have to capture in 10
|
||||
bit mode.
|
||||
vertical ancillary data. This option is a bitmask of the VBI lines checked,
|
||||
specifically lines 6 to 22, and lines 318 to 335. Line 6 is the LSB in the mask.
|
||||
Selected lines which do not contain teletext information will be ignored. You
|
||||
can use the special @option{all} constant to select all possible lines, or
|
||||
@option{standard} to skip lines 6, 318 and 319, which are not compatible with all
|
||||
receivers. Capturing teletext only works for SD PAL sources in 8 bit mode.
|
||||
To use this option, ffmpeg needs to be compiled with @code{--enable-libzvbi}.
@item channels
Defines the number of audio channels to capture. Must be @samp{2}, @samp{8} or @samp{16}.
@@ -306,11 +283,6 @@ Sets the audio packet timestamp source. Must be @samp{video}, @samp{audio},
If set to @samp{true}, color bars are drawn in the event of a signal loss.
Defaults to @samp{true}.

@item queue_size
Sets maximum input buffer size in bytes. If the buffering reaches this value,
incoming frames will be dropped.
Defaults to @samp{1073741824}.

@end table

@subsection Examples
@@ -332,129 +304,19 @@ ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
@item
Capture video clip at 1080i50:
@example
ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -c:a copy -c:v copy output.avi
ffmpeg -format_code Hi50 -f decklink -i 'Intensity Pro' -acodec copy -vcodec copy output.avi
@end example

@item
Capture video clip at 1080i50 10 bit:
@example
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
ffmpeg -bm_v210 1 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -acodec copy -vcodec copy output.avi
@end example

@item
Capture video clip at 1080i50 with 16 audio channels:
@example
ffmpeg -channels 16 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -c:a copy -c:v copy output.avi
@end example

@end itemize

@section kmsgrab

KMS video input device.

Captures the KMS scanout framebuffer associated with a specified CRTC or plane as a
DRM object that can be passed to other hardware functions.

Requires either DRM master or CAP_SYS_ADMIN to run.

If you don't understand what all of that means, you probably don't want this. Look at
@option{x11grab} instead.

@subsection Options

@table @option

@item device
DRM device to capture on. Defaults to @option{/dev/dri/card0}.

@item format
Pixel format of the framebuffer. Defaults to @option{bgr0}.

@item format_modifier
Format modifier to signal on output frames. This is necessary to import correctly into
some APIs, but can't be autodetected. See the libdrm documentation for possible values.

@item crtc_id
KMS CRTC ID to define the capture source. The first active plane on the given CRTC
will be used.

@item plane_id
KMS plane ID to define the capture source. Defaults to the first active plane found if
neither @option{crtc_id} nor @option{plane_id} are specified.

@item framerate
Framerate to capture at. This is not synchronised to any page flipping or framebuffer
changes - it just defines the interval at which the framebuffer is sampled. Sampling
faster than the framebuffer update rate will generate independent frames with the same
content. Defaults to @code{30}.

@end table

@subsection Examples

@itemize

@item
Capture from the first active plane, download the result to normal frames and encode.
This will only work if the framebuffer is both linear and mappable - if not, the result
may be scrambled or fail to download.
@example
ffmpeg -f kmsgrab -i - -vf 'hwdownload,format=bgr0' output.mp4
@end example

@item
Capture from CRTC ID 42 at 60fps, map the result to VAAPI, convert to NV12 and encode as H.264.
@example
ffmpeg -crtc_id 42 -framerate 60 -f kmsgrab -i - -vf 'hwmap=derive_device=vaapi,scale_vaapi=w=1920:h=1080:format=nv12' -c:v h264_vaapi output.mp4
@end example

@end itemize

@section libndi_newtek

The libndi_newtek input device provides capture capabilities for using NDI (Network
Device Interface, a standard created by NewTek).

The input filename is an NDI source name, which can be discovered by passing
@option{-find_sources 1} on the command line - it has no specific syntax, but is
human-readable.

To enable this input device, you need the NDI SDK and you
need to configure with the appropriate @code{--extra-cflags}
and @code{--extra-ldflags}.

@subsection Options

@table @option

@item find_sources
If set to @option{true}, print a list of found/available NDI sources and exit.
Defaults to @option{false}.

@item wait_sources
Override the time to wait until the number of online sources has changed.
Defaults to @option{0.5}.

@item allow_video_fields
When this flag is @option{false}, all video that you receive will be progressive.
Defaults to @option{true}.

@end table

@subsection Examples

@itemize

@item
List input devices:
@example
ffmpeg -f libndi_newtek -find_sources 1 -i dummy
@end example

@item
Restream to NDI:
@example
ffmpeg -f libndi_newtek -i "DEV-5.INTERNAL.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2
ffmpeg -channels 16 -format_code Hi50 -f decklink -i 'UltraStudio Mini Recorder' -acodec copy -vcodec copy output.avi
@end example

@end itemize
@@ -659,6 +521,31 @@ $ ffmpeg -f dshow -show_video_device_dialog true -crossbar_video_input_pin_numbe

@end itemize

@section dv1394

Linux DV 1394 input device.

@subsection Options

@table @option

@item framerate
Set the frame rate. Default is 25.

@item standard

Available values are:
@table @samp
@item pal

@item ntsc

@end table

Default value is @code{ntsc}.

@end table
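
As an illustration, a minimal capture sketch (assuming the default
@file{/dev/dv1394/0} device node created by the Linux dv1394 driver):
@example
ffmpeg -f dv1394 -standard pal -framerate 25 -i /dev/dv1394/0 out.avi
@end example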

@section fbdev

Linux framebuffer input device.
@@ -1195,6 +1082,49 @@ Record a stream from default device:
ffmpeg -f pulse -i default /tmp/pulse.wav
@end example

@section qtkit

QTKit input device.

The filename passed as input is parsed to contain either a device name or index.
The device index can also be given by using -video_device_index.
A given device index will override any given device name.
If the desired device consists of numbers only, use -video_device_index to identify it.
The default device will be chosen if an empty string or the device name "default" is given.
The available devices can be enumerated by using -list_devices.

@example
ffmpeg -f qtkit -i "0" out.mpg
@end example

@example
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
@end example

@example
ffmpeg -f qtkit -i "default" out.mpg
@end example

@example
ffmpeg -f qtkit -list_devices true -i ""
@end example

@subsection Options

@table @option

@item frame_rate
Set frame rate. Default is 30.

@item list_devices
If set to @code{true}, print a list of devices and exit. Default is
@code{false}.

@item video_device_index
Select the video device by index for devices with the same name (starts at 0).

@end table

@section sndio

sndio input device.
@@ -96,18 +96,17 @@ Stuff that didn't reach the codebase:
- e7078e842 hevcdsp: add x86 SIMD for MC
- VAAPI VP8 decode hwaccel (currently under review: http://ffmpeg.org/pipermail/ffmpeg-devel/2017-February/thread.html#207348)
- Removal of the custom atomic API (5cc0057f49, see http://ffmpeg.org/pipermail/ffmpeg-devel/2017-March/209003.html)
- new bitstream reader (see http://ffmpeg.org/pipermail/ffmpeg-devel/2017-April/209609.html)
- use of the bsf instead of our parser for vp9 superframes (see fa1749dd34)
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
- Use the new bitstream filter for extracting extradata (8e2ea69135 and 096a8effa3, see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-March/209068.html)
- Read aac_adtstoasc extradata updates from packet side data on Matroska once mov and the bsf in question are fixed (See 13a211e632 and 5ef1959080)

Collateral damage that needs work locally:
------------------------------------------

- Merge proresdec2.c and proresdec_lgpl.c
- Merge proresenc_anatoliy.c and proresenc_kostya.c
- Remove ADVANCED_PARSER in libavcodec/hevc_parser.c
- Fix MIPS AC3 downmix
- hlsenc encryption support may need some adjustment (see edc43c571d)

Extra changes needed to be aligned with Libav:
----------------------------------------------

@@ -1,366 +0,0 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8

@settitle FFmpeg Mailing List FAQ
@titlepage
@center @titlefont{FFmpeg Mailing List FAQ}
@end titlepage

@top

@contents

@chapter General Questions

@section What is a mailing list?

A mailing list is not much different than emailing someone, but the
main difference is that your message is received by everyone who
subscribes to the list. It is somewhat like a forum but in email form.

See the @url{https://lists.ffmpeg.org/pipermail/ffmpeg-user/, ffmpeg-user archives}
for examples.

@section What type of questions can I ask?

@itemize
@item
@url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user}:
For questions involving unscripted usage or compilation of the FFmpeg
command-line tools (@command{ffmpeg}, @command{ffprobe}, @command{ffplay},
@command{ffserver}).

@item
@url{https://lists.ffmpeg.org/mailman/listinfo/libav-user/, libav-user}:
For questions involving the FFmpeg libav* libraries (libavcodec,
libavformat, libavfilter, etc).

@item
@url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel/, ffmpeg-devel}:
For discussions involving the development of FFmpeg and for submitting
patches. User questions should be asked at ffmpeg-user or libav-user.
@end itemize

To report a bug see @url{https://ffmpeg.org/bugreports.html}.

We cannot provide help for scripts and/or third-party tools.

@anchor{How do I ask a question or send a message to a mailing list?}
@section How do I ask a question or send a message to a mailing list?

All you have to do is send an email:

@itemize
@item
Email @email{ffmpeg-user@@ffmpeg.org} to send a message to the
ffmpeg-user mailing list.

@item
Email @email{libav-user@@ffmpeg.org} to send a message to the
libav-user mailing list.
@end itemize

If you are not subscribed to the mailing list then your question must be
manually approved. Approval may take several days, but the wait is
usually less. If you want the message to be sent with no delay then you
must subscribe first. See @ref{How do I subscribe?}

Please do not send a message, subscribe, and re-send the message: this
results in duplicates, causes more work for the admins, and may lower
your chance at getting an answer. However, you may do so if you first
@ref{How do I delete my message in the moderation queue?, delete your original message from the moderation queue}.

@chapter Subscribing / Unsubscribing

@section What does subscribing do?

Subscribing allows two things:

@itemize
@item
Your messages will show up in the mailing list without waiting in the
moderation queue and needing to be manually approved by a mailing list
admin.

@item
You will receive all messages to the mailing list including replies to
your messages. Non-subscribed users do not receive any messages.
@end itemize

@section Do I need to subscribe?

No. You can still send a message to the mailing list without
subscribing. See @ref{How do I ask a question or send a message to a mailing list?}

However, your message will need to be manually approved by a mailing
list admin, and you will not receive any mailing list messages or
replies.

You can ask to be CCd in your message, but replying users will
sometimes forget to do so.

You may also view and reply to messages via the @ref{Where are the archives?, archives}.

@anchor{How do I subscribe?}
@section How do I subscribe?

Email @email{ffmpeg-user-request@@ffmpeg.org} with the subject
@emph{subscribe}.

Or visit the @url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user mailing list info page}
and refer to the @emph{Subscribing to ffmpeg-user} section.

The process is the same for the other mailing lists.

@section How do I unsubscribe?

Email @email{ffmpeg-user-request@@ffmpeg.org} with subject @emph{unsubscribe}.

Or visit the @url{https://lists.ffmpeg.org/mailman/listinfo/ffmpeg-user/, ffmpeg-user mailing list info page},
scroll to bottom of page, enter your email address in the box, and click
the @emph{Unsubscribe or edit options} button.

The process is the same for the other mailing lists.

Please avoid asking a mailing list admin to unsubscribe you unless you
are absolutely unable to do so by yourself. See @ref{Who do I contact if I have a problem with the mailing list?}

@chapter Moderation Queue
@anchor{Why is my message awaiting moderator approval?}
@section Why is my message awaiting moderator approval?

Some messages are automatically held in the @emph{moderation queue} and
must be manually approved by a mailing list admin. These are:

@itemize
@item
Messages from users who are @strong{not} subscribed.

@item
Messages that exceed the @ref{What is the message size limit?, message size limit}.

@item
Messages from users whose accounts have been set with the @emph{moderation flag}
(very rarely occurs, but may if a user repeatedly ignores the rules
or is abusive towards others).
@end itemize

@section How long does it take for my message in the moderation queue to be approved?

The queue is usually checked once or twice a day, but on occasion
several days may pass before someone checks the queue.

@anchor{How do I delete my message in the moderation queue?}
@section How do I delete my message in the moderation queue?

You should have received an email with the subject @emph{Your message to ffmpeg-user awaits moderator approval}.
A link is in the message that will allow you to delete your message
unless a mailing list admin already approved or rejected it.

@chapter Archives

@anchor{Where are the archives?}
@section Where are the archives?

See the @emph{Archives} section on the @url{https://ffmpeg.org/contact.html, FFmpeg Contact}
page for links to all FFmpeg mailing list archives.

Note that the archives are split by month. Discussions that span
several months will be split into separate months in the archives.

@section How do I reply to a message in the archives?

Click the email link at the top of the message just under the subject
title. The link will provide the proper headers to keep the message
within the thread.

@section How do I search the archives?

Perform a site search using your favorite search engine. Example:

@t{site:lists.ffmpeg.org/pipermail/ffmpeg-user/ "search term"}

@chapter Other

@section Is there an alternative to the mailing list?

You can ask for help in the official @t{#ffmpeg} IRC channel on Freenode.

Some users prefer the third-party Nabble interface which presents the
mailing lists in a typical forum layout.

There are also numerous third-party help sites such as Super User and
r/ffmpeg on reddit.

@anchor{What is top-posting?}
@section What is top-posting?

See @url{https://en.wikipedia.org/wiki/Posting_style#Top-posting}.

Instead, use trimmed interleaved/inline replies (@url{https://lists.ffmpeg.org/pipermail/ffmpeg-user/2017-April/035849.html, example}).

@anchor{What is the message size limit?}
@section What is the message size limit?

The message size limit is 500 kilobytes for the user lists and 1000
kilobytes for ffmpeg-devel. Please provide links to larger files instead
of attaching them.

@section Where can I upload sample files?

Anywhere that is not too annoying for us to use.

Google Drive and Dropbox are acceptable if you need a file host, and
0x0.st is good for files under 256 MiB.

Small, short samples are preferred if possible.

@section Will I receive spam if I send and/or subscribe to a mailing list?

Highly unlikely.

@itemize
@item
The list of subscribed users is not public.

@item
Email addresses in the archives are obfuscated.

@item
Several unique test email accounts were utilized and none have yet
received any spam.
@end itemize

However, you may see spam in the mailing lists on rare occasions:

@itemize
@item
Spam in the moderation queue may be accidentally approved due to human
error.

@item
There have been a few messages from subscribed users who had their own
email addresses hacked and spam messages from (or appearing to be from)
the hacked account were sent to their contacts (a mailing list being a
contact in these cases).

@item
If you are subscribed to the bug tracker mailing list (ffmpeg-trac) you
may see the occasional spam as a false bug report, but we take measures
to try to prevent this.
@end itemize

@section How do I filter mailing list messages?

Use the @emph{List-Id}. For example, the ffmpeg-user mailing list is
@t{ffmpeg-user.ffmpeg.org}. You can view the List-Id in the raw message
or headers.

You can then filter the mailing list messages to their own folder.
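
For example, a procmail recipe keyed on that List-Id could look like the
following sketch (the destination folder name is arbitrary):

@example
:0:
* ^List-Id:.*ffmpeg-user\.ffmpeg\.org
ffmpeg-user/
@end example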

@chapter Rules and Etiquette

@section What are the rules and the proper etiquette?

There may seem to be many things to remember, but we want to help and
following these guidelines will allow you to get answers more quickly
and help avoid getting ignored.

@itemize
@item
Always show your actual, unscripted @command{ffmpeg} command and the
complete, uncut console output from your command.

@item
Use the most simple and minimal command that still shows the issue you
are encountering.

@item
Provide all necessary information so others can attempt to duplicate
your issue. This includes the actual command, complete uncut console
output, and any inputs that are required to duplicate the issue.

@item
Use the latest @command{ffmpeg} build you can get. See the @url{https://ffmpeg.org/download.html, FFmpeg Download}
page for links to recent builds for Linux, macOS, and Windows. Or
compile from the current git master branch.

@item
Avoid @url{https://en.wikipedia.org/wiki/Posting_style#Top-posting, top-posting}.
Also see @ref{What is top-posting?}

@item
Avoid hijacking threads. Thread hijacking is replying to a message and
changing the subject line to something unrelated to the original thread.
Most email clients will still show the renamed message under the
original thread. This can be confusing and these types of messages are
often ignored.

@item
Do not send screenshots. Copy and paste console text instead of making
screenshots of the text.

@item
Avoid sending email disclaimers and legalese if possible as this is a
public list.

@item
Avoid using the @code{-loglevel debug}, @code{-loglevel quiet}, and
@command{-hide_banner} options unless requested to do so.

@item
If you attach files avoid compressing small files. Uncompressed is
preferred.

@item
Please do not send HTML-only messages. The mailing list will ignore the
HTML component of your message. Most mail clients will automatically
include a text component: this is what the mailing list will use.

@item
Configuring your mail client to break lines after 70 or so characters is
recommended.

@item
Avoid sending the same message to multiple mailing lists.

@item
Please follow our @url{https://ffmpeg.org/developer.html#Code-of-conduct, Code of Conduct}.
@end itemize

@chapter Help

@section Why am I not receiving any messages?

Some email providers have blacklists or spam filters that block or mark
the mailing list messages as false positives. Unfortunately, the user is
often not aware of this, and it is often out of their control.

When possible we attempt to notify the provider to be removed from the
blacklists or filters.

@section Why are my sent messages not showing up?

Excluding @ref{Why is my message awaiting moderator approval?, messages that are held in the moderation queue}
there are a few other reasons why your messages may fail to appear:

@itemize
@item
HTML-only messages are ignored by the mailing lists. Most mail clients
automatically include a text component alongside HTML email: this is what
the mailing list will use. If it does not then consider your client to be
broken, because sending a text component along with the HTML component to
form a multi-part message is recommended by email standards.

@item
Check your spam folder.
@end itemize

@anchor{Who do I contact if I have a problem with the mailing list?}
@section Who do I contact if I have a problem with the mailing list?

Send a message to @email{ffmpeg-user-owner@@ffmpeg.org}.

@bye

116
doc/muxers.texi
@@ -194,68 +194,6 @@ Used to facilitate seeking; particularly for HTTP pseudo streaming.
@end table
@end table

@anchor{dash}
@section dash

Dynamic Adaptive Streaming over HTTP (DASH) muxer that creates segments
and manifest files according to the MPEG-DASH standard ISO/IEC 23009-1:2014.

For more information see:

@itemize @bullet
@item
ISO DASH Specification: @url{http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip}
@item
WebM DASH Specification: @url{https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification}
@end itemize

It creates a MPD manifest file and segment files for each stream.

The segment filename might contain pre-defined identifiers used with SegmentTemplate
as defined in section 5.3.9.4.4 of the standard. Available identifiers are "$RepresentationID$",
"$Number$", "$Bandwidth$" and "$Time$".

@example
ffmpeg -re -i <input> -map 0 -map 0 -c:a libfdk_aac -c:v libx264
-b:v:0 800k -b:v:1 300k -s:v:1 320x170 -profile:v:1 baseline
-profile:v:0 main -bf 1 -keyint_min 120 -g 120 -sc_threshold 0
-b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1
-window_size 5 -adaptation_sets "id=0,streams=v id=1,streams=a"
-f dash /path/to/out.mpd
@end example

@table @option
@item -min_seg_duration @var{microseconds}
Set the segment length in microseconds.
@item -window_size @var{size}
Set the maximum number of segments kept in the manifest.
@item -extra_window_size @var{size}
Set the maximum number of segments kept outside of the manifest before removing from disk.
@item -remove_at_exit @var{remove}
Enable (1) or disable (0) removal of all segments when finished.
@item -use_template @var{template}
Enable (1) or disable (0) use of SegmentTemplate instead of SegmentList.
@item -use_timeline @var{timeline}
Enable (1) or disable (0) use of SegmentTimeline in SegmentTemplate.
@item -single_file @var{single_file}
Enable (1) or disable (0) storing all segments in one file, accessed using byte ranges.
@item -single_file_name @var{file_name}
DASH-templated name to be used for baseURL. Implies @var{single_file} set to "1".
@item -init_seg_name @var{init_name}
DASH-templated name to be used for the initialization segment. Default is "init-stream$RepresentationID$.m4s"
@item -media_seg_name @var{segment_name}
DASH-templated name to be used for the media segments. Default is "chunk-stream$RepresentationID$-$Number%05d$.m4s"
@item -utc_timing_url @var{utc_url}
URL of the page that will return the UTC timestamp in ISO format. Example: "https://time.akamai.com/?iso"
@item -adaptation_sets @var{adaptation_sets}
Assign streams to AdaptationSets. Syntax is "id=x,streams=a,b,c id=y,streams=d,e" with x and y being the IDs
of the adaptation sets and a,b,c,d and e being the indices of the mapped streams.

To map all video (or audio) streams to an AdaptationSet, "v" (or "a") can be used as a stream identifier instead of IDs.

When no assignment is defined, this defaults to an AdaptationSet for each stream.
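
As an illustration, the following sketch (assuming an output whose streams 0
and 1 are video and whose remaining streams are audio) puts the two video
streams in one AdaptationSet and all audio streams in another:

@example
-adaptation_sets "id=0,streams=0,1 id=1,streams=a"
@end example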
@end table

@anchor{framecrc}
@section framecrc

@@ -613,7 +551,7 @@ format. The optional third line specifies the initialization vector (IV) as a
hexadecimal string to be used instead of the segment sequence number (default)
for encryption. Changes to @var{key_info_file} will result in segment
encryption with the new key/IV and an entry in the playlist for the new key
URI/IV if @code{hls_flags periodic_rekey} is enabled.
URI/IV.

Key info file format:
@example
@@ -659,39 +597,6 @@ ffmpeg -f lavfi -re -i testsrc -c:v h264 -hls_flags delete_segments \
-hls_key_info_file file.keyinfo out.m3u8
@end example

@item -hls_enc @var{enc}
Enable (1) or disable (0) the AES128 encryption.
When enabled every segment generated is encrypted and the encryption key
is saved as @var{playlist name}.key.

@item -hls_enc_key @var{key}
Hex-coded 16-byte key to encrypt the segments; by default it
is randomly generated.

@item -hls_enc_key_url @var{keyurl}
If set, @var{keyurl} is prepended instead of @var{baseurl} to the key filename
in the playlist.

@item -hls_enc_iv @var{iv}
Hex-coded 16-byte initialization vector for every segment instead
of the autogenerated ones.
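
As a sketch, an encrypted HLS session using these options could look like the
following (the key and IV values here are arbitrary placeholders, not
defaults):

@example
ffmpeg -i in.mp4 -c copy -hls_enc 1 \
  -hls_enc_key 0123456789abcdef0123456789abcdef \
  -hls_enc_iv 000102030405060708090a0b0c0d0e0f out.m3u8
@end example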

@item hls_segment_type @var{flags}
Possible values:

@table @samp
@item mpegts
If this flag is set, the hls segment files will be in MPEG-TS format.
MPEG-TS segments can be used with all HLS versions.

@item fmp4
If this flag is set, the hls segment files will be in fragmented MP4 format,
similar to DASH. fMP4 segments can be used with HLS version 7 and newer.

@end table

@item hls_fmp4_init_filename @var{filename}
Set the filename for the fragment files' header file; the default filename is @file{init.mp4}.

@item hls_flags @var{flags}
Possible values:
@@ -727,11 +632,6 @@ first segment's information.
@item omit_endlist
Do not append the @code{EXT-X-ENDLIST} tag at the end of the playlist.

@item periodic_rekey
The file specified by @code{hls_key_info_file} will be checked periodically to
detect updates to the encryption info. Be sure to replace this file atomically,
including the file containing the AES encryption key.

@item split_by_time
Allow segments to start on frames other than keyframes. This improves
behavior on some players when the time between keyframes is inconsistent,
@@ -790,10 +690,6 @@ server using the HTTP PUT method, and update the m3u8 files every
@code{refresh} times using the same method.
Note that the HTTP server must support the given method for uploading
files.

@item http_user_agent
Override User-Agent field in HTTP header. Applicable only for HTTP output.

@end table

@anchor{ico}
@@ -898,7 +794,7 @@ ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"

@table @option
@item start_number
Start the sequence from the specified number. Default value is 1.
Start the sequence from the specified number. Default value is 0.

@item update
If set to 1, the filename will always be interpreted as just a
@@ -1117,12 +1013,6 @@ on the implicit end of the previous track fragment).
@item -write_tmcd
Specify @code{on} to force writing a timecode track, @code{off} to disable it
and @code{auto} to write a timecode track only for mov and mp4 output (default).

@item -movflags negative_cts_offsets
Enables utilization of version 1 of the CTTS box, in which the CTS offsets can
be negative. This enables the initial sample to have DTS/CTS of zero, and
reduces the need for edit lists for some cases such as video tracks with
B-frames. Additionally, eases conformance with the DASH-IF interoperability
guidelines.
@end table

@subsection Example
@@ -1634,7 +1524,7 @@ inconsistent, but may make things worse on others, and can cause some oddities
during seeking. Defaults to @code{0}.

@item reset_timestamps @var{1|0}
Reset timestamps at the beginning of each segment, so that each segment
Reset timestamps at the begin of each segment, so that each segment
will start with near-zero timestamps. It is meant to ease the playback
of the generated segments. May not work with some combinations of
muxers/codecs. It is set to @code{0} by default.
@@ -161,8 +161,8 @@ do{
For x86, mark registers that are clobbered in your asm. This means both
general x86 registers (e.g. eax) as well as XMM registers. This last one is
particularly important on Win64, where xmm6-15 are callee-save, and not
restoring their contents leads to undefined results. In external asm,
you do this by using:
restoring their contents leads to undefined results. In external asm (e.g.
yasm), you do this by using:
cglobal function_name, num_args, num_regs, num_xmm_regs
In inline asm, you specify clobbered registers at the end of your asm:
__asm__(".." ::: "%eax").
@@ -199,12 +199,12 @@ actual lines causing issues.
Inline asm vs. external asm
---------------------------
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
and external asm (.s or .asm files, handled by an assembler such as nasm/yasm)
and external asm (.s or .asm files, handled by an assembler such as yasm/nasm)
are accepted in FFmpeg. Which one to use differs per specific case.

- if your code is intended to be inlined in a C function, inline asm is always
  better, because external asm cannot be inlined
- if your code calls external functions, external asm is always better
- if your code calls external functions, yasm is always better
- if your code takes huge and complex structs as function arguments (e.g.
  MpegEncContext; note that this is not ideal and is discouraged if there
  are alternatives), then inline asm is always better, because predicting
@@ -104,7 +104,7 @@ The argument must be one of @code{algorithms}, @code{antialiases},
The following command shows the @command{ffmpeg} output in a
CACA window, forcing its size to 80x25:
@example
ffmpeg -i INPUT -c:v rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
@end example

@item
@@ -182,51 +182,6 @@ ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLi

@end itemize

@section libndi_newtek

The libndi_newtek output device provides playback capabilities for using NDI (Network
Device Interface, a standard created by NewTek).

The output filename is an NDI name.

To enable this output device, you need the NDI SDK and you
need to configure with the appropriate @code{--extra-cflags}
and @code{--extra-ldflags}.

NDI uses uyvy422 pixel format natively, but also supports bgra, bgr0, rgba and
rgb0.

@subsection Options

@table @option

@item reference_level
The audio reference level in dB. This specifies how many dB above the
reference level (+4dBU) is the full range of 16 bit audio.
Defaults to @option{0}.

@item clock_video
This specifies whether the video frames "clock" themselves.
Defaults to @option{false}.

@item clock_audio
This specifies whether the audio samples "clock" themselves.
Defaults to @option{false}.

@end table

@subsection Examples

@itemize

@item
Play video clip:
@example
ffmpeg -i "udp://@@239.1.1.1:10480?fifo_size=1000000&overrun_nonfatal=1" -vf "scale=720:576,fps=fps=25,setdar=dar=16/9,format=pix_fmts=uyvy422" -f libndi_newtek NEW_NDI1
@end example

@end itemize

@section fbdev

Linux framebuffer output device.
@@ -251,7 +206,7 @@ Set x/y coordinate of top left corner. Default is 0.
Play a file on framebuffer device @file{/dev/fb0}.
Required pixel format depends on current framebuffer settings.
@example
ffmpeg -re -i INPUT -c:v rawvideo -pix_fmt bgra -f fbdev /dev/fb0
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
@end example

See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
@@ -413,7 +368,7 @@ Quit the device immediately.
The following command shows the @command{ffmpeg} output in an
SDL window, forcing its size to the qcif format:
@example
ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
@end example

@section sndio
@@ -71,9 +71,9 @@ Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
assembly functions. Put the Perl script somewhere
in your PATH, FFmpeg's configure will pick it up automatically.

Mac OS X on amd64 and x86 requires @command{nasm} to build most of the
Mac OS X on amd64 and x86 requires @command{yasm} to build most of the
optimized assembly functions. @uref{http://www.finkproject.org/, Fink},
@uref{https://wiki.gentoo.org/wiki/Project:Prefix, Gentoo Prefix},
@uref{http://www.gentoo.org/proj/en/gentoo-alt/prefix/bootstrap-macos.xml, Gentoo Prefix},
@uref{https://mxcl.github.com/homebrew/, Homebrew}
or @uref{http://www.macports.org, MacPorts} can easily provide it.

@@ -141,7 +141,7 @@ them under @command{MinGW-w64 Win64 Shell} and @command{MinGW-w64 Win32 Shell}.
pacman -S make pkgconf diffutils

# mingw-w64 packages and toolchains
pacman -S mingw-w64-x86_64-nasm mingw-w64-x86_64-gcc mingw-w64-x86_64-SDL2
pacman -S mingw-w64-x86_64-yasm mingw-w64-x86_64-gcc mingw-w64-x86_64-SDL
@end example

To target 32 bits replace @code{x86_64} with @code{i686} in the command above.
@@ -159,7 +159,7 @@ You will need the following prerequisites:
@item @uref{http://code.google.com/p/msinttypes/, msinttypes}
(if using MSVC 2012 or earlier)
@item @uref{http://msys2.github.io/, MSYS2}
@item @uref{http://www.nasm.us/, NASM}
@item @uref{http://yasm.tortall.net/, YASM}
(Also available via MSYS2's package manager.)
@end itemize

@@ -315,7 +315,7 @@ These library packages are only available from

@example
yasm, libSDL-devel, libgsm-devel, libmp3lame-devel,
speex-devel, libtheora-devel, libxvidcore-devel
libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
@end example

The recommendation for x264 is to build it from source, as it evolves too
@@ -343,4 +343,67 @@ and for a build with shared libraries
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
@end example

@chapter Plan 9

The native @uref{http://plan9.bell-labs.com/plan9/, Plan 9} compiler
does not implement all the C99 features needed by FFmpeg so the gcc
port must be used. Furthermore, a few items missing from the C
library and shell environment need to be fixed.

@itemize

@item GNU awk, grep, make, and sed

Working packages of these tools can be found at
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}.
They can be installed with @uref{http://9front.org/, 9front's} @code{pkg}
utility by setting @code{pkgpath} to
@code{http://ports2plan9.googlecode.com/files/}.

@item Missing/broken @code{head} and @code{printf} commands

Replacements adequate for building FFmpeg can be found in the
@code{compat/plan9} directory. Place these somewhere they will be
found by the shell. These are not full implementations of the
commands and are @emph{not} suitable for general use.

@item Missing C99 @code{stdint.h} and @code{inttypes.h}

Replacement headers are available from
@url{http://code.google.com/p/plan9front/issues/detail?id=152}.

@item Missing or non-standard library functions

Some functions in the C library are missing or incomplete. The
@code{@uref{http://ports2plan9.googlecode.com/files/gcc-apelibs-1207.tbz,
gcc-apelibs-1207}} package from
@uref{http://code.google.com/p/ports2plan9/downloads/list, ports2plan9}
includes an updated C library, but installing the full package gives
unusable executables. Instead, keep the files from @code{gccbin.tgz}
under @code{/386/lib/gnu}. From the @code{libc.a} archive in the
@code{gcc-apelibs-1207} package, extract the following object files and
turn them into a library:

@itemize
@item @code{strerror.o}
@item @code{strtoll.o}
@item @code{snprintf.o}
@item @code{vsnprintf.o}
@item @code{vfprintf.o}
@item @code{_IO_getc.o}
@item @code{_IO_putc.o}
@end itemize

Use the @code{--extra-libs} option of @code{configure} to inform the
build system of this library.

@item FPU exceptions enabled by default

Unlike most other systems, Plan 9 enables FPU exceptions by default.
These must be disabled before calling any FFmpeg functions. While the
included tools will do this automatically, other users of the
libraries must do it themselves.

@end itemize

@bye
122
doc/t2h.pm
@@ -20,45 +20,8 @@
# License along with FFmpeg; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# Texinfo 7.0 changed the syntax of various functions.
# Provide a shim for older versions.
sub ff_set_from_init_file($$) {
    my $key = shift;
    my $value = shift;
    if (exists &{'texinfo_set_from_init_file'}) {
        texinfo_set_from_init_file($key, $value);
    } else {
        set_from_init_file($key, $value);
    }
}

sub ff_get_conf($) {
    my $key = shift;
    if (exists &{'texinfo_get_conf'}) {
        texinfo_get_conf($key);
    } else {
        get_conf($key);
    }
}

sub get_formatting_function($$) {
    my $obj = shift;
    my $func = shift;

    my $sub = $obj->can('formatting_function');
    if ($sub) {
        return $obj->formatting_function($func);
    } else {
        return $obj->{$func};
    }
}

# determine texinfo version
my $program_version_num = version->declare(ff_get_conf('PACKAGE_VERSION'))->numify;
my $program_version_6_8 = $program_version_num >= 6.008000;

# no navigation elements
ff_set_from_init_file('HEADERS', 0);
set_from_init_file('HEADERS', 0);

sub ffmpeg_heading_command($$$$$)
{
@@ -92,7 +55,7 @@ sub ffmpeg_heading_command($$$$$)
        $element = $command->{'parent'};
    }
    if ($element) {
        $result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname,
        $result .= &{$self->{'format_element_header'}}($self, $cmdname,
                                                       $command, $element);
    }

@@ -149,11 +112,7 @@ sub ffmpeg_heading_command($$$$$)
        $cmdname
            = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
    }
    # format_heading_text expects an array of headings for texinfo >= 7.0
    if ($program_version_num >= 7.000000) {
        $heading = [$heading];
    }
    $result .= &{get_formatting_function($self,'format_heading_text')}(
    $result .= &{$self->{'format_heading_text'}}(
        $self, $cmdname, $heading,
        $heading_level +
        $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
@@ -168,18 +127,14 @@ foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
}

# print the TOC where @contents is used
if ($program_version_6_8) {
    ff_set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline');
} else {
    ff_set_from_init_file('INLINE_CONTENTS', 1);
}
set_from_init_file('INLINE_CONTENTS', 1);

# make chapters <h2>
ff_set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
set_from_init_file('CHAPTER_HEADER_LEVEL', 2);

# Do not add <hr>
ff_set_from_init_file('DEFAULT_RULE', '');
ff_set_from_init_file('BIG_RULE', '');
set_from_init_file('DEFAULT_RULE', '');
set_from_init_file('BIG_RULE', '');

# Customized file beginning
sub ffmpeg_begin_file($$$)
@@ -196,18 +151,7 @@ sub ffmpeg_begin_file($$$)
    my ($title, $description, $encoding, $date, $css_lines,
        $doctype, $bodytext, $copying_comment, $after_body_open,
        $extra_head, $program_and_version, $program_homepage,
        $program, $generator);
    if ($program_version_num >= 7.000000) {
        ($title, $description, $encoding, $date, $css_lines,
         $doctype, $bodytext, $copying_comment, $after_body_open,
         $extra_head, $program_and_version, $program_homepage,
         $program, $generator) = $self->_file_header_information($command);
    } else {
        ($title, $description, $encoding, $date, $css_lines,
         $doctype, $bodytext, $copying_comment, $after_body_open,
         $extra_head, $program_and_version, $program_homepage,
         $program, $generator) = $self->_file_header_informations($command);
    }
        $program, $generator) = $self->_file_header_informations($command);

    my $links = $self->_get_links ($filename, $element);

@@ -240,11 +184,7 @@ EOT

    return $head1 . $head_title . $head2 . $head_title . $head3;
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_begin_file', \&ffmpeg_begin_file);
} else {
    texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);
}
texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file);

sub ffmpeg_program_string($)
{
@@ -261,17 +201,13 @@ sub ffmpeg_program_string($)
            $self->gdt('This document was generated automatically.'));
    }
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_program_string', \&ffmpeg_program_string);
} else {
    texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);
}
texinfo_register_formatting_function('program_string', \&ffmpeg_program_string);

# Customized file ending
sub ffmpeg_end_file($)
{
    my $self = shift;
    my $program_string = &{get_formatting_function($self,'format_program_string')}($self);
    my $program_string = &{$self->{'format_program_string'}}($self);
    my $program_text = <<EOT;
<p style="font-size: small;">
$program_string
@@ -284,15 +220,11 @@ EOT
EOT
    return $program_text . $footer;
}
if ($program_version_6_8) {
    texinfo_register_formatting_function('format_end_file', \&ffmpeg_end_file);
} else {
    texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);
}
texinfo_register_formatting_function('end_file', \&ffmpeg_end_file);

# Dummy title command
# Ignore title. Title is handled through ffmpeg_begin_file().
ff_set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1);
sub ffmpeg_title($$$$)
{
    return '';
@@ -310,14 +242,8 @@ sub ffmpeg_float($$$$$)
    my $args = shift;
    my $content = shift;

    my ($caption, $prepended);
    if ($program_version_num >= 7.000000) {
        ($caption, $prepended) = Texinfo::Convert::Converter::float_name_caption($self,
                                                                                 $command);
    } else {
        ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
                                                                     $command);
    }
    my ($caption, $prepended) = Texinfo::Common::float_name_caption($self,
                                                                    $command);
    my $caption_text = '';
    my $prepended_text;
    my $prepended_save = '';
@@ -389,13 +315,8 @@ sub ffmpeg_float($$$$$)
                                              $caption->{'args'}->[0], 'float caption');
    }
    if ($prepended_text.$caption_text ne '') {
        if ($program_version_num >= 7.000000) {
            $prepended_text = $self->html_attribute_class('div',['float-caption']). '>'
                . $prepended_text;
        } else {
            $prepended_text = $self->_attribute_class('div','float-caption'). '>'
                . $prepended_text;
        }
        $prepended_text = $self->_attribute_class('div','float-caption'). '>'
            . $prepended_text;
        $caption_text .= '</div>';
    }
    my $html_class = '';
@@ -408,13 +329,8 @@ sub ffmpeg_float($$$$$)
        $prepended_text = '';
        $caption_text = '';
    }
    if ($program_version_num >= 7.000000) {
        return $self->html_attribute_class('div', [$html_class]). '>' . "\n" .
            $prepended_text . $caption_text . $content . '</div>';
    } else {
        return $self->_attribute_class('div', $html_class). '>' . "\n" .
            $prepended_text . $caption_text . $content . '</div>';
    }
    return $self->_attribute_class('div', $html_class). '>' . "\n" .
        $prepended_text . $caption_text . $content . '</div>';
}

texinfo_register_command_formatting('float',
@@ -1,47 +0,0 @@
Undefined Behavior
------------------
In the C language, some operations are undefined, like signed integer overflow,
dereferencing freed pointers, accessing outside allocated space, ...

Undefined behavior must not occur in a C program; it is not safe even if the
output of undefined operations is unused. The unsafety may seem like nit-picking,
but optimizing compilers have in fact optimized code on the assumption that
no undefined behavior occurs.
Optimizing code based on wrong assumptions can and has in some cases led to
effects beyond the output of computations.


The signed integer overflow problem in speed critical code
----------------------------------------------------------
Code which is highly optimized and works with signed integers sometimes has the
problem that some (invalid) inputs can trigger overflows (undefined behavior).
In these cases, often the output of the computation does not matter (as it is
from invalid input).
In some cases the input can be checked easily; in others, checking the input is
computationally too intensive.
In these remaining cases an unsigned type can be used instead of a signed type,
since unsigned overflow is defined in C.

SUINT
-----
As established above, there is a need to use "unsigned" sometimes in
computations which work with signed integers (which overflow).
Using "unsigned" for signed integers has the very significant potential to
cause confusion, as in
unsigned a,b,c;
...
a+b*c;
The reader does not expect b to be semantically -5 here, and if the code is
changed, by maybe adding a cast, a division or some other operation, the
signedness will almost certainly be mistaken.
To avoid this confusion a new type was introduced: "SUINT" is the C "unsigned"
type, but it holds a signed "int".
To use the same example:
SUINT a,b,c;
...
a+b*c;
Here the reader knows that a,b,c are meant to be signed integers, but for C
standard compliance / to avoid undefined behavior they are stored in unsigned
ints.
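
A compilable sketch of the idea (note: SUINT is shown here as a plain typedef
purely for illustration; this is not the actual FFmpeg definition):

    #include <stdio.h>

    typedef unsigned SUINT; /* illustrative only: "unsigned" holding a signed int */

    int main(void)
    {
        SUINT a = 3, b = -5, c = 7;  /* b is semantically -5 */
        /* The multiplication and addition wrap around as unsigned values,
         * which is well defined; converting the result back to int recovers
         * the expected signed value on two's complement machines. */
        int r = (int)(a + b * c);
        printf("%d\n", r);           /* prints -32, i.e. 3 + (-5)*7 */
        return 0;
    }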

@@ -864,9 +864,6 @@ Load the value of the internal variable with number
@var{var}, which was previously stored with st(@var{var}, @var{expr}).
The function returns the loaded value.

@item lerp(x, y, z)
Return linear interpolation between @var{x} and @var{y} by amount of @var{z}.

@item log(x)
Compute natural logarithm of @var{x}.

@@ -917,9 +914,6 @@ various input values that the expression can access through
@code{ld(0)}. When the expression evaluates to 0 then the
corresponding input value will be returned.

@item round(expr)
Round the value of expression @var{expr} to the nearest integer. For example, "round(1.5)" is "2.0".

@item sin(x)
Compute sine of @var{x}.

@@ -38,14 +38,14 @@ the build system and the C:

--- after running configure ---

$ grep FOOBAR ffbuild/config.mak
$ grep FOOBAR config.mak
CONFIG_FOOBAR_FILTER=yes
$ grep FOOBAR config.h
#define CONFIG_FOOBAR_FILTER 1

CONFIG_FOOBAR_FILTER=yes from the ffbuild/config.mak is later used to enable
the filter in libavfilter/Makefile and CONFIG_FOOBAR_FILTER=1 from the config.h
will be used for registering the filter in libavfilter/allfilters.c.
CONFIG_FOOBAR_FILTER=yes from the config.mak is later used to enable the filter in
libavfilter/Makefile and CONFIG_FOOBAR_FILTER=1 from the config.h will be used
for registering the filter in libavfilter/allfilters.c.

Filter code layout
==================
@@ -420,4 +420,4 @@ done:

When all of this is done, you can submit your patch to the ffmpeg-devel
mailing-list for review. If you need any help, feel free to come on our IRC
channel, #ffmpeg-devel on irc.libera.chat.
channel, #ffmpeg-devel on irc.freenode.net.

5
ffbuild/.gitignore
vendored
@@ -1,5 +0,0 @@
/.config
/config.fate
/config.log
/config.mak
/config.sh
@@ -1,13 +0,0 @@
toupper(){
    echo "$@" | tr abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ
}

name=lib$1
ucname=$(toupper ${name})
file=$2

eval $(awk "/#define ${ucname}_VERSION_M/ { print \$2 \"=\" \$3 }" "$file")
eval ${ucname}_VERSION=\$${ucname}_VERSION_MAJOR.\$${ucname}_VERSION_MINOR.\$${ucname}_VERSION_MICRO
eval echo "${name}_VERSION=\$${ucname}_VERSION"
eval echo "${name}_VERSION_MAJOR=\$${ucname}_VERSION_MAJOR"
eval echo "${name}_VERSION_MINOR=\$${ucname}_VERSION_MINOR"
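
A hypothetical invocation of this helper (its actual name is not visible in
this diff, so the path below is only a guess, and the version numbers are
illustrative):

    $ sh ffbuild/libversion.sh avutil libavutil/version.h
    libavutil_VERSION=55.58.100
    libavutil_VERSION_MAJOR=55
    libavutil_VERSION_MINOR=58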
@@ -1,62 +0,0 @@
#!/bin/sh

. ffbuild/config.sh

if test "$shared" = "yes"; then
    shared=true
else
    shared=false
fi

shortname=$1
name=lib${shortname}
fullname=${name}${build_suffix}
comment=$2
libs=$(eval echo \$extralibs_${shortname})
deps=$(eval echo \$${shortname}_deps)

for dep in $deps; do
    depname=lib${dep}
    fulldepname=${depname}${build_suffix}
    . ${depname}/${depname}.version
    depversion=$(eval echo \$${depname}_VERSION)
    requires="$requires ${fulldepname} >= ${depversion}, "
done
requires=${requires%, }

version=$(grep ${name}_VERSION= $name/${name}.version | cut -d= -f2)

cat <<EOF > $name/$fullname.pc
prefix=$prefix
exec_prefix=\${prefix}
libdir=$libdir
includedir=$incdir

Name: $fullname
Description: $comment
Version: $version
Requires: $($shared || echo $requires)
Requires.private: $($shared && echo $requires)
Conflicts:
Libs: -L\${libdir} $rpath -l${fullname#lib} $($shared || echo $libs)
Libs.private: $($shared && echo $libs)
Cflags: -I\${includedir}
EOF

mkdir -p doc/examples/pc-uninstalled
includedir=${source_path}
[ "$includedir" = . ] && includedir="\${pcfiledir}/../../.."
cat <<EOF > doc/examples/pc-uninstalled/${name}-uninstalled.pc
prefix=
exec_prefix=
libdir=\${pcfiledir}/../../../$name
includedir=${source_path}

Name: $fullname
Description: $comment
Version: $version
Requires: $requires
Conflicts:
Libs: -L\${libdir} -Wl,-rpath,\${libdir} -l${fullname#lib} $($shared || echo $libs)
Cflags: -I\${includedir}
EOF
|
||||
@@ -443,9 +443,8 @@ static int read_key(void)
    }
    //Read it
    if(nchars != 0) {
        if (read(0, &ch, 1) == 1)
            return ch;
        return 0;
        read(0, &ch, 1);
        return ch;
    }else{
        return -1;
    }
@@ -483,7 +482,7 @@ static void ffmpeg_cleanup(int ret)
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&fg->inputs[j]->frame_queue);
            av_fifo_free(fg->inputs[j]->frame_queue);
            if (fg->inputs[j]->ist->sub2video.sub_queue) {
                while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
@@ -491,7 +490,7 @@ static void ffmpeg_cleanup(int ret)
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_freep(&fg->inputs[j]->ist->sub2video.sub_queue);
                av_fifo_free(fg->inputs[j]->ist->sub2video.sub_queue);
            }
            av_buffer_unref(&fg->inputs[j]->hw_frames_ctx);
            av_freep(&fg->inputs[j]->name);
@@ -537,6 +536,7 @@ static void ffmpeg_cleanup(int ret)
        for (j = 0; j < ost->nb_bitstream_filters; j++)
            av_bsf_free(&ost->bsf_ctx[j]);
        av_freep(&ost->bsf_ctx);
        av_freep(&ost->bsf_extradata_updated);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
@@ -554,7 +554,6 @@ static void ffmpeg_cleanup(int ret)
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);
@@ -670,28 +669,12 @@ static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
    }
}

static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
@@ -720,6 +703,20 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int u
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
@@ -816,19 +813,7 @@ static void close_output_stream(OutputStream *ost)
    }
}

/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
static void output_packet(OutputFile *of, AVPacket *pkt,
                          OutputStream *ost, int eof)
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
{
    int ret = 0;

@@ -836,11 +821,10 @@ static void output_packet(OutputFile *of, AVPacket *pkt,
    if (ost->nb_bitstream_filters) {
        int idx;

        ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
        ret = av_bsf_send_packet(ost->bsf_ctx[0], pkt);
        if (ret < 0)
            goto finish;

        eof = 0;
        idx = 1;
        while (idx) {
            /* get a packet from the previous filter up the chain */
@@ -849,25 +833,38 @@ static void output_packet(OutputFile *of, AVPacket *pkt,
                ret = 0;
                idx--;
                continue;
            } else if (ret == AVERROR_EOF) {
                eof = 1;
            } else if (ret < 0)
                goto finish;
            /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
             * the api states this shouldn't happen after init(). Propagate it here to the
             * muxer and to the next filters in the chain to workaround this.
             * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
             * par_out->extradata and adapt muxers accordingly to get rid of this. */
            if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
                ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
                if (ret < 0)
                    goto finish;
                ost->bsf_extradata_updated[idx - 1] |= 1;
            }

            /* send it to the next filter down the chain or to the muxer */
            if (idx < ost->nb_bitstream_filters) {
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
                /* HACK/FIXME! - See above */
                if (!(ost->bsf_extradata_updated[idx] & 2)) {
                    ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
                    if (ret < 0)
                        goto finish;
                    ost->bsf_extradata_updated[idx] |= 2;
                }
                ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                if (ret < 0)
                    goto finish;
                idx++;
                eof = 0;
            } else if (eof)
                goto finish;
            else
                write_packet(of, pkt, ost, 0);
            } else
                write_packet(of, pkt, ost);
        }
    } else if (!eof)
        write_packet(of, pkt, ost, 0);
    } else
        write_packet(of, pkt, ost);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
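The doc comment and eof plumbing removed above follow the push/pull contract of the av_bsf API: send one packet in, then receive until the filter reports it needs more input. A minimal sketch of that contract for a single filter (not code from either branch; filter_one_packet, sink and opaque are illustrative names):

#include <libavcodec/avcodec.h>

/* Feed one packet to a bitstream filter and drain every packet it produces.
 * Passing pkt == NULL to av_bsf_send_packet() would signal EOF instead. */
static int filter_one_packet(AVBSFContext *bsf, AVPacket *pkt,
                             int (*sink)(AVPacket *out, void *opaque), void *opaque)
{
    int ret = av_bsf_send_packet(bsf, pkt);
    if (ret < 0)
        return ret;

    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        ret = sink(pkt, opaque); /* e.g. hand the packet to the muxer */
        av_packet_unref(pkt);
        if (ret < 0)
            return ret;
    }
    /* AVERROR(EAGAIN): filter wants more input; AVERROR_EOF: fully drained */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}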
@@ -942,7 +939,7 @@ static void do_audio_out(OutputFile *of, OutputStream *ost,
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
    }

    output_packet(of, &pkt, ost, 0);
    output_packet(of, &pkt, ost);
    }

    return;
@@ -1030,7 +1027,7 @@ static void do_subtitle_out(OutputFile *of,
            pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
        output_packet(of, &pkt, ost);
    }
}

@@ -1065,8 +1062,8 @@ static void do_video_out(OutputFile *of,
        !ost->filters &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
        lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
@@ -1216,7 +1213,7 @@ static void do_video_out(OutputFile *of,
        pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->mux_timebase);
        pkt.flags |= AV_PKT_FLAG_KEY;

        output_packet(of, &pkt, ost, 0);
        output_packet(of, &pkt, ost);
    } else
#endif
    {
@@ -1319,7 +1316,7 @@ static void do_video_out(OutputFile *of,
            }

            frame_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            output_packet(of, &pkt, ost);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
@@ -1509,7 +1506,7 @@ static int reap_filters(int flush)
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    enc->channels != av_frame_get_channels(filtered_frame)) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
@@ -1907,6 +1904,8 @@ static void flush_encoders(void)
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        avcodec_send_frame(enc, NULL);

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
@@ -1928,17 +1927,7 @@ static void flush_encoders(void)
            pkt.size = 0;

            update_benchmark(NULL);

            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            ret = avcodec_receive_packet(enc, &pkt);
            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
@@ -1950,7 +1939,6 @@ static void flush_encoders(void)
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
@@ -1959,7 +1947,7 @@ static void flush_encoders(void)
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            output_packet(of, &pkt, ost);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
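The flush_encoders() hunks above replace the per-iteration send loop with a single EOF signal followed by a receive loop. The drain pattern on the newer side reduces to roughly this sketch (drain_encoder and mux are illustrative names, not code from either branch):

#include <libavcodec/avcodec.h>

/* Put the encoder into draining mode once, then collect the delayed packets
 * until it signals AVERROR_EOF. */
static int drain_encoder(AVCodecContext *enc, AVPacket *pkt,
                         int (*mux)(AVPacket *out, void *opaque), void *opaque)
{
    int ret = avcodec_send_frame(enc, NULL); /* NULL frame = end of stream */
    if (ret < 0)
        return ret;

    while ((ret = avcodec_receive_packet(enc, pkt)) == 0) {
        ret = mux(pkt, opaque); /* placeholder for the real muxing step */
        av_packet_unref(pkt);
        if (ret < 0)
            return ret;
    }
    return ret == AVERROR_EOF ? 0 : ret;
}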
@@ -2098,7 +2086,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
    }
#endif

    output_packet(of, &opkt, ost, 0);
    output_packet(of, &opkt, ost);
}

int guess_input_channel_layout(InputStream *ist)
@@ -2130,7 +2118,7 @@ static void check_decode_result(InputStream *ist, int *got_output, int ret)
        exit_program(1);

    if (exit_on_error && *got_output && ist) {
        if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
        if (av_frame_get_decode_error_flags(ist->decoded_frame) || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, AV_LOG_FATAL, "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->filename, ist->st->index);
            exit_program(1);
        }
@@ -2156,6 +2144,9 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
@@ -2169,13 +2160,6 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
        break;
    }

    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
@@ -2221,22 +2205,21 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        av_log(NULL, AV_LOG_ERROR, "Error while filtering\n");
        return ret;
    }

    return 0;
}

static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
static int ifilter_send_eof(InputFilter *ifilter)
{
    int i, j, ret;

    ifilter->eof = 1;

    if (ifilter->filter) {
        ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        ret = av_buffersrc_add_frame_flags(ifilter->filter, NULL, AV_BUFFERSRC_FLAG_PUSH);
        if (ret < 0)
            return ret;
    } else {
@@ -2259,8 +2242,8 @@ static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)

// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
// There is the following difference: if you got a frame, you must call
// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
// it again with pkt=NULL. pkt==NULL is treated differently from pkt.size==0
// (pkt==NULL means get more output, pkt.size==0 is a flush/drain packet)
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
{
    int ret;
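The comment above describes the wrapper's contract; as a hedged sketch of how such a decode() shim maps the send/receive API back onto a got_frame-style interface (assuming at most one frame out per call, as the comment implies):

static int decode_sketch(AVCodecContext *avctx, AVFrame *frame,
                         int *got_frame, AVPacket *pkt)
{
    int ret;

    *got_frame = 0;

    if (pkt) {
        ret = avcodec_send_packet(avctx, pkt);
        /* After EOF was signalled, send returns AVERROR_EOF; that is not
         * fatal here because output may still be pending. */
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    ret = avcodec_receive_frame(avctx, frame);
    if (ret < 0 && ret != AVERROR(EAGAIN))
        return ret;
    if (ret >= 0)
        *got_frame = 1;

    return 0;
}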
@@ -2374,7 +2357,7 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
    return err < 0 ? err : ret;
}

static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
@@ -2464,8 +2447,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;
    best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);

    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;
@@ -2587,12 +2569,8 @@ out:
static int send_filter_eof(InputStream *ist)
{
    int i, ret;
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    for (i = 0; i < ist->nb_filters; i++) {
        ret = ifilter_send_eof(ist->filters[i], pts);
        ret = ifilter_send_eof(ist->filters[i]);
        if (ret < 0)
            return ret;
    }
@@ -2639,8 +2617,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int duration = 0;
        int got_output = 0;
        int decode_failed = 0;

@@ -2653,31 +2630,26 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
                               &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
            ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, !pkt,
                                &decode_failed);
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                    duration = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                    duration = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                if(ist->dts != AV_NOPTS_VALUE && duration) {
                    ist->next_dts += duration;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            if (got_output)
                ist->next_pts += duration; //FIXME the duration is not correct in some cases
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
@@ -2743,12 +2715,8 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                              ist->dec_ctx->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
@@ -2924,18 +2892,6 @@ static int init_input_stream(int ist_index, char *error, int error_len)

    if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
        av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
    /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
    if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
        av_dict_set(&ist->decoder_opts, "threads", "1", 0);

    ret = hw_device_setup_for_decode(ist);
    if (ret < 0) {
        snprintf(error, error_len, "Device setup failed for "
                 "decoder on input stream #%d:%d : %s",
                 ist->file_index, ist->st->index, av_err2str(ret));
        return ret;
    }

    if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
        if (ret == AVERROR_EXPERIMENTAL)
            abort_codec_experimental(codec, 0);
@@ -3007,7 +2963,7 @@ static int check_init_output_file(OutputFile *of, int file_index)
        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
            write_packet(of, &pkt, ost);
        }
    }

@@ -3106,14 +3062,23 @@ static int init_output_stream_streamcopy(OutputStream *ost)
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
                                              sizeof(*ist->st->side_data));
        if (!ost->st->side_data)
            return AVERROR(ENOMEM);

        ost->st->nb_side_data = 0;
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;
            AVPacketSideData *sd_dst = &ost->st->side_data[ost->st->nb_side_data];

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
            sd_dst->data = av_malloc(sd_src->size);
            if (!sd_dst->data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
            sd_dst->size = sd_src->size;
            sd_dst->type = sd_src->type;
            ost->st->nb_side_data++;
        }
    }

@@ -3267,30 +3232,6 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
    ost->forced_kf_pts = pts;
}

static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc;

    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }

    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }

        oc = output_files[ost->file_index]->ctx;
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }

    enc_ctx->time_base = default_time_base;
}

static int init_output_stream_encode(OutputStream *ost)
{
    InputStream *ist = get_input_stream(ost);
@@ -3361,13 +3302,10 @@ static int init_output_stream_encode(OutputStream *ost)
        enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        enc_ctx->time_base = av_inv_q(ost->frame_rate);
        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
@@ -3386,7 +3324,20 @@ static int init_output_stream_encode(OutputStream *ost)
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        if (!strncmp(ost->enc->name, "libx264", 7) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
            av_log(NULL, AV_LOG_WARNING,
                   "No pixel format specified, %s for H.264 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                   av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
        if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
            enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
            av_buffersink_get_format(ost->filter->filter) != AV_PIX_FMT_YUV420P)
            av_log(NULL, AV_LOG_WARNING,
                   "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
                   "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
                   av_get_pix_fmt_name(av_buffersink_get_format(ost->filter->filter)));
        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
@@ -3480,14 +3431,6 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
            ost->enc_ctx->hw_frames_ctx = av_buffer_ref(av_buffersink_get_hw_frames_ctx(ost->filter->filter));
            if (!ost->enc_ctx->hw_frames_ctx)
                return AVERROR(ENOMEM);
        } else {
            ret = hw_device_setup_for_encode(ost);
            if (ret < 0) {
                snprintf(error, error_len, "Device setup failed for "
                         "encoder on output stream #%d:%d : %s",
                         ost->file_index, ost->index, av_err2str(ret));
                return ret;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
@@ -3524,14 +3467,22 @@ static int init_output_stream(OutputStream *ost, char *error, int error_len)
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            ost->st->side_data = av_realloc_array(NULL, ost->enc_ctx->nb_coded_side_data,
                                                  sizeof(*ost->st->side_data));
            if (!ost->st->side_data)
                return AVERROR(ENOMEM);

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;
                AVPacketSideData *sd_dst = &ost->st->side_data[i];

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                sd_dst->data = av_malloc(sd_src->size);
                if (!sd_dst->data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
                memcpy(sd_dst->data, sd_src->data, sd_src->size);
                sd_dst->size = sd_src->size;
                sd_dst->type = sd_src->type;
                ost->st->nb_side_data++;
            }
        }

@@ -3952,11 +3903,7 @@ static int check_keyboard_interaction(int64_t cur_time)
        if(key == 'D') {
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                           |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                           )) //unsupported, would just crash
            while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
@@ -4203,8 +4150,7 @@ static int seek_to_start(InputFile *ifile, AVFormatContext *is)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }
@@ -4547,15 +4493,6 @@ static int transcode_step(void)
    }

    if (ost->filter && ost->filter->graph->graph) {
        if (!ost->initialized) {
            char error[1024] = {0};
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
@@ -4704,7 +4641,6 @@ static int transcode(void)
    }

    av_buffer_unref(&hw_device_ctx);
    hw_device_free_all();

    /* finished ! */
    ret = 0;
@@ -16,8 +16,8 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef FFTOOLS_FFMPEG_H
#define FFTOOLS_FFMPEG_H
#ifndef FFMPEG_H
#define FFMPEG_H

#include "config.h"

@@ -42,7 +42,6 @@
#include "libavutil/dict.h"
#include "libavutil/eval.h"
#include "libavutil/fifo.h"
#include "libavutil/hwcontext.h"
#include "libavutil/pixfmt.h"
#include "libavutil/rational.h"
#include "libavutil/threadmessage.h"
@@ -68,7 +67,6 @@ enum HWAccelID {
    HWACCEL_QSV,
    HWACCEL_VAAPI,
    HWACCEL_CUVID,
    HWACCEL_D3D11VA,
};

typedef struct HWAccel {
@@ -76,15 +74,8 @@ typedef struct HWAccel {
    int (*init)(AVCodecContext *s);
    enum HWAccelID id;
    enum AVPixelFormat pix_fmt;
    enum AVHWDeviceType device_type;
} HWAccel;

typedef struct HWDevice {
    char *name;
    enum AVHWDeviceType type;
    AVBufferRef *device_ref;
} HWDevice;

/* select an input stream for an output stream */
typedef struct StreamMap {
    int disabled;     /* 1 is this mapping is disabled by a negative map */
@@ -235,8 +226,6 @@ typedef struct OptionsContext {
    int        nb_program;
    SpecifierOpt *time_bases;
    int        nb_time_bases;
    SpecifierOpt *enc_time_bases;
    int        nb_enc_time_bases;
} OptionsContext;

typedef struct InputFilter {
@@ -464,9 +453,9 @@ typedef struct OutputStream {
    int64_t last_mux_dts;
    // the timebase of the packets sent to the muxer
    AVRational mux_timebase;
    AVRational enc_timebase;

    int nb_bitstream_filters;
    uint8_t *bsf_extradata_updated;
    AVBSFContext **bsf_ctx;

    AVCodecContext *enc_ctx;
@@ -629,7 +618,6 @@ extern AVBufferRef *hw_device_ctx;
#if CONFIG_QSV
extern char *qsv_device;
#endif
extern HWDevice *filter_hw_device;


void term_init(void);
@@ -650,7 +638,6 @@ void choose_sample_fmt(AVStream *st, AVCodec *codec);

int configure_filtergraph(FilterGraph *fg);
int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out);
void check_filter_outputs(void);
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist);
int filtergraph_is_simple(FilterGraph *fg);
int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
@@ -662,18 +649,13 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);

int ffmpeg_parse_options(int argc, char **argv);

int vdpau_init(AVCodecContext *s);
int dxva2_init(AVCodecContext *s);
int vda_init(AVCodecContext *s);
int videotoolbox_init(AVCodecContext *s);
int qsv_init(AVCodecContext *s);
int vaapi_decode_init(AVCodecContext *avctx);
int vaapi_device_init(const char *device);
int cuvid_init(AVCodecContext *s);

HWDevice *hw_device_get_by_name(const char *name);
int hw_device_init_from_string(const char *arg, HWDevice **dev);
void hw_device_free_all(void);

int hw_device_setup_for_decode(InputStream *ist);
int hw_device_setup_for_encode(OutputStream *ost);

int hwaccel_decode_init(AVCodecContext *avctx);

#endif /* FFTOOLS_FFMPEG_H */
#endif /* FFMPEG_H */
444 ffmpeg_dxva2.c Normal file
@@ -0,0 +1,444 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <windows.h>

#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0600
#define DXVA2API_USE_BITFIELDS
#define COBJMACROS

#include <stdint.h>

#include <d3d9.h>
#include <dxva2api.h>

#include "ffmpeg.h"

#include "libavcodec/dxva2.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixfmt.h"

#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_dxva2.h"

/* define all the GUIDs used directly here,
   to avoid problems with inconsistent dxva2api.h versions in mingw-w64 and different MSVC version */
#include <initguid.h>
DEFINE_GUID(IID_IDirectXVideoDecoderService, 0xfc51a551,0xd5e7,0x11d9,0xaf,0x55,0x00,0x05,0x4e,0x43,0xff,0x02);

DEFINE_GUID(DXVA2_ModeMPEG2_VLD,      0xee27417f, 0x5e28,0x4e65,0xbe,0xea,0x1d,0x26,0xb5,0x08,0xad,0xc9);
DEFINE_GUID(DXVA2_ModeMPEG2and1_VLD,  0x86695f12, 0x340e,0x4f04,0x9f,0xd3,0x92,0x53,0xdd,0x32,0x74,0x60);
DEFINE_GUID(DXVA2_ModeH264_E,         0x1b81be68, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeH264_F,         0x1b81be69, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVADDI_Intel_ModeH264_E, 0x604F8E68, 0x4951,0x4C54,0x88,0xFE,0xAB,0xD2,0x5C,0x15,0xB3,0xD6);
DEFINE_GUID(DXVA2_ModeVC1_D,          0x1b81beA3, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeVC1_D2010,      0x1b81beA4, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main,  0x5b11d51b, 0x2f4c,0x4452,0xbc,0xc3,0x09,0xf2,0xa1,0x16,0x0c,0xc0);
DEFINE_GUID(DXVA2_ModeHEVC_VLD_Main10,0x107af0e0, 0xef1a,0x4d19,0xab,0xa8,0x67,0xa1,0x63,0x07,0x3d,0x13);
DEFINE_GUID(DXVA2_ModeVP9_VLD_Profile0, 0x463707f8, 0xa1d0,0x4585,0x87,0x6d,0x83,0xaa,0x6d,0x60,0xb8,0x9e);
DEFINE_GUID(DXVA2_NoEncrypt,          0x1b81beD0, 0xa0c7,0x11d3,0xb9,0x84,0x00,0xc0,0x4f,0x2e,0x73,0xc5);
DEFINE_GUID(GUID_NULL,                0x00000000, 0x0000,0x0000,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00);

typedef struct dxva2_mode {
    const GUID *guid;
    enum AVCodecID codec;
} dxva2_mode;

static const dxva2_mode dxva2_modes[] = {
    /* MPEG-2 */
    { &DXVA2_ModeMPEG2_VLD,      AV_CODEC_ID_MPEG2VIDEO },
    { &DXVA2_ModeMPEG2and1_VLD,  AV_CODEC_ID_MPEG2VIDEO },

    /* H.264 */
    { &DXVA2_ModeH264_F,         AV_CODEC_ID_H264 },
    { &DXVA2_ModeH264_E,         AV_CODEC_ID_H264 },
    /* Intel specific H.264 mode */
    { &DXVADDI_Intel_ModeH264_E, AV_CODEC_ID_H264 },

    /* VC-1 / WMV3 */
    { &DXVA2_ModeVC1_D2010,      AV_CODEC_ID_VC1  },
    { &DXVA2_ModeVC1_D2010,      AV_CODEC_ID_WMV3 },
    { &DXVA2_ModeVC1_D,          AV_CODEC_ID_VC1  },
    { &DXVA2_ModeVC1_D,          AV_CODEC_ID_WMV3 },

    /* HEVC/H.265 */
    { &DXVA2_ModeHEVC_VLD_Main,  AV_CODEC_ID_HEVC },
    { &DXVA2_ModeHEVC_VLD_Main10,AV_CODEC_ID_HEVC },

    /* VP8/9 */
    { &DXVA2_ModeVP9_VLD_Profile0, AV_CODEC_ID_VP9 },

    { NULL,                      0 },
};

typedef struct DXVA2Context {
    IDirectXVideoDecoder        *decoder;

    GUID                        decoder_guid;
    DXVA2_ConfigPictureDecode   decoder_config;
    IDirectXVideoDecoderService *decoder_service;

    AVFrame                     *tmp_frame;

    AVBufferRef                 *hw_device_ctx;
    AVBufferRef                 *hw_frames_ctx;
} DXVA2Context;

static void dxva2_uninit(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    DXVA2Context *ctx = ist->hwaccel_ctx;

    ist->hwaccel_uninit        = NULL;
    ist->hwaccel_get_buffer    = NULL;
    ist->hwaccel_retrieve_data = NULL;

    if (ctx->decoder_service)
        IDirectXVideoDecoderService_Release(ctx->decoder_service);

    av_buffer_unref(&ctx->hw_frames_ctx);
    av_buffer_unref(&ctx->hw_device_ctx);

    av_frame_free(&ctx->tmp_frame);

    av_freep(&ist->hwaccel_ctx);
    av_freep(&s->hwaccel_context);
}

static int dxva2_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;
    DXVA2Context *ctx = ist->hwaccel_ctx;

    return av_hwframe_get_buffer(ctx->hw_frames_ctx, frame, 0);
}

static int dxva2_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    int ret;

    ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0) {
        av_frame_unref(ctx->tmp_frame);
        return ret;
    }

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
}

static int dxva2_alloc(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx;
    HANDLE device_handle;
    HRESULT hr;

    AVHWDeviceContext *device_ctx;
    AVDXVA2DeviceContext *device_hwctx;
    int ret;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return AVERROR(ENOMEM);

    ist->hwaccel_ctx           = ctx;
    ist->hwaccel_uninit        = dxva2_uninit;
    ist->hwaccel_get_buffer    = dxva2_get_buffer;
    ist->hwaccel_retrieve_data = dxva2_retrieve_data;

    ret = av_hwdevice_ctx_create(&ctx->hw_device_ctx, AV_HWDEVICE_TYPE_DXVA2,
                                 ist->hwaccel_device, NULL, 0);
    if (ret < 0)
        goto fail;
    device_ctx   = (AVHWDeviceContext*)ctx->hw_device_ctx->data;
    device_hwctx = device_ctx->hwctx;

    hr = IDirect3DDeviceManager9_OpenDeviceHandle(device_hwctx->devmgr,
                                                  &device_handle);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to open a device handle\n");
        goto fail;
    }

    hr = IDirect3DDeviceManager9_GetVideoService(device_hwctx->devmgr, device_handle,
                                                 &IID_IDirectXVideoDecoderService,
                                                 (void **)&ctx->decoder_service);
    IDirect3DDeviceManager9_CloseDeviceHandle(device_hwctx->devmgr, device_handle);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create IDirectXVideoDecoderService\n");
        goto fail;
    }

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame)
        goto fail;

    s->hwaccel_context = av_mallocz(sizeof(struct dxva_context));
    if (!s->hwaccel_context)
        goto fail;

    return 0;
fail:
    dxva2_uninit(s);
    return AVERROR(EINVAL);
}

static int dxva2_get_decoder_configuration(AVCodecContext *s, const GUID *device_guid,
                                           const DXVA2_VideoDesc *desc,
                                           DXVA2_ConfigPictureDecode *config)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    unsigned cfg_count = 0, best_score = 0;
    DXVA2_ConfigPictureDecode *cfg_list = NULL;
    DXVA2_ConfigPictureDecode best_cfg = {{0}};
    HRESULT hr;
    int i;

    hr = IDirectXVideoDecoderService_GetDecoderConfigurations(ctx->decoder_service, device_guid, desc, NULL, &cfg_count, &cfg_list);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Unable to retrieve decoder configurations\n");
        return AVERROR(EINVAL);
    }

    for (i = 0; i < cfg_count; i++) {
        DXVA2_ConfigPictureDecode *cfg = &cfg_list[i];

        unsigned score;
        if (cfg->ConfigBitstreamRaw == 1)
            score = 1;
        else if (s->codec_id == AV_CODEC_ID_H264 && cfg->ConfigBitstreamRaw == 2)
            score = 2;
        else
            continue;
        if (IsEqualGUID(&cfg->guidConfigBitstreamEncryption, &DXVA2_NoEncrypt))
            score += 16;
        if (score > best_score) {
            best_score = score;
            best_cfg   = *cfg;
        }
    }
    CoTaskMemFree(cfg_list);

    if (!best_score) {
        av_log(NULL, loglevel, "No valid decoder configuration available\n");
        return AVERROR(EINVAL);
    }

    *config = best_cfg;
    return 0;
}

static int dxva2_create_decoder(AVCodecContext *s)
{
    InputStream  *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx = ist->hwaccel_ctx;
    struct dxva_context *dxva_ctx = s->hwaccel_context;
    GUID *guid_list = NULL;
    unsigned guid_count = 0, i, j;
    GUID device_guid = GUID_NULL;
    const D3DFORMAT surface_format = (s->sw_pix_fmt == AV_PIX_FMT_YUV420P10) ? MKTAG('P','0','1','0') : MKTAG('N','V','1','2');
    D3DFORMAT target_format = 0;
    DXVA2_VideoDesc desc = { 0 };
    DXVA2_ConfigPictureDecode config;
    HRESULT hr;
    int surface_alignment, num_surfaces;
    int ret;

    AVDXVA2FramesContext *frames_hwctx;
    AVHWFramesContext *frames_ctx;

    hr = IDirectXVideoDecoderService_GetDecoderDeviceGuids(ctx->decoder_service, &guid_count, &guid_list);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to retrieve decoder device GUIDs\n");
        goto fail;
    }

    for (i = 0; dxva2_modes[i].guid; i++) {
        D3DFORMAT *target_list = NULL;
        unsigned target_count = 0;
        const dxva2_mode *mode = &dxva2_modes[i];
        if (mode->codec != s->codec_id)
            continue;

        for (j = 0; j < guid_count; j++) {
            if (IsEqualGUID(mode->guid, &guid_list[j]))
                break;
        }
        if (j == guid_count)
            continue;

        hr = IDirectXVideoDecoderService_GetDecoderRenderTargets(ctx->decoder_service, mode->guid, &target_count, &target_list);
        if (FAILED(hr)) {
            continue;
        }
        for (j = 0; j < target_count; j++) {
            const D3DFORMAT format = target_list[j];
            if (format == surface_format) {
                target_format = format;
                break;
            }
        }
        CoTaskMemFree(target_list);
        if (target_format) {
            device_guid = *mode->guid;
            break;
        }
    }
    CoTaskMemFree(guid_list);

    if (IsEqualGUID(&device_guid, &GUID_NULL)) {
        av_log(NULL, loglevel, "No decoder device for codec found\n");
        goto fail;
    }

    desc.SampleWidth  = s->coded_width;
    desc.SampleHeight = s->coded_height;
    desc.Format       = target_format;

    ret = dxva2_get_decoder_configuration(s, &device_guid, &desc, &config);
    if (ret < 0) {
        goto fail;
    }

    /* decoding MPEG-2 requires additional alignment on some Intel GPUs,
       but it causes issues for H.264 on certain AMD GPUs..... */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        surface_alignment = 32;
    /* the HEVC DXVA2 spec asks for 128 pixel aligned surfaces to ensure
       all coding features have enough room to work with */
    else if (s->codec_id == AV_CODEC_ID_HEVC)
        surface_alignment = 128;
    else
        surface_alignment = 16;

    /* 4 base work surfaces */
    num_surfaces = 4;

    /* add surfaces based on number of possible refs */
    if (s->codec_id == AV_CODEC_ID_H264 || s->codec_id == AV_CODEC_ID_HEVC)
        num_surfaces += 16;
    else if (s->codec_id == AV_CODEC_ID_VP9)
        num_surfaces += 8;
    else
        num_surfaces += 2;

    /* add extra surfaces for frame threading */
    if (s->active_thread_type & FF_THREAD_FRAME)
        num_surfaces += s->thread_count;

    ctx->hw_frames_ctx = av_hwframe_ctx_alloc(ctx->hw_device_ctx);
    if (!ctx->hw_frames_ctx)
        goto fail;
    frames_ctx   = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
    frames_hwctx = frames_ctx->hwctx;

    frames_ctx->format            = AV_PIX_FMT_DXVA2_VLD;
    frames_ctx->sw_format         = (target_format == MKTAG('P','0','1','0') ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12);
    frames_ctx->width             = FFALIGN(s->coded_width, surface_alignment);
    frames_ctx->height            = FFALIGN(s->coded_height, surface_alignment);
    frames_ctx->initial_pool_size = num_surfaces;

    frames_hwctx->surface_type = DXVA2_VideoDecoderRenderTarget;

    ret = av_hwframe_ctx_init(ctx->hw_frames_ctx);
    if (ret < 0) {
        av_log(NULL, loglevel, "Failed to initialize the HW frames context\n");
        goto fail;
    }

    hr = IDirectXVideoDecoderService_CreateVideoDecoder(ctx->decoder_service, &device_guid,
                                                        &desc, &config, frames_hwctx->surfaces,
                                                        frames_hwctx->nb_surfaces, &frames_hwctx->decoder_to_release);
    if (FAILED(hr)) {
        av_log(NULL, loglevel, "Failed to create DXVA2 video decoder\n");
        goto fail;
    }

    ctx->decoder_guid   = device_guid;
    ctx->decoder_config = config;

    dxva_ctx->cfg           = &ctx->decoder_config;
    dxva_ctx->decoder       = frames_hwctx->decoder_to_release;
    dxva_ctx->surface       = frames_hwctx->surfaces;
    dxva_ctx->surface_count = frames_hwctx->nb_surfaces;

    if (IsEqualGUID(&ctx->decoder_guid, &DXVADDI_Intel_ModeH264_E))
        dxva_ctx->workaround |= FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO;

    return 0;
fail:
    av_buffer_unref(&ctx->hw_frames_ctx);
    return AVERROR(EINVAL);
}

int dxva2_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    DXVA2Context *ctx;
    int ret;

    if (!ist->hwaccel_ctx) {
        ret = dxva2_alloc(s);
        if (ret < 0)
            return ret;
    }
    ctx = ist->hwaccel_ctx;

    if (s->codec_id == AV_CODEC_ID_H264 &&
        (s->profile & ~FF_PROFILE_H264_CONSTRAINED) > FF_PROFILE_H264_HIGH) {
        av_log(NULL, loglevel, "Unsupported H.264 profile for DXVA2 HWAccel: %d\n", s->profile);
        return AVERROR(EINVAL);
    }

    if (s->codec_id == AV_CODEC_ID_HEVC &&
        s->profile != FF_PROFILE_HEVC_MAIN && s->profile != FF_PROFILE_HEVC_MAIN_10) {
        av_log(NULL, loglevel, "Unsupported HEVC profile for DXVA2 HWAccel: %d\n", s->profile);
        return AVERROR(EINVAL);
    }

    av_buffer_unref(&ctx->hw_frames_ctx);

    ret = dxva2_create_decoder(s);
    if (ret < 0) {
        av_log(NULL, loglevel, "Error creating the DXVA2 decoder\n");
        return ret;
    }

    return 0;
}
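ffmpeg_dxva2.c above leans on the generic hwcontext API; dxva2_retrieve_data() is essentially the download half of it. A standalone sketch of that step (download_hw_frame is an illustrative name; this works for any hardware pixel format with a transfer path, not only DXVA2):

#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>

/* Copy a GPU-side frame into a newly allocated software frame, preserving
 * timestamps and per-frame metadata. */
static int download_hw_frame(AVFrame *hw, AVFrame **out)
{
    AVFrame *sw = av_frame_alloc();
    int ret;

    if (!sw)
        return AVERROR(ENOMEM);

    ret = av_hwframe_transfer_data(sw, hw, 0); /* picks a supported sw format */
    if (ret >= 0)
        ret = av_frame_copy_props(sw, hw);
    if (ret < 0) {
        av_frame_free(&sw);
        return ret;
    }

    *out = sw;
    return 0;
}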
@@ -678,21 +678,6 @@ int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOu
    }
}

void check_filter_outputs(void)
{
    int i;
    for (i = 0; i < nb_filtergraphs; i++) {
        int n;
        for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
            OutputFilter *output = filtergraphs[i]->outputs[n];
            if (!output->ost) {
                av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
                exit_program(1);
            }
        }
    }
}

static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
{
    AVFormatContext *avf = input_files[ist->file_index]->ctx;
@@ -1046,15 +1031,9 @@ int configure_filtergraph(FilterGraph *fg)
    if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
        goto fail;

    if (filter_hw_device || hw_device_ctx) {
        AVBufferRef *device = filter_hw_device ? filter_hw_device->device_ref
                                               : hw_device_ctx;
    if (hw_device_ctx) {
        for (i = 0; i < fg->graph->nb_filters; i++) {
            fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(device);
            if (!fg->graph->filters[i]->hw_device_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            fg->graph->filters[i]->hw_device_ctx = av_buffer_ref(hw_device_ctx);
        }
    }

@@ -1183,7 +1162,7 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
    ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;

    ifilter->sample_rate         = frame->sample_rate;
    ifilter->channels            = frame->channels;
    ifilter->channels            = av_frame_get_channels(frame);
    ifilter->channel_layout      = frame->channel_layout;

    if (frame->hw_frames_ctx) {
@@ -1,4 +1,3 @@

/*
 * ffmpeg option parsing
 *
@@ -68,42 +67,30 @@

const HWAccel hwaccels[] = {
#if HAVE_VDPAU_X11
    { "vdpau", hwaccel_decode_init, HWACCEL_VDPAU, AV_PIX_FMT_VDPAU,
      AV_HWDEVICE_TYPE_VDPAU },
    { "vdpau", vdpau_init, HWACCEL_VDPAU, AV_PIX_FMT_VDPAU },
#endif
#if CONFIG_D3D11VA
    { "d3d11va", hwaccel_decode_init, HWACCEL_D3D11VA, AV_PIX_FMT_D3D11,
      AV_HWDEVICE_TYPE_D3D11VA },
#endif
#if CONFIG_DXVA2
    { "dxva2", hwaccel_decode_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD,
      AV_HWDEVICE_TYPE_DXVA2 },
#if HAVE_DXVA2_LIB
    { "dxva2", dxva2_init, HWACCEL_DXVA2, AV_PIX_FMT_DXVA2_VLD },
#endif
#if CONFIG_VDA
    { "vda", videotoolbox_init, HWACCEL_VDA, AV_PIX_FMT_VDA,
      AV_HWDEVICE_TYPE_NONE },
    { "vda", videotoolbox_init, HWACCEL_VDA, AV_PIX_FMT_VDA },
#endif
#if CONFIG_VIDEOTOOLBOX
    { "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX,
      AV_HWDEVICE_TYPE_NONE },
    { "videotoolbox", videotoolbox_init, HWACCEL_VIDEOTOOLBOX, AV_PIX_FMT_VIDEOTOOLBOX },
#endif
#if CONFIG_LIBMFX
    { "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV,
      AV_HWDEVICE_TYPE_NONE },
    { "qsv", qsv_init, HWACCEL_QSV, AV_PIX_FMT_QSV },
#endif
#if CONFIG_VAAPI
    { "vaapi", hwaccel_decode_init, HWACCEL_VAAPI, AV_PIX_FMT_VAAPI,
      AV_HWDEVICE_TYPE_VAAPI },
    { "vaapi", vaapi_decode_init, HWACCEL_VAAPI, AV_PIX_FMT_VAAPI },
#endif
#if CONFIG_CUVID
    { "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA,
      AV_HWDEVICE_TYPE_NONE },
    { "cuvid", cuvid_init, HWACCEL_CUVID, AV_PIX_FMT_CUDA },
#endif
    { 0 },
};
int hwaccel_lax_profile_check = 0;
AVBufferRef *hw_device_ctx;
HWDevice *filter_hw_device;

char *vstats_filename;
char *sdp_filename;
@@ -146,7 +133,6 @@ static int override_ffserver = 0;
static int input_stream_potentially_available = 0;
static int ignore_unknown_streams = 0;
static int copy_unknown_streams = 0;
static int find_stream_info = 1;

static void uninit_options(OptionsContext *o)
{
@@ -406,11 +392,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
    int n;
    AVStream *st;
    AudioChannelMap *m;
    char *allow_unused;
    char *mapchan;
    mapchan = av_strdup(arg);
    if (!mapchan)
        return AVERROR(ENOMEM);

    GROW_ARRAY(o->audio_channel_maps, o->nb_audio_channel_maps);
    m = &o->audio_channel_maps[o->nb_audio_channel_maps - 1];
@@ -421,7 +402,6 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
        m->file_idx = m->stream_idx = -1;
        if (n == 1)
            m->ofile_idx = m->ostream_idx = -1;
        av_free(mapchan);
        return 0;
    }

@@ -457,22 +437,11 @@ static int opt_map_channel(void *optctx, const char *opt, const char *arg)
               m->file_idx, m->stream_idx);
        exit_program(1);
    }
    /* allow trailing ? to map_channel */
    if (allow_unused = strchr(mapchan, '?'))
        *allow_unused = 0;
    if (m->channel_idx < 0 || m->channel_idx >= st->codecpar->channels) {
        if (allow_unused) {
            av_log(NULL, AV_LOG_VERBOSE, "mapchan: invalid audio channel #%d.%d.%d\n",
                   m->file_idx, m->stream_idx, m->channel_idx);
        } else {
            av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n"
                   "To ignore this, add a trailing '?' to the map_channel.\n",
                   m->file_idx, m->stream_idx, m->channel_idx);
            exit_program(1);
        }

        av_log(NULL, AV_LOG_FATAL, "mapchan: invalid audio channel #%d.%d.%d\n",
               m->file_idx, m->stream_idx, m->channel_idx);
        exit_program(1);
    }
    av_free(mapchan);
    return 0;
}

@@ -486,53 +455,14 @@ static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
#if CONFIG_VAAPI
static int opt_vaapi_device(void *optctx, const char *opt, const char *arg)
{
    HWDevice *dev;
    const char *prefix = "vaapi:";
    char *tmp;
    int err;
    tmp = av_asprintf("%s%s", prefix, arg);
    if (!tmp)
        return AVERROR(ENOMEM);
    err = hw_device_init_from_string(tmp, &dev);
    av_free(tmp);
    err = vaapi_device_init(arg);
    if (err < 0)
        return err;
    hw_device_ctx = av_buffer_ref(dev->device_ref);
    if (!hw_device_ctx)
        return AVERROR(ENOMEM);
        exit_program(1);
    return 0;
}
#endif

static int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
{
    if (!strcmp(arg, "list")) {
        enum AVHWDeviceType type = AV_HWDEVICE_TYPE_NONE;
        printf("Supported hardware device types:\n");
        while ((type = av_hwdevice_iterate_types(type)) !=
               AV_HWDEVICE_TYPE_NONE)
            printf("%s\n", av_hwdevice_get_type_name(type));
        printf("\n");
        exit_program(0);
    } else {
        return hw_device_init_from_string(arg, NULL);
    }
}

static int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
{
    if (filter_hw_device) {
        av_log(NULL, AV_LOG_ERROR, "Only one filter device can be used.\n");
        return AVERROR(EINVAL);
    }
    filter_hw_device = hw_device_get_by_name(arg);
    if (!filter_hw_device) {
        av_log(NULL, AV_LOG_ERROR, "Invalid filter device %s.\n", arg);
        return AVERROR(EINVAL);
    }
    return 0;
}

/**
 * Parse a metadata specifier passed as 'arg' parameter.
 * @param arg  metadata string to parse
@@ -792,16 +722,14 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
        case AVMEDIA_TYPE_VIDEO:
            if(!ist->dec)
                ist->dec = avcodec_find_decoder(par->codec_id);
#if FF_API_LOWRES
#if FF_API_EMU_EDGE
            if (av_codec_get_lowres(st->codec)) {
                av_codec_set_lowres(ist->dec_ctx, av_codec_get_lowres(st->codec));
                ist->dec_ctx->width  = st->codec->width;
                ist->dec_ctx->height = st->codec->height;
                ist->dec_ctx->coded_width  = st->codec->coded_width;
                ist->dec_ctx->coded_height = st->codec->coded_height;
#if FF_API_EMU_EDGE
                ist->dec_ctx->flags |= CODEC_FLAG_EMU_EDGE;
#endif
            }
#endif

@@ -970,8 +898,10 @@ static int open_input_file(OptionsContext *o, const char *filename)
    AVInputFormat *file_iformat = NULL;
    int err, i, ret;
    int64_t timestamp;
    AVDictionary **opts;
    AVDictionary *unused_opts = NULL;
    AVDictionaryEntry *e = NULL;
    int orig_nb_streams; // number of streams before avformat_find_stream_info
    char * video_codec_name = NULL;
    char * audio_codec_name = NULL;
    char *subtitle_codec_name = NULL;
@@ -1074,24 +1004,18 @@ static int open_input_file(OptionsContext *o, const char *filename)
    for (i = 0; i < ic->nb_streams; i++)
        choose_decoder(o, ic, ic->streams[i]);

    if (find_stream_info) {
        AVDictionary **opts = setup_find_stream_info_opts(ic, o->g->codec_opts);
        int orig_nb_streams = ic->nb_streams;
    /* Set AVCodecContext options for avformat_find_stream_info */
    opts = setup_find_stream_info_opts(ic, o->g->codec_opts);
    orig_nb_streams = ic->nb_streams;

        /* If not enough info to get the stream parameters, we decode the
           first frames to get it. (used in mpeg case for example) */
        ret = avformat_find_stream_info(ic, opts);

        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);

        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
            if (ic->nb_streams == 0) {
                avformat_close_input(&ic);
                exit_program(1);
            }
    /* If not enough info to get the stream parameters, we decode the
       first frames to get it. (used in mpeg case for example) */
    ret = avformat_find_stream_info(ic, opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
        if (ic->nb_streams == 0) {
            avformat_close_input(&ic);
            exit_program(1);
        }
    }

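The probing sequence reshuffled above boils down to a short call chain; a minimal sketch of it in isolation (probe_input is an illustrative name, not code from either branch):

#include <libavformat/avformat.h>

/* Open an input and let libavformat decode a few frames if needed to fill
 * in the stream parameters. */
static int probe_input(const char *filename, AVFormatContext **out)
{
    AVFormatContext *ic = NULL;
    int ret = avformat_open_input(&ic, filename, NULL, NULL);
    if (ret < 0)
        return ret;

    ret = avformat_find_stream_info(ic, NULL);
    if (ret < 0) {
        avformat_close_input(&ic);
        return ret;
    }

    *out = ic;
    return 0;
}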
@@ -1205,6 +1129,10 @@ static int open_input_file(OptionsContext *o, const char *filename)
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < orig_nb_streams; i++)
|
||||
av_dict_free(&opts[i]);
|
||||
av_freep(&opts);
|
||||
|
||||
input_stream_potentially_available = 1;
|
||||
|
||||
return 0;
|
||||
@@ -1383,17 +1311,6 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
|
||||
st->time_base = q;
|
||||
}
|
||||
|
||||
MATCH_PER_STREAM_OPT(enc_time_bases, str, time_base, oc, st);
|
||||
if (time_base) {
|
||||
AVRational q;
|
||||
if (av_parse_ratio(&q, time_base, INT_MAX, 0, NULL) < 0 ||
|
||||
q.den <= 0) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Invalid time base: %s\n", time_base);
|
||||
exit_program(1);
|
||||
}
|
||||
ost->enc_timebase = q;
|
||||
}
|
||||
|
||||
ost->max_frames = INT64_MAX;
|
||||
MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
|
||||
for (i = 0; i<o->nb_max_frames; i++) {
|
||||
@@ -1457,6 +1374,13 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
        if (*bsfs)
            bsfs++;
    }
    if (ost->nb_bitstream_filters) {
        ost->bsf_extradata_updated = av_mallocz_array(ost->nb_bitstream_filters, sizeof(*ost->bsf_extradata_updated));
        if (!ost->bsf_extradata_updated) {
            av_log(NULL, AV_LOG_FATAL, "Bitstream filter memory allocation failed\n");
            exit_program(1);
        }
    }

    MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
    if (codec_tag) {
@@ -2379,14 +2303,12 @@ loop_end:
                   o->attachments[i]);
            exit_program(1);
        }
        if (len > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE ||
            !(attachment = av_malloc(len + AV_INPUT_BUFFER_PADDING_SIZE))) {
            av_log(NULL, AV_LOG_FATAL, "Attachment %s too large.\n",
        if (!(attachment = av_malloc(len))) {
            av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
                   o->attachments[i]);
            exit_program(1);
        }
        avio_read(pb, attachment, len);
        memset(attachment + len, 0, AV_INPUT_BUFFER_PADDING_SIZE);

        ost = new_attachment_stream(o, oc, -1);
        ost->stream_copy = 0;
@@ -2778,14 +2700,13 @@ static int opt_target(void *optctx, const char *opt, const char *arg)
    } else {
        /* Try to determine PAL/NTSC by peeking in the input files */
        if (nb_input_files) {
            int i, j;
            int i, j, fr;
            for (j = 0; j < nb_input_files; j++) {
                for (i = 0; i < input_files[j]->nb_streams; i++) {
                    AVStream *st = input_files[j]->ctx->streams[i];
                    int64_t fr;
                    if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                        continue;
                    fr = st->time_base.den * 1000LL / st->time_base.num;
                    fr = st->time_base.den * 1000 / st->time_base.num;
                    if (fr == 25000) {
                        norm = PAL;
                        break;
@@ -3096,7 +3017,7 @@ static int opt_timecode(void *optctx, const char *opt, const char *arg)
    if (ret >= 0)
        ret = av_dict_set(&o->g->codec_opts, "gop_timecode", arg, 0);
    av_free(tcr);
    return ret;
    return 0;
}

static int opt_channel_layout(void *optctx, const char *opt, const char *arg)
@@ -3275,7 +3196,6 @@ static int open_files(OptionGroupList *l, const char *inout,
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error parsing options for %s file "
                   "%s.\n", inout, g->arg);
            uninit_options(&o);
            return ret;
        }

@@ -3340,8 +3260,6 @@ int ffmpeg_parse_options(int argc, char **argv)
        goto fail;
    }

    check_filter_outputs();

fail:
    uninit_parse_context(&octx);
    if (ret < 0) {
@@ -3371,7 +3289,7 @@ static int opt_progress(void *optctx, const char *opt, const char *arg)
#define OFFSET(x) offsetof(OptionsContext, x)
const OptionDef options[] = {
    /* main options */
    CMDUTILS_COMMON_OPTIONS
#include "cmdutils_common_opts.h"
    { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
           OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
        "force format", "fmt" },
@@ -3545,8 +3463,6 @@ const OptionDef options[] = {
    { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
        { .off = OFFSET(thread_queue_size) },
        "set the maximum number of queued packets from the demuxer" },
    { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },

    /* video options */
    { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
@@ -3711,11 +3627,6 @@ const OptionDef options[] = {

    { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
        "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
    { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
        "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
        "two special values are defined - "
        "0 = use frame rate (video) or sample rate (audio),"
        "-1 = match source time base", "ratio" },

    { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
        "A comma-separated list of bitstream filters", "bitstream_filters" },
@@ -3752,10 +3663,5 @@ const OptionDef options[] = {
        "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
#endif

    { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
        "initialise hardware device", "args" },
    { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
        "set hardware device used when filtering", "device" },

    { NULL, },
};
233
ffmpeg_vaapi.c
Normal file
@@ -0,0 +1,233 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/log.h"

#include "ffmpeg.h"


static AVClass vaapi_class = {
    .class_name = "vaapi",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
};

#define DEFAULT_SURFACES 20

typedef struct VAAPIDecoderContext {
    const AVClass *class;

    AVBufferRef *device_ref;
    AVHWDeviceContext *device;
    AVBufferRef *frames_ref;
    AVHWFramesContext *frames;

    // The output need not have the same format, width and height as the
    // decoded frames - the copy for non-direct-mapped access is actually
    // a whole vpp instance which can do arbitrary scaling and format
    // conversion.
    enum AVPixelFormat output_format;
} VAAPIDecoderContext;


static int vaapi_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    InputStream *ist = avctx->opaque;
    VAAPIDecoderContext *ctx = ist->hwaccel_ctx;
    int err;

    err = av_hwframe_get_buffer(ctx->frames_ref, frame, 0);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to allocate decoder surface.\n");
    } else {
        av_log(ctx, AV_LOG_DEBUG, "Decoder given surface %#x.\n",
               (unsigned int)(uintptr_t)frame->data[3]);
    }
    return err;
}

static int vaapi_retrieve_data(AVCodecContext *avctx, AVFrame *input)
{
    InputStream *ist = avctx->opaque;
    VAAPIDecoderContext *ctx = ist->hwaccel_ctx;
    AVFrame *output = 0;
    int err;

    av_assert0(input->format == AV_PIX_FMT_VAAPI);

    if (ctx->output_format == AV_PIX_FMT_VAAPI) {
        // Nothing to do.
        return 0;
    }

    av_log(ctx, AV_LOG_DEBUG, "Retrieve data from surface %#x.\n",
           (unsigned int)(uintptr_t)input->data[3]);

    output = av_frame_alloc();
    if (!output)
        return AVERROR(ENOMEM);

    output->format = ctx->output_format;

    err = av_hwframe_transfer_data(output, input, 0);
    if (err < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to transfer data to "
               "output frame: %d.\n", err);
        goto fail;
    }

    err = av_frame_copy_props(output, input);
    if (err < 0) {
        av_frame_unref(output);
        goto fail;
    }

    av_frame_unref(input);
    av_frame_move_ref(input, output);
    av_frame_free(&output);

    return 0;

fail:
    if (output)
        av_frame_free(&output);
    return err;
}

static void vaapi_decode_uninit(AVCodecContext *avctx)
{
    InputStream *ist = avctx->opaque;
    VAAPIDecoderContext *ctx = ist->hwaccel_ctx;

    if (ctx) {
        av_buffer_unref(&ctx->frames_ref);
        av_buffer_unref(&ctx->device_ref);
        av_free(ctx);
    }

    av_buffer_unref(&ist->hw_frames_ctx);

    ist->hwaccel_ctx = NULL;
    ist->hwaccel_uninit = NULL;
    ist->hwaccel_get_buffer = NULL;
    ist->hwaccel_retrieve_data = NULL;
}

int vaapi_decode_init(AVCodecContext *avctx)
{
    InputStream *ist = avctx->opaque;
    VAAPIDecoderContext *ctx;
    int err;
    int loglevel = (ist->hwaccel_id != HWACCEL_VAAPI ? AV_LOG_VERBOSE
                                                     : AV_LOG_ERROR);

    if (ist->hwaccel_ctx)
        vaapi_decode_uninit(avctx);

    // We have -hwaccel without -vaapi_device, so just initialise here with
    // the device passed as -hwaccel_device (if -vaapi_device was passed, it
    // will always have been called before now).
    if (!hw_device_ctx) {
        err = vaapi_device_init(ist->hwaccel_device);
        if (err < 0)
            return err;
    }

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return AVERROR(ENOMEM);
    ctx->class = &vaapi_class;
    ist->hwaccel_ctx = ctx;

    ctx->device_ref = av_buffer_ref(hw_device_ctx);
    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;

    ctx->output_format = ist->hwaccel_output_format;
    avctx->pix_fmt = ctx->output_format;

    ctx->frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->frames_ref) {
        av_log(ctx, loglevel, "Failed to create VAAPI frame context.\n");
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->frames = (AVHWFramesContext*)ctx->frames_ref->data;

    ctx->frames->format = AV_PIX_FMT_VAAPI;
    ctx->frames->width  = avctx->coded_width;
    ctx->frames->height = avctx->coded_height;

    // It would be nice if we could query the available formats here,
    // but unfortunately we don't have a VAConfigID to do it with.
    // For now, just assume an NV12 format (or P010 if 10-bit).
    ctx->frames->sw_format = (avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ?
                              AV_PIX_FMT_P010 : AV_PIX_FMT_NV12);

    // For frame-threaded decoding, at least one additional surface
    // is needed for each thread.
    ctx->frames->initial_pool_size = DEFAULT_SURFACES;
    if (avctx->active_thread_type & FF_THREAD_FRAME)
        ctx->frames->initial_pool_size += avctx->thread_count;

    err = av_hwframe_ctx_init(ctx->frames_ref);
    if (err < 0) {
        av_log(ctx, loglevel, "Failed to initialise VAAPI frame "
               "context: %d\n", err);
        goto fail;
    }

    ist->hw_frames_ctx = av_buffer_ref(ctx->frames_ref);
    if (!ist->hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ist->hwaccel_uninit        = &vaapi_decode_uninit;
    ist->hwaccel_get_buffer    = &vaapi_get_buffer;
    ist->hwaccel_retrieve_data = &vaapi_retrieve_data;

    return 0;

fail:
    vaapi_decode_uninit(avctx);
    return err;
}

static AVClass *vaapi_log = &vaapi_class;

av_cold int vaapi_device_init(const char *device)
{
    int err;

    av_buffer_unref(&hw_device_ctx);

    err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
                                 device, NULL, 0);
    if (err < 0) {
        av_log(&vaapi_log, AV_LOG_ERROR, "Failed to create a VAAPI device\n");
        return err;
    }

    return 0;
}
159
ffmpeg_vdpau.c
Normal file
@@ -0,0 +1,159 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "ffmpeg.h"

#include "libavcodec/vdpau.h"

#include "libavutil/buffer.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_vdpau.h"
#include "libavutil/pixfmt.h"

typedef struct VDPAUContext {
    AVBufferRef *hw_frames_ctx;
    AVFrame *tmp_frame;
} VDPAUContext;

static void vdpau_uninit(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    VDPAUContext *ctx = ist->hwaccel_ctx;

    ist->hwaccel_uninit        = NULL;
    ist->hwaccel_get_buffer    = NULL;
    ist->hwaccel_retrieve_data = NULL;

    av_buffer_unref(&ctx->hw_frames_ctx);
    av_frame_free(&ctx->tmp_frame);

    av_freep(&ist->hwaccel_ctx);
    av_freep(&s->hwaccel_context);
}

static int vdpau_get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;
    VDPAUContext *ctx = ist->hwaccel_ctx;

    return av_hwframe_get_buffer(ctx->hw_frames_ctx, frame, 0);
}

static int vdpau_retrieve_data(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    VDPAUContext *ctx = ist->hwaccel_ctx;
    int ret;

    ret = av_hwframe_transfer_data(ctx->tmp_frame, frame, 0);
    if (ret < 0)
        return ret;

    ret = av_frame_copy_props(ctx->tmp_frame, frame);
    if (ret < 0) {
        av_frame_unref(ctx->tmp_frame);
        return ret;
    }

    av_frame_unref(frame);
    av_frame_move_ref(frame, ctx->tmp_frame);

    return 0;
}

static int vdpau_alloc(AVCodecContext *s)
{
    InputStream *ist = s->opaque;
    int loglevel = (ist->hwaccel_id == HWACCEL_AUTO) ? AV_LOG_VERBOSE : AV_LOG_ERROR;
    VDPAUContext *ctx;
    int ret;

    AVBufferRef *device_ref = NULL;
    AVHWDeviceContext *device_ctx;
    AVVDPAUDeviceContext *device_hwctx;
    AVHWFramesContext *frames_ctx;

    ctx = av_mallocz(sizeof(*ctx));
    if (!ctx)
        return AVERROR(ENOMEM);

    ist->hwaccel_ctx           = ctx;
    ist->hwaccel_uninit        = vdpau_uninit;
    ist->hwaccel_get_buffer    = vdpau_get_buffer;
    ist->hwaccel_retrieve_data = vdpau_retrieve_data;

    ctx->tmp_frame = av_frame_alloc();
    if (!ctx->tmp_frame)
        goto fail;

    ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_VDPAU,
                                 ist->hwaccel_device, NULL, 0);
    if (ret < 0)
        goto fail;
    device_ctx   = (AVHWDeviceContext*)device_ref->data;
    device_hwctx = device_ctx->hwctx;

    ctx->hw_frames_ctx = av_hwframe_ctx_alloc(device_ref);
    if (!ctx->hw_frames_ctx)
        goto fail;
    av_buffer_unref(&device_ref);

    frames_ctx            = (AVHWFramesContext*)ctx->hw_frames_ctx->data;
    frames_ctx->format    = AV_PIX_FMT_VDPAU;
    frames_ctx->sw_format = s->sw_pix_fmt;
    frames_ctx->width     = s->coded_width;
    frames_ctx->height    = s->coded_height;

    ret = av_hwframe_ctx_init(ctx->hw_frames_ctx);
    if (ret < 0)
        goto fail;

    if (av_vdpau_bind_context(s, device_hwctx->device, device_hwctx->get_proc_address, 0))
        goto fail;

    av_log(NULL, AV_LOG_VERBOSE, "Using VDPAU to decode input stream #%d:%d.\n",
           ist->file_index, ist->st->index);

    return 0;

fail:
    av_log(NULL, loglevel, "VDPAU init failed for stream #%d:%d.\n",
           ist->file_index, ist->st->index);
    av_buffer_unref(&device_ref);
    vdpau_uninit(s);
    return AVERROR(EINVAL);
}

int vdpau_init(AVCodecContext *s)
{
    InputStream *ist = s->opaque;

    if (!ist->hwaccel_ctx) {
        int ret = vdpau_alloc(s);
        if (ret < 0)
            return ret;
    }

    ist->hwaccel_get_buffer    = vdpau_get_buffer;
    ist->hwaccel_retrieve_data = vdpau_retrieve_data;

    return 0;
}
@@ -187,6 +187,7 @@ enum {

typedef struct Decoder {
    AVPacket pkt;
    AVPacket pkt_temp;
    PacketQueue *queue;
    AVCodecContext *avctx;
    int pkt_serial;
@@ -349,7 +350,6 @@ static int nb_vfilters = 0;
static char *afilters = NULL;
#endif
static int autorotate = 1;
static int find_stream_info = 1;

/* current context */
static int is_full_screen;
@@ -361,34 +361,6 @@ static AVPacket flush_pkt;

static SDL_Window *window;
static SDL_Renderer *renderer;
static SDL_RendererInfo renderer_info = {0};
static SDL_AudioDeviceID audio_dev;

static const struct TextureFormatEntry {
    enum AVPixelFormat format;
    int texture_fmt;
} sdl_texture_format_map[] = {
    { AV_PIX_FMT_RGB8,           SDL_PIXELFORMAT_RGB332 },
    { AV_PIX_FMT_RGB444,         SDL_PIXELFORMAT_RGB444 },
    { AV_PIX_FMT_RGB555,         SDL_PIXELFORMAT_RGB555 },
    { AV_PIX_FMT_BGR555,         SDL_PIXELFORMAT_BGR555 },
    { AV_PIX_FMT_RGB565,         SDL_PIXELFORMAT_RGB565 },
    { AV_PIX_FMT_BGR565,         SDL_PIXELFORMAT_BGR565 },
    { AV_PIX_FMT_RGB24,          SDL_PIXELFORMAT_RGB24 },
    { AV_PIX_FMT_BGR24,          SDL_PIXELFORMAT_BGR24 },
    { AV_PIX_FMT_0RGB32,         SDL_PIXELFORMAT_RGB888 },
    { AV_PIX_FMT_0BGR32,         SDL_PIXELFORMAT_BGR888 },
    { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
    { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
    { AV_PIX_FMT_RGB32,          SDL_PIXELFORMAT_ARGB8888 },
    { AV_PIX_FMT_RGB32_1,        SDL_PIXELFORMAT_RGBA8888 },
    { AV_PIX_FMT_BGR32,          SDL_PIXELFORMAT_ABGR8888 },
    { AV_PIX_FMT_BGR32_1,        SDL_PIXELFORMAT_BGRA8888 },
    { AV_PIX_FMT_YUV420P,        SDL_PIXELFORMAT_IYUV },
    { AV_PIX_FMT_YUYV422,        SDL_PIXELFORMAT_YUY2 },
    { AV_PIX_FMT_UYVY422,        SDL_PIXELFORMAT_UYVY },
    { AV_PIX_FMT_NONE,           SDL_PIXELFORMAT_UNKNOWN },
};

#if CONFIG_AVFILTER
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
@@ -579,96 +551,88 @@ static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue,
    d->queue = queue;
    d->empty_queue_cond = empty_queue_cond;
    d->start_pts = AV_NOPTS_VALUE;
    d->pkt_serial = -1;
}

static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int ret = AVERROR(EAGAIN);
    int got_frame = 0;

    for (;;) {
        AVPacket pkt;
    do {
        int ret = -1;

        if (d->queue->serial == d->pkt_serial) {
            if (d->queue->abort_request)
                return -1;

        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->abort_request)
                    return -1;

                switch (d->avctx->codec_type) {
                    case AVMEDIA_TYPE_VIDEO:
                        ret = avcodec_receive_frame(d->avctx, frame);
                        if (ret >= 0) {
                            if (decoder_reorder_pts == -1) {
                                frame->pts = frame->best_effort_timestamp;
                            } else if (!decoder_reorder_pts) {
                                frame->pts = frame->pkt_dts;
                            }
                        }
                        break;
                    case AVMEDIA_TYPE_AUDIO:
                        ret = avcodec_receive_frame(d->avctx, frame);
                        if (ret >= 0) {
                            AVRational tb = (AVRational){1, frame->sample_rate};
                            if (frame->pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                            else if (d->next_pts != AV_NOPTS_VALUE)
                                frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                            if (frame->pts != AV_NOPTS_VALUE) {
                                d->next_pts = frame->pts + frame->nb_samples;
                                d->next_pts_tb = tb;
                            }
                        }
                        break;
                }
                if (ret == AVERROR_EOF) {
                    d->finished = d->pkt_serial;
                    avcodec_flush_buffers(d->avctx);
                    return 0;
                }
                if (ret >= 0)
                    return 1;
            } while (ret != AVERROR(EAGAIN));
        }

        do {
            if (d->queue->nb_packets == 0)
                SDL_CondSignal(d->empty_queue_cond);
            if (d->packet_pending) {
                av_packet_move_ref(&pkt, &d->pkt);
                d->packet_pending = 0;
            } else {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);
                if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                    return -1;
            }
        } while (d->queue->serial != d->pkt_serial);

        if (pkt.data == flush_pkt.data) {
            avcodec_flush_buffers(d->avctx);
            d->finished = 0;
            d->next_pts = d->start_pts;
            d->next_pts_tb = d->start_pts_tb;
        } else {
            if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                int got_frame = 0;
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
                if (ret < 0) {
                    ret = AVERROR(EAGAIN);
                } else {
                    if (got_frame && !pkt.data) {
                       d->packet_pending = 1;
                       av_packet_move_ref(&d->pkt, &pkt);
                    }
                    ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
                if (pkt.data == flush_pkt.data) {
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } else {
                if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
                    av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                    d->packet_pending = 1;
                    av_packet_move_ref(&d->pkt, &pkt);
                }
            }
            av_packet_unref(&pkt);
        } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;
            d->packet_pending = 1;
        }
    }

        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    if (decoder_reorder_pts == -1) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!decoder_reorder_pts) {
                        frame->pts = frame->pkt_dts;
                    }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                    if (frame->pts != AV_NOPTS_VALUE) {
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
        }

        if (ret < 0) {
            d->packet_pending = 0;
        } else {
            d->pkt_temp.dts =
            d->pkt_temp.pts = AV_NOPTS_VALUE;
            if (d->pkt_temp.data) {
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);

    return got_frame;
}

static void decoder_destroy(Decoder *d) {
@@ -848,7 +812,6 @@ static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_wid
            memset(pixels, 0, pitch * new_height);
            SDL_UnlockTexture(*texture);
        }
        av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
    }
    return 0;
}
@@ -884,33 +847,26 @@ static void calculate_display_rect(SDL_Rect *rect,
    rect->h = FFMAX(height, 1);
}

static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
{
    int i;
    *sdl_blendmode = SDL_BLENDMODE_NONE;
    *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
    if (format == AV_PIX_FMT_RGB32   ||
        format == AV_PIX_FMT_RGB32_1 ||
        format == AV_PIX_FMT_BGR32   ||
        format == AV_PIX_FMT_BGR32_1)
        *sdl_blendmode = SDL_BLENDMODE_BLEND;
    for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
        if (format == sdl_texture_format_map[i].format) {
            *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
            return;
        }
    }
}

static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
static int upload_texture(SDL_Texture *tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
    int ret = 0;
    Uint32 sdl_pix_fmt;
    SDL_BlendMode sdl_blendmode;
    get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
    if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
        return -1;
    switch (sdl_pix_fmt) {
        case SDL_PIXELFORMAT_UNKNOWN:
    switch (frame->format) {
        case AV_PIX_FMT_YUV420P:
            if (frame->linesize[0] < 0 || frame->linesize[1] < 0 || frame->linesize[2] < 0) {
                av_log(NULL, AV_LOG_ERROR, "Negative linesize is not supported for YUV.\n");
                return -1;
            }
            ret = SDL_UpdateYUVTexture(tex, NULL, frame->data[0], frame->linesize[0],
                                                  frame->data[1], frame->linesize[1],
                                                  frame->data[2], frame->linesize[2]);
            break;
        case AV_PIX_FMT_BGRA:
            if (frame->linesize[0] < 0) {
                ret = SDL_UpdateTexture(tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
            } else {
                ret = SDL_UpdateTexture(tex, NULL, frame->data[0], frame->linesize[0]);
            }
            break;
        default:
            /* This should only happen if we are not using avfilter... */
            *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
                frame->width, frame->height, frame->format, frame->width, frame->height,
@@ -918,37 +874,16 @@ static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext *
            if (*img_convert_ctx != NULL) {
                uint8_t *pixels[4];
                int pitch[4];
                if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
                if (!SDL_LockTexture(tex, NULL, (void **)pixels, pitch)) {
                    sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
                              0, frame->height, pixels, pitch);
                    SDL_UnlockTexture(*tex);
                    SDL_UnlockTexture(tex);
                }
            } else {
                av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                ret = -1;
            }
            break;
        case SDL_PIXELFORMAT_IYUV:
            if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
                ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
                                                       frame->data[1], frame->linesize[1],
                                                       frame->data[2], frame->linesize[2]);
            } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
                ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
                                                       frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
                                                       frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
            } else {
                av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
                return -1;
            }
            break;
        default:
            if (frame->linesize[0] < 0) {
                ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
            } else {
                ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
            }
            break;
    }
    return ret;
}
@@ -1008,7 +943,10 @@ static void video_image_display(VideoState *is)
    calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);

    if (!vp->uploaded) {
        if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
        int sdl_pix_fmt = vp->frame->format == AV_PIX_FMT_YUV420P ? SDL_PIXELFORMAT_YV12 : SDL_PIXELFORMAT_ARGB8888;
        if (realloc_texture(&is->vid_texture, sdl_pix_fmt, vp->frame->width, vp->frame->height, SDL_BLENDMODE_NONE, 0) < 0)
            return;
        if (upload_texture(is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
            return;
        vp->uploaded = 1;
        vp->flip_v = vp->frame->linesize[0] < 0;
@@ -1193,7 +1131,7 @@ static void stream_component_close(VideoState *is, int stream_index)
    switch (codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        decoder_abort(&is->auddec, &is->sampq);
        SDL_CloseAudioDevice(audio_dev);
        SDL_CloseAudio();
        decoder_destroy(&is->auddec);
        swr_free(&is->swr_ctx);
        av_freep(&is->audio_buf1);
@@ -1322,15 +1260,38 @@ static int video_open(VideoState *is)
        h = default_height;
    }

    if (!window_title)
        window_title = input_filename;
    SDL_SetWindowTitle(window, window_title);
    if (!window) {
        int flags = SDL_WINDOW_SHOWN;
        if (!window_title)
            window_title = input_filename;
        if (is_full_screen)
            flags |= SDL_WINDOW_FULLSCREEN_DESKTOP;
        if (borderless)
            flags |= SDL_WINDOW_BORDERLESS;
        else
            flags |= SDL_WINDOW_RESIZABLE;
        window = SDL_CreateWindow(window_title, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, w, h, flags);
        SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
        if (window) {
            SDL_RendererInfo info;
            renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
            if (!renderer) {
                av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
                renderer = SDL_CreateRenderer(window, -1, 0);
            }
            if (renderer) {
                if (!SDL_GetRendererInfo(renderer, &info))
                    av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", info.name);
            }
        }
    } else {
        SDL_SetWindowSize(window, w, h);
    }

    SDL_SetWindowSize(window, w, h);
    SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
    if (is_full_screen)
        SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
    SDL_ShowWindow(window);
    if (!window || !renderer) {
        av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
        do_exit(is);
    }

    is->width  = w;
    is->height = h;
@@ -1341,7 +1302,7 @@ static int video_open(VideoState *is)
/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if (!is->width)
    if (!window)
        video_open(is);

    SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
@@ -1821,7 +1782,7 @@ fail:

static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
{
    enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_BGRA, AV_PIX_FMT_NONE };
    char sws_flags_str[512] = "";
    char buffersrc_args[256];
    int ret;
@@ -1829,18 +1790,6 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
    AVCodecParameters *codecpar = is->video_st->codecpar;
    AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
    AVDictionaryEntry *e = NULL;
    int nb_pix_fmts = 0;
    int i, j;

    for (i = 0; i < renderer_info.num_texture_formats; i++) {
        for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
            if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
                pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
                break;
            }
        }
    }
    pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;

    while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
        if (!strcmp(e->key, "sws_flags")) {
@@ -2025,11 +1974,11 @@ static int audio_thread(void *arg)
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                                   frame->format, frame->channels) ||
                                   frame->format, av_frame_get_channels(frame)) ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec.pkt_serial               != last_serial;
@@ -2041,10 +1990,10 @@ static int audio_thread(void *arg)
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                           frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
                           frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);

                    is->audio_filter_src.fmt            = frame->format;
                    is->audio_filter_src.channels       = frame->channels;
                    is->audio_filter_src.channels       = av_frame_get_channels(frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    last_serial                         = is->auddec.pkt_serial;
@@ -2063,7 +2012,7 @@ static int audio_thread(void *arg)
                        goto the_end;

                    af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                    af->pos = frame->pkt_pos;
                    af->pos = av_frame_get_pkt_pos(frame);
                    af->serial = is->auddec.pkt_serial;
                    af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

@@ -2190,7 +2139,7 @@ static int video_thread(void *arg)
#endif
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
            ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
@@ -2334,13 +2283,13 @@ static int audio_decode_frame(VideoState *is)
        frame_queue_next(&is->sampq);
    } while (af->serial != is->audioq.serial);

    data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
    data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
                                           af->frame->nb_samples,
                                           af->frame->format, 1);

    dec_channel_layout =
        (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
        af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
        (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
        af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
    wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);

    if (af->frame->format        != is->audio_src.fmt ||
@@ -2355,13 +2304,13 @@ static int audio_decode_frame(VideoState *is)
        if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
                    af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->channels,
                    af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
                    is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
            swr_free(&is->swr_ctx);
            return -1;
        }
        is->audio_src.channel_layout = dec_channel_layout;
        is->audio_src.channels       = af->frame->channels;
        is->audio_src.channels       = av_frame_get_channels(af->frame);
        is->audio_src.freq = af->frame->sample_rate;
        is->audio_src.fmt = af->frame->format;
    }
@@ -2452,7 +2401,7 @@ static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
        else {
            memset(stream, 0, len1);
            if (!is->muted && is->audio_buf)
                SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
                SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
        }
        len -= len1;
        stream += len1;
@@ -2497,7 +2446,7 @@ static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb
    wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
    wanted_spec.callback = sdl_audio_callback;
    wanted_spec.userdata = opaque;
    while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
    while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
        av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
               wanted_spec.channels, wanted_spec.freq, SDL_GetError());
        wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
@@ -2667,7 +2616,7 @@ static int stream_component_open(VideoState *is, int stream_index)
        }
        if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
            goto out;
        SDL_PauseAudioDevice(audio_dev, 0);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
@@ -2739,6 +2688,8 @@ static int read_thread(void *arg)
    int64_t stream_start_time;
    int pkt_in_play_range = 0;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int orig_nb_streams;
    SDL_mutex *wait_mutex = SDL_CreateMutex();
    int scan_all_pmts_set = 0;
    int64_t pkt_ts;
@@ -2788,22 +2739,20 @@ static int read_thread(void *arg)

    av_format_inject_global_side_data(ic);

    if (find_stream_info) {
        AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
        int orig_nb_streams = ic->nb_streams;
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

        err = avformat_find_stream_info(ic, opts);
    err = avformat_find_stream_info(ic, opts);

        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

        if (err < 0) {
            av_log(NULL, AV_LOG_WARNING,
                   "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    if (err < 0) {
        av_log(NULL, AV_LOG_WARNING,
               "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }

    if (ic->pb)
@@ -2964,8 +2913,8 @@ static int read_thread(void *arg)
        }
        if (is->queue_attachments_req) {
            if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
                AVPacket copy = { 0 };
                if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
                AVPacket copy;
                if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
                    goto fail;
                packet_queue_put(&is->videoq, &copy);
                packet_queue_put_nullpacket(&is->videoq, is->video_stream);
@@ -3556,7 +3505,7 @@ static int opt_codec(void *optctx, const char *opt, const char *arg)
static int dummy;

static const OptionDef options[] = {
    CMDUTILS_COMMON_OPTIONS
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
    { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
@@ -3601,8 +3550,6 @@ static const OptionDef options[] = {
    { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
    { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
    { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
    { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },
    { NULL, },
};

@@ -3738,31 +3685,6 @@ int main(int argc, char **argv)
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)&flush_pkt;

    if (!display_disable) {
        int flags = SDL_WINDOW_HIDDEN;
        if (borderless)
            flags |= SDL_WINDOW_BORDERLESS;
        else
            flags |= SDL_WINDOW_RESIZABLE;
        window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
        SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
        if (window) {
            renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
            if (!renderer) {
                av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
                renderer = SDL_CreateRenderer(window, -1, 0);
            }
            if (renderer) {
                if (!SDL_GetRendererInfo(renderer, &renderer_info))
                    av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
            }
        }
        if (!window || !renderer || !renderer_info.num_texture_formats) {
            av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
            do_exit(NULL);
        }
    }

    is = stream_open(input_filename, file_iformat);
    if (!is) {
        av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
@@ -35,7 +35,6 @@
#include "libavutil/bprint.h"
#include "libavutil/display.h"
#include "libavutil/hash.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/spherical.h"
@@ -58,11 +57,11 @@
# ifdef pthread_mutex_lock
#  undef pthread_mutex_lock
# endif
# define pthread_mutex_lock(a) do{}while(0)
# define pthread_mutex_lock(a)
# ifdef pthread_mutex_unlock
#  undef pthread_mutex_unlock
# endif
# define pthread_mutex_unlock(a) do{}while(0)
# define pthread_mutex_unlock(a)
#endif

typedef struct InputStream {
@@ -130,8 +129,6 @@ typedef struct ReadInterval {
static ReadInterval *read_intervals;
static int read_intervals_nb = 0;

static int find_stream_info = 1;

/* section structure definition */

#define SECTION_MAX_NB_CHILDREN 10
@@ -1896,86 +1893,12 @@ static void print_pkt_side_data(WriterContext *w,
            print_int("discard_padding", AV_RL32(sd->data + 4));
            print_int("skip_reason", AV_RL8(sd->data + 8));
            print_int("discard_reason", AV_RL8(sd->data + 9));
        } else if (sd->type == AV_PKT_DATA_MASTERING_DISPLAY_METADATA) {
            AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data;

            if (metadata->has_primaries) {
                print_q("red_x", metadata->display_primaries[0][0], '/');
                print_q("red_y", metadata->display_primaries[0][1], '/');
                print_q("green_x", metadata->display_primaries[1][0], '/');
                print_q("green_y", metadata->display_primaries[1][1], '/');
                print_q("blue_x", metadata->display_primaries[2][0], '/');
                print_q("blue_y", metadata->display_primaries[2][1], '/');

                print_q("white_point_x", metadata->white_point[0], '/');
                print_q("white_point_y", metadata->white_point[1], '/');
            }

            if (metadata->has_luminance) {
                print_q("min_luminance", metadata->min_luminance, '/');
                print_q("max_luminance", metadata->max_luminance, '/');
            }
        } else if (sd->type == AV_PKT_DATA_CONTENT_LIGHT_LEVEL) {
            AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data;
            print_int("max_content", metadata->MaxCLL);
            print_int("max_average", metadata->MaxFALL);
        }
        writer_print_section_footer(w);
    }
    writer_print_section_footer(w);
}

static void print_color_range(WriterContext *w, enum AVColorRange color_range)
{
    const char *val = av_color_range_name(color_range);
    if (!val || color_range == AVCOL_RANGE_UNSPECIFIED) {
        print_str_opt("color_range", "unknown");
    } else {
        print_str("color_range", val);
    }
}

static void print_color_space(WriterContext *w, enum AVColorSpace color_space)
{
    const char *val = av_color_space_name(color_space);
    if (!val || color_space == AVCOL_SPC_UNSPECIFIED) {
        print_str_opt("color_space", "unknown");
    } else {
        print_str("color_space", val);
    }
}

static void print_primaries(WriterContext *w, enum AVColorPrimaries color_primaries)
{
    const char *val = av_color_primaries_name(color_primaries);
    if (!val || color_primaries == AVCOL_PRI_UNSPECIFIED) {
        print_str_opt("color_primaries", "unknown");
    } else {
        print_str("color_primaries", val);
    }
}

static void print_color_trc(WriterContext *w, enum AVColorTransferCharacteristic color_trc)
{
    const char *val = av_color_transfer_name(color_trc);
    if (!val || color_trc == AVCOL_TRC_UNSPECIFIED) {
        print_str_opt("color_transfer", "unknown");
    } else {
        print_str("color_transfer", val);
    }
}

static void print_chroma_location(WriterContext *w, enum AVChromaLocation chroma_location)
{
    const char *val = av_chroma_location_name(chroma_location);
    if (!val || chroma_location == AVCHROMA_LOC_UNSPECIFIED) {
        print_str_opt("chroma_location", "unspecified");
    } else {
        print_str("chroma_location", val);
    }
}


static void clear_log(int need_lock)
{
    int i;
@@ -2126,13 +2049,13 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
    print_time("pkt_pts_time", frame->pts, &stream->time_base);
    print_ts  ("pkt_dts",   frame->pkt_dts);
    print_time("pkt_dts_time",  frame->pkt_dts, &stream->time_base);
    print_ts  ("best_effort_timestamp", frame->best_effort_timestamp);
    print_time("best_effort_timestamp_time", frame->best_effort_timestamp, &stream->time_base);
    print_duration_ts  ("pkt_duration", frame->pkt_duration);
    print_duration_time("pkt_duration_time", frame->pkt_duration, &stream->time_base);
    if (frame->pkt_pos != -1) print_fmt    ("pkt_pos", "%"PRId64, frame->pkt_pos);
    print_ts  ("best_effort_timestamp", av_frame_get_best_effort_timestamp(frame));
    print_time("best_effort_timestamp_time", av_frame_get_best_effort_timestamp(frame), &stream->time_base);
    print_duration_ts  ("pkt_duration", av_frame_get_pkt_duration(frame));
    print_duration_time("pkt_duration_time", av_frame_get_pkt_duration(frame), &stream->time_base);
    if (av_frame_get_pkt_pos (frame) != -1) print_fmt    ("pkt_pos", "%"PRId64, av_frame_get_pkt_pos(frame));
    else                      print_str_opt("pkt_pos", "N/A");
    if (frame->pkt_size != -1) print_val    ("pkt_size", frame->pkt_size, unit_byte_str);
    if (av_frame_get_pkt_size(frame) != -1) print_val    ("pkt_size", av_frame_get_pkt_size(frame), unit_byte_str);
    else                       print_str_opt("pkt_size", "N/A");

    switch (stream->codecpar->codec_type) {
@@ -2156,12 +2079,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
        print_int("interlaced_frame",       frame->interlaced_frame);
        print_int("top_field_first",        frame->top_field_first);
        print_int("repeat_pict",            frame->repeat_pict);

        print_color_range(w, frame->color_range);
        print_color_space(w, frame->colorspace);
        print_primaries(w, frame->color_primaries);
        print_color_trc(w, frame->color_trc);
        print_chroma_location(w, frame->chroma_location);
        break;

    case AVMEDIA_TYPE_AUDIO:
@@ -2169,18 +2086,18 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
        if (s) print_str    ("sample_fmt", s);
        else   print_str_opt("sample_fmt", "unknown");
        print_int("nb_samples",         frame->nb_samples);
        print_int("channels", frame->channels);
        if (frame->channel_layout) {
        print_int("channels", av_frame_get_channels(frame));
        if (av_frame_get_channel_layout(frame)) {
            av_bprint_clear(&pbuf);
            av_bprint_channel_layout(&pbuf, frame->channels,
                                     frame->channel_layout);
            av_bprint_channel_layout(&pbuf, av_frame_get_channels(frame),
                                     av_frame_get_channel_layout(frame));
            print_str    ("channel_layout", pbuf.str);
        } else
            print_str_opt("channel_layout", "unknown");
        break;
    }
    if (do_show_frame_tags)
        show_tags(w, frame->metadata, SECTION_ID_FRAME_TAGS);
        show_tags(w, av_frame_get_metadata(frame), SECTION_ID_FRAME_TAGS);
    if (do_show_log)
        show_log(w, SECTION_ID_FRAME_LOGS, SECTION_ID_FRAME_LOG, do_show_log);
    if (frame->nb_side_data) {
@@ -2199,34 +2116,6 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
                char tcbuf[AV_TIMECODE_STR_SIZE];
                av_timecode_make_mpeg_tc_string(tcbuf, *(int64_t *)(sd->data));
                print_str("timecode", tcbuf);
            } else if (sd->type == AV_FRAME_DATA_MASTERING_DISPLAY_METADATA) {
                AVMasteringDisplayMetadata *metadata = (AVMasteringDisplayMetadata *)sd->data;

                if (metadata->has_primaries) {
                    print_q("red_x", metadata->display_primaries[0][0], '/');
                    print_q("red_y", metadata->display_primaries[0][1], '/');
                    print_q("green_x", metadata->display_primaries[1][0], '/');
                    print_q("green_y", metadata->display_primaries[1][1], '/');
                    print_q("blue_x", metadata->display_primaries[2][0], '/');
                    print_q("blue_y", metadata->display_primaries[2][1], '/');

                    print_q("white_point_x", metadata->white_point[0], '/');
                    print_q("white_point_y", metadata->white_point[1], '/');
                }

                if (metadata->has_luminance) {
                    print_q("min_luminance", metadata->min_luminance, '/');
                    print_q("max_luminance", metadata->max_luminance, '/');
                }
            } else if (sd->type == AV_FRAME_DATA_CONTENT_LIGHT_LEVEL) {
                AVContentLightMetadata *metadata = (AVContentLightMetadata *)sd->data;
                print_int("max_content", metadata->MaxCLL);
                print_int("max_average", metadata->MaxFALL);
            } else if (sd->type == AV_FRAME_DATA_ICC_PROFILE) {
                AVDictionaryEntry *tag = av_dict_get(sd->metadata, "name", NULL, AV_DICT_MATCH_CASE);
                if (tag)
                    print_str(tag->key, tag->value);
                print_int("size", sd->size);
            }
            writer_print_section_footer(w);
        }
@@ -2241,8 +2130,7 @@ static void show_frame(WriterContext *w, AVFrame *frame, AVStream *stream,
|
||||
|
||||
static av_always_inline int process_frame(WriterContext *w,
|
||||
InputFile *ifile,
|
||||
AVFrame *frame, AVPacket *pkt,
|
||||
int *packet_new)
|
||||
AVFrame *frame, AVPacket *pkt)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = ifile->fmt_ctx;
|
||||
AVCodecContext *dec_ctx = ifile->streams[pkt->stream_index].dec_ctx;
|
||||
@@ -2254,39 +2142,24 @@ static av_always_inline int process_frame(WriterContext *w,
|
||||
if (dec_ctx && dec_ctx->codec) {
|
||||
switch (par->codec_type) {
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, pkt);
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
if (*packet_new) {
|
||||
ret = avcodec_send_packet(dec_ctx, pkt);
|
||||
if (ret == AVERROR(EAGAIN)) {
|
||||
ret = 0;
|
||||
} else if (ret >= 0 || ret == AVERROR_EOF) {
|
||||
ret = 0;
|
||||
*packet_new = 0;
|
||||
}
|
||||
}
|
||||
if (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec_ctx, frame);
|
||||
if (ret >= 0) {
|
||||
got_frame = 1;
|
||||
} else if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_SUBTITLE:
|
||||
ret = avcodec_decode_subtitle2(dec_ctx, &sub, &got_frame, pkt);
|
||||
*packet_new = 0;
|
||||
break;
|
||||
default:
|
||||
*packet_new = 0;
|
||||
}
|
||||
} else {
|
||||
*packet_new = 0;
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ret = FFMIN(ret, pkt->size); /* guard against bogus return values */
|
||||
pkt->data += ret;
|
||||
pkt->size -= ret;
|
||||
if (got_frame) {
|
||||
int is_sub = (par->codec_type == AVMEDIA_TYPE_SUBTITLE);
|
||||
nb_streams_frames[pkt->stream_index]++;
|
||||
@@ -2298,7 +2171,7 @@ static av_always_inline int process_frame(WriterContext *w,
|
||||
if (is_sub)
|
||||
avsubtitle_free(&sub);
|
||||
}
|
||||
return got_frame || *packet_new;
|
||||
return got_frame;
|
||||
}
|
||||
|
||||
static void log_read_interval(const ReadInterval *interval, void *log_ctx, int log_level)
@@ -2329,7 +2202,7 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
                                 const ReadInterval *interval, int64_t *cur_ts)
{
    AVFormatContext *fmt_ctx = ifile->fmt_ctx;
    AVPacket pkt;
    AVPacket pkt, pkt1;
    AVFrame *frame = NULL;
    int ret = 0, i = 0, frame_count = 0;
    int64_t start = -INT64_MAX, end = interval->end;
@@ -2406,8 +2279,8 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
            nb_streams_packets[pkt.stream_index]++;
        }
        if (do_read_frames) {
            int packet_new = 1;
            while (process_frame(w, ifile, frame, &pkt, &packet_new) > 0);
            pkt1 = pkt;
            while (pkt1.size && process_frame(w, ifile, frame, &pkt1) > 0);
        }
    }
    av_packet_unref(&pkt);
@@ -2419,7 +2292,7 @@ static int read_interval_packets(WriterContext *w, InputFile *ifile,
    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        pkt.stream_index = i;
        if (do_read_frames)
            while (process_frame(w, ifile, frame, &pkt, &(int){1}) > 0);
            while (process_frame(w, ifile, frame, &pkt) > 0);
    }

end:
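In the last hunk above, the newer side loops once per stream with an empty packet after demuxing ends so the decoder can drain buffered frames; &(int){1} is a C99 compound literal passing a throwaway "fresh packet" flag. A sketch of the same idea in isolation, with w, ifile, frame and the loop index i assumed from the surrounding function:

    /* Sketch only: drain a decoder at EOF with an empty flush packet. */
    AVPacket flush_pkt;
    av_init_packet(&flush_pkt);
    flush_pkt.data = NULL;   /* data == NULL && size == 0 => flush */
    flush_pkt.size = 0;
    flush_pkt.stream_index = i;
    while (process_frame(w, ifile, frame, &flush_pkt, &(int){1}) > 0)
        ;   /* keep pulling until no more frames come out */
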
@@ -2533,12 +2406,29 @@ static int show_stream(WriterContext *w, AVFormatContext *fmt_ctx, int stream_id
        if (s) print_str    ("pix_fmt", s);
        else   print_str_opt("pix_fmt", "unknown");
        print_int("level",   par->level);
        if (par->color_range != AVCOL_RANGE_UNSPECIFIED)
            print_str    ("color_range", av_color_range_name(par->color_range));
        else
            print_str_opt("color_range", "N/A");

        print_color_range(w, par->color_range);
        print_color_space(w, par->color_space);
        print_color_trc(w, par->color_trc);
        print_primaries(w, par->color_primaries);
        print_chroma_location(w, par->chroma_location);
        s = av_get_colorspace_name(par->color_space);
        if (s) print_str    ("color_space", s);
        else   print_str_opt("color_space", "unknown");

        if (par->color_trc != AVCOL_TRC_UNSPECIFIED)
            print_str("color_transfer", av_color_transfer_name(par->color_trc));
        else
            print_str_opt("color_transfer", av_color_transfer_name(par->color_trc));

        if (par->color_primaries != AVCOL_PRI_UNSPECIFIED)
            print_str("color_primaries", av_color_primaries_name(par->color_primaries));
        else
            print_str_opt("color_primaries", av_color_primaries_name(par->color_primaries));

        if (par->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
            print_str("chroma_location", av_chroma_location_name(par->chroma_location));
        else
            print_str_opt("chroma_location", av_chroma_location_name(par->chroma_location));

        if (par->field_order == AV_FIELD_PROGRESSIVE)
            print_str("field_order", "progressive");
@@ -2817,9 +2707,10 @@ static void show_error(WriterContext *w, int err)

static int open_input_file(InputFile *ifile, const char *filename)
{
    int err, i;
    int err, i, orig_nb_streams;
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *t;
    AVDictionary **opts;
    int scan_all_pmts_set = 0;

    fmt_ctx = avformat_alloc_context();
@@ -2847,20 +2738,19 @@ static int open_input_file(InputFile *ifile, const char *filename)
        return AVERROR_OPTION_NOT_FOUND;
    }

    if (find_stream_info) {
        AVDictionary **opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
        int orig_nb_streams = fmt_ctx->nb_streams;
    /* fill the streams in the format context */
    opts = setup_find_stream_info_opts(fmt_ctx, codec_opts);
    orig_nb_streams = fmt_ctx->nb_streams;

        err = avformat_find_stream_info(fmt_ctx, opts);
    err = avformat_find_stream_info(fmt_ctx, opts);

        for (i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

        if (err < 0) {
            print_error(filename, err);
            return err;
        }
    if (err < 0) {
        print_error(filename, err);
        return err;
    }

    av_dump_format(fmt_ctx, 0, filename, 0);
@@ -2980,8 +2870,6 @@ static int probe_file(WriterContext *wctx, const char *filename)
        } else {
            selected_streams[i] = 1;
        }
        if (!selected_streams[i])
            ifile.fmt_ctx->streams[i]->discard = AVDISCARD_ALL;
    }

    if (do_read_frames || do_read_packets) {
@@ -3342,7 +3230,6 @@ static int parse_read_interval(const char *interval_spec,
        }
        interval->end = lli;
    } else {
        interval->duration_frames = 0;
        ret = av_parse_time(&us, p, 1);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Invalid interval end/duration specification '%s'\n", p);
@@ -3476,7 +3363,7 @@ DEFINE_OPT_SHOW_SECTION(streams, STREAMS)
DEFINE_OPT_SHOW_SECTION(programs, PROGRAMS)

static const OptionDef real_options[] = {
    CMDUTILS_COMMON_OPTIONS
#include "cmdutils_common_opts.h"
    { "f", HAS_ARG, {.func_arg = opt_format}, "force format", "format" },
    { "unit", OPT_BOOL, {&show_value_unit}, "show unit of the displayed values" },
    { "prefix", OPT_BOOL, {&use_value_prefix}, "use SI prefixes for the displayed values" },
@@ -3519,8 +3406,6 @@ static const OptionDef real_options[] = {
    { "read_intervals", HAS_ARG, {.func_arg = opt_read_intervals}, "set read intervals", "read_intervals" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {.func_arg = opt_default}, "generic catch all option", "" },
    { "i", HAS_ARG, {.func_arg = opt_input_file_i}, "read specified file", "input_file"},
    { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
        "read and decode the streams to fill missing information with heuristics" },
    { NULL, },
};

@@ -476,7 +476,7 @@ static int compute_datarate(DataRateData *drd, int64_t count)
static void start_children(FFServerStream *feed)
{
    char *pathname;
    char *dirname, *prog;
    char *slash;
    int i;
    size_t cmd_length;

@@ -495,18 +495,22 @@ static void start_children(FFServerStream *feed)
        return;
    }

    /* use "ffmpeg" in the path of current program. Ignore user provided path */
    prog = av_strdup(my_program_name);
    if (prog) {
        dirname = av_dirname(prog);
        pathname = *dirname ? av_asprintf("%s/%s", dirname, "ffmpeg")
                            : av_asprintf("ffmpeg");
        av_free(prog);
    slash = strrchr(my_program_name, '/');
    if (!slash) {
        pathname = av_mallocz(sizeof("ffmpeg"));
    } else {
        pathname = av_mallocz(slash - my_program_name + sizeof("ffmpeg"));
        if (pathname != NULL) {
            memcpy(pathname, my_program_name, slash - my_program_name);
        }
    }
    if (!prog || !pathname) {
    if (!pathname) {
        http_log("Could not allocate memory for children cmd line\n");
        return;
    }
    /* use "ffmpeg" in the path of current program. Ignore user provided path */

    strcat(pathname, "ffmpeg");

    for (; feed; feed = feed->next) {

@@ -1912,7 +1916,7 @@ static inline void print_stream_params(AVIOContext *pb, FFServerStream *stream)

        avio_printf(pb, "<tr><td>%d<td>%s<td>%"PRId64
                        "<td>%s<td>%s\n",
                    i, type, st->codecpar->bit_rate/1000,
                    i, type, (int64_t)st->codecpar->bit_rate/1000,
                    codec ? codec->name : "", parameters);
    }

@@ -3946,7 +3950,7 @@ void show_help_default(const char *opt, const char *arg)
}

static const OptionDef options[] = {
    CMDUTILS_COMMON_OPTIONS
#include "cmdutils_common_opts.h"
    { "n", OPT_BOOL, {(void *)&no_launch }, "enable no-launch mode" },
    { "d", 0, {(void*)opt_debug}, "enable debug mode" },
    { "f", HAS_ARG | OPT_STRING, {(void*)&config.filename }, "use configfile instead of /etc/ffserver.conf", "configfile" },
@@ -18,8 +18,8 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef FFTOOLS_FFSERVER_CONFIG_H
#define FFTOOLS_FFSERVER_CONFIG_H
#ifndef FFSERVER_CONFIG_H
#define FFSERVER_CONFIG_H

#define FFM_PACKET_SIZE 4096

@@ -152,4 +152,4 @@ int ffserver_parse_ffconfig(const char *filename, FFServerConfig *config);

void ffserver_free_child_args(void *argsp);

#endif /* FFTOOLS_FFSERVER_CONFIG_H */
#endif /* FFSERVER_CONFIG_H */
@@ -1,57 +0,0 @@
AVPROGS-$(CONFIG_FFMPEG)   += ffmpeg
AVPROGS-$(CONFIG_FFPLAY)   += ffplay
AVPROGS-$(CONFIG_FFPROBE)  += ffprobe
AVPROGS-$(CONFIG_FFSERVER) += ffserver

AVPROGS := $(AVPROGS-yes:%=%$(PROGSSUF)$(EXESUF))
PROGS   += $(AVPROGS)

AVBASENAMES  = ffmpeg ffplay ffprobe ffserver
ALLAVPROGS   = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))

OBJS-ffmpeg                        += fftools/ffmpeg_opt.o fftools/ffmpeg_filter.o fftools/ffmpeg_hw.o
OBJS-ffmpeg-$(CONFIG_CUVID)        += fftools/ffmpeg_cuvid.o
OBJS-ffmpeg-$(CONFIG_LIBMFX)       += fftools/ffmpeg_qsv.o
ifndef CONFIG_VIDEOTOOLBOX
OBJS-ffmpeg-$(CONFIG_VDA)          += fftools/ffmpeg_videotoolbox.o
endif
OBJS-ffmpeg-$(CONFIG_VIDEOTOOLBOX) += fftools/ffmpeg_videotoolbox.o
OBJS-ffserver                      += fftools/ffserver_config.o

define DOFFTOOL
OBJS-$(1) += fftools/cmdutils.o fftools/$(1).o $(OBJS-$(1)-yes)
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): | fftools
$$(OBJS-$(1)): CFLAGS  += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(EXTRALIBS-$(1))
-include $$(OBJS-$(1):.o=.d)
endef

$(foreach P,$(AVPROGS-yes),$(eval OBJS-$(P)-$(CONFIG_OPENCL) += fftools/cmdutils_opencl.o))
$(foreach P,$(AVPROGS-yes),$(eval $(call DOFFTOOL,$(P))))

all: $(AVPROGS)

fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools
OBJDIRS += fftools

ifdef AVPROGS
install: install-progs install-data
endif

install-progs-yes:
install-progs-$(CONFIG_SHARED): install-libs

install-progs: install-progs-yes $(AVPROGS)
	$(Q)mkdir -p "$(BINDIR)"
	$(INSTALL) -c -m 755 $(AVPROGS) "$(BINDIR)"

uninstall: uninstall-progs

uninstall-progs:
	$(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))

clean::
	$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G) $(CLEANSUFFIXES:%=fftools/%)
@@ -1,385 +0,0 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>

#include "libavutil/avstring.h"

#include "ffmpeg.h"

static int nb_hw_devices;
static HWDevice **hw_devices;

static HWDevice *hw_device_get_by_type(enum AVHWDeviceType type)
{
    HWDevice *found = NULL;
    int i;
    for (i = 0; i < nb_hw_devices; i++) {
        if (hw_devices[i]->type == type) {
            if (found)
                return NULL;
            found = hw_devices[i];
        }
    }
    return found;
}

HWDevice *hw_device_get_by_name(const char *name)
{
    int i;
    for (i = 0; i < nb_hw_devices; i++) {
        if (!strcmp(hw_devices[i]->name, name))
            return hw_devices[i];
    }
    return NULL;
}

static HWDevice *hw_device_add(void)
{
    int err;
    err = av_reallocp_array(&hw_devices, nb_hw_devices + 1,
                            sizeof(*hw_devices));
    if (err) {
        nb_hw_devices = 0;
        return NULL;
    }
    hw_devices[nb_hw_devices] = av_mallocz(sizeof(HWDevice));
    if (!hw_devices[nb_hw_devices])
        return NULL;
    return hw_devices[nb_hw_devices++];
}

int hw_device_init_from_string(const char *arg, HWDevice **dev_out)
{
    // "type=name:device,key=value,key2=value2"
    // "type:device,key=value,key2=value2"
    //   -> av_hwdevice_ctx_create()
    // "type=name@name"
    // "type@name"
    //   -> av_hwdevice_ctx_create_derived()

    AVDictionary *options = NULL;
    char *type_name = NULL, *name = NULL, *device = NULL;
    enum AVHWDeviceType type;
    HWDevice *dev, *src;
    AVBufferRef *device_ref = NULL;
    int err;
    const char *errmsg, *p, *q;
    size_t k;

    k = strcspn(arg, ":=@");
    p = arg + k;

    type_name = av_strndup(arg, k);
    if (!type_name) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    type = av_hwdevice_find_type_by_name(type_name);
    if (type == AV_HWDEVICE_TYPE_NONE) {
        errmsg = "unknown device type";
        goto invalid;
    }

    if (*p == '=') {
        k = strcspn(p + 1, ":@");

        name = av_strndup(p + 1, k);
        if (!name) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        if (hw_device_get_by_name(name)) {
            errmsg = "named device already exists";
            goto invalid;
        }

        p += 1 + k;
    } else {
        // Give the device an automatic name of the form "type%d".
        // We arbitrarily limit at 1000 anonymous devices of the same
        // type - there is probably something else very wrong if you
        // get to this limit.
        size_t index_pos;
        int index, index_limit = 1000;
        index_pos = strlen(type_name);
        name = av_malloc(index_pos + 4);
        if (!name) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        for (index = 0; index < index_limit; index++) {
            snprintf(name, index_pos + 4, "%s%d", type_name, index);
            if (!hw_device_get_by_name(name))
                break;
        }
        if (index >= index_limit) {
            errmsg = "too many devices";
            goto invalid;
        }
    }

    if (!*p) {
        // New device with no parameters.
        err = av_hwdevice_ctx_create(&device_ref, type,
                                     NULL, NULL, 0);
        if (err < 0)
            goto fail;

    } else if (*p == ':') {
        // New device with some parameters.
        ++p;
        q = strchr(p, ',');
        if (q) {
            device = av_strndup(p, q - p);
            if (!device) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
            err = av_dict_parse_string(&options, q + 1, "=", ",", 0);
            if (err < 0) {
                errmsg = "failed to parse options";
                goto invalid;
            }
        }

        err = av_hwdevice_ctx_create(&device_ref, type,
                                     device ? device : p, options, 0);
        if (err < 0)
            goto fail;

    } else if (*p == '@') {
        // Derive from existing device.

        src = hw_device_get_by_name(p + 1);
        if (!src) {
            errmsg = "invalid source device name";
            goto invalid;
        }

        err = av_hwdevice_ctx_create_derived(&device_ref, type,
                                             src->device_ref, 0);
        if (err < 0)
            goto fail;
    } else {
        errmsg = "parse error";
        goto invalid;
    }

    dev = hw_device_add();
    if (!dev) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    dev->name = name;
    dev->type = type;
    dev->device_ref = device_ref;

    if (dev_out)
        *dev_out = dev;

    name = NULL;
    err = 0;
done:
    av_freep(&type_name);
    av_freep(&name);
    av_freep(&device);
    av_dict_free(&options);
    return err;
invalid:
    av_log(NULL, AV_LOG_ERROR,
           "Invalid device specification \"%s\": %s\n", arg, errmsg);
    err = AVERROR(EINVAL);
    goto done;
fail:
    av_log(NULL, AV_LOG_ERROR,
           "Device creation failed: %d.\n", err);
    av_buffer_unref(&device_ref);
    goto done;
}

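The grammar in the comment at the top of hw_device_init_from_string() is easiest to read with concrete inputs. The strings below are illustrative assumptions (any device type built into the binary works the same way):

    /* Illustrative device specifications accepted by the parser above:
     *   "cuda"                          - anonymous device, auto-named "cuda0"
     *   "vaapi=va:/dev/dri/renderD128"  - VAAPI device named "va" on a path
     *   "opencl=ocl@va"                 - OpenCL device "ocl" derived from "va"
     * The first two reduce to av_hwdevice_ctx_create(), the last one to
     * av_hwdevice_ctx_create_derived().  From code, for example: */
    HWDevice *dev = NULL;
    int err = hw_device_init_from_string("cuda=gpu", &dev);
    if (err < 0) {
        /* invalid specification or device creation failure */
    }
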
void hw_device_free_all(void)
{
    int i;
    for (i = 0; i < nb_hw_devices; i++) {
        av_freep(&hw_devices[i]->name);
        av_buffer_unref(&hw_devices[i]->device_ref);
        av_freep(&hw_devices[i]);
    }
    av_freep(&hw_devices);
    nb_hw_devices = 0;
}

static enum AVHWDeviceType hw_device_match_type_by_hwaccel(enum HWAccelID hwaccel_id)
{
    int i;
    if (hwaccel_id == HWACCEL_NONE)
        return AV_HWDEVICE_TYPE_NONE;
    for (i = 0; hwaccels[i].name; i++) {
        if (hwaccels[i].id == hwaccel_id)
            return hwaccels[i].device_type;
    }
    return AV_HWDEVICE_TYPE_NONE;
}

static enum AVHWDeviceType hw_device_match_type_in_name(const char *codec_name)
{
    const char *type_name;
    enum AVHWDeviceType type;
    for (type = av_hwdevice_iterate_types(AV_HWDEVICE_TYPE_NONE);
         type != AV_HWDEVICE_TYPE_NONE;
         type = av_hwdevice_iterate_types(type)) {
        type_name = av_hwdevice_get_type_name(type);
        if (strstr(codec_name, type_name))
            return type;
    }
    return AV_HWDEVICE_TYPE_NONE;
}

int hw_device_setup_for_decode(InputStream *ist)
{
    enum AVHWDeviceType type;
    HWDevice *dev;
    int err;

    if (ist->hwaccel_device) {
        dev = hw_device_get_by_name(ist->hwaccel_device);
        if (!dev) {
            char *tmp;
            type = hw_device_match_type_by_hwaccel(ist->hwaccel_id);
            if (type == AV_HWDEVICE_TYPE_NONE) {
                // No match - this isn't necessarily invalid, though,
                // because an explicit device might not be needed or
                // the hwaccel setup could be handled elsewhere.
                return 0;
            }
            tmp = av_asprintf("%s:%s", av_hwdevice_get_type_name(type),
                              ist->hwaccel_device);
            if (!tmp)
                return AVERROR(ENOMEM);
            err = hw_device_init_from_string(tmp, &dev);
            av_free(tmp);
            if (err < 0)
                return err;
        }
    } else {
        if (ist->hwaccel_id != HWACCEL_NONE)
            type = hw_device_match_type_by_hwaccel(ist->hwaccel_id);
        else
            type = hw_device_match_type_in_name(ist->dec->name);
        if (type != AV_HWDEVICE_TYPE_NONE) {
            dev = hw_device_get_by_type(type);
            if (!dev) {
                hw_device_init_from_string(av_hwdevice_get_type_name(type),
                                           &dev);
            }
        } else {
            // No device required.
            return 0;
        }
    }

    if (!dev) {
        av_log(ist->dec_ctx, AV_LOG_WARNING, "No device available "
               "for decoder (device type %s for codec %s).\n",
               av_hwdevice_get_type_name(type), ist->dec->name);
        return 0;
    }

    ist->dec_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
    if (!ist->dec_ctx->hw_device_ctx)
        return AVERROR(ENOMEM);

    return 0;
}

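Stripped of the lookup logic, hw_device_setup_for_decode() ultimately just hangs a device reference on the decoder context. The core libavutil pattern, as a sketch assuming a CUDA device (any AVHWDeviceType works the same):

    /* Sketch: create a hardware device and attach it to a codec context. */
    AVBufferRef *hw_ref = NULL;
    int err = av_hwdevice_ctx_create(&hw_ref, AV_HWDEVICE_TYPE_CUDA,
                                     NULL /* default device */, NULL, 0);
    if (err >= 0) {
        avctx->hw_device_ctx = av_buffer_ref(hw_ref); /* codec takes its own ref */
        av_buffer_unref(&hw_ref);
    }
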
int hw_device_setup_for_encode(OutputStream *ost)
{
    enum AVHWDeviceType type;
    HWDevice *dev;

    type = hw_device_match_type_in_name(ost->enc->name);
    if (type != AV_HWDEVICE_TYPE_NONE) {
        dev = hw_device_get_by_type(type);
        if (!dev) {
            av_log(ost->enc_ctx, AV_LOG_WARNING, "No device available "
                   "for encoder (device type %s for codec %s).\n",
                   av_hwdevice_get_type_name(type), ost->enc->name);
            return 0;
        }
        ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
        if (!ost->enc_ctx->hw_device_ctx)
            return AVERROR(ENOMEM);
        return 0;
    } else {
        // No device required.
        return 0;
    }
}

static int hwaccel_retrieve_data(AVCodecContext *avctx, AVFrame *input)
{
    InputStream *ist = avctx->opaque;
    AVFrame *output = NULL;
    enum AVPixelFormat output_format = ist->hwaccel_output_format;
    int err;

    if (input->format == output_format) {
        // Nothing to do.
        return 0;
    }

    output = av_frame_alloc();
    if (!output)
        return AVERROR(ENOMEM);

    output->format = output_format;

    err = av_hwframe_transfer_data(output, input, 0);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to transfer data to "
               "output frame: %d.\n", err);
        goto fail;
    }

    err = av_frame_copy_props(output, input);
    if (err < 0) {
        av_frame_unref(output);
        goto fail;
    }

    av_frame_unref(input);
    av_frame_move_ref(input, output);
    av_frame_free(&output);

    return 0;

fail:
    av_frame_free(&output);
    return err;
}

int hwaccel_decode_init(AVCodecContext *avctx)
{
    InputStream *ist = avctx->opaque;

    ist->hwaccel_retrieve_data = &hwaccel_retrieve_data;

    return 0;
}
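The callback registered by hwaccel_decode_init() is invoked from the decode path in ffmpeg.c, which is outside this diff. A hypothetical call site, to show how the pieces fit (ist, avctx and frame are assumed from that context):

    /* Hypothetical call site (the real one lives in ffmpeg.c, not shown
     * here): after avcodec_receive_frame() hands back a hardware frame,
     * download it to the requested software pixel format. */
    if (ist->hwaccel_retrieve_data && frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(avctx, frame);
        if (err < 0)
            return err;
    }
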
@@ -131,8 +131,8 @@ static int zero12v_decode_frame(AVCodecContext *avctx, void *data,
            u = x/2 + (uint16_t *)(pic->data[1] + line * pic->linesize[1]);
            v = x/2 + (uint16_t *)(pic->data[2] + line * pic->linesize[2]);
            memcpy(y, y_temp, sizeof(*y) * (width - x));
            memcpy(u, u_temp, sizeof(*u) * ((width - x + 1) / 2));
            memcpy(v, v_temp, sizeof(*v) * ((width - x + 1) / 2));
            memcpy(u, u_temp, sizeof(*u) * (width - x + 1) / 2);
            memcpy(v, v_temp, sizeof(*v) * (width - x + 1) / 2);
        }

        line_end += stride;

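The only difference in the hunk above is parenthesization, but it changes the byte count: sizeof(*u) * ((width - x + 1) / 2) rounds the chroma sample count down before scaling by the element size, while sizeof(*u) * (width - x + 1) / 2 scales first and can copy one byte too many. A standalone illustration with assumed values:

    /* With 16-bit samples and width - x == 4:
     *   sizeof(uint16_t) * ((4 + 1) / 2) == 2 * 2  == 4 bytes  (correct)
     *   sizeof(uint16_t) *  (4 + 1) / 2  == 10 / 2 == 5 bytes  (one too many,
     *   overrunning a 2-element destination buffer). */
    uint16_t dst[2], src[2] = { 0x1111, 0x2222 };
    memcpy(dst, src, sizeof(*dst) * ((4 + 1) / 2));   /* safe */
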
@@ -158,7 +158,7 @@ typedef struct FourXContext {
#define FIX_1_847759065 121095
#define FIX_2_613125930 171254

#define MULTIPLY(var, const) ((int)((var) * (unsigned)(const)) >> 16)
#define MULTIPLY(var, const) (((var) * (const)) >> 16)

static void idct(int16_t block[64])
{
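The (unsigned) cast in the newer MULTIPLY exists because overflowing a signed int multiply is undefined behavior in C; doing the multiplication in unsigned arithmetic makes the wraparound well defined. A minimal illustration with assumed values:

    /* FIX_2_613125930 == 171254; a block value around 13000 already
     * overflows a 32-bit signed multiply (13000 * 171254 > INT_MAX).
     * The unsigned version wraps predictably instead of invoking UB: */
    int var = 13000;
    int ok  = (int)((var * (unsigned)171254) >> 16);  /* defined behavior */
    /* (var * 171254) >> 16 with plain int arithmetic would overflow (UB). */
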
@@ -351,8 +351,6 @@ static int decode_p_block(FourXContext *f, uint16_t *dst, const uint16_t *src,
    index = size2index[log2h][log2w];
    av_assert0(index >= 0);

    if (get_bits_left(&f->gb) < 1)
        return AVERROR_INVALIDDATA;
    h = 1 << log2h;
    code = get_vlc2(&f->gb, block_type_vlc[1 - (f->version > 1)][index].table,
                    BLOCK_TYPE_VLC_BITS, 1);
@@ -498,9 +496,9 @@ static int decode_i_block(FourXContext *f, int16_t *block)
{
    int code, i, j, level, val;

    if (get_bits_left(&f->pre_gb) < 2) {
        av_log(f->avctx, AV_LOG_ERROR, "%d bits left before decode_i_block()\n", get_bits_left(&f->pre_gb));
        return AVERROR_INVALIDDATA;
    if (get_bits_left(&f->gb) < 2){
        av_log(f->avctx, AV_LOG_ERROR, "%d bits left before decode_i_block()\n", get_bits_left(&f->gb));
        return -1;
    }

    /* DC coef */
@@ -525,10 +523,6 @@ static int decode_i_block(FourXContext *f, int16_t *block)
            break;
        if (code == 0xf0) {
            i += 16;
            if (i >= 64) {
                av_log(f->avctx, AV_LOG_ERROR, "run %d overflow\n", i);
                return 0;
            }
        } else {
            if (code & 0xf) {
                level = get_xbits(&f->gb, code & 0xf);
@@ -738,7 +732,7 @@ static int decode_i2_frame(FourXContext *f, const uint8_t *buf, int length)
        for (x = 0; x < width; x += 16) {
            unsigned int color[4] = { 0 }, bits;
            if (buf_end - buf < 8)
                return AVERROR_INVALIDDATA;
                return -1;
            // warning following is purely guessed ...
            color[0] = bytestream2_get_le16u(&g3);
            color[1] = bytestream2_get_le16u(&g3);
@@ -884,8 +878,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
    }

    if (i >= CFRAME_BUFFER_COUNT) {
        if (free_index < 0)
            return AVERROR_INVALIDDATA;
        i = free_index;
        f->cfrm[i].id = id;
    }