Mirror of https://git.ffmpeg.org/ffmpeg.git (synced 2025-12-06 23:10:00 +01:00)
Compare commits
591 Commits
@@ -1,93 +0,0 @@
# This file describes the expected reviewers for a PR based on the changed
# files. Unlike what the name of the file suggests they don't own the code, but
# merely have a good understanding of that area of the codebase and therefore
# are usually suited as a reviewer.

# Lines in this file match changed paths via Go-Style regular expressions:
# https://pkg.go.dev/regexp/syntax

# Mind the alphabetical order

# avcodec
# =======
libavcodec/.*aac.* @lynne
libavcodec/.*ac3.* @lynne
libavcodec/.*atrac9.* @lynne
libavcodec/.*bitpacked.* @lynne
libavcodec/.*d3d12va.* @jianhuaw
libavcodec/.*dirac.* @lynne
libavcodec/.*ffv1.* @lynne @michaelni
libavcodec/golomb.* @michaelni
libavcodec/.*h266.* @frankplow @NuoMi @jianhuaw
libavcodec/h26x/.* @frankplow @NuoMi @jianhuaw
libavcodec/.*jpegxl.* @lynne
libavcodec/.*jxl.* @lynne
libavcodec/.*opus.* @lynne
libavcodec/.*prores.* @lynne
libavcodec/rangecoder.* @michaelni
libavcodec/ratecontrol.* @michaelni
libavcodec/.*siren.* @lynne
libavcodec/.*vc2.* @lynne
libavcodec/.*vvc.* @frankplow @NuoMi @jianhuaw

libavcodec/aarch64/.* @lynne @mstorsjo
libavcodec/arm/.* @mstorsjo
libavcodec/ppc/.* @sean_mcg
libavcodec/x86/.* @lynne

# avfilter
# =======
libavfilter/aarch64/.* @mstorsjo
libavfilter/af_whisper.* @vpalmisano
libavfilter/vf_yadif.* @michaelni
libavfilter/vsrc_mandelbrot.* @michaelni

# avformat
# =======
libavformat/iamf.* @jamrial

# avutil
# ======
libavutil/.*crc.* @lynne @michaelni
libavutil/.*d3d12va.* @jianhuaw
libavutil/eval.* @michaelni
libavutil/iamf.* @jamrial
libavutil/integer.* @michaelni
libavutil/lfg.* @michaelni
libavutil/lls.* @michaelni
libavutil/md5.* @michaelni
libavutil/mathematics.* @michaelni
libavutil/mem.* @michaelni
libavutil/qsort.* @michaelni
libavutil/random_seed.* @michaelni
libavutil/rational.* @michaelni
libavutil/sfc.* @michaelni
libavutil/softfloat.* @michaelni
libavutil/tree.* @michaelni
libavutil/tx.* @lynne

libavutil/aarch64/.* @lynne @mstorsjo
libavutil/arm/.* @mstorsjo
libavutil/ppc/.* @sean_mcg
libavutil/x86/.* @lynne

# swresample
# =======
libswresample/aarch64/.* @mstorsjo
libswresample/arm/.* @mstorsjo
libswresample/.* @michaelni

# swscale
# =======
libswscale/aarch64/.* @mstorsjo
libswscale/arm/.* @mstorsjo
libswscale/ppc/.* @sean_mcg

# doc
# ===
doc/.* @GyanD

# Frameworks
# ==========
.*d3d12va.* @jianhuaw
.*vulkan.* @lynne
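The header comments above explain that each non-comment line pairs a Go-style path regular expression (per https://pkg.go.dev/regexp/syntax) with the handles of suggested reviewers. As a rough illustration only, not Forgejo's actual matching code (the function and variable names below are invented for the example), applying such rules to a pull request's changed paths could look like this:

```go
// Minimal sketch: match changed paths against "pattern @handle..." rules
// using Go's RE2 regexp package, and collect the suggested reviewers.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// reviewersFor returns every handle whose pattern matches at least one changed path.
func reviewersFor(rules string, changedPaths []string) []string {
	seen := map[string]bool{}
	var out []string
	for _, line := range strings.Split(rules, "\n") {
		line = strings.TrimSpace(line)
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blanks and comments
		}
		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		re, err := regexp.Compile(fields[0]) // first field is the path regex
		if err != nil {
			continue // skip malformed patterns
		}
		for _, p := range changedPaths {
			if re.MatchString(p) {
				for _, h := range fields[1:] { // remaining fields are @handles
					if !seen[h] {
						seen[h] = true
						out = append(out, h)
					}
				}
				break
			}
		}
	}
	return out
}

func main() {
	rules := "libavcodec/.*vvc.* @frankplow @NuoMi @jianhuaw\nlibavutil/tx.* @lynne\n"
	fmt.Println(reviewersFor(rules, []string{"libavcodec/vvc/dec.c"}))
	// Prints: [@frankplow @NuoMi @jianhuaw]
}
```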
@@ -1,9 +0,0 @@
# Summary of the bug

Briefly describe the issue you're experiencing. Include any error messages, unexpected behavior, or relevant observations.

# Steps to reproduce

List the steps required to trigger the bug.
Include the exact CLI command used, if any.
Provide sample input files, logs, or scripts if available.
@@ -1,36 +0,0 @@
module.exports = async ({github, context}) => {
    const title = (context.payload.pull_request?.title || context.payload.issue?.title || '').toLowerCase();
    const labels = [];

    const kwmap = {
        'avcodec': 'avcodec',
        'avdevice': 'avdevice',
        'avfilter': 'avfilter',
        'avformat': 'avformat',
        'avutil': 'avutil',
        'swresample': 'swresample',
        'swscale': 'swscale',
        'fftools': 'CLI'
    };

    if (context.payload.action === 'opened') {
        labels.push('new');
        console.log('Detected label: new');
    }

    for (const [kw, label] of Object.entries(kwmap)) {
        if (title.includes(kw)) {
            labels.push(label);
            console.log('Detected label: ' + label);
        }
    }

    if (labels.length > 0) {
        await github.rest.issues.addLabels({
            owner: context.repo.owner,
            repo: context.repo.repo,
            issue_number: context.payload.pull_request?.number || context.payload.issue?.number,
            labels: labels,
        });
    }
}
@@ -1,31 +0,0 @@
avcodec:
  - changed-files:
      - any-glob-to-any-file: libavcodec/**

avdevice:
  - changed-files:
      - any-glob-to-any-file: libavdevice/**

avfilter:
  - changed-files:
      - any-glob-to-any-file: libavfilter/**

avformat:
  - changed-files:
      - any-glob-to-any-file: libavformat/**

avutil:
  - changed-files:
      - any-glob-to-any-file: libavutil/**

swresample:
  - changed-files:
      - any-glob-to-any-file: libswresample/**

swscale:
  - changed-files:
      - any-glob-to-any-file: libswscale/**

CLI:
  - changed-files:
      - any-glob-to-any-file: fftools/**
@@ -1,28 +0,0 @@
exclude: ^tests/ref/

repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: check-case-conflict
      - id: check-executables-have-shebangs
      - id: check-illegal-windows-names
      - id: check-shebang-scripts-are-executable
      - id: check-yaml
      - id: end-of-file-fixer
      - id: file-contents-sorter
        files:
          .forgejo/pre-commit/ignored-words.txt
        args:
          - --ignore-case
      - id: fix-byte-order-marker
      - id: mixed-line-ending
      - id: trailing-whitespace
  - repo: https://github.com/codespell-project/codespell
    rev: v2.4.1
    hooks:
      - id: codespell
        args:
          - --ignore-words=.forgejo/pre-commit/ignored-words.txt
          - --ignore-multiline-regex=codespell:off.*?(codespell:on|\Z)
        exclude: ^tools/(patcheck|clean-diff)$
@@ -1,119 +0,0 @@
abl
ACN
acount
addin
alis
alls
ALOG
ALS
als
ANC
anc
ANS
ans
anull
basf
bloc
brane
BREIF
BU
bu
bufer
CAF
caf
clen
clens
Collet
compre
dum
endin
erro
FIEL
fiel
filp
fils
FILTERD
filterd
fle
fo
FPR
fro
Hald
indx
ine
inh
inout
inouts
inport
ist
LAF
laf
lastr
LinS
mapp
mis
mot
nd
nIn
offsetp
orderd
ot
outout
padd
PAETH
paeth
PARM
parm
parms
pEvents
PixelX
Psot
quater
readd
recuse
redY
Reencode
reencode
remaind
renderD
rin
SAV
SEH
SER
ser
setts
shft
SIZ
siz
skipd
sme
som
sover
STAP
startd
statics
struc
suble
TE
tE
te
tha
tne
tolen
tpye
tre
TRUN
trun
truns
Tung
TYE
ue
UES
ues
vai
vas
vie
VILL
vor
wel
wih
@@ -1,25 +0,0 @@
on:
  pull_request_target:
    types: [opened, edited, synchronize]
  issues:
    types: [opened, edited]

jobs:
  pr_labeler:
    runs-on: utilities
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Label by file-changes
        uses: https://github.com/actions/labeler@v5
        if: ${{ forge.event_name == 'pull_request_target' }}
        with:
          configuration-path: .forgejo/labeler/labeler.yml
          repo-token: ${{ secrets.AUTOLABELER_TOKEN }}
      - name: Label by title-match
        uses: https://github.com/actions/github-script@v7
        with:
          script: |
            const script = require('.forgejo/labeler/labeler.js')
            await script({github, context})
          github-token: ${{ secrets.AUTOLABELER_TOKEN }}
@@ -1,26 +0,0 @@
on:
  push:
    branches:
      - master
  pull_request:

jobs:
  lint:
    runs-on: utilities
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Install pre-commit CI
        id: install
        run: |
          python3 -m venv ~/pre-commit
          ~/pre-commit/bin/pip install --upgrade pip setuptools
          ~/pre-commit/bin/pip install pre-commit
          echo "envhash=$({ python3 --version && cat .forgejo/pre-commit/config.yaml; } | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
      - name: Cache
        uses: actions/cache@v4
        with:
          path: ~/.cache/pre-commit
          key: pre-commit-${{ steps.install.outputs.envhash }}
      - name: Run pre-commit CI
        run: ~/pre-commit/bin/pre-commit run -c .forgejo/pre-commit/config.yaml --show-diff-on-failure --color=always --all-files
@@ -1,59 +0,0 @@
on:
  push:
    branches:
      - master
  pull_request:

jobs:
  run_fate:
    strategy:
      fail-fast: false
      matrix:
        runner: [linux-amd64,linux-aarch64]
    runs-on: ${{ matrix.runner }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Configure
        run: ./configure --enable-gpl --enable-nonfree --enable-memory-poisoning --assert-level=2
      - name: Build
        run: make -j$(nproc)
      - name: Restore Cached Fate-Suite
        id: cache
        uses: actions/cache/restore@v4
        with:
          path: fate-suite
          key: fate-suite
          restore-keys: |
            fate-suite-
      - name: Sync Fate-Suite
        id: fate
        run: |
          make fate-rsync SAMPLES=$PWD/fate-suite
          echo "hash=$(find fate-suite -type f | sort | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
      - name: Cache Fate-Suite
        uses: actions/cache/save@v4
        if: ${{ format('fate-suite-{0}', steps.fate.outputs.hash) != steps.cache.outputs.cache-matched-key }}
        with:
          path: fate-suite
          key: fate-suite-${{ steps.fate.outputs.hash }}
      - name: Run Fate
        run: make fate SAMPLES=$PWD/fate-suite -j$(nproc)
  compile_only:
    strategy:
      fail-fast: false
      matrix:
        image: ["ghcr.io/btbn/ffmpeg-builds/win64-gpl:latest"]
    runs-on: linux-amd64
    container: ${{ matrix.image }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - name: Configure
        run: |
          ./configure --pkg-config-flags="--static" $FFBUILD_TARGET_FLAGS $FF_CONFIGURE \
              --cc="$CC" --cxx="$CXX" --ar="$AR" --ranlib="$RANLIB" --nm="$NM" \
              --extra-cflags="$FF_CFLAGS" --extra-cxxflags="$FF_CXXFLAGS" \
              --extra-libs="$FF_LIBS" --extra-ldflags="$FF_LDFLAGS" --extra-ldexeflags="$FF_LDEXEFLAGS"
      - name: Build
        run: make -j$(nproc)
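In the Sync Fate-Suite step above, the cache key comes from `find fate-suite -type f | sort | sha256sum`, i.e. a SHA-256 over the sorted listing of sample file paths rather than over the file contents, so the cache is refreshed whenever samples are added or removed. A rough Go equivalent of that key computation, shown for illustration only (the helper name is invented, and byte-wise sorting may differ slightly from the shell pipeline's locale-dependent sort):

```go
// Minimal sketch: hash the newline-terminated, sorted list of regular file
// paths under a directory, mirroring `find <dir> -type f | sort | sha256sum`.
package main

import (
	"crypto/sha256"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// sampleListHash keys on the file *names*, not their contents.
func sampleListHash(root string) (string, error) {
	var paths []string
	err := filepath.WalkDir(root, func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.Type().IsRegular() {
			paths = append(paths, p)
		}
		return nil
	})
	if err != nil {
		return "", err
	}
	sort.Strings(paths)
	sum := sha256.Sum256([]byte(strings.Join(paths, "\n") + "\n"))
	return fmt.Sprintf("%x", sum), nil
}

func main() {
	h, err := sampleListHash("fate-suite")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("fate-suite-" + h) // shape of the cache key used above
}
```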
2 .gitattributes vendored
@@ -1,2 +1,2 @@
*.pnm -diff -text
Changelog merge=union
tests/ref/fate/sub-scc eol=crlf
11 .gitignore vendored
@@ -1,6 +1,5 @@
*.a
*.o
*.objs
*.o.*
*.d
*.def
@@ -20,12 +19,8 @@
*.swp
*.ver
*.version
*.metal.air
*.metallib
*.metallib.c
*.ptx
*.ptx.c
*.ptx.gz
*_g
\#*
.\#*
@@ -36,13 +31,9 @@
/ffprobe
/config.asm
/config.h
/config_components.h
/coverage.info
/avversion.h
/lcov/
/src
/mapfile
/tools/python/__pycache__/
/libavcodec/vulkan/*.c
/libavfilter/vulkan/*.c
/.*/
!/.forgejo/
15 .mailmap
@@ -1,8 +1,9 @@
<james.darnley@gmail.com> <jdarnley@obe.tv>
<jeebjp@gmail.com> <jan.ekstrom@aminocom.com>
<sw@jkqxz.net> <mrt@jkqxz.net>
<u@pkh.me> <cboesch@gopro.com>
<quinkblack@foxmail.com> <wantlamy@gmail.com>
<quinkblack@foxmail.com> <zhilizhao@tencent.com>
<zhilizhao@tencent.com> <quinkblack@foxmail.com>
<zhilizhao@tencent.com> <wantlamy@gmail.com>
<modmaker@google.com> <modmaker-at-google.com@ffmpeg.org>
<stebbins@jetheaddev.com> <jstebbins@jetheaddev.com>
<barryjzhao@tencent.com> <mypopydev@gmail.com>
@@ -17,14 +18,8 @@
<atomnuker@gmail.com> <rpehlivanov@obe.tv>
<lizhong1008@gmail.com> <zhong.li@intel.com>
<lizhong1008@gmail.com> <zhongli_dev@126.com>
<andreas.rheinhardt@outlook.com> <andreas.rheinhardt@gmail.com>
<andreas.rheinhardt@outlook.com> <andreas.rheinhardt@googlemail.com>
<andreas.rheinhardt@gmail.com> <andreas.rheinhardt@googlemail.com>
rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
<thilo.borgmann@mail.de> <thilo.borgmann@googlemail.com>
<lq@chinaffmpeg.org> <liuqi05@kuaishou.com>
<liuqi05@kuaishou.com> <lq@chinaffmpeg.org>
<ruiling.song83@gmail.com> <ruiling.song@intel.com>
Cosmin Stejerean <cosmin@cosmin.at> Cosmin Stejerean via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
<wutong1208@outlook.com> <tong1.wu-at-intel.com@ffmpeg.org>
<wutong1208@outlook.com> <tong1.wu@intel.com>
<toqsxw@outlook.com> <jianhua.wu-at-intel.com@ffmpeg.org>
<toqsxw@outlook.com> <jianhua.wu@intel.com>
30 .travis.yml Normal file
@@ -0,0 +1,30 @@
language: c
sudo: false
os:
  - linux
  - osx
addons:
  apt:
    packages:
      - nasm
      - diffutils
compiler:
  - clang
  - gcc
matrix:
  exclude:
    - os: osx
      compiler: gcc
cache:
  directories:
    - ffmpeg-samples
before_install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew update; fi
install:
  - if [ "$TRAVIS_OS_NAME" == "osx" ]; then brew install nasm; fi
script:
  - mkdir -p ffmpeg-samples
  - ./configure --samples=ffmpeg-samples --cc=$CC
  - make -j 8
  - make fate-rsync
  - make check -j 8
@@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know
|
||||
that what they have is not the original version, so that the original
|
||||
author's reputation will not be affected by problems that might be
|
||||
introduced by others.
|
||||
|
||||
|
||||
Finally, software patents pose a constant threat to the existence of
|
||||
any free program. We wish to make sure that a company cannot
|
||||
effectively restrict the users of a free program by obtaining a
|
||||
@@ -111,7 +111,7 @@ modification follow. Pay close attention to the difference between a
|
||||
"work based on the library" and a "work that uses the library". The
|
||||
former contains code derived from the library, whereas the latter must
|
||||
be combined with the library in order to run.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
@@ -158,7 +158,7 @@ Library.
|
||||
You may charge a fee for the physical act of transferring a copy,
|
||||
and you may at your option offer warranty protection in exchange for a
|
||||
fee.
|
||||
|
||||
|
||||
2. You may modify your copy or copies of the Library or any portion
|
||||
of it, thus forming a work based on the Library, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
@@ -216,7 +216,7 @@ instead of to this License. (If a newer version than version 2 of the
|
||||
ordinary GNU General Public License has appeared, then you can specify
|
||||
that version instead if you wish.) Do not make any other change in
|
||||
these notices.
|
||||
|
||||
|
||||
Once this change is made in a given copy, it is irreversible for
|
||||
that copy, so the ordinary GNU General Public License applies to all
|
||||
subsequent copies and derivative works made from that copy.
|
||||
@@ -267,7 +267,7 @@ Library will still fall under Section 6.)
|
||||
distribute the object code for the work under the terms of Section 6.
|
||||
Any executables containing that work also fall under Section 6,
|
||||
whether or not they are linked directly with the Library itself.
|
||||
|
||||
|
||||
6. As an exception to the Sections above, you may also combine or
|
||||
link a "work that uses the Library" with the Library to produce a
|
||||
work containing portions of the Library, and distribute that work
|
||||
@@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally
|
||||
accompany the operating system. Such a contradiction means you cannot
|
||||
use both them and the Library together in an executable that you
|
||||
distribute.
|
||||
|
||||
|
||||
7. You may place library facilities that are a work based on the
|
||||
Library side-by-side in a single library together with other library
|
||||
facilities not covered by this License, and distribute such a combined
|
||||
@@ -370,7 +370,7 @@ subject to these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties with
|
||||
this License.
|
||||
|
||||
|
||||
11. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
@@ -422,7 +422,7 @@ conditions either of that version or of any later version published by
|
||||
the Free Software Foundation. If the Library does not specify a
|
||||
license version number, you may choose any version ever published by
|
||||
the Free Software Foundation.
|
||||
|
||||
|
||||
14. If you wish to incorporate parts of the Library into other free
|
||||
programs whose distribution conditions are incompatible with these,
|
||||
write to the author to ask for permission. For software which is
|
||||
@@ -456,7 +456,7 @@ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
||||
DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
|
||||
How to Apply These Terms to Your New Libraries
|
||||
|
||||
If you develop a new library, and you want it to be of the greatest
|
||||
|
||||
804 Changelog
@@ -1,367 +1,455 @@
|
||||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version 4.4.4:
|
||||
- avcodec/tests/snowenc: Fix 2nd test
|
||||
- avcodec/tests/snowenc: return a failure if DWT/IDWT mismatches
|
||||
- avcodec/snowenc: Fix visual weight calculation
|
||||
- avcodec/tests/snowenc: unbreak DWT tests
|
||||
- avcodec/vp3: Add missing check for av_malloc
|
||||
- avformat/nutdec: Add check for avformat_new_stream
|
||||
- avcodec/mpeg12dec: Check input size
|
||||
- avcodec/escape124: Fix some return codes
|
||||
- avcodec/escape124: fix signdness of end of input check
|
||||
- Use https for repository links
|
||||
- avcodec/rpzaenc: stop accessing out of bounds frame
|
||||
- avcodec/motionpixels: Mask pixels to valid values
|
||||
- avcodec/xpmdec: Check size before allocation to avoid truncation
|
||||
- avcodec/bink: Avoid undefined out of array end pointers in binkb_decode_plane()
|
||||
- avcodec/bink: Fix off by 1 error in ref end
|
||||
- avcodec/utils: Ensure linesize for SVQ3
|
||||
- avcodec/utils: allocate a line more for VC1 and WMV3
|
||||
- avcodec/videodsp_template: Adjust pointers to avoid undefined pointer things
|
||||
- avcodec/pngdec: Check deloco index more exactly
|
||||
- avcodec/ffv1dec: Check that num h/v slices is supported
|
||||
- avformat/mov: Check samplesize and offset to avoid integer overflow
|
||||
- avcodec/pictordec: Remove mid exit branch
|
||||
- avcodec/eac3dec: avoid float noise in fixed mode addition to overflow
|
||||
- avcodec/utils: use 32pixel alignment for bink
|
||||
- avcodec/scpr3: Check bx
|
||||
- avcodec/012v: Order operations for odd size handling
|
||||
- avcodec/eatgq: : Check index increments in tgq_decode_block()
|
||||
- avcodec/scpr: Test bx before use
|
||||
- avformat/mxfdec: Use 64bit in remainder
|
||||
- avcodec/sunrast: Fix maplength check
|
||||
- avcodec/wavpack: Avoid undefined shift in get_tail()
|
||||
- avcodec/wavpack: Check for end of input in wv_unpack_dsd_high()
|
||||
- avformat/id3v2: Check taglen in read_uslt()
|
||||
- avcodec/tiff: Ignore tile_count
|
||||
- avcodec/ffv1dec: restructure slice coordinate reading a bit
|
||||
- avcodec/mlpdec: Check max matrix instead of max channel in noise check
|
||||
- swscale/input: Use more unsigned intermediates
|
||||
- avcodec/alsdec: The minimal block is at least 7 bits
|
||||
- avformat/replaygain: avoid undefined / negative abs
|
||||
- swscale/output: Bias 16bps output calculations to improve non overflowing range
|
||||
- avcodec/speedhq: Check buf_size to be big enough for DC
|
||||
- avcodec/ffv1dec: Fail earlier if prior context is corrupted
|
||||
- avcodec/nvenc: fix b-frame DTS behavior with fractional framerates
|
||||
- avfilter/vf_untile: swap the chroma shift values used for plane offsets
|
||||
- avcodec/nvenc: fix vbv buffer size in cq mode
|
||||
- avcodec/mjpegenc: take into account component count when writing the SOF header size
|
||||
- swscale: aarch64: Fix yuv2rgb with negative stride
|
||||
|
||||
version 8.0.1:
|
||||
avutil/common: cast GET_BYTE/GET_16BIT returned value
|
||||
avfilter/vf_drawtext: fix call GET_UTF8 with invalid argument
|
||||
avfilter/vf_drawtext: fix incorrect text length
|
||||
Update for 8.0.1
|
||||
avfilter/vf_drawtext: Account for bbox text seperator
|
||||
avcodec/mediacodecdec_common: Check that the input to mediacodec_wrap_sw_audio_buffer() contains channel * sample_size
|
||||
avcodec/rv60dec: Clear blk_info
|
||||
avformat/whip: Fix rtp_ctx->streams access
|
||||
avcodec/utvideodec: Set B for the width= 1 case in restore_median_planar_il()
|
||||
avcodec/osq: Fix 32bit sample overflow
|
||||
avformat/rtpdec_rfc4175: Only change PayloadContext on success
|
||||
avformat/rtpdec_rfc4175: Check dimensions
|
||||
avformat/rtpdec_rfc4175: Fix memleak of sampling
|
||||
avformat/http: Fix off by 1 error
|
||||
avcodec/exr: spelling
|
||||
avcodec/rv60dec: add upper bound check for qp
|
||||
avcodec/exr: use tile dimensions in pxr24 UINT case
|
||||
avcodec/exr: Simple check for available channels
|
||||
avformat/sctp: Check size in sctp_write()
|
||||
avformat/rtmpproto: consider command line argument lengths
|
||||
avformat/rtmpproto_ Check tcurl and flashver length
|
||||
avcodec/g723_1enc: Make min_err 64bit
|
||||
avcodec/vlc: Clear val8/16 in vlc_multi_gen() by av_mallocz()
|
||||
avformat/rtpenc_h264_hevc: Check space for nal_length_size in ff_rtp_send_h264_hevc()
|
||||
avcodec/ffv1enc: Consider variation in slice sizes
|
||||
libavcodec/cbs_apv_syntax_template: limit tile to 2gb
|
||||
swscale/output: Fix unsigned cast position in yuv2*
|
||||
swscale/output: Fix integer overflow in yuv2ya16_X_c_template()
|
||||
avcodec/exr: Check that DWA has 3 channels
|
||||
avcodec/exr: check ac_size
|
||||
avcodec/exr: Round dc_w/h up
|
||||
avcodec/mjpegdec: Explain buf_size/width/height check
|
||||
configure: strip non numeric trailer from gcc version
|
||||
avformat/dhav: Fix off by length of read element error
|
||||
avformat/aviobuf: Keep checksum_ptr consistent in avio_seek()
|
||||
doc/examples/vaapi_encode: fix invalid check on fwrite
|
||||
avcodec/librsvgdec: fix compilation with librsvg 2.50.3
|
||||
avcodec/mfenc: fix memory leak with D3D11 input surfaces
|
||||
swscale/graph: fix double-free when legacy pass fails initializing
|
||||
libavformat/udp: Fix call to recvfrom(2)
|
||||
avfilter/f_ebur128: Fix incorrect ebur128 peak calculation.
|
||||
avformat/udp: fix warning about unused varible
|
||||
avdevice/lavfi: stop setting deprecated buffersink options
|
||||
configure: unbreak glslang build
|
||||
swscale/range_convert: fix truncation bias in range conversion
|
||||
lavc/aarch64: Fix addp overflow in ff_pred16x16_plane_neon_10
|
||||
avcodec/mlpdec: don't depend on context channel layout when setting substream masks
|
||||
avformat/demux: pass new extradata to the parser
|
||||
avfilter/af_whisper: fix srt index
|
||||
avfilter/af_whisper: fix int64 printf format
|
||||
avfilter/af_whisper: fix srt file format
|
||||
avfilter/whisper: correct option formatting
|
||||
avfilter/af_whisper: fix broken output for multibyte character
|
||||
avformat/rtsp: fix leading space in RTSP reason
|
||||
avformat/rtsp: do not log invalid values
|
||||
avformat/http: Handle IPv6 Zone ID in hostname
|
||||
avformat/dump: fix log level passed to av_log when printing stream group side data
|
||||
avcodec/hevc/sei: don't attempt to use stale values in HEVCSEITDRDI
|
||||
avcodec/hevc/sei: prevent storing a potentially bogus num_ref_displays value in HEVCSEITDRDI
|
||||
avcodec/hevc/refs: don't unconditionally discard non-IRAP frames if no IRAP frame was seen before
|
||||
libavutil/arm: Rename the HWCAP defines
|
||||
libavutil/arm: Make use of elf_aux_info() on FreeBSD/OpenBSD
|
||||
fftools/ffmpeg: fix gracefully shutdown
|
||||
avcodec/decode: sync initial_pict_type and intra_only_flag with thread worker's avctx
|
||||
avcodec/x86/pngdsp: add missing emms at the end of add_png_paeth_prediction
|
||||
avcodec/videotoolboxenc: ensure bitrate is set in low_delay mode
|
||||
avcodec/videotoolboxenc: allow low latency RC with HEVC
|
||||
avcodec/videotoolboxenc: support global_quality without qscale
|
||||
avcodec/videotoolboxenc: fix the loss of precision when calculating quality
|
||||
fftools/ffmpeg_demux: ensure the display_rotation option is honored
|
||||
avcodec/mjpegdec: use ff_frame_new_side_data() to export display matrix
|
||||
avutil/tests/aes_ctr: extend the test to cover payloads smaller than a block
|
||||
avutil/aes_ctr: reintroduce the block offset state
|
||||
avfilter/vf_lcevc: support LCEVCdec version 4
|
||||
avcodec/lcevcdec: support LCEVCdec version 4
|
||||
movenc: ensure chapters track extradata is not null and populated
|
||||
version 4.4.3:
|
||||
- avformat/vividas: Check packet size
|
||||
- configure: link to libatomic when it's present
|
||||
- avcodec/dstdec: Check for overflow in build_filter()
|
||||
- avformat/spdifdec: Use 64bit to compute bit rate
|
||||
- avformat/rpl: Use 64bit for duration computation
|
||||
- avformat/xwma: Use av_rescale() for duration computation
|
||||
- avformat/sdsdec: Use av_rescale() to avoid intermediate overflow in duration calculation
|
||||
- avformat/sbgdec: Check ts_int in genrate_intervals
|
||||
- avformat/rmdec: check tag_size
|
||||
- avformat/nutdec: Check fields
|
||||
- avformat/flvdec: Use 64bit for sum_flv_tag_size
|
||||
- avformat/jacosubdec: Fix overflow in get_shift()
|
||||
- avformat/dxa: avoid bpc overflows
|
||||
- avformat/cafdec: Check that nb_frasmes fits within 64bit
|
||||
- avformat/asfdec_o: Limit packet offset
|
||||
- avformat/ape: Check frames size
|
||||
- avformat/icodec: Check nb_pal
|
||||
- avformat/aiffdec: Use 64bit for block_duration use
|
||||
- avformat/aiffdec: Check block_duration
|
||||
- avformat/mxfdec: only probe max run in
|
||||
- avformat/mxfdec: Check run_in is within 65536
|
||||
- avcodec/mjpegdec: Check for unsupported bayer case
|
||||
- avcodec/apedec: Fix integer overflow in filter_3800()
|
||||
- avcodec/tta: Check 24bit scaling for overflow
|
||||
- avcodec/mobiclip: Check quantizer for overflow
|
||||
- avcodec/exr: Check preview psize
|
||||
- avcodec/tiff: Fix loop detection
|
||||
- libavformat/hls: Free keys
|
||||
- avcodec/fmvc: Move frame allocation to a later stage
|
||||
- avfilter/vf_showinfo: remove backspaces
|
||||
- avcodec/speedhq: Check width
|
||||
- avcodec/bink: disallow odd positioned scaled blocks
|
||||
- avformat/asfdec_o: limit recursion depth in asf_read_unknown()
|
||||
- doc/git-howto.texi: Document commit signing
|
||||
- libavcodec/8bps: Check that line lengths fit within the buffer
|
||||
- avcodec/midivid: Perform lzss_uncompress() before ff_reget_buffer()
|
||||
- libavformat/iff: Check for overflow in body_end calculation
|
||||
- avformat/avidec: Prevent entity expansion attacks
|
||||
- avcodec/h263dec: Sanity check against minimal I/P frame size
|
||||
- avcodec/hevcdec: Check s->ref in the md5 path similar to hwaccel
|
||||
- avcodec/mpegaudiodec_template: use unsigned shift in handle_crc()
|
||||
- avformat/subviewerdec: Make read_ts() more flexible
|
||||
- avcodec/mjpegdec: bayer and rct are incompatible
|
||||
- MAINTAINERS: Add ED25519 key for signing my commits in the future
|
||||
- avcodec/hevc_filter: copy_CTB() only within width&height
|
||||
- avcodec/tiff: Check tile_length and tile_width
|
||||
- avcodec/mss4: Check image size with av_image_check_size2()
|
||||
- avformat/flvdec: Check for EOF in index reading
|
||||
- avformat/nutdec: Check get_packetheader() in mainheader
|
||||
- avformat/asfdec_f: Use 64bit for packet start time
|
||||
- avcodec/exr: Check x/ysize
|
||||
- tools/target_dec_fuzzer: Adjust threshold for MMVIDEO
|
||||
- avcodec/lagarith: Check dst/src in zero run code
|
||||
- avcodec/h264dec: Skip late SEI
|
||||
- avcodec/sbrdsp_fixed: Fix integer overflows in sbr_qmf_deint_neg_c()
|
||||
- avfilter/vf_signature: Fix integer overflow in filter_frame()
|
||||
- avformat/rtsp: break on unknown protocols
|
||||
- avcodec/hevcdsp_template: stay within tables in sao_band_filter()
|
||||
- avcodec/tiff: Check pixel format types for dng
|
||||
- avcodec/qpeldsp: copy less for the mc0x cases
|
||||
- avformat/aaxdec: Check for empty segments
|
||||
- avcodec/ffv1dec: Limit golomb rice coded slices to width 8M
|
||||
- avformat/iff: simplify duration calculation
|
||||
- avcodec/wnv1: Check for width =1
|
||||
- avcodec/ffv1dec_template: fix indention
|
||||
- avformat/sctp: close socket on errors
|
||||
- avcodec/aasc: Fix indention
|
||||
- avcodec/qdrw: adjust max colors to array size
|
||||
- avcodec/alacdsp: Make intermediates unsigned
|
||||
- avformat/aiffdec: cleanup size handling for extreem cases
|
||||
- avformat/matroskadec: avoid integer overflows in SAR computation
|
||||
- avcodec/jpeglsdec: fix end check for xfrm
|
||||
- avcodec/cdgraphics: limit scrolling to the line
|
||||
- avformat/hls: Limit start_seq_no to one bit less
|
||||
- avformat/aiffdec: avoid integer overflow in get_meta()
|
||||
- avformat/ape: more bits in size for less overflows
|
||||
- avformat/aviobuf: Check buf_size in ffio_ensure_seekback()
|
||||
- avformat/bfi: Check offsets better
|
||||
- avformat/asfdec_f: Check packet_frag_timestamp
|
||||
- avcodec/texturedspenc: Fix indexing in color distribution determination
|
||||
- avformat/act: Check ff_get_wav_header() for failure
|
||||
- avcodec/libxavs2: Improve r redundancy in occured
|
||||
- avformat/libzmq: Improve r redundancy in occured
|
||||
- avfilter/vsrc_mandelbrot: Check for malloc failure
|
||||
- avfilter/vf_frei0r: Copy to frame allocated according to frei0r requirements
|
||||
- avfilter/video: Add ff_default_get_video_buffer2() to set specific alignment
|
||||
- avformat/genh: Check sample rate
|
||||
- configure: bump year
|
||||
- lavc/videotoolbox: do not pass AVCodecContext to decoder output callback
|
||||
- lavc/pthread_frame: always transfer stashed hwaccel state
|
||||
- avcodec/arm/sbcenc: avoid callee preserved vfp registers
|
||||
- avfilter/vf_scale: overwrite the width and height expressions with the original values
|
||||
- lavc/pthread_frame: avoid leaving stale hwaccel state in worker threads
|
||||
- configure: extend SDL check to accept all 2.x versions
|
||||
- lavf/tls_mbedtls: add support for mbedtls version 3
|
||||
|
||||
version 8.0:
|
||||
- Whisper filter
|
||||
- Drop support for OpenSSL < 1.1.0
|
||||
- Enable TLS peer certificate verification by default (on next major version bump)
|
||||
- yasm support dropped, users need to use nasm
|
||||
- VVC VAAPI decoder
|
||||
- RealVideo 6.0 decoder
|
||||
- OpenMAX encoders deprecated
|
||||
- libx265 alpha layer encoding
|
||||
- ADPCM IMA Xbox decoder
|
||||
- Enhanced FLV v2: Multitrack audio/video, modern codec support
|
||||
- Animated JPEG XL encoding (via libjxl)
|
||||
- VVC in Matroska
|
||||
- CENC AV1 support in MP4 muxer
|
||||
- pngenc: set default prediction method to PAETH
|
||||
- APV decoder and APV raw bitstream muxing and demuxing
|
||||
- APV parser
|
||||
- APV encoding support through a libopenapv wrapper
|
||||
- VVC decoder supports all content of SCC (Screen Content Coding):
|
||||
IBC (Inter Block Copy), Palette Mode and ACT (Adaptive Color Transform
|
||||
- G.728 decoder
|
||||
- pad_cuda filter
|
||||
- Sanyo LD-ADPCM decoder
|
||||
- APV in MP4/ISOBMFF muxing and demuxing
|
||||
- OpenHarmony hardware decoder/encoder
|
||||
- Colordetect filter
|
||||
- Add vf_scale_d3d11 filter
|
||||
- No longer disabling GCC autovectorization, on X86, ARM and AArch64
|
||||
- VP9 Vulkan hwaccel
|
||||
- AV1 Vulkan encoder
|
||||
- ProRes RAW decoder
|
||||
- ProRes RAW Vulkan hwaccel
|
||||
|
||||
|
||||
version 7.1:
|
||||
- Raw Captions with Time (RCWT) closed caption demuxer
|
||||
- LC3/LC3plus decoding/encoding using external library liblc3
|
||||
- ffmpeg CLI filtergraph chaining
|
||||
- LC3/LC3plus demuxer and muxer
|
||||
- pad_vaapi, drawbox_vaapi filters
|
||||
- vf_scale supports secondary ref input and framesync options
|
||||
- vf_scale2ref deprecated
|
||||
- qsv_params option added for QSV encoders
|
||||
- VVC decoder compatible with DVB test content
|
||||
- xHE-AAC decoder
|
||||
- removed DEC Alpha DSP and support code
|
||||
- VVC encoding support via libvvenc
|
||||
- perlin video source
|
||||
- D3D12VA HEVC encoder
|
||||
- Cropping metadata parsing and writing in Matroska and MP4/MOV de/muxers
|
||||
- Intel QSV-accelerated VVC decoding
|
||||
- MediaCodec AAC/AMR-NB/AMR-WB/MP3 decoding
|
||||
- YUV colorspace negotiation for codecs and filters, obsoleting the
|
||||
YUVJ pixel format
|
||||
- Vulkan H.264 encoder
|
||||
- Vulkan H.265 encoder
|
||||
- stream specifiers in fftools can now match by stream disposition
|
||||
- LCEVC enhancement data exporting in H.26x and MP4/ISOBMFF
|
||||
- LCEVC filter
|
||||
- MV-HEVC decoding
|
||||
- minor stream specifier syntax changes:
|
||||
- when matching by metadata (:m:<key>:<val>), the colon character
|
||||
in keys or values now has to be backslash-escaped
|
||||
- in optional maps (-map ....?) with a metadata-matching stream specifier,
|
||||
the value has to be separated from the question mark by a colon, i.e.
|
||||
-map ....:m:<key>:<val>:? (otherwise it would be ambiguous whether the
|
||||
question mark is a part of <val> or not)
|
||||
- multiple stream types in a single specifier (e.g. :s:s:0) now cause an
|
||||
error, as such a specifier makes no sense
|
||||
- Mastering Display and Content Light Level metadata support in hevc_nvenc
|
||||
and av1_nvenc encoders
|
||||
- libswresample now accepts custom order channel layouts as input, with some
|
||||
constrains
|
||||
- FFV1 parser
|
||||
|
||||
|
||||
version 7.0:
|
||||
- DXV DXT1 encoder
|
||||
- LEAD MCMP decoder
|
||||
- EVC decoding using external library libxevd
|
||||
- EVC encoding using external library libxeve
|
||||
- QOA decoder and demuxer
|
||||
- aap filter
|
||||
- demuxing, decoding, filtering, encoding, and muxing in the
|
||||
ffmpeg CLI now all run in parallel
|
||||
- enable gdigrab device to grab a window using the hwnd=HANDLER syntax
|
||||
- IAMF raw demuxer and muxer
|
||||
- D3D12VA hardware accelerated H264, HEVC, VP9, AV1, MPEG-2 and VC1 decoding
|
||||
- tiltandshift filter
|
||||
- qrencode filter and qrencodesrc source
|
||||
- quirc filter
|
||||
- lavu/eval: introduce randomi() function in expressions
|
||||
- VVC decoder (experimental)
|
||||
- fsync filter
|
||||
- Raw Captions with Time (RCWT) closed caption muxer
|
||||
- ffmpeg CLI -bsf option may now be used for input as well as output
|
||||
- ffmpeg CLI options may now be used as -/opt <path>, which is equivalent
|
||||
to -opt <contents of file <path>>
|
||||
- showinfo bitstream filter
|
||||
- a C11-compliant compiler is now required; note that this requirement
|
||||
will be bumped to C17 in the near future, so consider updating your
|
||||
build environment if it lacks C17 support
|
||||
- Change the default bitrate control method from VBR to CQP for QSV encoders.
|
||||
- removed deprecated ffmpeg CLI options -psnr and -map_channel
|
||||
- DVD-Video demuxer, powered by libdvdnav and libdvdread
|
||||
- ffprobe -show_stream_groups option
|
||||
- ffprobe (with -export_side_data film_grain) now prints film grain metadata
|
||||
- AEA muxer
|
||||
- ffmpeg CLI loopback decoders
|
||||
- Support PacketTypeMetadata of PacketType in enhanced flv format
|
||||
- ffplay with hwaccel decoding support (depends on vulkan renderer via libplacebo)
|
||||
- dnn filter libtorch backend
|
||||
- Android content URIs protocol
|
||||
- AOMedia Film Grain Synthesis 1 (AFGS1)
|
||||
- RISC-V optimizations for AAC, FLAC, JPEG-2000, LPC, RV4.0, SVQ, VC1, VP8, and more
|
||||
- Loongarch optimizations for HEVC decoding
|
||||
- Important AArch64 optimizations for HEVC
|
||||
- IAMF support inside MP4/ISOBMFF
|
||||
- Support for HEIF/AVIF still images and tiled still images
|
||||
- Dolby Vision profile 10 support in AV1
|
||||
- Support for Ambient Viewing Environment metadata in MP4/ISOBMFF
|
||||
- HDR10 metadata passthrough when encoding with libx264, libx265, and libsvtav1
|
||||
|
||||
|
||||
version 6.1:
|
||||
- libaribcaption decoder
|
||||
- Playdate video decoder and demuxer
|
||||
- Extend VAAPI support for libva-win32 on Windows
|
||||
- afireqsrc audio source filter
|
||||
- arls filter
|
||||
- ffmpeg CLI new option: -readrate_initial_burst
|
||||
- zoneplate video source filter
|
||||
- command support in the setpts and asetpts filters
|
||||
- Vulkan decode hwaccel, supporting H264, HEVC and AV1
|
||||
- color_vulkan filter
|
||||
- bwdif_vulkan filter
|
||||
- nlmeans_vulkan filter
|
||||
- RivaTuner video decoder
|
||||
- xfade_vulkan filter
|
||||
- vMix video decoder
|
||||
- Essential Video Coding parser, muxer and demuxer
|
||||
- Essential Video Coding frame merge bsf
|
||||
- bwdif_cuda filter
|
||||
- Microsoft RLE video encoder
|
||||
- Raw AC-4 muxer and demuxer
|
||||
- Raw VVC bitstream parser, muxer and demuxer
|
||||
- Bitstream filter for editing metadata in VVC streams
|
||||
- Bitstream filter for converting VVC from MP4 to Annex B
|
||||
- scale_vt filter for videotoolbox
|
||||
- transpose_vt filter for videotoolbox
|
||||
- support for the P_SKIP hinting to speed up libx264 encoding
|
||||
- Support HEVC,VP9,AV1 codec in enhanced flv format
|
||||
- apsnr and asisdr audio filters
|
||||
- OSQ demuxer and decoder
|
||||
- Support HEVC,VP9,AV1 codec fourcclist in enhanced rtmp protocol
|
||||
- CRI USM demuxer
|
||||
- ffmpeg CLI '-top' option deprecated in favor of the setfield filter
|
||||
- VAAPI AV1 encoder
|
||||
- ffprobe XML output schema changed to account for multiple
|
||||
variable-fields elements within the same parent element
|
||||
- ffprobe -output_format option added as an alias of -of
|
||||
|
||||
# codespell:off
|
||||
|
||||
version 6.0:
|
||||
- Radiance HDR image support
|
||||
- ddagrab (Desktop Duplication) video capture filter
|
||||
- ffmpeg -shortest_buf_duration option
|
||||
- ffmpeg now requires threading to be built
|
||||
- ffmpeg now runs every muxer in a separate thread
|
||||
- Add new mode to cropdetect filter to detect crop-area based on motion vectors and edges
|
||||
- VAAPI decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
|
||||
- WBMP (Wireless Application Protocol Bitmap) image format
|
||||
- a3dscope filter
|
||||
- bonk decoder and demuxer
|
||||
- Micronas SC-4 audio decoder
|
||||
- LAF demuxer
|
||||
- APAC decoder and demuxer
|
||||
- Media 100i decoders
|
||||
- DTS to PTS reorder bsf
|
||||
- ViewQuest VQC decoder
|
||||
- backgroundkey filter
|
||||
- nvenc AV1 encoding support
|
||||
- MediaCodec decoder via NDKMediaCodec
|
||||
- MediaCodec encoder
|
||||
- oneVPL support for QSV
|
||||
- QSV AV1 encoder
|
||||
- QSV decoding and encoding for 10/12bit 422, 10/12bit 444 HEVC and VP9
|
||||
- showcwt multimedia filter
|
||||
- corr video filter
|
||||
- adrc audio filter
|
||||
- afdelaysrc audio filter
|
||||
- WADY DPCM decoder and demuxer
|
||||
- CBD2 DPCM decoder
|
||||
- ssim360 video filter
|
||||
- ffmpeg CLI new options: -stats_enc_pre[_fmt], -stats_enc_post[_fmt],
|
||||
-stats_mux_pre[_fmt]
|
||||
- hstack_vaapi, vstack_vaapi and xstack_vaapi filters
|
||||
- XMD ADPCM decoder and demuxer
|
||||
- media100 to mjpegb bsf
|
||||
- ffmpeg CLI new option: -fix_sub_duration_heartbeat
|
||||
- WavArc decoder and demuxer
|
||||
- CrystalHD decoders deprecated
|
||||
- SDNS demuxer
|
||||
- RKA decoder and demuxer
|
||||
- filtergraph syntax in ffmpeg CLI now supports passing file contents
|
||||
as option values, by prefixing option name with '/'
|
||||
- hstack_qsv, vstack_qsv and xstack_qsv filters
|
||||
|
||||
|
||||
version 5.1:
|
||||
- add ipfs/ipns gateway support
|
||||
- dialogue enhance audio filter
|
||||
- dropped obsolete XvMC hwaccel
|
||||
- pcm-bluray encoder
|
||||
- DFPWM audio encoder/decoder and raw muxer/demuxer
|
||||
- SITI filter
|
||||
- Vizrt Binary Image encoder/decoder
|
||||
- avsynctest source filter
|
||||
- feedback video filter
|
||||
- pixelize video filter
|
||||
- colormap video filter
|
||||
- colorchart video source filter
|
||||
- multiply video filter
|
||||
- PGS subtitle frame merge bitstream filter
|
||||
- blurdetect filter
|
||||
- tiltshelf audio filter
|
||||
- QOI image format support
|
||||
- ffprobe -o option
|
||||
- virtualbass audio filter
|
||||
- VDPAU AV1 hwaccel
|
||||
- PHM image format support
|
||||
- remap_opencl filter
|
||||
- added chromakey_cuda filter
|
||||
- added bilateral_cuda filter
|
||||
|
||||
|
||||
version 5.0:
|
||||
- ADPCM IMA Westwood encoder
|
||||
- Westwood AUD muxer
|
||||
- ADPCM IMA Acorn Replay decoder
|
||||
- Argonaut Games CVG demuxer
|
||||
- Argonaut Games CVG muxer
|
||||
- Concatf protocol
|
||||
- afwtdn audio filter
|
||||
- audio and video segment filters
|
||||
- Apple Graphics (SMC) encoder
|
||||
- hsvkey and hsvhold video filters
|
||||
- adecorrelate audio filter
|
||||
- atilt audio filter
|
||||
- grayworld video filter
|
||||
- AV1 Low overhead bitstream format muxer
|
||||
- swscale slice threading
|
||||
- MSN Siren decoder
|
||||
- scharr video filter
|
||||
- apsyclip audio filter
|
||||
- morpho video filter
|
||||
- amr parser
|
||||
- (a)latency filters
|
||||
- GEM Raster image decoder
|
||||
- asdr audio filter
|
||||
- speex decoder
|
||||
- limitdiff video filter
|
||||
- xcorrelate video filter
|
||||
- varblur video filter
|
||||
- huesaturation video filter
|
||||
- colorspectrum source video filter
|
||||
- RTP packetizer for uncompressed video (RFC 4175)
|
||||
- bitpacked encoder
|
||||
- VideoToolbox VP9 hwaccel
|
||||
- VideoToolbox ProRes hwaccel
|
||||
- support loongarch.
|
||||
- aspectralstats audio filter
|
||||
- adynamicsmooth audio filter
|
||||
- libplacebo filter
|
||||
- vflip_vulkan, hflip_vulkan and flip_vulkan filters
|
||||
- adynamicequalizer audio filter
|
||||
- yadif_videotoolbox filter
|
||||
- VideoToolbox ProRes encoder
|
||||
- anlmf audio filter
|
||||
- IMF demuxer (experimental)
|
||||
version 4.4.2:
|
||||
- fate: update reference files after the recent dash manifest muxer changes
|
||||
- avformat/webmdashenc: fix on-demand profile string
|
||||
- Update for FFmpeg 4.4.2
|
||||
- avcodec/exr: Avoid signed overflow in displayWindow
|
||||
- avcodec/diracdec: avoid signed integer overflow in global mv
|
||||
- avcodec/takdsp: Fix integer overflow in decorrelate_sf()
|
||||
- avcodec/apedec: fix a integer overflow in long_filter_high_3800()
|
||||
- avfilter/vf_subtitles: pass storage size to libass
|
||||
- avformat/aqtitledec: Skip unrepresentable durations
|
||||
- avformat/cafdec: Do not store empty keys in read_info_chunk()
|
||||
- avformat/mxfdec: Do not clear array in mxf_read_strong_ref_array() before writing
|
||||
- avformat/mxfdec: Check for avio_read() failure in mxf_read_strong_ref_array()
|
||||
- avformat/mxfdec: Check count in mxf_read_strong_ref_array()
|
||||
- avformat/hls: Check target_duration
|
||||
- avcodec/pixlet: Avoid signed integer overflow in scaling in filterfn()
|
||||
- avformat/matroskadec: Check pre_ns
|
||||
- avcodec/sonic: Use unsigned for predictor_k to avoid undefined behavior
|
||||
- avcodec/libuavs3d: Check ff_set_dimensions() for failure
|
||||
- avcodec/mjpegbdec: Set buf_size
|
||||
- avformat/matroskadec: Use rounded down duration in get_cue_desc() check
|
||||
- avcodec/argo: Check packet size
|
||||
- avcodec/g729_parser: Check channels
|
||||
- avformat/avidec: Check height
|
||||
- avformat/rmdec: Better duplicate tags check
|
||||
- avformat/mov: Disallow empty sidx
|
||||
- avformat/argo_asf: Fix order of operations in error check in argo_asf_write_trailer()
|
||||
- avformat/matroskadec: Check duration
|
||||
- avformat/mov: Corner case encryption error cleanup in mov_read_senc()
|
||||
- avcodec/jpeglsdec: Fix if( code style
|
||||
- avcodec/jpeglsdec: Check get_ur_golomb_jpegls() for error
|
||||
- avcodec/motion_est: fix indention of ff_get_best_fcode()
|
||||
- avcodec/motion_est: Fix xy indexing on range violation in ff_get_best_fcode()
|
||||
- avformat/hls: Use unsigned for iv computation
|
||||
- avcodec/jpeglsdec: Increase range for N in ls_get_code_runterm() by using unsigned
|
||||
- avformat/matroskadec: Check desc_bytes
|
||||
- avformat/utils: Fix invalid NULL pointer operation in ff_parse_key_value()
|
||||
- avformat/matroskadec: Fix infinite loop with bz decompression
|
||||
- avformat/mov: Check size before subtraction
|
||||
- avcodec/cfhd: Avoid signed integer overflow in coeff
|
||||
- avcodec/apedec: Fix integer overflows in predictor_update_3930()
|
||||
- avcodec/apedec: fix integer overflow in 8bit samples
|
||||
- avformat/flvdec: timestamps cannot use the full int64 range
|
||||
- avcodec/tiff: Remove messing with jpeg context
|
||||
- avcodec/tiff: Use ff_set_dimensions() for setting up mjpeg context dimensions
|
||||
- avcodec/tiff: Pass max_pixels to mjpeg context
|
||||
- avcodec/vqavideo: reset accounting on error
|
||||
- avcodec/alacdsp: fix integer overflow in decorrelate_stereo()
|
||||
- avformat/4xm: Check for duplicate track ids
|
||||
- avformat/4xm: Consider max_streams on reallocating tracks array
|
||||
- avformat/mov: Check next offset in mov_read_dref()
|
||||
- avformat/vivo: Favor setting fps from explicit fractions
|
||||
- avformat/vivo: Do not use the general expression evaluator for parsing a floating point value
|
||||
- avformat/mxfdec: Check for duplicate mxf_read_index_entry_array()
|
||||
- avcodec/apedec: Change avg to uint32_t
|
||||
- avformat/mxfdec: Check component_depth in mxf_get_color_range()
|
||||
- avformat/mov: Disallow duplicate smdm
|
||||
- avformat/mov: Check for EOF in mov_read_glbl()
|
||||
- avcodec/vp3: Check version in all cases when VP4 code is not built
|
||||
- avformat/mov: Check channels for mov_parse_stsd_audio()
|
||||
- avformat/avidec: Check read_odml_index() for failure
|
||||
- avformat/aiffdec: Use av_rescale() for bitrate
|
||||
- avformat/aiffdec: sanity check block_align
|
||||
- avformat/aiffdec: Check sample_rate
|
||||
- avcodec/libdav1d: free the Dav1dData packet on dav1d_send_data() failure
|
||||
- avcodec/zmbvenc: Fix memleak upon init error
|
||||
- avcodec/dnxhdenc: Fix segfault when using too many slice threads
|
||||
- avcodec/wma(dec|enc): Fix memleaks upon allocation error
|
||||
- avfilter/avfilter: Actually error out on init error
|
||||
- avcodec/opus_silk: Remove wrong size information in function declaration
|
||||
- avformat/omadec: Don't output uninitialized values
|
||||
- avformat/jacosubenc: Fix writing extradata
|
||||
- avformat/cafenc: Fix memleak when trailer is never written
|
||||
- avformat/cafenc: Don't segfault upon allocation error
|
||||
- avformat/cafenc: Fix potential integer overflow
|
||||
- avformat/movenc: Limit ism_lookahead to a sane value
|
||||
- avutil/utils: Remove racy check from avutil_version()
|
||||
- avformat/sccdec: Don't use uninitialized data, fix crash, simplify logic
|
||||
- avformat/subtitles: Honour ff_subtitles_read_line() documentation
|
||||
- avformat/tee: Fix leak of FIFO-options dictionary
|
||||
- avformat/tee: Fix leak of strings
|
||||
- avcodec/rasc: Fix potential use of uninitialized value
|
||||
- avfilter/vf_w3fdif: Fix segfault on allocation error
|
||||
- avfilter/af_surround: Fix memleaks upon allocation error
|
||||
- avfilter/af_vibrato: Fix segfault upon allocation error
|
||||
- avfilter/aeval: Fix leak of expressions upon reallocation error
|
||||
- avdevice/xv: Increase array size
|
||||
- avfilter/asrc_flite: Fix use-after-frees
|
||||
- avfilter/asrc_flite: Don't segfault when using list_voices option
|
||||
- Revert "avfilter/vf_idet: reduce noisyness if the filter has been auto inserted"
|
||||
- avformat/matroskadec: Don't unnecessarily reduce aspect ratio
|
||||
- avcodec/h263: Fix global-buffer-overflow with noout flag2 set
|
||||
- avcodec/vaapi_encode: Fix segfault upon closing uninitialized encoder
|
||||
- avcodec/movtextenc: Fix infinite loop due to variable truncation
|
||||
- avcodec/libopenh264dec: Increase array sizes, fix stack-buffer overread
|
||||
- avcodec/libkvazaar: Increase array size
|
||||
- avformat/aadec: Don't use the same loop counter in inner and outer loop
|
||||
- avformat/moflex: Don't use uninitialized timebase for data stream
|
||||
- lavf/udp: do not return an uninitialized value from udp_open()
|
||||
- avcodec/nvenc: zero-initialize NV_ENC_REGISTER_RESOURCE struct
|
||||
- configure: Add missing libshine->mpegaudioheader dependency
|
||||
- avcodec/Makefile: Add missing entry for ADPCM_IMA_AMV_ENCODER
|
||||
- avcodec/Makefile: Only compile nvenc.o if needed
|
||||
- avcodec/av1_vaapi: improve decode quality
|
||||
- avcodec/av1_vaapi: enable segmentation features
|
||||
- avcodec/av1_vaapi: setting 2 output surface for film grain
|
||||
- avcodec/vaapi: increase av1 decode pool size
|
||||
- avcodec/dxva2_av1: fix global motion params
|
||||
- avcodec/av1_vaapi: add gm params valid check
|
||||
- avcodec/av1dec: support setup shear process
|
||||
- avcodec/av1: extend some definitions in spec section 3
|
||||
- cbs_av1: fix incorrect data type
|
||||
- avcodec/libdav1d: let libdav1d choose optimal max frame delay
|
||||
- avcodec/libdav1d: pass auto threads value to libdav1d
|
||||
|
||||
version 4.4.1:
|
||||
- avcodec/flac_parser: Consider AV_INPUT_BUFFER_PADDING_SIZE
|
||||
- avcodec/ttadsp: Fix integer overflows in tta_filter_process_c()
|
||||
- avutil/mathematics: Document av_rescale_rnd() behavior on non int64 results
|
||||
- avcodec/utils: Ensure 8x8 alignment for ARGO in avcodec_align_dimensions2()
|
||||
- avformat/matroskadec: Reset state also on failure in matroska_reset_status()
|
||||
- avformat/wavdec: Check smv_block_size
|
||||
- avformat/rmdec: Check for multiple audio_stream_info
|
||||
- avcodec/apedec: Use 64bit to avoid overflow
|
||||
- avcodec/apedec: Fix undefined integer overflow in long_filter_ehigh_3830()
|
||||
- oavformat/avidec: Check offset in odml
|
||||
- avformat/mpegts: use actually read packet size in mpegts_resync special case
|
||||
- fftools/ffmpeg: Fix crash when flushing non-fully setup output stream
|
||||
- avfilter/scale_npp: fix non-aligned output frame dimensions
|
||||
- Revert "avformat/hlsenc: compute video_keyframe_size after write keyframe"
|
||||
- Changelog: update
|
||||
- swscale/alphablend: Fix slice handling
|
||||
- avcodec/apedec: Fix integer overflow in filter_fast_3320()
|
||||
- avformat/mov: Fix last mfra check
|
||||
- avcodec/mxpegdec: Check for AVDISCARD_ALL
|
||||
- avcodec/flicvideo: Check remaining bytes in FLI*COPY
|
||||
- avcodec/utils: ARGO writes 4x4 blocks without regard to the image dimensions
|
||||
- avcodec/cbs_h265_syntax_template: Limit sps_num_palette_predictor_initializer_minus1 to 127
|
||||
- avcodec/snowdec: Maintain avmv buffer
|
||||
- avcodec/mpeg12dec: Do not put mpeg_f_code into an invalid state on error return
|
||||
- avcodec/mpegvideo_enc: Limit bitrate tolerance to the representable
|
||||
- avcodec/apedec: Fix integer overflow in intermediate
|
||||
- avformat/mvdec: Do not set invalid sample rate
|
||||
- avformat/sbgdec: Check for t0 overflow in expand_tseq()
|
||||
- avformat/rmdec: Use 64bit for intermediate for DEINT_ID_INT4
|
||||
- avformat/sbgdec: Check opt_duration and start for overflow
|
||||
- avcodec/exr: Fix undefined integer multiplication
|
||||
- avformat/mov: Check for duplicate clli
|
||||
- avformat/utils: Ignore negative duration in codec_info_duration computation
|
||||
- avformat/jacosubdec: Check for min in t overflow in get_shift()
|
||||
- avformat/mxfdec: check channel number in mxf_get_d10_aes3_packet()
|
||||
- (origin/release/4.4) avcodec/wmadec: handle run_level_decode error
|
||||
- avcodec/wma: Return specific error code
|
||||
- avcodec/dxva2_av1: fix superres_denom parameter
|
||||
- avcodec/libdav1d: fix compilation after recent libdav1d API changes
|
||||
- Changelog: update
|
||||
- avcodec/utils: don't return negative values in av_get_audio_frame_duration()
|
||||
- avcodec/jpeg2000dec: Check that atom header is within bytsetream
|
||||
- avcodec/apedec: Fix 2 integer overflows in filter_3800()
|
||||
- avcodec/xpmdec: Move allocations down after more error checks
|
||||
- avcodec/argo: Move U, fix shift
|
||||
- avformat/mov: Check dts for overflow in mov_read_trun()
|
||||
- avformat/avidec: Use 64bit for frame number in odml index parsing
|
||||
- avcodec/mjpegbdec: Skip SOS on AVDISCARD_ALL as does mjpeg
|
||||
- avcodec/mjpegdec: Check for bits left in mjpeg_decode_scan_progressive_ac()
|
||||
- avformat/adtsenc: return value check for init_get_bits in adts_decode_extradata
|
||||
- avcodec/webp: Check available space in loop in decode_entropy_coded_image()
|
||||
- avcodec/h264dec: use picture parameters in ff_print_debug_info2()
|
||||
- avcodec/vc1dec: ff_print_debug_info() does not support WMV3 field_mode
|
||||
- avcodec/frame_thread_encoder: Free AVCodecContext structure on error during init
|
||||
- avcodec/faxcompr: Check for end of input in cmode == 1 in decode_group3_2d_line()
|
||||
- avcodec/vc1dec: Disable error concealment for *IMAGE
|
||||
- avcodec/sbrdsp_fixed: Fix negation overflow in sbr_neg_odd_64_c()
|
||||
- avcodec/argo: Check for even dimensions
|
||||
- avformat/wtvdec: Check for EOF before seeking back in parse_media_type()
|
||||
- avformat/mpc8: Check first keyframe position for overflow
|
||||
- avcodec/exr: Check ac_count
|
||||
- avformat/wavdec: Use 64bit in new_pos computation
|
||||
- avformat/sbgdec: Check for overflow in timestamp preparation
|
||||
- avformat/dsicin: Check packet size for overflow
|
||||
- avformat/dsfdec: Change order of operations in bitrate computation
|
||||
- avformat/bfi: check nframes
|
||||
- avformat/avidec: fix position overflow in avi_load_index()
|
||||
- avformat/asfdec_f: Check sizeX against padding
|
||||
- avformat/aiffdec: Check for size overflow in header parsing
|
||||
- avcodec/aaccoder: Add minimal bias in search_for_ms()
|
||||
- avformat/mov: Fix incorrect overflow detection in mov_read_sidx()
|
||||
- avformat/mov: Avoid undefined overflow in time_offset calculation
|
||||
- avfilter/af_drmeter: Check that there is data
|
||||
- avfilter/vf_fftdnoiz: Use lrintf() in export_row8()
|
||||
- avfilter/vf_mestimate: Check b_count
|
||||
- avformat/mov: do not ignore errors in mov_metadata_hmmt()
|
||||
- avformat/mxfdec: Check size for shrinking
|
||||
- avcodec/dnxhddec: check and propagate function return value
|
||||
- swscale/slice: Fix wrong return on error
|
||||
- avcodec/aacdec_template: Avoid some invalid values to be set by decode_audio_specific_config_gb()
|
||||
- swscale/slice: Check slice for allocation failure
|
||||
- avformat/matroskadec: Fix handling of huge default durations
|
||||
- avcodec/lpc: check for zero err in normalization in compute_lpc_coefs()
|
||||
- avcodec/j2kenc: Check for av_strtok() failure
|
||||
- avformat/ftp: Check for av_strtok() failure
|
||||
- tools/cws2fws: Check read() for failure
|
||||
- avcodec/cpia: Fix missing src_size update
|
||||
- avcodec/exr: Better size checks
|
||||
- avcodec/clearvideo: Check tile_size to be not too large
|
||||
- avcodec/utils: Use 64bit for intermediate in AV_CODEC_ID_ADPCM_THP* duration calculation
|
||||
- avformat/aaxdec: Check avio_seek() in header reading
|
||||
- avcodec/hevc_sei: Use get_bits_long() for time_offset_value
|
||||
- avformat/rmdec: Check old_format len for overflow
|
||||
- avformat/realtextdec: Check the pts difference before using it for the duration computation
|
||||
- avformat/qcp: Avoid negative nb_rates
|
||||
- avformat/pp_bnk: Use 64bit in bitrate computation
|
||||
- avformat/nutdec: Check tmp_size
|
||||
- avformat/msf: Check that channels doesnt overflow during extradata construction
|
||||
- avformat/subtitles: Check pts difference before use
|
||||
- avformat/mpc8: Check for position overflow in mpc8_handle_chunk()
|
||||
- avformat/mccdec: Fix overflows in num/den
|
||||
- avformat/iff: Use 64bit in duration computation
|
||||
- avformat/dxa: Check fps to be within the supported range more precissely
|
||||
- avcodec/iff: Only write palette to plane 1 if its PAL8
|
||||
- avformat/tta: Check for EOF in index reading loop
|
||||
- avfilter/vf_scale: set the RGB matrix coefficients in case of RGB
|
||||
- avfilter/vf_scale: reset color matrix in case of identity & non-RGB
|
||||
- ffmpeg: fix order between field order autodetection and override
|
||||
- avcodec/h264_slice: clear old slice POC values on parsing failure
|
||||
- avfilter/f_metadata: do not return the frame early if there is no metadata
|
||||
- ffbuild: Avoid using the --preprocessor argument to windres
|
||||
- avcodec/crystalhd: signal that the decoder sets all output frame properties
|
||||
- avcodec/cuviddec: signal that the decoder sets all output frame properties
|
||||
- avcodec/decode: reindent after the previous commit
|
||||
- avcodec/decode: add an internal codec flag to signal a decoder sets all output frame properties
|
||||
- avcodec/decode: fetch packets from the pkt_props FIFO on every frame returned
|
||||
- Update missed irc links
|
||||
- avformat/rpl: The associative law doesnt hold for signed integers in C
|
||||
- avcodec/faxcompr: Check available bits in decode_uncompressed()
|
||||
- avcodec/faxcompr: Check if bits are available before reading in cmode == 9 || cmode == 10
|
||||
- avformat/utils: Avoid overflow in codec_info_duration computation for subtitles
|
||||
- avformat/utils: check dts/duration to be representable before using them
|
||||
- avcodec/utils: do "calc from frame_bytes, channels, and block_align" in 64bit
|
||||
- avcodec/ttadata: Add sentinel at the end of ff_tta_shift_1
|
||||
- avformat/mov: Check for duplicate mdcv
|
||||
- avfilter/vf_dctdnoiz: Check threads
|
||||
- avfilter/vf_ciescope: Fix undefined behavior in rgb_to_xy() with black
|
||||
- avcodec/dpx: fix off by 1 in bits_per_color check
|
||||
- avformat/rpl: Check for EOF and zero framesize
|
||||
- avcodec/vc2enc: Check for non negative slice bounds
|
||||
- avformat/rpl: Use 64bit in bitrate computation and check it
|
||||
- avcodec/mpegvideo_enc: Reset stuffing bits if they are not supported
|
||||
- avcodec/svq1enc: Do not print debug RD value before it has been computed
|
||||
- avcodec/aacpsy: Check bandwidth
|
||||
- avcodec/aacenc: Do not divide by lambda_count if it is 0
|
||||
- avcodec/aacenc: Use FLT_EPSILON for lambda minimum
|
||||
- avfilter/vf_yadif: Fix handing of tiny images
|
||||
- avfilter/vf_vmafmotion: Check dimensions
|
||||
- avformat/movenc: Check pal_size before use
|
||||
- avcodec/lpc: Avoid floating point division by 0
|
||||
- avcodec/aacpsy: Avoid floating point division by 0 of norm_fac
|
||||
- avcodec/aacenc: Avoid 0 lambda
|
||||
- avcodec/exr: More strictly check dc_count
|
||||
- avcodec/exr: x/ymax cannot be INT_MAX
|
||||
- avformat/avio: Check av_opt_copy() for failure
|
||||
- avformat/moflex: Remove unneeded format variable
|
||||
- avformat/fifo: check for flushed packets and timeshift
|
||||
- avcodec/clearvideo: Check for 0 tile_shift
|
||||
- avcodec/vc1: Check remaining bits in ff_vc1_parse_frame_header()
|
||||
- avformat/mov: Ignore duplicate CoLL
|
||||
- avformat/mov: Limit nb_chapter_tracks to input size
|
||||
- avformat/utils: Use 64bit earlier in r_frame_rate check
|
||||
- avcodec/alsdec: Fix decoding error with mono audio files
|
||||
- avformat/mvdec: Check sample rate in parse_audio_var()
|
||||
- avcodec/faxcompr: Check for end of bitstream in decode_group3_1d_line() and decode_group3_2d_line()
|
||||
- avcodec/utils: treat PAL8 for jpegs similar to other colorspaces
|
||||
- avcodec/jpeglsdec: Set alpha plane in PAL8 so image is not 100% transparent
|
||||
- avformat/asfdec_o: Use ff_get_extradata()
|
||||
- avformat/id3v2: Check end for overflow in id3v2_parse()
|
||||
- avformat/mxfdec: Fix file position addition
|
||||
- avformat/wtvdec: Improve size overflow checks in parse_chunks()
|
||||
- avcodec/faxcompr: Check remaining bits on error in decode_group3_1d_line()
|
||||
- avformat/mov: check for pts overflow in mov_read_sidx()
|
||||
- avcodec/utils: Check ima wav duration for overflow
|
||||
- avcodec/rv10: Execute whole size check earlier for rv20
|
||||
- avformat/cafdec: Check channels
|
||||
- avcodec/exr: increase vlc depth
|
||||
- avcodec/dpx: Check bits_per_color earlier
|
||||
- avformat/mvi: Check audio_data_size to be non negative
|
||||
- avcodec/nvenc: disable s12m timestamps by default
|
||||
- aarch64: hevc_idct: Fix overflows in idct_dc
|
||||
- avcodec/vaapi_av1: pass full buffer size for each tile
|
||||
- avcodec/videotoolboxenc: #define TARGET_CPU_ARM64 to 0 if not provided by the SDK
|
||||
- lavc/pngdec: fix updating reference frames for APNG_DISPOSE_OP_BACKGROUND
|
||||
- ffmpeg: return no chosen output if an uninitialized stream is unavailable
|
||||
- avcodec/h263, h263data: Move ff_h263_init_rl_inter to h263.c
|
||||
- configure: Add missing mpegvideo dependency for IPU decoder
|
||||
- avcodec/ttmlenc: Don't confuse capabilities and caps_internal
|
||||
- avformat/mpegts: add missing sample_rate value to Opus extradata
|
||||
- avformat/movenc: fix writing dOps atoms
|
||||
- avcodec/av1_metadata: don't store the inserted TD OBU in stack
|
||||
- avcodec/nellymoserenc: Fix segfault when using unsupported channels/rate
|
||||
- avutil/cpu: Use HW_NCPUONLINE to detect # of online CPUs with OpenBSD
|
||||
- avcodec/nvenc: fix lossless tuning logic
|
||||
- avfilter/overlay_cuda: check av_buffer_ref result
|
||||
- avfilter/overlay_cuda: hold explicit reference to hw_device_ctx
|
||||
- avformat/matroskaenc: Fix leak when writing attachment without filename
|
||||
|
||||
version 4.4:
- AudioToolbox output device

@@ -1,7 +0,0 @@
{
    "drips": {
        "ethereum": {
            "ownedBy": "0x2f3900e7064eE63D30d749971265858612AA7139"
        }
    }
}

11	INSTALL.md

@@ -1,8 +1,5 @@
## Installing FFmpeg

0. If you would like to include source plugins, merge them before running configure,
   for example by running tools/merge-all-source-plugins.

1. Type `./configure` to create the configuration. A list of configure
   options is printed by running `configure --help`.

@@ -18,11 +15,3 @@ NOTICE
------

- Non-system dependencies (e.g. libx264, libvpx) are disabled by default.

NOTICE for Package Maintainers
------------------------------

- It is recommended to build FFmpeg twice: first with minimal external dependencies,
  so that 3rd-party packages which depend on FFmpeg's libavutil/libavfilter/libavcodec/libavformat
  can be built against it, and then a final build of FFmpeg with full dependencies
  (which may in turn depend on some of these 3rd-party packages). This avoids
  circular dependencies during the build (a sketch follows below).

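  A rough sketch of that two-pass build; the install prefixes and the libx264
  example dependency are placeholders chosen only for illustration:

      # pass 1: minimal FFmpeg, no external libraries
      ./configure --prefix=/opt/ffmpeg-minimal
      make && make install

      # ... build the 3rd-party packages against /opt/ffmpeg-minimal here ...

      # pass 2: full build, now enabling the external dependencies
      make distclean
      ./configure --prefix=/opt/ffmpeg --enable-gpl --enable-libx264
      make && make install
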
@@ -12,6 +12,7 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.

Specifically, the GPL parts of FFmpeg are:

- libpostproc
- optional x86 optimization in the files
  - `libavcodec/x86/flac_dsp_gpl.asm`
  - `libavcodec/x86/idct_mmx.c`
@@ -44,6 +45,7 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_owdenoise.c`
- `vf_perspective.c`
- `vf_phase.c`
- `vf_pp.c`
- `vf_pp7.c`
- `vf_pullup.c`
- `vf_repeatfields.c`

234	MAINTAINERS

@@ -6,38 +6,28 @@ FFmpeg code.

Please try to keep entries where you are the maintainer up to date!

*Status*, one of the following:
[X] Old code. Something tagged obsolete generally means it has been replaced by a better system and you should be using that.
[0] No current maintainer [but maybe you could take the role as you write your new code].
[1] It has a maintainer but they don't have time to do much other than throw the odd patch in.
[2] Someone actually looks after it.

Names in () mean that the maintainer currently has no time to maintain the code.
A (CC <address>) after the name means that the maintainer prefers to be CC-ed on
patches and related discussions.

(L <address>) *Mailing list* that is relevant to this area
(W <address>) *Web-page* with status/info
(B <address>) URI for where to file *bugs*. A web-page with detailed bug
              filing info, a direct bug tracker link, or a mailto: URI.
(P <address>) *Subsystem Profile* document with more details on submitting
              patches to the given subsystem. This is either an in-tree file,
              or a URI. See Documentation/maintainer/maintainer-entry-profile.rst
              for details.
(T <address>) *SCM* tree type and location.
              Type is one of: git, hg, quilt, stgit, topgit

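  For illustration only, a hypothetical entry using the notation above (the
  file, maintainer name and address are invented):

      vf_example.c [2] Jane Doe (CC <maintainer@example.org>)
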
Project Leader
|
||||
==============
|
||||
|
||||
final design decisions
|
||||
|
||||
|
||||
Applications
|
||||
============
|
||||
|
||||
ffmpeg:
|
||||
ffmpeg.c Michael Niedermayer, Anton Khirnov
|
||||
ffmpeg.c Michael Niedermayer
|
||||
|
||||
ffplay:
|
||||
ffplay.c [2] Marton Balint
|
||||
ffplay.c Marton Balint
|
||||
|
||||
ffprobe:
|
||||
ffprobe.c [2] Stefano Sabatini
|
||||
ffprobe.c Stefano Sabatini
|
||||
|
||||
Commandline utility code:
|
||||
cmdutils.c, cmdutils.h Michael Niedermayer
|
||||
@@ -45,32 +35,29 @@ Commandline utility code:
|
||||
QuickTime faststart:
|
||||
tools/qt-faststart.c Baptiste Coudurier
|
||||
|
||||
Execution Graph Printing
|
||||
fftools/graph, fftools/resources [2] softworkz
|
||||
|
||||
Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
|
||||
project server day to day operations (L: root@ffmpeg.org) Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
project server emergencies (L: root@ffmpeg.org) Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
presets [0]
|
||||
project server Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov
|
||||
presets Robert Swain
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
API tests [0]
|
||||
samples-request [2] Thilo Borgmann, James Almer, Ben Littler
|
||||
API tests Ludmila Glinskih
|
||||
|
||||
|
||||
Communication
|
||||
=============
|
||||
website (T: https://git.ffmpeg.org/ffmpeg-web) Deby Barbara Lepage
|
||||
fate.ffmpeg.org (L: fate-admin@ffmpeg.org) (W: https://fate.ffmpeg.org) (P: https://ffmpeg.org/fate.html) (S: https://git.ffmpeg.org/fateserver) Timo Rothenpieler
|
||||
Trac bug tracker (W: https://trac.ffmpeg.org) Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
|
||||
Patchwork [2] (W: https://patchwork.ffmpeg.org) Andriy Gelman
|
||||
mailing lists (W: https://ffmpeg.org/contact.html#MailingLists) Baptiste Coudurier
|
||||
|
||||
website Deby Barbara Lepage
|
||||
fate.ffmpeg.org Timothy Gu
|
||||
Trac bug tracker Alexander Strasser, Michael Niedermayer, Carl Eugen Hoyos
|
||||
Patchwork Andriy Gelman
|
||||
mailing lists Baptiste Coudurier
|
||||
Twitter Reynaldo H. Verdejo Pinochet
|
||||
Launchpad Timothy Gu
|
||||
ffmpeg-security [2] (L: ffmpeg-security@ffmpeg.org) (W: https://ffmpeg.org/security.html) Michael Niedermayer, Reimar Doeffinger
|
||||
ffmpeg-security Andreas Cadhalpun, Carl Eugen Hoyos, Clément Bœsch, Michael Niedermayer, Reimar Doeffinger, rcombs, wm4
|
||||
|
||||
|
||||
libavutil
|
||||
@@ -85,26 +72,24 @@ Other:
|
||||
aes_ctr.c, aes_ctr.h Eran Kornblau
|
||||
bprint Nicolas George
|
||||
bswap.h
|
||||
csp.c, csp.h Leo Izen, Ronald S. Bultje
|
||||
des Reimar Doeffinger
|
||||
dynarray.h Nicolas George
|
||||
eval.c, eval.h [2] Michael Niedermayer
|
||||
eval.c, eval.h Michael Niedermayer
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
hwcontext_cuda* Timo Rothenpieler
|
||||
hwcontext_d3d12va* Wu Jianhua
|
||||
hwcontext_vulkan* [2] Lynne
|
||||
hwcontext_vulkan* Lynne
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
lzo Reimar Doeffinger
|
||||
mathematics.c, mathematics.h [2] Michael Niedermayer
|
||||
mem.c, mem.h [2] Michael Niedermayer
|
||||
mathematics.c, mathematics.h Michael Niedermayer
|
||||
mem.c, mem.h Michael Niedermayer
|
||||
opencl.c, opencl.h Wei Gao
|
||||
opt.c, opt.h Michael Niedermayer
|
||||
rational.c, rational.h [2] Michael Niedermayer
|
||||
rational.c, rational.h Michael Niedermayer
|
||||
rc4 Reimar Doeffinger
|
||||
ripemd.c, ripemd.h James Almer
|
||||
tx* [2] Lynne
|
||||
tx* Lynne
|
||||
|
||||
|
||||
libavcodec
|
||||
@@ -126,18 +111,22 @@ Generic Parts:
|
||||
DSP utilities:
|
||||
dsputils.c, dsputils.h Michael Niedermayer
|
||||
entropy coding:
|
||||
rangecoder.c, rangecoder.h [2] Michael Niedermayer
|
||||
rangecoder.c, rangecoder.h Michael Niedermayer
|
||||
lzw.* Michael Niedermayer
|
||||
floating point AAN DCT:
|
||||
faandct.c, faandct.h [2] Michael Niedermayer
|
||||
faandct.c, faandct.h Michael Niedermayer
|
||||
Non-power-of-two MDCT:
|
||||
mdct15.c, mdct15.h Rostislav Pehlivanov
|
||||
Golomb coding:
|
||||
golomb.c, golomb.h [2] Michael Niedermayer
|
||||
golomb.c, golomb.h Michael Niedermayer
|
||||
motion estimation:
|
||||
motion* Michael Niedermayer
|
||||
rate control:
|
||||
ratecontrol.c [2] Michael Niedermayer
|
||||
ratecontrol.c Michael Niedermayer
|
||||
simple IDCT:
|
||||
simple_idct.c, simple_idct.h [2] Michael Niedermayer
|
||||
simple_idct.c, simple_idct.h Michael Niedermayer
|
||||
postprocessing:
|
||||
libpostproc/* Michael Niedermayer
|
||||
table generation:
|
||||
tableprint.c, tableprint.h Reimar Doeffinger
|
||||
fixed point FFT:
|
||||
@@ -145,14 +134,12 @@ Generic Parts:
|
||||
Text Subtitles Clément Bœsch
|
||||
|
||||
Codecs:
|
||||
4xm.c [2] Michael Niedermayer
|
||||
4xm.c Michael Niedermayer
|
||||
8bps.c Roberto Togni
|
||||
8svx.c Jaikrishnan Menon
|
||||
aacenc*, aaccoder.c Rostislav Pehlivanov
|
||||
adpcm.c Zane van Iperen
|
||||
alacenc.c Jaikrishnan Menon
|
||||
alsdec.c Thilo Borgmann, Umair Khan
|
||||
amfenc* Dmitrii Ovchinnikov
|
||||
aptx.c Aurelien Jacobs
|
||||
ass* Aurelien Jacobs
|
||||
asv* Michael Niedermayer
|
||||
@@ -162,16 +149,17 @@ Codecs:
|
||||
bgmc.c, bgmc.h Thilo Borgmann
|
||||
binkaudio.c Peter Ross
|
||||
cavs* Stefan Gehrer
|
||||
cdxl.c Paul B Mahol
|
||||
celp_filters.* Vitor Sessak
|
||||
cinepak.c Roberto Togni
|
||||
cinepakenc.c Rl / Aetey G.T. AB
|
||||
ccaption_dec.c Anshul Maheshwari, Aman Gupta
|
||||
cljr Alex Beregszaszi
|
||||
cpia.c Stephan Hilb
|
||||
crystalhd.c Philip Langdale
|
||||
cscd.c Reimar Doeffinger
|
||||
cuviddec.c Timo Rothenpieler
|
||||
dca* foo86
|
||||
dfpwm* Jack Bruienne
|
||||
dirac* Rostislav Pehlivanov
|
||||
dnxhd* Baptiste Coudurier
|
||||
dolby_e* foo86
|
||||
@@ -179,10 +167,10 @@ Codecs:
|
||||
dss_sp.c Oleksij Rempel
|
||||
dv.c Roman Shaposhnik
|
||||
dvbsubdec.c Anshul Maheshwari
|
||||
dxv.*, dxvenc.* Emma Worley
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
evrc* Paul B Mahol
|
||||
exif.c, exif.h Thilo Borgmann
|
||||
ffv1* [2] Michael Niedermayer
|
||||
ffv1* Michael Niedermayer
|
||||
ffwavesynth.c Nicolas George
|
||||
fifo.c Jan Sebechlebsky
|
||||
flicvideo.c Mike Melanson
|
||||
@@ -193,23 +181,21 @@ Codecs:
|
||||
h263* Michael Niedermayer
|
||||
h264* Loren Merritt, Michael Niedermayer
|
||||
hap* Tom Butterworth
|
||||
hevc/* Anton Khirnov
|
||||
huffyuv* Michael Niedermayer
|
||||
idcinvideo.c Mike Melanson
|
||||
interplayvideo.c Mike Melanson
|
||||
jni*, ffjni* Matthieu Bouron
|
||||
jpeg2000* Nicolas Bertrand
|
||||
jpegxl* Leo Izen
|
||||
jvdec.c Peter Ross
|
||||
lcl*.c Roberto Togni, Reimar Doeffinger
|
||||
libcelt_dec.c Nicolas George
|
||||
libcodec2.c Tomas Härdin
|
||||
libdirac* David Conrad
|
||||
libdavs2.c Huiwen Ren
|
||||
libjxl*.c, libjxl.h Leo Izen
|
||||
libgsm.c Michel Bardiaux
|
||||
libkvazaar.c Arttu Ylä-Outinen
|
||||
libopenh264enc.c Martin Storsjo, Linjie Fu
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libtheoraenc.c David Conrad
|
||||
libvorbis.c David Conrad
|
||||
@@ -228,18 +214,18 @@ Codecs:
|
||||
mqc* Nicolas Bertrand
|
||||
msmpeg4.c, msmpeg4data.h Michael Niedermayer
|
||||
msrle.c Mike Melanson
|
||||
msrleenc.c Tomas Härdin
|
||||
msvideo1.c Mike Melanson
|
||||
nuv.c Reimar Doeffinger
|
||||
nvdec*, nvenc* Timo Rothenpieler
|
||||
omx.c Martin Storsjo, Aman Gupta
|
||||
opus* Rostislav Pehlivanov
|
||||
paf.* Paul B Mahol
|
||||
pcx.c Ivo van Poorten
|
||||
pgssubdec.c Reimar Doeffinger
|
||||
ptx.c Ivo van Poorten
|
||||
qcelp* Reynaldo H. Verdejo Pinochet
|
||||
qdm2.c, qdm2data.h Roberto Togni
|
||||
qsv* Mark Thompson, Zhong Li, Haihao Xiang
|
||||
qsv* Mark Thompson, Zhong Li
|
||||
qtrle.c Mike Melanson
|
||||
ra144.c, ra144.h, ra288.c, ra288.h Roberto Togni
|
||||
resample2.c Michael Niedermayer
|
||||
@@ -247,7 +233,7 @@ Codecs:
|
||||
rpza.c Roberto Togni
|
||||
rtjpeg.c, rtjpeg.h Reimar Doeffinger
|
||||
rv10.c Michael Niedermayer
|
||||
sanm.c Manuel Lauss
|
||||
s3tc* Ivo van Poorten
|
||||
smc.c Mike Melanson
|
||||
snow* Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
@@ -255,13 +241,16 @@ Codecs:
|
||||
srt* Aurelien Jacobs
|
||||
sunrast.c Ivo van Poorten
|
||||
svq3.c Michael Niedermayer
|
||||
tak* Paul B Mahol
|
||||
truemotion1* Mike Melanson
|
||||
tta.c Alex Beregszaszi, Jaikrishnan Menon
|
||||
ttaenc.c Paul B Mahol
|
||||
txd.c Ivo van Poorten
|
||||
v4l2_* Jorge Ramirez-Ortiz
|
||||
vc2* Rostislav Pehlivanov
|
||||
vcr1.c Michael Niedermayer
|
||||
videotoolboxenc.c Rick Kern, Aman Gupta
|
||||
vima.c Paul B Mahol
|
||||
vorbisdec.c Denes Balatoni, David Conrad
|
||||
vorbisenc.c Oded Shimon
|
||||
vp3* Mike Melanson
|
||||
@@ -270,23 +259,24 @@ Codecs:
|
||||
vp8 David Conrad, Ronald Bultje
|
||||
vp9 Ronald Bultje
|
||||
vqavideo.c Mike Melanson
|
||||
vvc [2] Nuo Mi, Wu Jianhua, Frank Plowman
|
||||
wmaprodec.c Sascha Sommer
|
||||
wmavoice.c Ronald S. Bultje
|
||||
wmv2.c Michael Niedermayer
|
||||
xan.c Mike Melanson
|
||||
xbm* Paul B Mahol
|
||||
xface Stefano Sabatini
|
||||
xvmc.c Ivan Kalvachev
|
||||
xwd* Paul B Mahol
|
||||
|
||||
Hardware acceleration:
|
||||
crystalhd.c Philip Langdale
|
||||
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
|
||||
d3d11va* Steve Lhomme
|
||||
d3d12va* Wu Jianhua
|
||||
d3d12va_encode* Tong Wu
|
||||
mediacodec* Matthieu Bouron, Aman Gupta, Zhao Zhili
|
||||
vaapi* Haihao Xiang
|
||||
vaapi_encode* Mark Thompson, Haihao Xiang
|
||||
mediacodec* Matthieu Bouron, Aman Gupta
|
||||
vaapi* Gwenole Beauchesne
|
||||
vaapi_encode* Mark Thompson
|
||||
vdpau* Philip Langdale, Carl Eugen Hoyos
|
||||
videotoolbox* Rick Kern, Aman Gupta, Zhao Zhili
|
||||
videotoolbox* Rick Kern, Aman Gupta
|
||||
|
||||
|
||||
libavdevice
|
||||
@@ -321,38 +311,66 @@ Generic parts:
|
||||
motion_estimation.c Davinder Singh
|
||||
|
||||
Filters:
|
||||
f_drawgraph.c Paul B Mahol
|
||||
af_adelay.c Paul B Mahol
|
||||
af_aecho.c Paul B Mahol
|
||||
af_afade.c Paul B Mahol
|
||||
af_amerge.c Nicolas George
|
||||
af_aphaser.c Paul B Mahol
|
||||
af_aresample.c Michael Niedermayer
|
||||
af_astats.c Paul B Mahol
|
||||
af_atempo.c Pavel Koshevoy
|
||||
af_biquads.c Paul B Mahol
|
||||
af_chorus.c Paul B Mahol
|
||||
af_compand.c Paul B Mahol
|
||||
af_firequalizer.c Muhammad Faiz
|
||||
af_hdcd.c Burt P.
|
||||
af_ladspa.c Paul B Mahol
|
||||
af_loudnorm.c Kyle Swanson
|
||||
af_pan.c Nicolas George
|
||||
af_sidechaincompress.c Paul B Mahol
|
||||
af_silenceremove.c Paul B Mahol
|
||||
avf_aphasemeter.c Paul B Mahol
|
||||
avf_avectorscope.c Paul B Mahol
|
||||
avf_showcqt.c Muhammad Faiz
|
||||
vf_blend.c Paul B Mahol
|
||||
vf_bwdif Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_chromakey.c Timo Rothenpieler
|
||||
vf_colorchannelmixer.c Paul B Mahol
|
||||
vf_colorconstancy.c Mina Sami (CC <minas.gorgy@gmail.com>)
|
||||
vf_colorbalance.c Paul B Mahol
|
||||
vf_colorkey.c Timo Rothenpieler
|
||||
vf_colorlevels.c Paul B Mahol
|
||||
vf_coreimage.m Thilo Borgmann
|
||||
vf_deband.c Paul B Mahol
|
||||
vf_dejudder.c Nicholas Robbins
|
||||
vf_delogo.c Jean Delvare (CC <jdelvare@suse.com>)
|
||||
vf_drawbox.c/drawgrid Andrey Utkin
|
||||
vf_fsync.c Thilo Borgmann
|
||||
vf_extractplanes.c Paul B Mahol
|
||||
vf_histogram.c Paul B Mahol
|
||||
vf_hqx.c Clément Bœsch
|
||||
vf_idet.c Pascal Massimino
|
||||
vf_il.c Paul B Mahol
|
||||
vf_(t)interlace Thomas Mundt (CC <thomas.mundt@hr.de>)
|
||||
vf_lenscorrection.c Daniel Oberhoff
|
||||
vf_libplacebo.c Niklas Haas
|
||||
vf_mergeplanes.c Paul B Mahol
|
||||
vf_mestimate.c Davinder Singh
|
||||
vf_minterpolate.c Davinder Singh
|
||||
vf_neighbor.c Paul B Mahol
|
||||
vf_psnr.c Paul B Mahol
|
||||
vf_random.c Paul B Mahol
|
||||
vf_readvitc.c Tobias Rapp (CC t.rapp at noa-archive dot com)
|
||||
vf_scale.c [2] Michael Niedermayer
|
||||
vf_scale.c Michael Niedermayer
|
||||
vf_separatefields.c Paul B Mahol
|
||||
vf_ssim.c Paul B Mahol
|
||||
vf_stereo3d.c Paul B Mahol
|
||||
vf_telecine.c Paul B Mahol
|
||||
vf_tonemap_opencl.c Ruiling Song
|
||||
vf_yadif.c [2] Michael Niedermayer
|
||||
vf_xfade_vulkan.c [2] Marvin Scholz (CC <epirat07@gmail.com>)
|
||||
vf_yadif.c Michael Niedermayer
|
||||
vf_zoompan.c Paul B Mahol
|
||||
|
||||
Sources:
|
||||
vsrc_mandelbrot.c [2] Michael Niedermayer
|
||||
vsrc_mandelbrot.c Michael Niedermayer
|
||||
|
||||
dnn Yejun Guo
|
||||
|
||||
@@ -370,7 +388,8 @@ Generic parts:
|
||||
Muxers/Demuxers:
|
||||
4xm.c Mike Melanson
|
||||
aadec.c Vesselin Bontchev (vesselin.bontchev at yandex dot com)
|
||||
adtsenc.c [0]
|
||||
adtsenc.c Robert Swain
|
||||
afc.c Paul B Mahol
|
||||
aiffdec.c Baptiste Coudurier, Matthieu Bouron
|
||||
aiffenc.c Baptiste Coudurier, Matthieu Bouron
|
||||
alp.c Zane van Iperen
|
||||
@@ -379,26 +398,28 @@ Muxers/Demuxers:
|
||||
apngdec.c Benoit Fouet
|
||||
argo_asf.c Zane van Iperen
|
||||
argo_brp.c Zane van Iperen
|
||||
argo_cvg.c Zane van Iperen
|
||||
ass* Aurelien Jacobs
|
||||
astdec.c Paul B Mahol
|
||||
astenc.c James Almer
|
||||
avi* Michael Niedermayer
|
||||
avisynth.c Stephen Hutchinson
|
||||
avr.c Paul B Mahol
|
||||
bink.c Peter Ross
|
||||
boadec.c Michael Niedermayer
|
||||
brstm.c Paul B Mahol
|
||||
caf* Peter Ross
|
||||
cdxl.c Paul B Mahol
|
||||
codec2.c Tomas Härdin
|
||||
crc.c Michael Niedermayer
|
||||
dashdec.c Steven Liu
|
||||
dashenc.c Karthick Jeyapal
|
||||
daud.c Reimar Doeffinger
|
||||
dfpwmdec.c Jack Bruienne
|
||||
dss.c Oleksij Rempel
|
||||
dtsdec.c foo86
|
||||
dtshddec.c Paul B Mahol
|
||||
dv.c Roman Shaposhnik
|
||||
dvdvideodec.c [2] Marth64
|
||||
electronicarts.c Peter Ross
|
||||
evc* Samsung (Dawid Kozinski)
|
||||
epafdec.c Paul B Mahol
|
||||
ffm* Baptiste Coudurier
|
||||
flic.c Mike Melanson
|
||||
flvdec.c Michael Niedermayer
|
||||
@@ -406,26 +427,26 @@ Muxers/Demuxers:
|
||||
gxf.c Reimar Doeffinger
|
||||
gxfenc.c Baptiste Coudurier
|
||||
hlsenc.c Christian Suloway, Steven Liu
|
||||
iamf* [2] James Almer
|
||||
idcin.c Mike Melanson
|
||||
idroqdec.c Mike Melanson
|
||||
iff.c Jaikrishnan Menon
|
||||
imf* Pierre-Anthony Lemieux
|
||||
img2*.c Michael Niedermayer
|
||||
ipmovie.c Mike Melanson
|
||||
ircam* Paul B Mahol
|
||||
iss.c Stefan Gehrer
|
||||
jpegxl* Leo Izen
|
||||
jvdec.c Peter Ross
|
||||
kvag.c Zane van Iperen
|
||||
libmodplug.c Clément Bœsch
|
||||
libopenmpt.c Josh de Kock
|
||||
lmlm4.c Ivo van Poorten
|
||||
lvfdec.c Paul B Mahol
|
||||
lxfdec.c Tomas Härdin
|
||||
matroska.c Andreas Rheinhardt
|
||||
matroskadec.c Andreas Rheinhardt
|
||||
matroskaenc.c Andreas Rheinhardt
|
||||
matroska.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskadec.c Aurelien Jacobs, Andreas Rheinhardt
|
||||
matroskaenc.c David Conrad, Andreas Rheinhardt
|
||||
matroska subtitles (matroskaenc.c) John Peebles
|
||||
metadata* Aurelien Jacobs
|
||||
mgsts.c Paul B Mahol
|
||||
microdvd* Aurelien Jacobs
|
||||
mm.c Peter Ross
|
||||
mov.c Baptiste Coudurier
|
||||
@@ -438,6 +459,7 @@ Muxers/Demuxers:
|
||||
msnwc_tcp.c Ramiro Polla
|
||||
mtv.c Reynaldo H. Verdejo Pinochet
|
||||
mxf* Baptiste Coudurier, Tomas Härdin
|
||||
nistspheredec.c Paul B Mahol
|
||||
nsvdec.c Francois Revol
|
||||
nut* Michael Niedermayer
|
||||
nuv.c Reimar Doeffinger
|
||||
@@ -445,13 +467,13 @@ Muxers/Demuxers:
|
||||
oggenc.c Baptiste Coudurier
|
||||
oggparse*.c David Conrad
|
||||
oma.c Maxim Poliakovski
|
||||
paf.c Paul B Mahol
|
||||
pp_bnk.c Zane van Iperen
|
||||
psxstr.c Mike Melanson
|
||||
pva.c Ivo van Poorten
|
||||
pvfdec.c Paul B Mahol
|
||||
r3d.c Baptiste Coudurier
|
||||
raw.c Michael Niedermayer
|
||||
rcwtdec.c [2] Marth64
|
||||
rcwtenc.c [2] Marth64
|
||||
rdt.c Ronald S. Bultje
|
||||
rl2.c Sascha Sommer
|
||||
rmdec.c, rmenc.c Ronald S. Bultje
|
||||
@@ -470,10 +492,11 @@ Muxers/Demuxers:
|
||||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
segment.c Stefano Sabatini
|
||||
smush.c Manuel Lauss
|
||||
smjpeg* Paul B Mahol
|
||||
spdif* Anssi Hannula
|
||||
srtdec.c Aurelien Jacobs
|
||||
swf.c Baptiste Coudurier
|
||||
takdec.c Paul B Mahol
|
||||
tta.c Alex Beregszaszi
|
||||
txd.c Ivo van Poorten
|
||||
voc.c Aurelien Jacobs
|
||||
@@ -483,49 +506,46 @@ Muxers/Demuxers:
|
||||
webvtt* Matthew J Heaney
|
||||
westwood.c Mike Melanson
|
||||
wtv.c Peter Ross
|
||||
wvenc.c Paul B Mahol
|
||||
|
||||
Protocols:
|
||||
async.c Zhang Rui
|
||||
bluray.c Petri Hintukainen
|
||||
ftp.c Lukasz Marek
|
||||
http.c Ronald S. Bultje
|
||||
libsrt.c Zhao Zhili
|
||||
libssh.c Lukasz Marek
|
||||
libzmq.c Andriy Gelman
|
||||
mms*.c Ronald S. Bultje
|
||||
udp.c Luca Abeni
|
||||
icecast.c [2] Marvin Scholz (CC <epirat07@gmail.com>)
|
||||
icecast.c Marvin Scholz
|
||||
|
||||
|
||||
libswresample
|
||||
=============
|
||||
|
||||
Generic parts:
|
||||
audioconvert.c [2] Michael Niedermayer
|
||||
dither.c [2] Michael Niedermayer
|
||||
rematrix*.c [2] Michael Niedermayer
|
||||
swresample*.c [2] Michael Niedermayer
|
||||
audioconvert.c Michael Niedermayer
|
||||
dither.c Michael Niedermayer
|
||||
rematrix*.c Michael Niedermayer
|
||||
swresample*.c Michael Niedermayer
|
||||
|
||||
Resamplers:
|
||||
resample*.c [2] Michael Niedermayer
|
||||
resample*.c Michael Niedermayer
|
||||
soxr_resample.c Rob Sykes
|
||||
|
||||
|
||||
Operating systems / CPU architectures
|
||||
=====================================
|
||||
|
||||
*BSD [2] Brad Smith
|
||||
Alpha [0]
|
||||
Alpha Falk Hueffner
|
||||
MIPS Manojkumar Bhosale, Shiyou Yin
|
||||
LoongArch [2] Shiyou Yin
|
||||
Darwin (macOS, iOS) [2] Marvin Scholz
|
||||
Mac OS X / PowerPC [0]
|
||||
Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC [1] Lauri Kasanen
|
||||
RISC-V [2] Rémi Denis-Courmont
|
||||
Linux / PowerPC Lauri Kasanen
|
||||
Windows MinGW Alex Beregszaszi, Ramiro Polla
|
||||
Windows Cygwin Victor Paesa
|
||||
Windows MSVC Hendrik Leppkes
|
||||
Windows MSVC Matthew Oliver, Hendrik Leppkes
|
||||
Windows ICL Matthew Oliver
|
||||
ADI/Blackfin DSP Marc Hoffman
|
||||
Sparc Roman Shaposhnik
|
||||
OS/2 KO Myung-Hun
|
||||
@@ -541,7 +561,6 @@ Benjamin Larsson
|
||||
Bobby Bingham
|
||||
Daniel Verkamp
|
||||
Derek Buitenhuis
|
||||
Fei Wang
|
||||
Ganesh Ajjanagadde
|
||||
Henrik Gramner
|
||||
Ivan Uskov
|
||||
@@ -549,7 +568,6 @@ James Darnley
|
||||
Jan Ekström
|
||||
Joakim Plate
|
||||
Jun Zhao
|
||||
Kacper Michajłow
|
||||
Kieran Kunhya
|
||||
Kirill Gavrilov
|
||||
Limin Wang
|
||||
@@ -565,12 +583,10 @@ wm4
|
||||
Releases
========

7.0 Michael Niedermayer
6.1 Michael Niedermayer
5.1 Michael Niedermayer
4.4 Michael Niedermayer
3.4 Michael Niedermayer
2.8 Michael Niedermayer
2.7 Michael Niedermayer
2.6 Michael Niedermayer
2.5 Michael Niedermayer

If you want to maintain an older release, please contact us

@@ -591,26 +607,20 @@ Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
|
||||
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
|
||||
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Frank Plowman 34E2 48D6 B7DF 4769 70C7 3304 03A8 4C6A 098F 2C6B
|
||||
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
|
||||
Jaikrishnan Menon 61A1 F09F 01C9 2D45 78E1 C862 25DC 8831 AF70 D368
|
||||
James Almer 7751 2E8C FD94 A169 57E6 9A7A 1463 01AD 7376 59E0
|
||||
Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A
|
||||
Leo Izen (Traneptora) B6FD 3CFC 7ACF 83FC 9137 6945 5A71 C331 FD2F A19A
|
||||
Leo Izen (Traneptora) 1D83 0A0B CE46 709E 203B 26FC 764E 48EA 4822 1833
|
||||
Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE
|
||||
Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464
|
||||
Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 040B 0FAB
|
||||
DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64
|
||||
Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93
|
||||
Niklas Haas (haasn) 1DDB 8076 B14D 5B48 32FC 99D9 EB52 DA9C 02BA 6FB4
|
||||
Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1
|
||||
Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029
|
||||
Peter Ross A907 E02F A6E5 0CD2 34CD 20D2 6760 79C5 AC40 DD6B
|
||||
Philip Langdale 5DC5 8D66 5FBA 3A43 18EC 045E F8D6 B194 6A75 682E
|
||||
Pierre-Anthony Lemieux (pal) F4B3 9492 E6F2 E4AF AEC8 46CB 698F A1F0 F8D4 EED4
|
||||
Ramiro Polla 7859 C65B 751B 1179 792E DAE8 8E95 8B2F 9B6C 5700
|
||||
Reimar Doeffinger C61D 16E5 9E2C D10C 8958 38A4 0899 A2B9 06D4 D9C7
|
||||
Reinhard Tartler 9300 5DC2 7E87 6C37 ED7B CA9A 9808 3544 9453 48A4
|
||||
|
||||
46
Makefile
46
Makefile
@@ -13,26 +13,18 @@ vpath %.v $(SRC_PATH)
|
||||
vpath %.texi $(SRC_PATH)
|
||||
vpath %.cu $(SRC_PATH)
|
||||
vpath %.ptx $(SRC_PATH)
|
||||
vpath %.metal $(SRC_PATH)
|
||||
vpath %/fate_config.sh.template $(SRC_PATH)
|
||||
|
||||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
|
||||
ALLFFLIBS = \
|
||||
avcodec \
|
||||
avdevice \
|
||||
avfilter \
|
||||
avformat \
|
||||
avutil \
|
||||
swscale \
|
||||
swresample \
|
||||
|
||||
# $(FFLIBS-yes) needs to be in linking order
|
||||
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_AVFORMAT) += avformat
|
||||
FFLIBS-$(CONFIG_AVCODEC) += avcodec
|
||||
FFLIBS-$(CONFIG_AVRESAMPLE) += avresample
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
|
||||
@@ -53,14 +45,11 @@ FF_DEP_LIBS := $(DEP_LIBS)
|
||||
FF_STATIC_DEP_LIBS := $(STATIC_DEP_LIBS)
|
||||
|
||||
$(TOOLS): %$(EXESUF): %.o
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(filter-out $(FF_DEP_LIBS), $^) $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(EXTRALIBS-$(*F)) $(EXTRALIBS) $(ELIBS)
|
||||
|
||||
target_dec_%_fuzzer$(EXESUF): target_dec_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
target_enc_%_fuzzer$(EXESUF): target_enc_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_bsf_%_fuzzer$(EXESUF): tools/target_bsf_%_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
@@ -73,18 +62,9 @@ tools/target_dem_fuzzer$(EXESUF): tools/target_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
tools/target_io_dem_fuzzer$(EXESUF): tools/target_io_dem_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_sws_fuzzer$(EXESUF): tools/target_sws_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/target_swr_fuzzer$(EXESUF): tools/target_swr_fuzzer.o $(FF_DEP_LIBS)
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $^ $(ELIBS) $(FF_EXTRALIBS) $(LIBFUZZER_PATH)
|
||||
|
||||
tools/enum_options$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/enum_options$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/enc_recon_frame_test$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/enc_recon_frame_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/scale_slice_test$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/scale_slice_test$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/sofa2wavs$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
tools/uncoded_frame$(EXESUF): $(FF_DEP_LIBS)
|
||||
tools/uncoded_frame$(EXESUF): ELIBS = $(FF_EXTRALIBS)
|
||||
@@ -94,14 +74,13 @@ tools/target_dem_%_fuzzer$(EXESUF): $(FF_DEP_LIBS)
|
||||
CONFIGURABLE_COMPONENTS = \
|
||||
$(wildcard $(FFLIBS:%=$(SRC_PATH)/lib%/all*.c)) \
|
||||
$(SRC_PATH)/libavcodec/bitstream_filters.c \
|
||||
$(SRC_PATH)/libavcodec/hwaccels.h \
|
||||
$(SRC_PATH)/libavcodec/parsers.c \
|
||||
$(SRC_PATH)/libavformat/protocols.c \
|
||||
|
||||
config_components.h: ffbuild/.config
|
||||
config.h: ffbuild/.config
|
||||
ffbuild/.config: $(CONFIGURABLE_COMPONENTS)
|
||||
@-tput bold 2>/dev/null
|
||||
@-printf '\nWARNING: $(?) newer than config_components.h, rerun configure\n\n'
|
||||
@-printf '\nWARNING: $(?) newer than config.h, rerun configure\n\n'
|
||||
@-tput sgr0 2>/dev/null
|
||||
|
||||
SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
@@ -109,8 +88,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
||||
ARMV5TE-OBJS ARMV6-OBJS ARMV8-OBJS VFP-OBJS NEON-OBJS \
|
||||
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
|
||||
MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS RVVB-OBJS \
|
||||
OBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS SIMD128-OBJS
|
||||
MMI-OBJS OBJS SLIBOBJS HOSTOBJS TESTOBJS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
@@ -132,13 +110,12 @@ include $(SRC_PATH)/fftools/Makefile
|
||||
include $(SRC_PATH)/doc/Makefile
|
||||
include $(SRC_PATH)/doc/examples/Makefile
|
||||
|
||||
$(ALLFFLIBS:%=lib%/version.o): libavutil/ffversion.h
|
||||
libavcodec/avcodec.o libavformat/utils.o libavdevice/avdevice.o libavfilter/avfilter.o libavutil/utils.o libpostproc/postprocess.o libswresample/swresample.o libswscale/utils.o : libavutil/ffversion.h
|
||||
|
||||
$(PROGS): %$(PROGSSUF)$(EXESUF): %$(PROGSSUF)_g$(EXESUF)
|
||||
ifeq ($(STRIPTYPE),direct)
|
||||
$(STRIP) -o $@ $<
|
||||
else
|
||||
$(RM) $@
|
||||
$(CP) $< $@
|
||||
$(STRIP) $@
|
||||
endif
|
||||
@@ -147,18 +124,13 @@ endif
|
||||
$(LD) $(LDFLAGS) $(LDEXEFLAGS) $(LD_O) $(OBJS-$*) $(FF_EXTRALIBS)
|
||||
|
||||
VERSION_SH = $(SRC_PATH)/ffbuild/version.sh
|
||||
ifeq ($(VERSION_TRACKING),yes)
|
||||
GIT_LOG = $(SRC_PATH)/.git/logs/HEAD
|
||||
endif
|
||||
|
||||
.version: $(wildcard $(GIT_LOG)) $(VERSION_SH) ffbuild/config.mak
|
||||
.version: M=@
|
||||
|
||||
ifneq ($(VERSION_TRACKING),yes)
|
||||
libavutil/ffversion.h .version: REVISION=unknown
|
||||
endif
|
||||
libavutil/ffversion.h .version:
|
||||
$(M)revision=$(REVISION) $(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
|
||||
$(M)$(VERSION_SH) $(SRC_PATH) libavutil/ffversion.h $(EXTRA_VERSION)
|
||||
$(Q)touch .version
|
||||
|
||||
# force version.sh to run whenever version might have changed
|
||||
@@ -184,7 +156,7 @@ clean::
|
||||
$(RM) -rf coverage.info coverage.info.in lcov
|
||||
|
||||
distclean:: clean
|
||||
$(RM) .version config.asm config.h config_components.h mapfile \
|
||||
$(RM) .version avversion.h config.asm config.h mapfile \
|
||||
ffbuild/.config ffbuild/config.* libavutil/avconfig.h \
|
||||
version.h libavutil/ffversion.h libavcodec/codec_names.h \
|
||||
libavcodec/bsf_list.c libavformat/protocol_list.c \
|
||||
|
||||
@@ -9,7 +9,7 @@ such as audio, video, subtitles and related metadata.
* `libavcodec` provides implementation of a wider range of codecs.
* `libavformat` implements streaming protocols, container formats and basic I/O access.
* `libavutil` includes hashers, decompressors and miscellaneous utility functions.
* `libavfilter` provides means to alter decoded audio and video through a directed graph of connected filters.
* `libavfilter` provides a mean to alter decoded Audio and Video through chain of filters.
* `libavdevice` provides an abstraction to access capture and playback devices.
* `libswresample` implements audio mixing and resampling routines.
* `libswscale` implements color conversion and scaling routines.

@@ -1,10 +1,10 @@

┌────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 8.0 "Huffman" │
└────────────────────────────────────────┘
┌────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.4 "Rao" │
└────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 8.0 "Huffman", about 11
months after the release of FFmpeg 7.1.
The FFmpeg Project proudly presents FFmpeg 4.4 "Rao", about 10
months after the release of FFmpeg 4.3.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

173
compat/atomics/gcc/stdatomic.h
Normal file
173
compat/atomics/gcc/stdatomic.h
Normal file
@@ -0,0 +1,173 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/*
|
||||
* based on vlc_atomic.h from VLC
|
||||
* Copyright (C) 2010 Rémi Denis-Courmont
|
||||
*/
|
||||
|
||||
#ifndef COMPAT_ATOMICS_GCC_STDATOMIC_H
|
||||
#define COMPAT_ATOMICS_GCC_STDATOMIC_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
|
||||
#define ATOMIC_FLAG_INIT 0
|
||||
|
||||
#define ATOMIC_VAR_INIT(value) (value)
|
||||
|
||||
#define atomic_init(obj, value) \
|
||||
do { \
|
||||
*(obj) = (value); \
|
||||
} while(0)
|
||||
|
||||
#define kill_dependency(y) ((void)0)
|
||||
|
||||
#define atomic_thread_fence(order) \
|
||||
__sync_synchronize()
|
||||
|
||||
#define atomic_signal_fence(order) \
|
||||
((void)0)
|
||||
|
||||
#define atomic_is_lock_free(obj) 0
|
||||
|
||||
typedef _Bool atomic_flag;
|
||||
typedef _Bool atomic_bool;
|
||||
typedef char atomic_char;
|
||||
typedef signed char atomic_schar;
|
||||
typedef unsigned char atomic_uchar;
|
||||
typedef short atomic_short;
|
||||
typedef unsigned short atomic_ushort;
|
||||
typedef int atomic_int;
|
||||
typedef unsigned int atomic_uint;
|
||||
typedef long atomic_long;
|
||||
typedef unsigned long atomic_ulong;
typedef long long atomic_llong;
typedef unsigned long long atomic_ullong;
typedef wchar_t atomic_wchar_t;
typedef int_least8_t atomic_int_least8_t;
typedef uint_least8_t atomic_uint_least8_t;
typedef int_least16_t atomic_int_least16_t;
typedef uint_least16_t atomic_uint_least16_t;
typedef int_least32_t atomic_int_least32_t;
typedef uint_least32_t atomic_uint_least32_t;
typedef int_least64_t atomic_int_least64_t;
typedef uint_least64_t atomic_uint_least64_t;
typedef int_fast8_t atomic_int_fast8_t;
typedef uint_fast8_t atomic_uint_fast8_t;
typedef int_fast16_t atomic_int_fast16_t;
typedef uint_fast16_t atomic_uint_fast16_t;
typedef int_fast32_t atomic_int_fast32_t;
typedef uint_fast32_t atomic_uint_fast32_t;
typedef int_fast64_t atomic_int_fast64_t;
typedef uint_fast64_t atomic_uint_fast64_t;
typedef intptr_t atomic_intptr_t;
typedef uintptr_t atomic_uintptr_t;
typedef size_t atomic_size_t;
typedef ptrdiff_t atomic_ptrdiff_t;
typedef intmax_t atomic_intmax_t;
typedef uintmax_t atomic_uintmax_t;

#define atomic_store(object, desired) \
do { \
    *(object) = (desired); \
    __sync_synchronize(); \
} while (0)

#define atomic_store_explicit(object, desired, order) \
    atomic_store(object, desired)

#define atomic_load(object) \
    (__sync_synchronize(), *(object))

#define atomic_load_explicit(object, order) \
    atomic_load(object)

#define atomic_exchange(object, desired) \
({ \
    __typeof__(object) _obj = (object); \
    __typeof__(*object) _old; \
    do \
        _old = atomic_load(_obj); \
    while (!__sync_bool_compare_and_swap(_obj, _old, (desired))); \
    _old; \
})

#define atomic_exchange_explicit(object, desired, order) \
    atomic_exchange(object, desired)

#define atomic_compare_exchange_strong(object, expected, desired) \
({ \
    __typeof__(object) _exp = (expected); \
    __typeof__(*object) _old = *_exp; \
    *_exp = __sync_val_compare_and_swap((object), _old, (desired)); \
    *_exp == _old; \
})

#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak(object, expected, desired) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_weak(object, expected, desired)

#define atomic_fetch_add(object, operand) \
    __sync_fetch_and_add(object, operand)

#define atomic_fetch_add_explicit(object, operand, order) \
    atomic_fetch_add(object, operand)

#define atomic_fetch_sub(object, operand) \
    __sync_fetch_and_sub(object, operand)

#define atomic_fetch_sub_explicit(object, operand, order) \
    atomic_fetch_sub(object, operand)

#define atomic_fetch_or(object, operand) \
    __sync_fetch_and_or(object, operand)

#define atomic_fetch_or_explicit(object, operand, order) \
    atomic_fetch_or(object, operand)

#define atomic_fetch_xor(object, operand) \
    __sync_fetch_and_xor(object, operand)

#define atomic_fetch_xor_explicit(object, operand, order) \
    atomic_fetch_xor(object, operand)

#define atomic_fetch_and(object, operand) \
    __sync_fetch_and_and(object, operand)

#define atomic_fetch_and_explicit(object, operand, order) \
    atomic_fetch_and(object, operand)

#define atomic_flag_test_and_set(object) \
    atomic_exchange(object, 1)

#define atomic_flag_test_and_set_explicit(object, order) \
    atomic_flag_test_and_set(object)

#define atomic_flag_clear(object) \
    atomic_store(object, 0)

#define atomic_flag_clear_explicit(object, order) \
    atomic_flag_clear(object)

#endif /* COMPAT_ATOMICS_GCC_STDATOMIC_H */
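The block above maps the C11 atomic API onto GCC's legacy __sync builtins. As a minimal usage sketch (not part of the diff, and assuming the earlier part of this header defines atomic_int and ATOMIC_VAR_INIT the way the other fallback headers do):

#include <stdio.h>
#include "compat/atomics/gcc/stdatomic.h"   /* assumed include path */

static atomic_int counter = ATOMIC_VAR_INIT(0);

int main(void)
{
    atomic_fetch_add(&counter, 3);              /* __sync_fetch_and_add under the hood */
    int expected = 3;
    /* succeeds and stores 10, since the current value matches expected */
    if (atomic_compare_exchange_strong(&counter, &expected, 10))
        printf("counter = %d\n", (int)atomic_load(&counter));
    return 0;
}

Note that atomic_compare_exchange_strong here is a GCC statement expression, so it can be used directly in a condition as shown.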
compat/atomics/pthread/stdatomic.c (new file, 39 lines)
@@ -0,0 +1,39 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * based on vlc_atomic.h from VLC
 * Copyright (C) 2010 Rémi Denis-Courmont
 */

#include <pthread.h>
#include <stdint.h>

#include "stdatomic.h"

static pthread_mutex_t atomic_lock = PTHREAD_MUTEX_INITIALIZER;

void avpriv_atomic_lock(void)
{
    pthread_mutex_lock(&atomic_lock);
}

void avpriv_atomic_unlock(void)
{
    pthread_mutex_unlock(&atomic_lock);
}
compat/atomics/pthread/stdatomic.h (new file, 197 lines)
@@ -0,0 +1,197 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * based on vlc_atomic.h from VLC
 * Copyright (C) 2010 Rémi Denis-Courmont
 */

#ifndef COMPAT_ATOMICS_PTHREAD_STDATOMIC_H
#define COMPAT_ATOMICS_PTHREAD_STDATOMIC_H

#include <stdint.h>

#define ATOMIC_FLAG_INIT 0

#define ATOMIC_VAR_INIT(value) (value)

#define atomic_init(obj, value) \
do { \
    *(obj) = (value); \
} while(0)

#define kill_dependency(y) ((void)0)

#define atomic_signal_fence(order) \
    ((void)0)

#define atomic_is_lock_free(obj) 0

typedef intptr_t atomic_flag;
typedef intptr_t atomic_bool;
typedef intptr_t atomic_char;
typedef intptr_t atomic_schar;
typedef intptr_t atomic_uchar;
typedef intptr_t atomic_short;
typedef intptr_t atomic_ushort;
typedef intptr_t atomic_int;
typedef intptr_t atomic_uint;
typedef intptr_t atomic_long;
typedef intptr_t atomic_ulong;
typedef intptr_t atomic_llong;
typedef intptr_t atomic_ullong;
typedef intptr_t atomic_wchar_t;
typedef intptr_t atomic_int_least8_t;
typedef intptr_t atomic_uint_least8_t;
typedef intptr_t atomic_int_least16_t;
typedef intptr_t atomic_uint_least16_t;
typedef intptr_t atomic_int_least32_t;
typedef intptr_t atomic_uint_least32_t;
typedef intptr_t atomic_int_least64_t;
typedef intptr_t atomic_uint_least64_t;
typedef intptr_t atomic_int_fast8_t;
typedef intptr_t atomic_uint_fast8_t;
typedef intptr_t atomic_int_fast16_t;
typedef intptr_t atomic_uint_fast16_t;
typedef intptr_t atomic_int_fast32_t;
typedef intptr_t atomic_uint_fast32_t;
typedef intptr_t atomic_int_fast64_t;
typedef intptr_t atomic_uint_fast64_t;
typedef intptr_t atomic_intptr_t;
typedef intptr_t atomic_uintptr_t;
typedef intptr_t atomic_size_t;
typedef intptr_t atomic_ptrdiff_t;
typedef intptr_t atomic_intmax_t;
typedef intptr_t atomic_uintmax_t;

void avpriv_atomic_lock(void);
void avpriv_atomic_unlock(void);

static inline void atomic_thread_fence(int order)
{
    avpriv_atomic_lock();
    avpriv_atomic_unlock();
}

static inline void atomic_store(intptr_t *object, intptr_t desired)
{
    avpriv_atomic_lock();
    *object = desired;
    avpriv_atomic_unlock();
}

#define atomic_store_explicit(object, desired, order) \
    atomic_store(object, desired)

static inline intptr_t atomic_load(intptr_t *object)
{
    intptr_t ret;
    avpriv_atomic_lock();
    ret = *object;
    avpriv_atomic_unlock();
    return ret;
}

#define atomic_load_explicit(object, order) \
    atomic_load(object)

static inline intptr_t atomic_exchange(intptr_t *object, intptr_t desired)
{
    intptr_t ret;
    avpriv_atomic_lock();
    ret = *object;
    *object = desired;
    avpriv_atomic_unlock();
    return ret;
}

#define atomic_exchange_explicit(object, desired, order) \
    atomic_exchange(object, desired)

static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected,
                                                 intptr_t desired)
{
    int ret;
    avpriv_atomic_lock();
    if (*object == *expected) {
        ret = 1;
        *object = desired;
    } else {
        ret = 0;
        *expected = *object;
    }
    avpriv_atomic_unlock();
    return ret;
}

#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak(object, expected, desired) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_weak(object, expected, desired)

#define FETCH_MODIFY(opname, op) \
static inline intptr_t atomic_fetch_ ## opname(intptr_t *object, intptr_t operand) \
{ \
    intptr_t ret; \
    avpriv_atomic_lock(); \
    ret = *object; \
    *object = *object op operand; \
    avpriv_atomic_unlock(); \
    return ret; \
}

FETCH_MODIFY(add, +)
FETCH_MODIFY(sub, -)
FETCH_MODIFY(or,  |)
FETCH_MODIFY(xor, ^)
FETCH_MODIFY(and, &)

#undef FETCH_MODIFY

#define atomic_fetch_add_explicit(object, operand, order) \
    atomic_fetch_add(object, operand)

#define atomic_fetch_sub_explicit(object, operand, order) \
    atomic_fetch_sub(object, operand)

#define atomic_fetch_or_explicit(object, operand, order) \
    atomic_fetch_or(object, operand)

#define atomic_fetch_xor_explicit(object, operand, order) \
    atomic_fetch_xor(object, operand)

#define atomic_fetch_and_explicit(object, operand, order) \
    atomic_fetch_and(object, operand)

#define atomic_flag_test_and_set(object) \
    atomic_exchange(object, 1)

#define atomic_flag_test_and_set_explicit(object, order) \
    atomic_flag_test_and_set(object)

#define atomic_flag_clear(object) \
    atomic_store(object, 0)

#define atomic_flag_clear_explicit(object, order) \
    atomic_flag_clear(object)

#endif /* COMPAT_ATOMICS_PTHREAD_STDATOMIC_H */
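Every operation in this fallback takes the single global mutex defined in the companion .c file, so updates are serialized rather than lock-free. A hypothetical test program (not part of the diff, include path assumed) that exercises it:

#include <pthread.h>
#include <stdio.h>
#include "compat/atomics/pthread/stdatomic.h"   /* assumed include path */

static atomic_int hits = ATOMIC_VAR_INIT(0);

static void *worker(void *arg)
{
    (void)arg;
    for (int i = 0; i < 1000; i++)
        atomic_fetch_add(&hits, 1);   /* lock, add, unlock */
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    pthread_create(&a, NULL, worker, NULL);
    pthread_create(&b, NULL, worker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("%d\n", (int)atomic_load(&hits));   /* prints 2000 */
    return 0;
}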
compat/atomics/suncc/stdatomic.h (new file, 186 lines)
@@ -0,0 +1,186 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef COMPAT_ATOMICS_SUNCC_STDATOMIC_H
#define COMPAT_ATOMICS_SUNCC_STDATOMIC_H

#include <atomic.h>
#include <mbarrier.h>
#include <stddef.h>
#include <stdint.h>

#define ATOMIC_FLAG_INIT 0

#define ATOMIC_VAR_INIT(value) (value)

#define atomic_init(obj, value) \
do { \
    *(obj) = (value); \
} while(0)

#define kill_dependency(y) ((void)0)

#define atomic_thread_fence(order) \
    __machine_rw_barrier();

#define atomic_signal_fence(order) \
    ((void)0)

#define atomic_is_lock_free(obj) 0

typedef intptr_t atomic_flag;
typedef intptr_t atomic_bool;
typedef intptr_t atomic_char;
typedef intptr_t atomic_schar;
typedef intptr_t atomic_uchar;
typedef intptr_t atomic_short;
typedef intptr_t atomic_ushort;
typedef intptr_t atomic_int;
typedef intptr_t atomic_uint;
typedef intptr_t atomic_long;
typedef intptr_t atomic_ulong;
typedef intptr_t atomic_llong;
typedef intptr_t atomic_ullong;
typedef intptr_t atomic_wchar_t;
typedef intptr_t atomic_int_least8_t;
typedef intptr_t atomic_uint_least8_t;
typedef intptr_t atomic_int_least16_t;
typedef intptr_t atomic_uint_least16_t;
typedef intptr_t atomic_int_least32_t;
typedef intptr_t atomic_uint_least32_t;
typedef intptr_t atomic_int_least64_t;
typedef intptr_t atomic_uint_least64_t;
typedef intptr_t atomic_int_fast8_t;
typedef intptr_t atomic_uint_fast8_t;
typedef intptr_t atomic_int_fast16_t;
typedef intptr_t atomic_uint_fast16_t;
typedef intptr_t atomic_int_fast32_t;
typedef intptr_t atomic_uint_fast32_t;
typedef intptr_t atomic_int_fast64_t;
typedef intptr_t atomic_uint_fast64_t;
typedef intptr_t atomic_intptr_t;
typedef intptr_t atomic_uintptr_t;
typedef intptr_t atomic_size_t;
typedef intptr_t atomic_ptrdiff_t;
typedef intptr_t atomic_intmax_t;
typedef intptr_t atomic_uintmax_t;

static inline void atomic_store(intptr_t *object, intptr_t desired)
{
    *object = desired;
    __machine_rw_barrier();
}

#define atomic_store_explicit(object, desired, order) \
    atomic_store(object, desired)

static inline intptr_t atomic_load(intptr_t *object)
{
    __machine_rw_barrier();
    return *object;
}

#define atomic_load_explicit(object, order) \
    atomic_load(object)

#define atomic_exchange(object, desired) \
    atomic_swap_ptr(object, desired)

#define atomic_exchange_explicit(object, desired, order) \
    atomic_exchange(object, desired)

static inline int atomic_compare_exchange_strong(intptr_t *object, intptr_t *expected,
                                                 intptr_t desired)
{
    intptr_t old = *expected;
    *expected = (intptr_t)atomic_cas_ptr(object, (void *)old, (void *)desired);
    return *expected == old;
}

#define atomic_compare_exchange_strong_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak(object, expected, desired) \
    atomic_compare_exchange_strong(object, expected, desired)

#define atomic_compare_exchange_weak_explicit(object, expected, desired, success, failure) \
    atomic_compare_exchange_weak(object, expected, desired)

static inline intptr_t atomic_fetch_add(intptr_t *object, intptr_t operand)
{
    return atomic_add_ptr_nv(object, operand) - operand;
}

#define atomic_fetch_sub(object, operand) \
    atomic_fetch_add(object, -(operand))

static inline intptr_t atomic_fetch_or(intptr_t *object, intptr_t operand)
{
    intptr_t old;
    do {
        old = atomic_load(object);
    } while (!atomic_compare_exchange_strong(object, &old, old | operand));
    return old;
}

static inline intptr_t atomic_fetch_xor(intptr_t *object, intptr_t operand)
{
    intptr_t old;
    do {
        old = atomic_load(object);
    } while (!atomic_compare_exchange_strong(object, &old, old ^ operand));
    return old;
}

static inline intptr_t atomic_fetch_and(intptr_t *object, intptr_t operand)
{
    intptr_t old;
    do {
        old = atomic_load(object);
    } while (!atomic_compare_exchange_strong(object, &old, old & operand));
    return old;
}

#define atomic_fetch_add_explicit(object, operand, order) \
    atomic_fetch_add(object, operand)

#define atomic_fetch_sub_explicit(object, operand, order) \
    atomic_fetch_sub(object, operand)

#define atomic_fetch_or_explicit(object, operand, order) \
    atomic_fetch_or(object, operand)

#define atomic_fetch_xor_explicit(object, operand, order) \
    atomic_fetch_xor(object, operand)

#define atomic_fetch_and_explicit(object, operand, order) \
    atomic_fetch_and(object, operand)

#define atomic_flag_test_and_set(object) \
    atomic_exchange(object, 1)

#define atomic_flag_test_and_set_explicit(object, order) \
    atomic_flag_test_and_set(object)

#define atomic_flag_clear(object) \
    atomic_store(object, 0)

#define atomic_flag_clear_explicit(object, order) \
    atomic_flag_clear(object)

#endif /* COMPAT_ATOMICS_SUNCC_STDATOMIC_H */
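One detail worth spelling out from the compare-and-swap helper above: on failure, *expected is rewritten with the value actually found, which is what lets the fetch_or/xor/and retry loops converge. A hedged sketch with hypothetical values (not part of the diff):

static void cas_example(void)
{
    intptr_t v = 5, expected = 7;
    if (!atomic_compare_exchange_strong(&v, &expected, 9)) {
        /* v stays 5; expected is rewritten to 5, so a retry loop can
         * recompute its desired value from the fresh contents and try again */
    }
}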
@@ -19,6 +19,7 @@
#ifndef COMPAT_ATOMICS_WIN32_STDATOMIC_H
#define COMPAT_ATOMICS_WIN32_STDATOMIC_H

#define WIN32_LEAN_AND_MEAN
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
@@ -95,7 +96,7 @@ do { \
    atomic_load(object)

#define atomic_exchange(object, desired) \
    InterlockedExchangePointer((PVOID volatile *)object, (PVOID)desired)
    InterlockedExchangePointer(object, desired);

#define atomic_exchange_explicit(object, desired, order) \
    atomic_exchange(object, desired)
@@ -181,15 +181,8 @@ static inline __device__ double trunc(double a) { return __builtin_trunc(a); }
static inline __device__ float fabsf(float a) { return __builtin_fabsf(a); }
static inline __device__ float fabs(float a) { return __builtin_fabsf(a); }
static inline __device__ double fabs(double a) { return __builtin_fabs(a); }
static inline __device__ float sqrtf(float a) { return __builtin_sqrtf(a); }

static inline __device__ float __saturatef(float a) { return __nvvm_saturate_f(a); }
static inline __device__ float __sinf(float a) { return __nvvm_sin_approx_f(a); }
static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a); }
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }

// Misc helper functions
extern "C" __device__ int printf(const char*, ...);

#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
compat/cuda/ptx2c.sh (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/sh

# Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

set -e

OUT="$1"
IN="$2"
NAME="$(basename "$IN" | sed 's/\..*//')"

printf "const char %s_ptx[] = \\" "$NAME" > "$OUT"
echo >> "$OUT"
sed -e "$(printf 's/\r//g')" -e 's/["\\]/\\&/g' -e "$(printf 's/^/\t"/')" -e 's/$/\\n"/' < "$IN" >> "$OUT"
echo ";" >> "$OUT"

exit 0
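The script turns a .ptx file into a C source defining a string named <basename>_ptx. A sketch of how such a generated array might be consumed on the C side (the file and kernel names here are hypothetical, not taken from the diff):

/* generated by: ptx2c.sh scale_ptx.c scale.ptx  ->  const char scale_ptx[]; */
extern const char scale_ptx[];

/* the NUL-terminated PTX text is later handed to the CUDA driver,
 * e.g. via cuModuleLoadData(), by the hardware filter that needs it */
static const char *get_scaler_ptx(void)
{
    return scale_ptx;
}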
@@ -59,7 +59,7 @@ int avpriv_vsnprintf(char *s, size_t n, const char *fmt,
     * recommends to provide _snprintf/_vsnprintf() a buffer size that
     * is one less than the actual buffer, and zero it before calling
     * _snprintf/_vsnprintf() to workaround this problem.
     * See https://web.archive.org/web/20151214111935/http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
     * See http://msdn.microsoft.com/en-us/library/1kt27hek(v=vs.80).aspx */
    memset(s, 0, n);
    va_copy(ap_copy, ap);
    ret = _vsnprintf(s, n - 1, fmt, ap_copy);
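The comment describes the MSVC _vsnprintf quirk being worked around: the function is not guaranteed to NUL-terminate on truncation. A simplified sketch of the same pattern (illustrative only, not the actual compat implementation):

static int safe_vsnprintf(char *buf, size_t size, const char *fmt, va_list ap)
{
    memset(buf, 0, size);                       /* guarantees termination ...      */
    return _vsnprintf(buf, size - 1, fmt, ap);  /* ... even if the output is cut off */
}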
@@ -218,7 +218,7 @@ while (<F>) {
    # Lines of the form '} SOME_VERSION_NAME_1.0;'
    if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) {
        $glob = 'glob';
        # We tried to match symbols against this version, but none matched.
        # We tried to match symbols agains this version, but none matched.
        # Emit dummy hidden symbol to avoid marking this version WEAK.
        if ($matches_attempted && $matched_symbols == 0) {
            print "    hidden:\n";
@@ -1,599 +0,0 @@
/*
 * Copyright (C) 2023 Rémi Denis-Courmont
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
 */

#ifndef __STDC_VERSION_STDBIT_H__
#define __STDC_VERSION_STDBIT_H__ 202311L

#include <stdbool.h>
#include <limits.h> /* CHAR_BIT */

#define __STDC_ENDIAN_LITTLE__ 1234
#define __STDC_ENDIAN_BIG__    4321

#ifdef __BYTE_ORDER__
# if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#  define __STDC_ENDIAN_NATIVE__ __STDC_ENDIAN_LITTLE__
# elif (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#  define __STDC_ENDIAN_NATIVE__ __STDC_ENDIAN_BIG__
# else
#  define __STDC_ENDIAN_NATIVE__ 3412
# endif
#elif defined(_MSC_VER)
# define __STDC_ENDIAN_NATIVE__ __STDC_ENDIAN_LITTLE__
#else
# error Not implemented.
#endif

#define __stdbit_generic_type_func(func, value) \
    _Generic (value, \
        unsigned long long: stdc_##func##_ull((unsigned long long)(value)), \
        unsigned long:      stdc_##func##_ul((unsigned long)(value)), \
        unsigned int:       stdc_##func##_ui((unsigned int)(value)), \
        unsigned short:     stdc_##func##_us((unsigned short)(value)), \
        unsigned char:      stdc_##func##_uc((unsigned char)(value)))

#if defined (__GNUC__) || defined (__clang__)
static inline unsigned int stdc_leading_zeros_ull(unsigned long long value)
{
    return value ? __builtin_clzll(value) : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_leading_zeros_ul(unsigned long value)
{
    return value ? __builtin_clzl(value) : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_leading_zeros_ui(unsigned int value)
{
    return value ? __builtin_clz(value) : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_leading_zeros_us(unsigned short value)
{
    return stdc_leading_zeros_ui(value)
           - CHAR_BIT * (sizeof (int) - sizeof (value));
}

static inline unsigned int stdc_leading_zeros_uc(unsigned char value)
{
    return stdc_leading_zeros_ui(value) - (CHAR_BIT * (sizeof (int) - 1));
}
#else
static inline unsigned int __stdc_leading_zeros(unsigned long long value,
                                                unsigned int size)
{
    unsigned int zeros = size * CHAR_BIT;

    while (value != 0) {
        value >>= 1;
        zeros--;
    }

    return zeros;
}

static inline unsigned int stdc_leading_zeros_ull(unsigned long long value)
{
    return __stdc_leading_zeros(value, sizeof (value));
}

static inline unsigned int stdc_leading_zeros_ul(unsigned long value)
{
    return __stdc_leading_zeros(value, sizeof (value));
}

static inline unsigned int stdc_leading_zeros_ui(unsigned int value)
{
    return __stdc_leading_zeros(value, sizeof (value));
}

static inline unsigned int stdc_leading_zeros_us(unsigned short value)
{
    return __stdc_leading_zeros(value, sizeof (value));
}

static inline unsigned int stdc_leading_zeros_uc(unsigned char value)
{
    return __stdc_leading_zeros(value, sizeof (value));
}
#endif

#define stdc_leading_zeros(value) \
    __stdbit_generic_type_func(leading_zeros, value)

static inline unsigned int stdc_leading_ones_ull(unsigned long long value)
{
    return stdc_leading_zeros_ull(~value);
}

static inline unsigned int stdc_leading_ones_ul(unsigned long value)
{
    return stdc_leading_zeros_ul(~value);
}

static inline unsigned int stdc_leading_ones_ui(unsigned int value)
{
    return stdc_leading_zeros_ui(~value);
}

static inline unsigned int stdc_leading_ones_us(unsigned short value)
{
    return stdc_leading_zeros_us(~value);
}

static inline unsigned int stdc_leading_ones_uc(unsigned char value)
{
    return stdc_leading_zeros_uc(~value);
}

#define stdc_leading_ones(value) \
    __stdbit_generic_type_func(leading_ones, value)

#if defined (__GNUC__) || defined (__clang__)
static inline unsigned int stdc_trailing_zeros_ull(unsigned long long value)
{
    return value ? (unsigned int)__builtin_ctzll(value)
                 : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_ul(unsigned long value)
{
    return value ? (unsigned int)__builtin_ctzl(value)
                 : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_ui(unsigned int value)
{
    return value ? (unsigned int)__builtin_ctz(value)
                 : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_us(unsigned short value)
{
    return value ? (unsigned int)__builtin_ctz(value)
                 : (CHAR_BIT * sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_uc(unsigned char value)
{
    return value ? (unsigned int)__builtin_ctz(value)
                 : (CHAR_BIT * sizeof (value));
}
#else
static inline unsigned int __stdc_trailing_zeros(unsigned long long value,
                                                 unsigned int size)
{
    unsigned int zeros = 0;

    if (!value)
        return size * CHAR_BIT;

    while ((value & 1) == 0) {
        value >>= 1;
        zeros++;
    }

    return zeros;
}

static inline unsigned int stdc_trailing_zeros_ull(unsigned long long value)
{
    return __stdc_trailing_zeros(value, sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_ul(unsigned long value)
{
    return __stdc_trailing_zeros(value, sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_ui(unsigned int value)
{
    return __stdc_trailing_zeros(value, sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_us(unsigned short value)
{
    return __stdc_trailing_zeros(value, sizeof (value));
}

static inline unsigned int stdc_trailing_zeros_uc(unsigned char value)
{
    return __stdc_trailing_zeros(value, sizeof (value));
}
#endif

#define stdc_trailing_zeros(value) \
    __stdbit_generic_type_func(trailing_zeros, value)

static inline unsigned int stdc_trailing_ones_ull(unsigned long long value)
{
    return stdc_trailing_zeros_ull(~value);
}

static inline unsigned int stdc_trailing_ones_ul(unsigned long value)
{
    return stdc_trailing_zeros_ul(~value);
}

static inline unsigned int stdc_trailing_ones_ui(unsigned int value)
{
    return stdc_trailing_zeros_ui(~value);
}

static inline unsigned int stdc_trailing_ones_us(unsigned short value)
{
    return stdc_trailing_zeros_us(~value);
}

static inline unsigned int stdc_trailing_ones_uc(unsigned char value)
{
    return stdc_trailing_zeros_uc(~value);
}

#define stdc_trailing_ones(value) \
    __stdbit_generic_type_func(trailing_ones, value)

static inline unsigned int stdc_first_leading_one_ull(unsigned long long value)
{
    return value ? (stdc_leading_zeros_ull(value) + 1) : 0;
}

static inline unsigned int stdc_first_leading_one_ul(unsigned long value)
{
    return value ? (stdc_leading_zeros_ul(value) + 1) : 0;
}

static inline unsigned int stdc_first_leading_one_ui(unsigned int value)
{
    return value ? (stdc_leading_zeros_ui(value) + 1) : 0;
}

static inline unsigned int stdc_first_leading_one_us(unsigned short value)
{
    return value ? (stdc_leading_zeros_us(value) + 1) : 0;
}

static inline unsigned int stdc_first_leading_one_uc(unsigned char value)
{
    return value ? (stdc_leading_zeros_uc(value) + 1) : 0;
}

#define stdc_first_leading_one(value) \
    __stdbit_generic_type_func(first_leading_one, value)

static inline unsigned int stdc_first_leading_zero_ull(unsigned long long value)
{
    return stdc_leading_ones_ull(~value);
}

static inline unsigned int stdc_first_leading_zero_ul(unsigned long value)
{
    return stdc_leading_ones_ul(~value);
}

static inline unsigned int stdc_first_leading_zero_ui(unsigned int value)
{
    return stdc_leading_ones_ui(~value);
}

static inline unsigned int stdc_first_leading_zero_us(unsigned short value)
{
    return stdc_leading_ones_us(~value);
}

static inline unsigned int stdc_first_leading_zero_uc(unsigned char value)
{
    return stdc_leading_ones_uc(~value);
}

#define stdc_first_leading_zero(value) \
    __stdbit_generic_type_func(first_leading_zero, value)

#if defined (__GNUC__) || defined (__clang__)
static inline unsigned int stdc_first_trailing_one_ull(unsigned long long value)
{
    return __builtin_ffsll(value);
}

static inline unsigned int stdc_first_trailing_one_ul(unsigned long value)
{
    return __builtin_ffsl(value);
}

static inline unsigned int stdc_first_trailing_one_ui(unsigned int value)
{
    return __builtin_ffs(value);
}

static inline unsigned int stdc_first_trailing_one_us(unsigned short value)
{
    return __builtin_ffs(value);
}

static inline unsigned int stdc_first_trailing_one_uc(unsigned char value)
{
    return __builtin_ffs(value);
}
#else
static inline unsigned int stdc_first_trailing_one_ull(unsigned long long value)
{
    return value ? (1 + stdc_trailing_zeros_ull(value)) : 0;
}

static inline unsigned int stdc_first_trailing_one_ul(unsigned long value)
{
    return value ? (1 + stdc_trailing_zeros_ul(value)) : 0;
}

static inline unsigned int stdc_first_trailing_one_ui(unsigned int value)
{
    return value ? (1 + stdc_trailing_zeros_ui(value)) : 0;
}

static inline unsigned int stdc_first_trailing_one_us(unsigned short value)
{
    return value ? (1 + stdc_trailing_zeros_us(value)) : 0;
}

static inline unsigned int stdc_first_trailing_one_uc(unsigned char value)
{
    return value ? (1 + stdc_trailing_zeros_uc(value)) : 0;
}
#endif

#define stdc_first_trailing_one(value) \
    __stdbit_generic_type_func(first_trailing_one, value)

static inline unsigned int stdc_first_trailing_zero_ull(unsigned long long value)
{
    return stdc_first_trailing_one_ull(~value);
}

static inline unsigned int stdc_first_trailing_zero_ul(unsigned long value)
{
    return stdc_first_trailing_one_ul(~value);
}

static inline unsigned int stdc_first_trailing_zero_ui(unsigned int value)
{
    return stdc_first_trailing_one_ui(~value);
}

static inline unsigned int stdc_first_trailing_zero_us(unsigned short value)
{
    return stdc_first_trailing_one_us(~value);
}

static inline unsigned int stdc_first_trailing_zero_uc(unsigned char value)
{
    return stdc_first_trailing_one_uc(~value);
}

#define stdc_first_trailing_zero(value) \
    __stdbit_generic_type_func(first_trailing_zero, value)

#if defined (__GNUC__) || defined (__clang__)
static inline unsigned int stdc_count_ones_ull(unsigned long long value)
{
    return __builtin_popcountll(value);
}

static inline unsigned int stdc_count_ones_ul(unsigned long value)
{
    return __builtin_popcountl(value);
}

static inline unsigned int stdc_count_ones_ui(unsigned int value)
{
    return __builtin_popcount(value);
}

static inline unsigned int stdc_count_ones_us(unsigned short value)
{
    return __builtin_popcount(value);
}

static inline unsigned int stdc_count_ones_uc(unsigned char value)
{
    return __builtin_popcount(value);
}
#else
static inline unsigned int __stdc_count_ones(unsigned long long value,
                                             unsigned int size)
{
    unsigned int ones = 0;

    for (unsigned int c = 0; c < (size * CHAR_BIT); c++) {
        ones += value & 1;
        value >>= 1;
    }

    return ones;
}

static inline unsigned int stdc_count_ones_ull(unsigned long long value)
{
    return __stdc_count_ones(value, sizeof (value));
}

static inline unsigned int stdc_count_ones_ul(unsigned long value)
{
    return __stdc_count_ones(value, sizeof (value));
}

static inline unsigned int stdc_count_ones_ui(unsigned int value)
{
    return __stdc_count_ones(value, sizeof (value));
}

static inline unsigned int stdc_count_ones_us(unsigned short value)
{
    return __stdc_count_ones(value, sizeof (value));
}

static inline unsigned int stdc_count_ones_uc(unsigned char value)
{
    return __stdc_count_ones(value, sizeof (value));
}
#endif

#define stdc_count_ones(value) \
    __stdbit_generic_type_func(count_ones, value)

static inline unsigned int stdc_count_zeros_ull(unsigned long long value)
{
    return stdc_count_ones_ull(~value);
}

static inline unsigned int stdc_count_zeros_ul(unsigned long value)
{
    return stdc_count_ones_ul(~value);
}

static inline unsigned int stdc_count_zeros_ui(unsigned int value)
{
    return stdc_count_ones_ui(~value);
}

static inline unsigned int stdc_count_zeros_us(unsigned short value)
{
    return stdc_count_ones_us(~value);
}

static inline unsigned int stdc_count_zeros_uc(unsigned char value)
{
    return stdc_count_ones_uc(~value);
}

#define stdc_count_zeros(value) \
    __stdbit_generic_type_func(count_zeros, value)

static inline bool stdc_has_single_bit_ull(unsigned long long value)
{
    return value && (value & (value - 1)) == 0;
}

static inline bool stdc_has_single_bit_ul(unsigned long value)
{
    return value && (value & (value - 1)) == 0;
}

static inline bool stdc_has_single_bit_ui(unsigned int value)
{
    return value && (value & (value - 1)) == 0;
}

static inline bool stdc_has_single_bit_us(unsigned short value)
{
    return value && (value & (value - 1)) == 0;
}

static inline bool stdc_has_single_bit_uc(unsigned char value)
{
    return value && (value & (value - 1)) == 0;
}

#define stdc_has_single_bit(value) \
    __stdbit_generic_type_func(has_single_bit, value)

static inline unsigned int stdc_bit_width_ull(unsigned long long value)
{
    return (CHAR_BIT * sizeof (value)) - stdc_leading_zeros_ull(value);
}

static inline unsigned int stdc_bit_width_ul(unsigned long value)
{
    return (CHAR_BIT * sizeof (value)) - stdc_leading_zeros_ul(value);
}

static inline unsigned int stdc_bit_width_ui(unsigned int value)
{
    return (CHAR_BIT * sizeof (value)) - stdc_leading_zeros_ui(value);
}

static inline unsigned int stdc_bit_width_us(unsigned short value)
{
    return (CHAR_BIT * sizeof (value)) - stdc_leading_zeros_us(value);
}

static inline unsigned int stdc_bit_width_uc(unsigned char value)
{
    return (CHAR_BIT * sizeof (value)) - stdc_leading_zeros_uc(value);
}

#define stdc_bit_width(value) \
    __stdbit_generic_type_func(bit_width, value)

static inline unsigned long long stdc_bit_floor_ull(unsigned long long value)
{
    return value ? (1ULL << (stdc_bit_width_ull(value) - 1)) : 0ULL;
}

static inline unsigned long stdc_bit_floor_ul(unsigned long value)
{
    return value ? (1UL << (stdc_bit_width_ul(value) - 1)) : 0UL;
}

static inline unsigned int stdc_bit_floor_ui(unsigned int value)
{
    return value ? (1U << (stdc_bit_width_ui(value) - 1)) : 0U;
}

static inline unsigned short stdc_bit_floor_us(unsigned short value)
{
    return value ? (1U << (stdc_bit_width_us(value) - 1)) : 0U;
}

static inline unsigned int stdc_bit_floor_uc(unsigned char value)
{
    return value ? (1U << (stdc_bit_width_uc(value) - 1)) : 0U;
}

#define stdc_bit_floor(value) \
    __stdbit_generic_type_func(bit_floor, value)

/* NOTE: Bit ceiling undefines overflow. */
static inline unsigned long long stdc_bit_ceil_ull(unsigned long long value)
{
    return 1ULL << (value ? stdc_bit_width_ull(value - 1) : 0);
}

static inline unsigned long stdc_bit_ceil_ul(unsigned long value)
{
    return 1UL << (value ? stdc_bit_width_ul(value - 1) : 0);
}

static inline unsigned int stdc_bit_ceil_ui(unsigned int value)
{
    return 1U << (value ? stdc_bit_width_ui(value - 1) : 0);
}

static inline unsigned short stdc_bit_ceil_us(unsigned short value)
{
    return 1U << (value ? stdc_bit_width_us(value - 1) : 0);
}

static inline unsigned int stdc_bit_ceil_uc(unsigned char value)
{
    return 1U << (value ? stdc_bit_width_uc(value - 1) : 0);
}

#define stdc_bit_ceil(value) \
    __stdbit_generic_type_func(bit_ceil, value)

#endif /* __STDC_VERSION_STDBIT_H__ */
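A small sketch of the C23-style API this (now removed) header provided, assuming it is still reachable as compat/stdbit/stdbit.h in the older tree being compared:

#include <stdio.h>
#include "compat/stdbit/stdbit.h"   /* assumed include path */

int main(void)
{
    unsigned int v = 100;                          /* 0b1100100 */
    printf("%u\n", stdc_count_ones(v));            /* 3 set bits */
    printf("%u\n", stdc_bit_width(v));             /* 7 bits needed */
    printf("%u\n", (unsigned)stdc_bit_ceil(v));    /* 128, next power of two */
    return 0;
}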
@@ -20,41 +20,11 @@
#define COMPAT_W32DLFCN_H

#ifdef _WIN32
#include <stdint.h>

#include <windows.h>

#include "config.h"
#include "libavutil/macros.h"
#include "libavutil/mem.h"
#if (_WIN32_WINNT < 0x0602) || HAVE_WINRT
#include "libavutil/wchar_filename.h"

static inline wchar_t *get_module_filename(HMODULE module)
{
    wchar_t *path = NULL, *new_path;
    DWORD path_size = 0, path_len;

    do {
        path_size = path_size ? FFMIN(2 * path_size, INT16_MAX + 1) : MAX_PATH;
        new_path = av_realloc_array(path, path_size, sizeof *path);
        if (!new_path) {
            av_free(path);
            return NULL;
        }
        path = new_path;
        // Returns path_size in case of insufficient buffer.
        // Whether the error is set or not and whether the output
        // is null-terminated or not depends on the version of Windows.
        path_len = GetModuleFileNameW(module, path, path_size);
    } while (path_len && path_size <= INT16_MAX && path_size <= path_len);

    if (!path_len) {
        av_free(path);
        return NULL;
    }
    return path;
}

#endif
/**
 * Safe function used to open dynamic libs. This attempts to improve program security
 * by removing the current directory from the dll search path. Only dll's found in the
@@ -64,53 +34,29 @@ static inline wchar_t *get_module_filename(HMODULE module)
 */
static inline HMODULE win32_dlopen(const char *name)
{
    wchar_t *name_w;
    HMODULE module = NULL;
    if (utf8towchar(name, &name_w))
        name_w = NULL;
#if _WIN32_WINNT < 0x0602
    // On Win7 and earlier we check if KB2533623 is available
    // Need to check if KB2533623 is available
    if (!GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "SetDefaultDllDirectories")) {
        wchar_t *path = NULL, *new_path;
        DWORD pathlen, pathsize, namelen;
        if (!name_w)
        HMODULE module = NULL;
        wchar_t *path = NULL, *name_w = NULL;
        DWORD pathlen;
        if (utf8towchar(name, &name_w))
            goto exit;
        namelen = wcslen(name_w);
        path = (wchar_t *)av_mallocz_array(MAX_PATH, sizeof(wchar_t));
        // Try local directory first
        path = get_module_filename(NULL);
        if (!path)
        pathlen = GetModuleFileNameW(NULL, path, MAX_PATH);
        pathlen = wcsrchr(path, '\\') - path;
        if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
            goto exit;
        new_path = wcsrchr(path, '\\');
        if (!new_path)
            goto exit;
        pathlen = new_path - path;
        pathsize = pathlen + namelen + 2;
        new_path = av_realloc_array(path, pathsize, sizeof *path);
        if (!new_path)
            goto exit;
        path = new_path;
        path[pathlen] = '\\';
        wcscpy(path + pathlen + 1, name_w);
        module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
        if (module == NULL) {
            // Next try System32 directory
            pathlen = GetSystemDirectoryW(path, pathsize);
            if (!pathlen)
            pathlen = GetSystemDirectoryW(path, MAX_PATH);
            if (pathlen == 0 || pathlen + wcslen(name_w) + 2 > MAX_PATH)
                goto exit;
            // Buffer is not enough in two cases:
            // 1. system directory + \ + module name
            // 2. system directory even without the module name.
            if (pathlen + namelen + 2 > pathsize) {
                pathsize = pathlen + namelen + 2;
                new_path = av_realloc_array(path, pathsize, sizeof *path);
                if (!new_path)
                    goto exit;
                path = new_path;
                // Query again to handle the case #2.
                pathlen = GetSystemDirectoryW(path, pathsize);
                if (!pathlen)
                    goto exit;
            }
            path[pathlen] = L'\\';
            path[pathlen] = '\\';
            wcscpy(path + pathlen + 1, name_w);
            module = LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
        }
@@ -127,19 +73,16 @@ exit:
#   define LOAD_LIBRARY_SEARCH_SYSTEM32        0x00000800
#endif
#if HAVE_WINRT
    if (!name_w)
    wchar_t *name_w = NULL;
    int ret;
    if (utf8towchar(name, &name_w))
        return NULL;
    module = LoadPackagedLibrary(name_w, 0);
#else
#define LOAD_FLAGS (LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32)
    /* filename may be be in CP_ACP */
    if (!name_w)
        return LoadLibraryExA(name, NULL, LOAD_FLAGS);
    module = LoadLibraryExW(name_w, NULL, LOAD_FLAGS);
#undef LOAD_FLAGS
#endif
    ret = LoadPackagedLibrary(name_w, 0);
    av_free(name_w);
    return module;
    return ret;
#else
    return LoadLibraryExA(name, NULL, LOAD_LIBRARY_SEARCH_APPLICATION_DIR | LOAD_LIBRARY_SEARCH_SYSTEM32);
#endif
}
#define dlopen(name, flags) win32_dlopen(name)
#define dlclose FreeLibrary
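A sketch of how callers use the shim above (assuming dlsym is mapped to GetProcAddress elsewhere in this header, which is not shown in these hunks; the DLL and symbol names are hypothetical):

#include "compat/w32dlfcn.h"   /* assumed include path */

typedef int (*some_fn)(void);

static some_fn load_entry(void)
{
    /* win32_dlopen() restricts the search to the application and System32 dirs */
    void *lib = dlopen("somelib.dll", 0);          /* hypothetical DLL name */
    if (!lib)
        return NULL;
    return (some_fn)dlsym(lib, "some_entry");      /* hypothetical symbol */
}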
@@ -35,6 +35,7 @@
 * As most functions here are used without checking return values,
 * only implement return values as necessary. */

#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
#include <time.h>
@@ -50,7 +51,7 @@ typedef struct pthread_t {
    void *(*func)(void* arg);
    void *arg;
    void *ret;
} *pthread_t;
} pthread_t;

/* use light weight mutex/condition variable API for Windows Vista and later */
typedef SRWLOCK pthread_mutex_t;
@@ -65,16 +66,9 @@ typedef CONDITION_VARIABLE pthread_cond_t;
#define PTHREAD_CANCEL_ENABLE 1
#define PTHREAD_CANCEL_DISABLE 0

#if HAVE_WINRT
#define THREADFUNC_RETTYPE DWORD
#else
#define THREADFUNC_RETTYPE unsigned
#endif

static av_unused THREADFUNC_RETTYPE
__stdcall attribute_align_arg win32thread_worker(void *arg)
static av_unused unsigned __stdcall attribute_align_arg win32thread_worker(void *arg)
{
    pthread_t h = (pthread_t)arg;
    pthread_t *h = (pthread_t*)arg;
    h->ret = h->func(h->arg);
    return 0;
}
@@ -82,35 +76,21 @@ __stdcall attribute_align_arg win32thread_worker(void *arg)
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
                                    void *(*start_routine)(void*), void *arg)
{
    pthread_t ret;

    ret = av_mallocz(sizeof(*ret));
    if (!ret)
        return EAGAIN;

    ret->func = start_routine;
    ret->arg = arg;
    thread->func = start_routine;
    thread->arg = arg;
#if HAVE_WINRT
    ret->handle = (void*)CreateThread(NULL, 0, win32thread_worker, ret,
                                      0, NULL);
    thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread,
                                         0, NULL);
#else
    ret->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, ret,
                                        0, NULL);
    thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
                                           0, NULL);
#endif

    if (!ret->handle) {
        av_free(ret);
        return EAGAIN;
    }

    *thread = ret;

    return 0;
    return !thread->handle;
}

static av_unused int pthread_join(pthread_t thread, void **value_ptr)
{
    DWORD ret = WaitForSingleObject(thread->handle, INFINITE);
    DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
    if (ret != WAIT_OBJECT_0) {
        if (ret == WAIT_ABANDONED)
            return EINVAL;
@@ -118,9 +98,8 @@ static av_unused int pthread_join(pthread_t thread, void **value_ptr)
        return EDEADLK;
    }
    if (value_ptr)
        *value_ptr = thread->ret;
    CloseHandle(thread->handle);
    av_free(thread);
    *value_ptr = thread.ret;
    CloseHandle(thread.handle);
    return 0;
}
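The hunks above show the heap-allocated pthread_t handle on one side of the compare and the older struct-by-value variant on the other; user code is the same either way. A minimal usage sketch through the emulation layer (illustrative only):

#include <stdint.h>
#include <stdio.h>
#include "compat/w32pthreads.h"   /* assumed include path */

static void *work(void *arg)
{
    /* return the argument plus 42, packed into the pointer-sized result */
    return (void *)(intptr_t)(42 + (intptr_t)arg);
}

int main(void)
{
    pthread_t t;
    void *ret;
    if (pthread_create(&t, NULL, work, (void *)(intptr_t)1))
        return 1;
    pthread_join(t, &ret);
    printf("%d\n", (int)(intptr_t)ret);   /* prints 43 */
    return 0;
}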
@@ -1,32 +0,0 @@
#!/bin/sh

if [ "$1" = "--version" ]; then
    rc.exe -?
    exit $?
fi

if [ $# -lt 2 ]; then
    echo "Usage: mswindres [-I/include/path ...] [-DSOME_DEFINE ...] [-o output.o] input.rc [output.o]" >&2
    exit 0
fi

EXTRA_OPTS="-nologo"

while [ $# -gt 2 ]; do
    case $1 in
    -D*) EXTRA_OPTS="$EXTRA_OPTS -d$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
    -I*) EXTRA_OPTS="$EXTRA_OPTS -i$(echo $1 | sed -e "s/^..//" -e "s/ /\\\\ /g")" ;;
    -o) OPT_OUT="$2"; shift ;;
    esac
    shift
done

IN="$1"
if [ -z "$OPT_OUT" ]; then
    OUT="$2"
else
    OUT="$OPT_OUT"
fi

eval set -- $EXTRA_OPTS
rc.exe "$@" -fo "$OUT" "$IN"
doc/APIchanges (1101 lines changed) — file diff suppressed because it is too large.
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
# could be handy for archiving the generated documentation or if some version
# control system is used.

PROJECT_NUMBER = 8.0.1
PROJECT_NUMBER = 4.4.4

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
@@ -1093,7 +1093,7 @@ HTML_STYLESHEET =
# cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates.
# standard style sheet and is therefor more robust against future updates.
# Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra stylesheet files is of importance (e.g. the last
# stylesheet in the list overrules the setting of the previous ones in the
@@ -1636,7 +1636,7 @@ EXTRA_PACKAGES =
# Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string,
# for the replacement values of the other commands the user is referred to
# HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES.
@@ -1980,7 +1980,6 @@ PREDEFINED = __attribute__(x)= \
                         av_alloc_size(...)= \
                         AV_GCC_VERSION_AT_LEAST(x,y)=1 \
                         AV_GCC_VERSION_AT_MOST(x,y)=0 \
                         "FF_PAD_STRUCTURE(name,size,...)=typedef struct name { __VA_ARGS__ } name;" \
                         __GNUC__

# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
@@ -19,7 +19,6 @@ MANPAGES3   = $(LIBRARIES-yes:%=doc/%.3)
MANPAGES    = $(MANPAGES1) $(MANPAGES3)
PODPAGES    = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
HTMLPAGES   = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
              doc/community.html \
              doc/developer.html \
              doc/faq.html \
              doc/fate.html \
@@ -28,9 +27,6 @@ HTMLPAGES   = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMP
              doc/mailing-list-faq.html \
              doc/nut.html \
              doc/platform.html \
              $(SRC_PATH)/doc/bootstrap.min.css \
              $(SRC_PATH)/doc/style.min.css \
              $(SRC_PATH)/doc/default.css \

TXTPAGES    = doc/fate.txt \

@@ -60,7 +56,7 @@ GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)

$(GENTEXI): TAG = GENTEXI
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
	$(M)doc/print_options$(HOSTEXESUF) $* > $@
	$(M)doc/print_options $* > $@

doc/%.html: TAG = HTML
doc/%-all.html: TAG = HTML
@@ -106,7 +102,7 @@ DOXY_INPUT_DEPS = $(addprefix $(SRC_PATH)/, $(DOXY_INPUT)) ffbuild/config.mak

doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT_DEPS)
	$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $$PWD/doc/doxy $(SRC_PATH) doc/Doxyfile $(DOXYGEN) $(DOXY_INPUT);
	$(M)OUT_DIR=$$PWD/doc/doxy; cd $(SRC_PATH); ./doc/doxy-wrapper.sh $$OUT_DIR $< $(DOXYGEN) $(DOXY_INPUT);

install-doc: install-html install-man
@@ -81,7 +81,7 @@ Top-left position.
|
||||
@end table
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (@emph{time_scale / num_units_in_display_tick}) in
|
||||
Set the tick rate (@emph{num_units_in_display_tick / time_scale}) in
|
||||
the timing info in the sequence header.
|
||||
@item num_ticks_per_picture
|
||||
Set the number of ticks in each picture, to indicate that the stream
|
||||
@@ -101,29 +101,6 @@ Remove zero padding at the end of a packet.
|
||||
Extract the core from a DCA/DTS stream, dropping extensions such as
|
||||
DTS-HD.
|
||||
|
||||
@section dovi_rpu
|
||||
|
||||
Manipulate Dolby Vision metadata in a HEVC/AV1 bitstream, optionally enabling
|
||||
metadata compression.
|
||||
|
||||
@table @option
|
||||
@item strip
|
||||
If enabled, strip all Dolby Vision metadata (configuration record + RPU data
|
||||
blocks) from the stream.
|
||||
@item compression
|
||||
Which compression level to enable.
|
||||
@table @samp
|
||||
@item none
|
||||
No metadata compression.
|
||||
@item limited
|
||||
Limited metadata compression scheme. Should be compatible with most devices.
|
||||
This is the default.
|
||||
@item extended
|
||||
Extended metadata compression. Devices are not required to support this. Note
|
||||
that this level currently behaves the same as @samp{limited} in libavcodec.
|
||||
@end table
|
||||
@end table
|
||||
|
||||
@section dump_extra
|
||||
|
||||
Add extradata to the beginning of the filtered packets except when
|
||||
@@ -155,86 +132,10 @@ the header stored in extradata to the key packets:
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
@end example
|
||||
|
||||
@section dv_error_marker
|
||||
|
||||
Blocks in DV which are marked as damaged are replaced by blocks of the specified color.
|
||||
|
||||
@table @option
|
||||
@item color
|
||||
The color to replace damaged blocks by
|
||||
@item sta
|
||||
A 16 bit mask which specifies which of the 16 possible error status values are
|
||||
to be replaced by colored blocks. 0xFFFE is the default which replaces all non 0
|
||||
error status values.
|
||||
@table @samp
|
||||
@item ok
|
||||
No error, no concealment
|
||||
@item err
|
||||
Error, No concealment
|
||||
@item res
|
||||
Reserved
|
||||
@item notok
|
||||
Error or concealment
|
||||
@item notres
|
||||
Not reserved
|
||||
@item Aa, Ba, Ca, Ab, Bb, Cb, A, B, C, a, b, erri, erru
|
||||
The specific error status code
|
||||
@end table
|
||||
see page 44-46 or section 5.5 of
|
||||
@url{http://web.archive.org/web/20060927044735/http://www.smpte.org/smpte_store/standards/pdf/s314m.pdf}
|
||||
|
||||
@end table
|
||||
|
||||
@section eac3_core
|
||||
|
||||
Extract the core from a E-AC-3 stream, dropping extra channels.
|
||||
|
||||
@section eia608_to_smpte436m
|
||||
|
||||
Convert from a @code{EIA_608} stream to a @code{SMPTE_436M_ANC} data stream, wrapping the closed captions in CTA-708 CDP VANC packets.
|
||||
|
||||
@table @option
|
||||
@item line_number
|
||||
Choose which line number the generated VANC packets should go on. You generally want either line 9 (the default) or 11.
|
||||
@item wrapping_type
|
||||
Choose the SMPTE 436M wrapping type, defaults to @samp{vanc_frame}.
|
||||
It accepts the values:
|
||||
@table @samp
|
||||
@item vanc_frame
|
||||
VANC frame (interlaced or segmented progressive frame)
|
||||
@item vanc_field_1
|
||||
@item vanc_field_2
|
||||
@item vanc_progressive_frame
|
||||
@end table
|
||||
@item sample_coding
|
||||
Choose the SMPTE 436M sample coding, defaults to @samp{8bit_luma}.
|
||||
It accepts the values:
|
||||
@table @samp
|
||||
@item 8bit_luma
|
||||
8-bit component luma samples
|
||||
@item 8bit_color_diff
|
||||
8-bit component color difference samples
|
||||
@item 8bit_luma_and_color_diff
|
||||
8-bit component luma and color difference samples
|
||||
@item 10bit_luma
|
||||
10-bit component luma samples
|
||||
@item 10bit_color_diff
|
||||
10-bit component color difference samples
|
||||
@item 10bit_luma_and_color_diff
|
||||
10-bit component luma and color difference samples
|
||||
@item 8bit_luma_parity_error
|
||||
8-bit component luma samples with parity error
|
||||
@item 8bit_color_diff_parity_error
|
||||
8-bit component color difference samples with parity error
|
||||
@item 8bit_luma_and_color_diff_parity_error
|
||||
8-bit component luma and color difference samples with parity error
|
||||
@end table
|
||||
@item initial_cdp_sequence_cntr
|
||||
The initial value of the CDP's 16-bit unsigned integer @code{cdp_hdr_sequence_cntr} and @code{cdp_ftr_sequence_cntr} fields. Defaults to 0.
|
||||
@item cdp_frame_rate
|
||||
Set the CDP's @code{cdp_frame_rate} field. This does not change the timing of the data stream; it only changes the value inserted in that field of the generated CDP packets. Defaults to @samp{30000/1001}.
|
||||
@end table
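A hedged example of how this might be used (assuming the input contains an
EIA-608 closed caption data stream and the result is written to an MXF file;
stream specifiers and file names are illustrative):

@example
ffmpeg -i INPUT -map 0 -c copy -bsf:d eia608_to_smpte436m=line_number=9 OUTPUT.mxf
@end example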
|
||||
|
||||
@section extract_extradata
|
||||
|
||||
Extract the in-band extradata.
|
||||
@@ -268,13 +169,6 @@ Identical to @option{pass_types}, except the units in the given set
|
||||
removed and all others passed through.
|
||||
@end table
|
||||
|
||||
The types used by pass_types and remove_types correspond to NAL unit types
|
||||
(nal_unit_type) in H.264, HEVC and H.266 (see Table 7-1 in the H.264
|
||||
and HEVC specifications or Table 5 in the H.266 specification), to
|
||||
marker values for JPEG (without 0xFF prefix) and to start codes without
|
||||
start code prefix (i.e. the byte following the 0x000001) for MPEG-2.
|
||||
For VP8 and VP9, every unit has type zero.
|
||||
|
||||
Extradata is unchanged by this transformation, but note that if the stream
|
||||
contains inline parameter sets then the output may be unusable if they are
|
||||
removed.
|
||||
@@ -289,21 +183,6 @@ To remove all AUDs, SEI and filler from an H.265 stream:
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=35|38-40' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all user data from a MPEG-2 stream, including Closed Captions:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=178' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all SEI from a H264 stream, including Closed Captions:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=6' OUTPUT
|
||||
@end example
|
||||
|
||||
To remove all prefix and suffix SEI from a HEVC stream, including Closed Captions and dynamic HDR:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v copy -bsf:v 'filter_units=remove_types=39|40' OUTPUT
|
||||
@end example
|
||||
|
||||
@section hapqa_extract
|
||||
|
||||
Extract the RGB or alpha part of a HAPQA file, without recompression, in order to create a HAPQ or a HAPAlphaOnly file.
|
||||
@@ -338,16 +217,12 @@ Modify metadata embedded in an H.264 stream.
|
||||
Insert or remove AUD NAL units in all access units of the stream.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
@item sample_aspect_ratio
|
||||
Set the sample aspect ratio of the stream in the VUI parameters.
|
||||
See H.264 table E-1.
|
||||
|
||||
@item overscan_appropriate_flag
|
||||
Set whether the stream is suitable for display using overscan
|
||||
@@ -369,7 +244,7 @@ Set the chroma sample location in the stream (see H.264 section
|
||||
E.2.1 and figure E-1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate (time_scale / num_units_in_tick) in the VUI
|
||||
Set the tick rate (num_units_in_tick / time_scale) in the VUI
|
||||
parameters. This is the smallest time unit representable in the
|
||||
stream, and in many cases represents the field rate of the stream
|
||||
(double the frame rate).
|
||||
@@ -378,11 +253,6 @@ Set whether the stream has fixed framerate - typically this indicates
|
||||
that the framerate is exactly half the tick rate, but the exact
|
||||
meaning is dependent on interlacing and the picture structure (see
|
||||
H.264 section E.2.1 and table E-6).
|
||||
@item zero_new_constraint_set_flags
|
||||
Zero constraint_set4_flag and constraint_set5_flag in the SPS. These
|
||||
bits were reserved in a previous version of the H.264 spec, and thus
|
||||
some hardware decoders require these to be zero. The result of zeroing
|
||||
this is still a valid bitstream.
|
||||
|
||||
@item crop_left
|
||||
@item crop_right
|
||||
@@ -406,37 +276,6 @@ insert the string ``hello'' associated with the given UUID.
|
||||
@item delete_filler
|
||||
Deletes both filler NAL units and filler SEI messages.
|
||||
|
||||
@item display_orientation
|
||||
Insert, extract or remove Display orientation SEI messages.
|
||||
See H.264 section D.1.27 and D.2.27 for syntax and semantics.
|
||||
|
||||
@table @samp
|
||||
@item pass
|
||||
@item insert
|
||||
@item remove
|
||||
@item extract
|
||||
@end table
|
||||
|
||||
Default is pass.
|
||||
|
||||
Insert mode works in conjunction with @code{rotate} and @code{flip} options.
|
||||
Any pre-existing Display orientation messages will be removed in insert or remove mode.
|
||||
Extract mode attaches the display matrix to the packet as side data.
|
||||
|
||||
@item rotate
|
||||
Set rotation in display orientation SEI (anticlockwise angle in degrees).
|
||||
Range is -360 to +360. Default is NaN.
|
||||
|
||||
@item flip
|
||||
Set flip in display orientation SEI.
|
||||
|
||||
@table @samp
|
||||
@item horizontal
|
||||
@item vertical
|
||||
@end table
|
||||
|
||||
Default is unset.
|
||||
|
||||
@item level
|
||||
Set the level in the SPS. Refer to H.264 section A.3 and tables A-1
|
||||
to A-5.
|
||||
@@ -469,21 +308,12 @@ Please note that this filter is auto-inserted for MPEG-TS (muxer
|
||||
|
||||
@section h264_redundant_pps
|
||||
|
||||
This applies a specific fixup to some Blu-ray BDMV H264 streams
|
||||
which contain redundant PPSs. The PPSs modify irrelevant parameters
|
||||
of the stream, confusing other transformations which require
|
||||
the correct extradata.
|
||||
This applies a specific fixup to some Blu-ray streams which contain
|
||||
redundant PPSs modifying irrelevant parameters of the stream which
|
||||
confuse other transformations which require correct extradata.
|
||||
|
||||
The encoder used on these impacted streams adds extra PPSs throughout
|
||||
the stream, varying the initial QP and whether weighted prediction
|
||||
was enabled. This causes issues after copying the stream into
|
||||
a global header container, as the starting PPS is not suitable
|
||||
for the rest of the stream. One side effect, for example,
|
||||
is seeking will return garbled output until a new PPS appears.
|
||||
|
||||
This BSF removes the extra PPSs and rewrites the slice headers
|
||||
such that the stream uses a single leading PPS in the global header,
|
||||
which resolves the issue.
|
||||
A new single global PPS is created, and all of the redundant PPSs
|
||||
within the stream are removed.
|
||||
|
||||
@section hevc_metadata
|
||||
|
||||
@@ -517,8 +347,8 @@ Set the chroma sample location in the stream (see H.265 section
|
||||
E.3.1 and figure E.1).
|
||||
|
||||
@item tick_rate
|
||||
Set the tick rate in the VPS and VUI parameters (time_scale /
|
||||
num_units_in_tick). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
Set the tick rate in the VPS and VUI parameters (num_units_in_tick /
|
||||
time_scale). Combined with @option{num_ticks_poc_diff_one}, this can
|
||||
set a constant framerate in the stream. Note that it is likely to be
|
||||
overridden by container parameters when the stream is in a container.
|
||||
|
||||
@@ -537,10 +367,6 @@ will replace the current ones if the stream is already cropped.
|
||||
These fields are set in pixels. Note that some sizes may not be
|
||||
representable if the chroma is subsampled (H.265 section 7.4.3.2.1).
|
||||
|
||||
@item width
|
||||
@item height
|
||||
Set width and height after crop.
|
||||
|
||||
@item level
|
||||
Set the level in the VPS and SPS. See H.265 section A.4 and tables
|
||||
A.6 and A.7.
|
||||
@@ -635,6 +461,10 @@ metadata header from each subtitle packet.
|
||||
|
||||
See also the @ref{text2movsub} filter.
|
||||
|
||||
@section mp3decomp
|
||||
|
||||
Decompress non-standard compressed MP3 audio headers.
|
||||
|
||||
@section mpeg2_metadata
|
||||
|
||||
Modify metadata embedded in an MPEG-2 stream.
|
||||
@@ -699,67 +529,20 @@ container. Can be used for fuzzing or testing error resilience/concealment.
|
||||
Parameters:
|
||||
@table @option
|
||||
@item amount
|
||||
Accepts an expression whose evaluation per-packet determines how often bytes in that
|
||||
packet will be modified. A value below 0 will result in a variable frequency.
|
||||
Default is 0 which results in no modification. However, if neither amount nor drop is specified,
|
||||
amount will be set to @var{-1}. See below for accepted variables.
|
||||
@item drop
|
||||
Accepts an expression evaluated per-packet whose value determines whether that packet is dropped.
|
||||
Evaluation to a positive value results in the packet being dropped. Evaluation to a negative
|
||||
value results in a variable chance of it being dropped, roughly inverse in proportion to the magnitude
|
||||
of the value. Default is 0 which results in no drops. See below for accepted variables.
|
||||
A numeral string, whose value is related to how often output bytes will
|
||||
be modified. Therefore, values below or equal to 0 are forbidden, and
|
||||
the lower the more frequent bytes will be modified, with 1 meaning
|
||||
every byte is modified.
|
||||
@item dropamount
|
||||
Accepts a non-negative integer, which assigns a variable chance of it being dropped, roughly inverse
|
||||
in proportion to the value. Default is 0 which results in no drops. This option is kept for backwards
|
||||
compatibility and is equivalent to setting drop to a negative value with the same magnitude
|
||||
i.e. @code{dropamount=4} is the same as @code{drop=-4}. Ignored if drop is also specified.
|
||||
A numeral string, whose value is related to how often packets will be dropped.
|
||||
Therefore, values below or equal to 0 are forbidden, and the lower the more
|
||||
frequent packets will be dropped, with 1 meaning every packet is dropped.
|
||||
@end table
|
||||
|
||||
Both @code{amount} and @code{drop} accept expressions containing the following variables:
|
||||
|
||||
@table @samp
|
||||
@item n
|
||||
The index of the packet, starting from zero.
|
||||
@item tb
|
||||
The timebase for packet timestamps.
|
||||
@item pts
|
||||
Packet presentation timestamp.
|
||||
@item dts
|
||||
Packet decoding timestamp.
|
||||
@item nopts
|
||||
Constant representing AV_NOPTS_VALUE.
|
||||
@item startpts
|
||||
First non-AV_NOPTS_VALUE PTS seen in the stream.
|
||||
@item startdts
|
||||
First non-AV_NOPTS_VALUE DTS seen in the stream.
|
||||
@item duration
|
||||
@itemx d
|
||||
Packet duration, in timebase units.
|
||||
@item pos
|
||||
Packet position in input; may be -1 when unknown or not set.
|
||||
@item size
|
||||
Packet size, in bytes.
|
||||
@item key
|
||||
Whether packet is marked as a keyframe.
|
||||
@item state
|
||||
A pseudo random integer, primarily derived from the content of packet payload.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
Apply modification to every byte but don't drop any packets.
|
||||
The following example applies the modification to every byte but does not drop
|
||||
any packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
|
||||
@end example
|
||||
|
||||
Drop every video packet not marked as a keyframe after timestamp 30s but do not
|
||||
modify any of the remaining packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(pts*tb\,30)*not(key)' output.mkv
|
||||
@end example
|
||||
|
||||
Drop one second of audio every 10 seconds and add some random noise to the rest.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(pts*tb\,10)\,9\,10)' output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
|
||||
@end example
|
||||
|
||||
@section null
|
||||
@@ -795,14 +578,6 @@ for NTSC frame rate using the @option{frame_rate} option.
|
||||
ffmpeg -f lavfi -i sine=r=48000:d=1 -c pcm_s16le -bsf pcm_rechunk=r=30000/1001 -f framecrc -
|
||||
@end example
|
||||
|
||||
@section pgs_frame_merge
|
||||
|
||||
Merge a sequence of PGS Subtitle segments ending with an "end of display set"
|
||||
segment into a single packet.
|
||||
|
||||
This is required by some containers that support PGS subtitles
|
||||
(muxer @code{matroska}).
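A minimal sketch (file names are illustrative):

@example
ffmpeg -i INPUT -map 0 -c copy -bsf:s pgs_frame_merge output.mkv
@end example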
|
||||
|
||||
@section prores_metadata
|
||||
|
||||
Modify color property metadata embedded in prores stream.
|
||||
@@ -909,10 +684,6 @@ It accepts the following parameters:
|
||||
@item pts
|
||||
@item dts
|
||||
Set expressions for PTS, DTS or both.
|
||||
@item duration
|
||||
Set expression for duration.
|
||||
@item time_base
|
||||
Set output time base.
|
||||
@end table
|
||||
|
||||
The expressions are evaluated through the eval API and can contain the following
|
||||
@@ -936,9 +707,6 @@ The demux timestamp in input.
|
||||
@item PTS
|
||||
The presentation timestamp in input.
|
||||
|
||||
@item DURATION
|
||||
The duration in input.
|
||||
|
||||
@item STARTDTS
|
||||
The DTS of the first packet.
|
||||
|
||||
@@ -951,54 +719,19 @@ The previous input DTS.
|
||||
@item PREV_INPTS
|
||||
The previous input PTS.
|
||||
|
||||
@item PREV_INDURATION
|
||||
The previous input duration.
|
||||
|
||||
@item PREV_OUTDTS
|
||||
The previous output DTS.
|
||||
|
||||
@item PREV_OUTPTS
|
||||
The previous output PTS.
|
||||
|
||||
@item PREV_OUTDURATION
|
||||
The previous output duration.
|
||||
|
||||
@item NEXT_DTS
|
||||
The next input DTS.
|
||||
|
||||
@item NEXT_PTS
|
||||
The next input PTS.
|
||||
|
||||
@item NEXT_DURATION
|
||||
The next input duration.
|
||||
|
||||
@item TB
|
||||
The timebase of the stream the packet belongs to.
|
||||
|
||||
@item TB_OUT
|
||||
The output timebase.
|
||||
|
||||
@item SR
|
||||
The sample rate of the stream the packet belongs to.
|
||||
|
||||
@item NOPTS
|
||||
The AV_NOPTS_VALUE constant.
|
||||
@end table
|
||||
|
||||
For example, to set PTS equal to DTS (not recommended if B-frames are involved):
|
||||
@example
|
||||
ffmpeg -i INPUT -c:a copy -bsf:a setts=pts=DTS out.mkv
|
||||
@end example
|
||||
|
||||
@section showinfo
|
||||
Log basic packet information. Mainly useful for testing, debugging,
|
||||
and development.
|
||||
|
||||
@section smpte436m_to_eia608
|
||||
|
||||
Convert from a @code{SMPTE_436M_ANC} data stream to an @code{EIA_608} stream,
extracting the closed captions from CTA-708 CDP VANC packets, and ignoring all other data.
||||
|
||||
@anchor{text2movsub}
|
||||
@section text2movsub
|
||||
|
||||
|
||||
doc/bootstrap.min.css (vendored, 2 changed lines): file diff suppressed because one or more lines are too long
@@ -30,13 +30,6 @@ fate
|
||||
fate-list
|
||||
List all fate/regression test targets.
|
||||
|
||||
fate-list-failing
|
||||
List the fate tests that failed the last time they were executed.
|
||||
|
||||
fate-clear-reports
|
||||
Remove the test reports from previous test executions (getting rid of
|
||||
potentially stale results from fate-list-failing).
|
||||
|
||||
install
|
||||
Install headers, libraries and programs.
|
||||
|
||||
@@ -70,3 +63,4 @@ make -j<num>
|
||||
make -k
|
||||
Continue the build in case of errors; this is sometimes useful for the
regression tests, but note that it will still not run all of them.
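For example (illustrative; assuming FATE samples are already configured), a
typical cycle when re-running only previously failing tests could be:

    make fate-clear-reports
    make fate -j8
    make fate-list-failing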
|
||||
|
||||
|
||||
doc/codecs.texi (147 changed lines)
@@ -3,7 +3,7 @@
|
||||
@c man begin CODEC OPTIONS
|
||||
|
||||
libavcodec provides some generic global options, which can be set on
|
||||
all the encoders and decoders. In addition, each codec may support
|
||||
all the encoders and decoders. In addition each codec may support
|
||||
so-called private options, which are specific for a given codec.
|
||||
|
||||
Sometimes, a global option may only affect a specific kind of codec,
|
||||
@@ -144,6 +144,21 @@ Default value is 0.
|
||||
@item b_qfactor @var{float} (@emph{encoding,video})
|
||||
Set qp factor between P and B frames.
|
||||
|
||||
@item b_strategy @var{integer} (@emph{encoding,video})
|
||||
Set strategy to choose between I/P/B-frames.
|
||||
|
||||
@item ps @var{integer} (@emph{encoding,video})
|
||||
Set RTP payload size in bytes.
|
||||
|
||||
@item mv_bits @var{integer}
|
||||
@item header_bits @var{integer}
|
||||
@item i_tex_bits @var{integer}
|
||||
@item p_tex_bits @var{integer}
|
||||
@item i_count @var{integer}
|
||||
@item p_count @var{integer}
|
||||
@item skip_count @var{integer}
|
||||
@item misc_bits @var{integer}
|
||||
@item frame_bits @var{integer}
|
||||
@item codec_tag @var{integer}
|
||||
@item bug @var{flags} (@emph{decoding,video})
|
||||
Work around encoder bugs that are not automatically detected.
|
||||
@@ -233,6 +248,9 @@ consider things that a sane encoder should not do as an error
|
||||
|
||||
@item block_align @var{integer}
|
||||
|
||||
@item mpeg_quant @var{integer} (@emph{encoding,video})
|
||||
Use MPEG quantizers instead of H.263.
|
||||
|
||||
@item rc_override_count @var{integer}
|
||||
|
||||
@item maxrate @var{integer} (@emph{encoding,audio,video})
|
||||
@@ -338,6 +356,19 @@ favor predicting from the previous frame instead of the current
|
||||
|
||||
@item bits_per_coded_sample @var{integer}
|
||||
|
||||
@item pred @var{integer} (@emph{encoding,video})
|
||||
Set prediction method.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item left
|
||||
|
||||
@item plane
|
||||
|
||||
@item median
|
||||
|
||||
@end table
|
||||
|
||||
@item aspect @var{rational number} (@emph{encoding,video})
|
||||
Set sample aspect ratio.
|
||||
|
||||
@@ -554,6 +585,9 @@ sab diamond motion estimation
|
||||
@item last_pred @var{integer} (@emph{encoding,video})
|
||||
Set amount of motion predictors from the previous frame.
|
||||
|
||||
@item preme @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation.
|
||||
|
||||
@item precmp @var{integer} (@emph{encoding,video})
|
||||
Set pre motion estimation compare function.
|
||||
|
||||
@@ -602,6 +636,23 @@ Set limit motion vectors range (1023 for DivX player).
|
||||
|
||||
@item global_quality @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
@item coder @var{integer} (@emph{encoding,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item vlc
|
||||
variable length coder / huffman coder
|
||||
@item ac
|
||||
arithmetic coder
|
||||
@item raw
|
||||
raw (no encoding)
|
||||
@item rle
|
||||
run-length coder
|
||||
@end table
|
||||
|
||||
@item context @var{integer} (@emph{encoding,video})
|
||||
Set context model.
|
||||
|
||||
@item slice_flags @var{integer}
|
||||
|
||||
@item mbd @var{integer} (@emph{encoding,video})
|
||||
@@ -617,6 +668,12 @@ use fewest bits
|
||||
use best rate distortion
|
||||
@end table
|
||||
|
||||
@item sc_threshold @var{integer} (@emph{encoding,video})
|
||||
Set scene change threshold.
|
||||
|
||||
@item nr @var{integer} (@emph{encoding,video})
|
||||
Set noise reduction.
|
||||
|
||||
@item rc_init_occupancy @var{integer} (@emph{encoding,video})
|
||||
Set number of bits which should be loaded into the rc buffer before
|
||||
decoding starts.
|
||||
@@ -644,8 +701,6 @@ for codecs that support it. See also @file{doc/examples/export_mvs.c}.
|
||||
Do not skip samples and export skip information as frame side data.
|
||||
@item ass_ro_flush_noop
|
||||
Do not reset ASS ReadOrder field on flush.
|
||||
@item icc_profiles
|
||||
Generate/parse embedded ICC profiles from/to colorimetry tags.
|
||||
@end table
|
||||
|
||||
@item export_side_data @var{flags} (@emph{decoding/encoding,audio,video,subtitles})
|
||||
@@ -664,8 +719,6 @@ for codecs that support it. At present, those are H.264 and VP9.
|
||||
@item film_grain
|
||||
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
|
||||
Supported at present by AV1 decoders.
|
||||
@item enhancements
|
||||
Export picture enhancement metadata through frame side data, e.g. LCEVC (see @code{AV_FRAME_DATA_LCEVC}).
|
||||
@end table
|
||||
|
||||
@item threads @var{integer} (@emph{decoding/encoding,video})
|
||||
@@ -699,24 +752,73 @@ profiles are documented in the relevant encoder documentation.
|
||||
|
||||
@item level @var{integer} (@emph{encoding,audio,video})
|
||||
|
||||
Set the encoder level. This level depends on the specific codec, and
|
||||
might correspond to the profile level. It is set by default to
|
||||
@samp{unknown}.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@end table
|
||||
|
||||
@item lowres @var{integer} (@emph{decoding,audio,video})
|
||||
Decode at reduced resolution: 1 = 1/2, 2 = 1/4, 3 = 1/8 of the original.
|
||||
|
||||
@item skip_threshold @var{integer} (@emph{encoding,video})
|
||||
Set frame skip threshold.
|
||||
|
||||
@item skip_factor @var{integer} (@emph{encoding,video})
|
||||
Set frame skip factor.
|
||||
|
||||
@item skip_exp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip exponent.
|
||||
Negative values behave identically to the corresponding positive ones, except
that the score is normalized.
Positive values exist primarily for compatibility reasons and are not so useful.
|
||||
|
||||
@item skipcmp @var{integer} (@emph{encoding,video})
|
||||
Set frame skip compare function.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item w53
|
||||
5/3 wavelet, only used in snow
|
||||
@item w97
|
||||
9/7 wavelet, only used in snow
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
|
||||
@item mblmin @var{integer} (@emph{encoding,video})
|
||||
Set the minimum macroblock Lagrange factor (VBR).

@item mblmax @var{integer} (@emph{encoding,video})
Set the maximum macroblock Lagrange factor (VBR).
|
||||
|
||||
@item mepc @var{integer} (@emph{encoding,video})
|
||||
Set motion estimation bitrate penalty compensation (1.0 = 256).
|
||||
|
||||
@item skip_loop_filter @var{integer} (@emph{decoding,video})
|
||||
@item skip_idct @var{integer} (@emph{decoding,video})
|
||||
@item skip_frame @var{integer} (@emph{decoding,video})
|
||||
@@ -756,24 +858,45 @@ Default value is @samp{default}.
|
||||
@item bidir_refine @var{integer} (@emph{encoding,video})
|
||||
Refine the two motion vectors used in bidirectional macroblocks.
|
||||
|
||||
@item brd_scale @var{integer} (@emph{encoding,video})
|
||||
Downscale frames for dynamic B-frame decision.
|
||||
|
||||
@item keyint_min @var{integer} (@emph{encoding,video})
|
||||
Set minimum interval between IDR-frames.
|
||||
|
||||
@item refs @var{integer} (@emph{encoding,video})
|
||||
Set reference frames to consider for motion compensation.
|
||||
|
||||
@item chromaoffset @var{integer} (@emph{encoding,video})
|
||||
Set chroma qp offset from luma.
|
||||
|
||||
@item trellis @var{integer} (@emph{encoding,audio,video})
|
||||
Set rate-distortion optimal quantization.
|
||||
|
||||
@item mv0_threshold @var{integer} (@emph{encoding,video})
|
||||
@item b_sensitivity @var{integer} (@emph{encoding,video})
|
||||
Adjust sensitivity of b_frame_strategy 1.
|
||||
|
||||
@item compression_level @var{integer} (@emph{encoding,audio,video})
|
||||
@item min_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item max_prediction_order @var{integer} (@emph{encoding,audio})
|
||||
@item timecode_frame_start @var{integer} (@emph{encoding,video})
|
||||
Set the GOP timecode frame start number, in non-drop-frame format.
|
||||
|
||||
@item bits_per_raw_sample @var{integer}
|
||||
@item channel_layout @var{integer} (@emph{decoding/encoding,audio})
|
||||
See @ref{channel layout syntax,,the Channel Layout section in the ffmpeg-utils(1) manual,ffmpeg-utils}
|
||||
for the required syntax.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@end table
|
||||
@item request_channel_layout @var{integer} (@emph{decoding,audio})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@end table
|
||||
@item rc_max_vbv_use @var{float} (@emph{encoding,video})
|
||||
@item rc_min_vbv_use @var{float} (@emph{encoding,video})
|
||||
@item ticks_per_frame @var{integer} (@emph{decoding/encoding,audio,video})
|
||||
|
||||
@item color_primaries @var{integer} (@emph{decoding/encoding,video})
|
||||
Possible values:
|
||||
@@ -888,11 +1011,9 @@ Possible values:
|
||||
@table @samp
|
||||
@item tv
|
||||
@item mpeg
|
||||
@item limited
|
||||
MPEG (219*2^(n-8))
|
||||
@item pc
|
||||
@item jpeg
|
||||
@item full
|
||||
JPEG (2^n-1)
|
||||
@end table
|
||||
|
||||
|
||||
@@ -1,182 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
@documentencoding UTF-8
|
||||
|
||||
@settitle Community
|
||||
@titlepage
|
||||
@center @titlefont{Community}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@anchor{Organisation}
|
||||
@chapter Organisation
|
||||
|
||||
The FFmpeg project is organized through a community working on global consensus.
|
||||
|
||||
Decisions are taken by the ensemble of active members through voting, and are aided by two committees.
|
||||
|
||||
@anchor{General Assembly}
|
||||
@chapter General Assembly
|
||||
|
||||
The ensemble of active members is called the General Assembly (GA).
|
||||
|
||||
The General Assembly is sovereign and legitimate for all its decisions regarding the FFmpeg project.
|
||||
|
||||
The General Assembly is made up of active contributors.
|
||||
|
||||
Contributors are considered "active contributors" if they have authored more than 20 patches in the last 36 months in the main FFmpeg repository, or if they have been voted in by the GA.
|
||||
|
||||
The list of active contributors is updated twice each year, on 1st January and 1st July, 0:00 UTC.
|
||||
|
||||
Additional members are added to the General Assembly through a vote after proposal by a member of the General Assembly. They are part of the GA for two years, after which they need a confirmation by the GA.
|
||||
|
||||
A script to generate the current members of the general assembly (minus members voted in) can be found in `tools/general_assembly.pl`.
|
||||
|
||||
@anchor{Voting}
|
||||
@chapter Voting
|
||||
|
||||
Voting is done using a ranked voting system, currently running on https://vote.ffmpeg.org/ .
|
||||
|
||||
Majority vote means more than 50% of the expressed ballots.
|
||||
|
||||
@anchor{Technical Committee}
|
||||
@chapter Technical Committee
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project. They will consider the merits of all the positions, judge them and make a decision.
|
||||
|
||||
The TC resolves technical conflicts but is not a technical steering committee.
|
||||
|
||||
Decisions by the TC are binding for all the contributors.
|
||||
|
||||
Decisions made by the TC can be re-opened after 1 year or by a majority vote of the General Assembly, requested by one of the member of the GA.
|
||||
|
||||
The TC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the TC.
|
||||
|
||||
The members of the TC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The conflict resolution process is detailed in the resolution process document.
|
||||
|
||||
The TC can be contacted at <tc@@ffmpeg>.
|
||||
|
||||
@anchor{Resolution Process}
|
||||
@section Resolution Process
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when technical conflicts occur in the project.
|
||||
|
||||
The TC main role is to resolve technical conflicts. It is therefore not a technical steering committee, but it is understood that some decisions might impact the future of the project.
|
||||
|
||||
@subsection Seizing
|
||||
|
||||
The TC can take possession of any technical matter that it sees fit.
|
||||
|
||||
To involve the TC in a matter, email tc@ or CC them on an ongoing discussion.
|
||||
|
||||
As members of TC are developers, they also can email tc@ to raise an issue.
|
||||
@subsection Announcement
|
||||
|
||||
The TC, once seized, must announce itself on the main mailing list, with a [TC] tag.
|
||||
|
||||
The TC has 2 modes of operation: a RFC one and an internal one.
|
||||
|
||||
If the TC thinks it needs the input from the larger community, the TC can call for a RFC. Else, it can decide by itself.
|
||||
|
||||
The decision to use a RFC process or an internal discussion is a discretionary decision of the TC.
|
||||
|
||||
The TC can also reject a seizure for a few reasons such as: the matter was not discussed enough previously; it lacks expertise to reach a beneficial decision on the matter; or the matter is too trivial.
|
||||
@subsection RFC call
|
||||
|
||||
In the RFC mode, one person from the TC posts on the mailing list the technical question and will request input from the community.
|
||||
|
||||
The mail will have the following specification:
|
||||
|
||||
a precise title
|
||||
a specific tag [TC RFC]
|
||||
a top-level email
|
||||
contain a precise question that does not exceed 100 words and that is answerable by developers
|
||||
may have an extra description, or a link to a previous discussion, if deemed necessary,
|
||||
contain a precise end date for the answers.
|
||||
|
||||
The answers from the community must be on the main mailing list and must have the following specification:
|
||||
|
||||
keep the tag and the title unchanged
|
||||
limited to 400 words
|
||||
a first-level, answering directly to the main email
|
||||
answering to the question.
|
||||
|
||||
Further replies to answers are permitted, as long as they conform to the community standards of politeness, they are limited to 100 words, and are not nested more than once. (max-depth=2)
|
||||
|
||||
After the end-date, mails on the thread will be ignored.
|
||||
|
||||
Violations of those rules will be escalated through the Community Committee.
|
||||
|
||||
After all the emails are in, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay, that will be notified on the mailing list.
|
||||
@subsection Within TC
|
||||
|
||||
In the internal case, the TC has 96 hours to give its final decision. Exceptionally, the TC can request an extra delay.
|
||||
@subsection Decisions
|
||||
|
||||
The decisions from the TC will be sent on the mailing list, with the [TC] tag.
|
||||
|
||||
Internally, the TC should take decisions with a majority, or using ranked-choice voting.
|
||||
|
||||
Each TC member must vote on such decision according to what is, in their view, best for the project.
|
||||
|
||||
If a TC member feels they are affected by a conflict of interest with regards to the case, they should announce it and recuse themselves from the TC
|
||||
discussion and vote.
|
||||
|
||||
A conflict of interest is presumed to occur when a TC member has a personal interest (e.g. financial) in a specific outcome of the case.
|
||||
|
||||
The decision from the TC should be published with a summary of the reasons that led to this decision.
|
||||
|
||||
The decisions from the TC are final, until the matters are reopened after no less than one year.
|
||||
|
||||
@anchor{Community Committee}
|
||||
@chapter Community Committee
|
||||
|
||||
The Community Committee (CC) is here to arbitrate and make decisions when inter-personal conflicts occur in the project. It will decide quickly and take action, for the sake of the project.
|
||||
|
||||
The CC can remove privileges of offending members, including removal of commit access and temporary ban from the community.
|
||||
|
||||
Decisions made by the CC can be re-opened after 1 year or by a majority vote of the General Assembly. Indefinite bans from the community must be confirmed by the General Assembly, in a majority vote.
|
||||
|
||||
The CC is elected by the General Assembly for a duration of 1 year, and is composed of 5 members. Members can be re-elected if they wish. A majority vote in the General Assembly can trigger a new election of the CC.
|
||||
|
||||
The members of the CC can be elected from outside of the GA. Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The CC is governed by and responsible for enforcing the Code of Conduct.
|
||||
|
||||
The CC can be contacted at <cc@@ffmpeg>.
|
||||
|
||||
@anchor{Code of Conduct}
|
||||
@chapter Code of Conduct
|
||||
|
||||
Be friendly and respectful towards others and third parties.
|
||||
Treat others the way you yourself want to be treated.
|
||||
|
||||
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
|
||||
Different opinions and interpretations help the project.
|
||||
Looking at issues from a different perspective assists development.
|
||||
|
||||
Do not assume malice for things that can be attributed to incompetence. Even if
|
||||
it is malice, it's rarely good to start with that as initial assumption.
|
||||
|
||||
Stay friendly even if someone acts contrarily. Everyone has a bad day
|
||||
once in a while.
|
||||
If you yourself have a bad day or are angry then try to take a break and reply
|
||||
once you are calm and without anger if you have to.
|
||||
|
||||
Try to help other team members and cooperate if you can.
|
||||
|
||||
The goal of software development is to create technical excellence, not for any
|
||||
individual to be better and "win" against the others. Large software projects
|
||||
are only possible and successful through teamwork.
|
||||
|
||||
If someone struggles do not put them down. Give them a helping hand
|
||||
instead and point them in the right direction.
|
||||
|
||||
Finally, keep in mind the immortal words of Bill and Ted,
|
||||
"Be excellent to each other."
|
||||
|
||||
@bye
|
||||
@@ -38,51 +38,6 @@ Select an operating point of a scalable AV1 bitstream (0 - 31). Default is 0.
|
||||
|
||||
@end table
|
||||
|
||||
@section hevc
|
||||
HEVC (AKA ITU-T H.265 or ISO/IEC 23008-2) decoder.
|
||||
|
||||
The decoder supports MV-HEVC multiview streams with at most two views. Views to
|
||||
be output are selected by supplying a list of view IDs to the decoder (the
|
||||
@option{view_ids} option). This option may be set either statically before
|
||||
decoder init, or from the @code{get_format()} callback - useful for the case
|
||||
when the view count or IDs change dynamically during decoding.
|
||||
|
||||
Only the base layer is decoded by default.
|
||||
|
||||
Note that if you are using the @code{ffmpeg} CLI tool, you should be using view
|
||||
specifiers as documented in its manual, rather than the options documented here.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item view_ids (MV-HEVC)
|
||||
Specify a list of view IDs that should be output. This option can also be set to
|
||||
a single '-1', which will cause all views defined in the VPS to be decoded and
|
||||
output.
|
||||
|
||||
@item view_ids_available (MV-HEVC)
|
||||
This option may be read by the caller to retrieve an array of view IDs available
|
||||
in the active VPS. The array is empty for single-layer video.
|
||||
|
||||
The value of this option is guaranteed to be accurate when read from the
|
||||
@code{get_format()} callback. It may also be set at other times (e.g. after
|
||||
opening the decoder), but the value is informational only and may be incorrect
|
||||
(e.g. when the stream contains multiple distinct VPS NALUs).
|
||||
|
||||
@item view_pos_available (MV-HEVC)
|
||||
This option may be read by the caller to retrieve an array of view positions
|
||||
(left, right, or unspecified) available in the active VPS, as
|
||||
@code{AVStereo3DView} values. When the array is available, its elements apply to
|
||||
the corresponding elements of @option{view_ids_available}, i.e.
|
||||
@code{view_pos_available[i]} contains the position of view with ID
|
||||
@code{view_ids_available[i]}.
|
||||
|
||||
Same validity restrictions as for @option{view_ids_available} apply to
|
||||
this option.
|
||||
|
||||
@end table
|
||||
|
||||
@section rawvideo
|
||||
|
||||
Raw video decoder.
|
||||
@@ -121,23 +76,13 @@ The following options are supported by the libdav1d wrapper.
|
||||
|
||||
@item framethreads
|
||||
Set amount of frame threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
option @code{max_frame_delay} and the global option @code{threads} instead.
|
||||
|
||||
@item tilethreads
|
||||
Set amount of tile threads to use during decoding. The default value is 0 (autodetect).
|
||||
This option is deprecated for libdav1d >= 1.0 and will be removed in the future. Use the
|
||||
global option @code{threads} instead.
|
||||
|
||||
@item max_frame_delay
|
||||
Set max amount of frames the decoder may buffer internally. The default value is 0
|
||||
(autodetect).
|
||||
|
||||
@item filmgrain
|
||||
Apply film grain to the decoded video if present in the bitstream. Defaults to the
|
||||
internal default of the library.
|
||||
This option is deprecated and will be removed in the future. See the global option
|
||||
@code{export_side_data} to export Film Grain parameters instead of applying it.
|
||||
|
||||
@item oppoint
|
||||
Select an operating point of a scalable AV1 bitstream (0 - 31). Defaults to the
|
||||
@@ -175,87 +120,6 @@ Set amount of frame threads to use during decoding. The default value is 0 (auto
|
||||
|
||||
@end table
|
||||
|
||||
@section libxevd
|
||||
|
||||
eXtra-fast Essential Video Decoder (XEVD) MPEG-5 EVC decoder wrapper.
|
||||
|
||||
This decoder requires the presence of the libxevd headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@option{--enable-libxevd}.
|
||||
|
||||
The xevd project website is at @url{https://github.com/mpeg5/xevd}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the libxevd wrapper.
|
||||
The xevd-equivalent options or values are listed in parentheses for easy migration.
|
||||
|
||||
To get a more accurate and extensive documentation of the libxevd options,
|
||||
invoke the command @code{xevd_app --help} or consult the libxevd documentation.
|
||||
|
||||
@table @option
|
||||
@item threads (@emph{threads})
|
||||
Force the use of a specific number of threads
|
||||
|
||||
@end table
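A minimal sketch (assuming a raw MPEG-5 EVC elementary stream as input; file
names and the output codec are illustrative):

@example
ffmpeg -c:v libxevd -i INPUT.evc -c:v libx264 OUTPUT.mp4
@end example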
|
||||
|
||||
@section QSV Decoders
|
||||
|
||||
The family of Intel QuickSync Video decoders (VC1, MPEG-2, H.264, HEVC,
|
||||
JPEG/MJPEG, VP8, VP9, AV1, VVC).
|
||||
|
||||
@subsection Common Options
|
||||
|
||||
The following options are supported by all qsv decoders.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{async_depth}
|
||||
Internal parallelization depth; the higher the value, the higher the latency.
|
||||
|
||||
@item @var{gpu_copy}
|
||||
A GPU-accelerated copy between video and system memory
|
||||
@table @samp
|
||||
@item default
|
||||
@item on
|
||||
@item off
|
||||
@end table
|
||||
|
||||
@end table
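A hedged example of how the common options might be set when decoding with a
QSV decoder (the decoder name, option values and null output are illustrative):

@example
ffmpeg -hwaccel qsv -c:v h264_qsv -async_depth 2 -gpu_copy on -i INPUT -f null -
@end example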
|
||||
|
||||
@subsection HEVC Options
|
||||
Extra options for hevc_qsv.
|
||||
|
||||
@table @option
|
||||
|
||||
@item @var{load_plugin}
|
||||
A user plugin to load in an internal session
|
||||
@table @samp
|
||||
@item none
|
||||
@item hevc_sw
|
||||
@item hevc_hw
|
||||
@end table
|
||||
|
||||
@item @var{load_plugins}
|
||||
A :-separated list of hexadecimal plugin UIDs to load in an internal session
|
||||
|
||||
@end table
|
||||
|
||||
@section v210
|
||||
|
||||
Uncompressed 4:2:2 10-bit decoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item custom_stride
|
||||
Set the line size of the v210 data in bytes. The default value is 0
|
||||
(autodetect). You can use the special -1 value for a strideless v210 as seen in
|
||||
BOXX files.
|
||||
|
||||
@end table
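A minimal sketch (forcing the v210 decoder on the input and overriding the
stride autodetection; option values and file names are illustrative):

@example
ffmpeg -c:v v210 -custom_stride -1 -i INPUT.avi OUTPUT.mov
@end example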
|
||||
|
||||
@c man end VIDEO DECODERS
|
||||
|
||||
@chapter Audio Decoders
|
||||
@@ -395,7 +259,7 @@ without this library.
|
||||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTITLES DECODERS
|
||||
@c man begin SUBTILES DECODERS
|
||||
|
||||
@section libaribb24
|
||||
|
||||
@@ -422,169 +286,6 @@ Enabled by default.
|
||||
|
||||
@end table
|
||||
|
||||
@section libaribcaption
|
||||
|
||||
Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption}
|
||||
library.
|
||||
|
||||
Implements profiles A and C of the Japanese ARIB STD-B24 standard,
|
||||
Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T.
|
||||
|
||||
Requires the presence of the libaribcaption headers and library
|
||||
(@url{https://github.com/xqq/libaribcaption}) during configuration.
|
||||
You need to explicitly configure the build with @code{--enable-libaribcaption}.
|
||||
If both @dfn{libaribb24} and @dfn{libaribcaption} are enabled, the
@dfn{libaribcaption} decoder takes precedence.
|
||||
|
||||
@subsection libaribcaption Decoder Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -sub_type @var{subtitle_type}
|
||||
Specifies the format of the decoded subtitles.
|
||||
|
||||
@table @samp
|
||||
@item bitmap
|
||||
Graphical image.
|
||||
@item ass
|
||||
ASS formatted text.
|
||||
@item text
|
||||
Simple text based output without formatting.
|
||||
@end table
|
||||
|
||||
The default is @dfn{ass}, the same as the @dfn{libaribb24} decoder.
Some players (e.g., @dfn{mpv}) expect the ASS format for ARIB captions.
|
||||
|
||||
@item -caption_encoding @var{encoding_scheme}
|
||||
Specifies the encoding scheme of input subtitle text.
|
||||
|
||||
@table @samp
|
||||
@item auto
|
||||
Automatically detect text encoding (default).
|
||||
@item jis
|
||||
8bit-char JIS encoding defined in ARIB STD B24.
|
||||
This encoding is used in Japan for ISDB captions.
|
||||
@item utf8
|
||||
UTF-8 encoding defined in ARIB STD B24.
|
||||
This encoding is used in Philippines for ISDB-T captions.
|
||||
@item latin
|
||||
Latin character encoding defined in ABNT NBR 15606-1.
|
||||
This encoding is used in South America for SBTVD / ISDB-Tb captions.
|
||||
@end table
|
||||
|
||||
@item -font @var{font_name[,font_name2,...]}
|
||||
Specify a comma-separated list of font family names to be used for @dfn{bitmap}
or @dfn{ass} type subtitle rendering.
Only the first font name is used for @dfn{ass} type subtitles.

If not specified, an internally defined default font family is used.
|
||||
|
||||
@item -ass_single_rect @var{boolean}
|
||||
ARIB STD-B24 specifies that some captions may be displayed at different
|
||||
positions at a time (multi-rectangle subtitle).
|
||||
Since some players (e.g., old @dfn{mpv}) can't handle multiple ASS rectangles
|
||||
in a single AVSubtitle, or multiple ASS rectangles of indeterminate duration
|
||||
with the same start timestamp, this option can change the behavior so that
|
||||
all the texts are displayed in a single ASS rectangle.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
If your player cannot handle AVSubtitles with multiple ASS rectangles properly,
|
||||
set this option to @var{true} or define @env{ASS_SINGLE_RECT=1} to change
|
||||
default behavior at compilation.
|
||||
|
||||
@item -force_outline_text @var{boolean}
|
||||
Specify whether to always render outline text for all characters, regardless of
the character style indication.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -outline_width @var{number} (0.0 - 3.0)
|
||||
Specify width for outline text, in dots (relative).
|
||||
|
||||
The default is @var{1.5}.
|
||||
|
||||
@item -ignore_background @var{boolean}
|
||||
Specify whether to ignore background color rendering.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -ignore_ruby @var{boolean}
|
||||
Specify whether to ignore rendering for ruby-like (furigana) characters.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
@item -replace_drcs @var{boolean}
|
||||
Specify whether to render replaced DRCS characters as Unicode characters.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_ascii @var{boolean}
|
||||
Specify whether to replace MSZ (Middle Size; half width) fullwidth
|
||||
alphanumerics with halfwidth alphanumerics.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_japanese @var{boolean}
|
||||
Specify whether to replace some MSZ (Middle Size; half width) fullwidth
|
||||
japanese special characters with halfwidth ones.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -replace_msz_glyph @var{boolean}
|
||||
Specify whether to replace MSZ (Middle Size; half width) characters
with halfwidth glyphs if the font supports it.
|
||||
This option works under FreeType or DirectWrite renderer
|
||||
with Adobe-Japan1 compliant fonts.
|
||||
e.g., IBM Plex Sans JP, Morisawa BIZ UDGothic, Morisawa BIZ UDMincho,
|
||||
Yu Gothic, Yu Mincho, and Meiryo.
|
||||
|
||||
The default is @var{true}.
|
||||
|
||||
@item -canvas_size @var{image_size}
|
||||
Specify the resolution of the canvas to render subtitles to; usually, this
should be the frame size of the input video.
This only applies when @code{-sub_type} is set to @var{bitmap}.
|
||||
|
||||
The libaribcaption decoder assumes the following input frame sizes for bitmap rendering:
|
||||
@enumerate
|
||||
@item
|
||||
PROFILE_A : 1440 x 1080 with SAR (PAR) 4:3
|
||||
@item
|
||||
PROFILE_C : 320 x 180 with SAR (PAR) 1:1
|
||||
@end enumerate
|
||||
|
||||
If the actual frame size of the input video does not match the above assumption,
the rendered captions may be distorted.
To render the captions undistorted, add the @code{-canvas_size} option to specify
the actual input video size.

Note that the @code{-canvas_size} option is not required for video with a
different size but the same aspect ratio.
In such cases, the captions will be stretched or shrunk to the actual video size
if the @code{-canvas_size} option is not specified.
If the @code{-canvas_size} option is specified with a different size,
the captions will be stretched or shrunk to the specified size using the calculated SAR.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection libaribcaption decoder usage examples
|
||||
|
||||
Display MPEG-TS file with ARIB subtitle by @code{ffplay} tool:
|
||||
@example
|
||||
ffplay -sub_type bitmap MPEG.TS
|
||||
@end example
|
||||
|
||||
Display MPEG-TS file with input frame size 1920x1080 by @code{ffplay} tool:
|
||||
@example
|
||||
ffplay -sub_type bitmap -canvas_size 1920x1080 MPEG.TS
|
||||
@end example
|
||||
|
||||
Embed ARIB subtitle in transcoded video:
|
||||
@example
|
||||
ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h264 dest.mp4
|
||||
@end example
|
||||
|
||||
@section dvbsub
|
||||
|
||||
@subsection Options
|
||||
@@ -592,8 +293,6 @@ ffmpeg -sub_type bitmap -i src.m2t -filter_complex "[0:v][0:s]overlay" -vcodec h
|
||||
@table @option
|
||||
@item compute_clut
|
||||
@table @option
|
||||
@item -2
|
||||
Compute clut once if no matching CLUT is in the stream.
|
||||
@item -1
|
||||
Compute clut if no matching CLUT is in the stream.
|
||||
@item 0
|
||||
@@ -696,4 +395,4 @@ box and an end box, typically subtitles. Default value is 0 if
|
||||
|
||||
@end table
|
||||
|
||||
@c man end SUBTITLES DECODERS
|
||||
@c man end SUBTILES DECODERS
|
||||
|
||||
@@ -25,13 +25,6 @@ Audible Format 2, 3, and 4 demuxer.
|
||||
|
||||
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
|
||||
|
||||
@section aac
|
||||
|
||||
Raw Audio Data Transport Stream AAC demuxer.
|
||||
|
||||
This demuxer is used to demux an ADTS input containing a single AAC stream
along with any ID3v1/2 or APE tags in it.
|
||||
|
||||
@section apng
|
||||
|
||||
Animated Portable Network Graphics demuxer.
|
||||
@@ -44,15 +37,12 @@ between the last fcTL and IEND chunks.
|
||||
|
||||
@table @option
|
||||
@item -ignore_loop @var{bool}
|
||||
Ignore the loop variable in the file if set. Default is enabled.
|
||||
|
||||
Ignore the loop variable in the file if set.
|
||||
@item -max_fps @var{int}
|
||||
Maximum framerate in frames per second. Default of 0 imposes no limit.
|
||||
|
||||
Maximum framerate in frames per second (0 for no limit).
|
||||
@item -default_fps @var{int}
|
||||
Default framerate in frames per second when none is specified in the file
|
||||
(0 meaning as fast as possible). Default is 15.
|
||||
|
||||
(0 meaning as fast as possible).
|
||||
@end table
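A brief illustrative example (the option values are arbitrary), capping the
demuxed frame rate and transcoding to H.264:

@example
ffmpeg -max_fps 30 -default_fps 10 -i input.apng -c:v libx264 output.mp4
@end example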
|
||||
|
||||
@section asf
|
||||
@@ -103,7 +93,8 @@ backslash or single quotes.
|
||||
All subsequent file-related directives apply to that file.
|
||||
|
||||
@item @code{ffconcat version 1.0}
|
||||
Identify the script type and version.
|
||||
Identify the script type and version. It also sets the @option{safe} option
|
||||
to 1 if it was -1.
|
||||
|
||||
To make FFmpeg recognize the format automatically, this directive must
|
||||
appear exactly as is (no extra space or byte-order-mark) on the very first
|
||||
@@ -157,16 +148,6 @@ directive) will be reduced based on their specified Out point.
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
This directive is deprecated, use @code{file_packet_meta} instead.
|
||||
|
||||
@item @code{file_packet_meta @var{key} @var{value}}
|
||||
Metadata of the packets of the file. The specified metadata will be set for
|
||||
each file packet. You can specify this directive multiple times to add multiple
|
||||
metadata entries.
|
||||
|
||||
@item @code{option @var{key} @var{value}}
|
||||
Option to access, open and probe the file.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream}
|
||||
Introduce a stream in the virtual file.
|
||||
@@ -184,20 +165,6 @@ subfiles will be used.
|
||||
This is especially useful for MPEG-PS (VOB) files, where the order of the
|
||||
streams is not reliable.
|
||||
|
||||
@item @code{stream_meta @var{key} @var{value}}
|
||||
Metadata for the stream.
|
||||
Can be present multiple times.
|
||||
|
||||
@item @code{stream_codec @var{value}}
|
||||
Codec for the stream.
|
||||
|
||||
@item @code{stream_extradata @var{hex_string}}
|
||||
Extradata for the stream, encoded in hexadecimal.
|
||||
|
||||
@item @code{chapter @var{id} @var{start} @var{end}}
|
||||
Add a chapter. @var{id} is an unique identifier, possibly small and
|
||||
consecutive.
|
||||
|
||||
@end table
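Putting a few of these directives together, a minimal illustrative script
(file names and metadata values are placeholders) could look like:

@example
ffconcat version 1.0
file intro.ts
file_packet_meta source intro
file main.ts
file_packet_meta source main
@end example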
|
||||
|
||||
@subsection Options
|
||||
@@ -207,8 +174,7 @@ This demuxer accepts the following option:
|
||||
@table @option
|
||||
|
||||
@item safe
|
||||
If set to 1, reject unsafe file paths and directives.
|
||||
A file path is considered safe if it
|
||||
If set to 1, reject unsafe file paths. A file path is considered safe if it
|
||||
does not contain a protocol specification and is relative and all components
|
||||
only contain characters from the portable character set (letters, digits,
|
||||
period, underscore and hyphen) and have no period at the beginning of a
|
||||
@@ -218,6 +184,9 @@ If set to 0, any file name is accepted.
|
||||
|
||||
The default is 1.
|
||||
|
||||
-1 is equivalent to 1 if the format was automatically
|
||||
probed and 0 otherwise.
|
||||
|
||||
@item auto_convert
|
||||
If set to 1, try to perform automatic conversions on packet data to make the
|
||||
streams concatenable.
|
||||
@@ -274,214 +243,11 @@ which streams to actually receive.
|
||||
Each stream mirrors the @code{id} and @code{bandwidth} properties from the
|
||||
@code{<Representation>} as metadata keys named "id" and "variant_bitrate" respectively.
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following option:
|
||||
|
||||
@table @option
|
||||
|
||||
@item cenc_decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@end table
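A hedged example (the key and the manifest URL are placeholders):

@example
ffmpeg -cenc_decryption_key 00112233445566778899aabbccddeeff -i https://example.com/manifest.mpd -c copy output.mp4
@end example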
|
||||
|
||||
@section dvdvideo
|
||||
|
||||
DVD-Video demuxer, powered by libdvdnav and libdvdread.
|
||||
|
||||
Can directly ingest DVD titles, specifically sequential PGCs, into
|
||||
a conversion pipeline. Menu assets, such as background video or audio,
|
||||
can also be demuxed given the menu's coordinates (at best effort).
|
||||
|
||||
Block devices (DVD drives), ISO files, and directory structures are accepted.
|
||||
Activate with @code{-f dvdvideo} in front of one of these inputs.
|
||||
|
||||
This demuxer does NOT have decryption code of any kind. You are on your own
|
||||
working with encrypted DVDs, and should not expect support on the matter.
|
||||
|
||||
Underlying playback is handled by libdvdnav, and structure parsing by libdvdread.
|
||||
FFmpeg must be built with GPL library support available as well as the
|
||||
configure switches @code{--enable-libdvdnav} and @code{--enable-libdvdread}.
|
||||
|
||||
You will need to provide either the desired "title number" or exact PGC/PG coordinates.
|
||||
Many open-source DVD players and tools can aid in providing this information.
|
||||
If not specified, the demuxer will default to title 1 which works for many discs.
|
||||
However, due to the flexibility of the format, it is recommended to check manually.
|
||||
There are many discs that are authored strangely or with invalid headers.
|
||||
|
||||
If the input is a real DVD drive, please note that there are some drives which may
|
||||
silently fail on reading bad sectors from the disc, returning random bits instead
|
||||
which is effectively corrupt data. This is especially prominent on aging or rotting discs.
|
||||
A second pass and integrity checks would be needed to detect the corruption.
|
||||
This is not an FFmpeg issue.
|
||||
|
||||
@subsection Background
|
||||
|
||||
DVD-Video is not a directly accessible, linear container format in the
|
||||
traditional sense. Instead, it allows for complex and programmatic playback of
|
||||
carefully muxed MPEG-PS streams that are stored in headerless VOB files.
|
||||
To the end-user, these streams are known simply as "titles", but the actual
|
||||
logical playback sequence is defined by one or more "PGCs", or Program Group Chains,
|
||||
within the title. The PGC is in turn composed of multiple "PGs", or "Programs",
|
||||
which are the actual video segments (and for a typical video feature, sequentially
|
||||
ordered). The PGC structure, along with stream layout and metadata, are stored in
|
||||
IFO files that need to be parsed. PGCs can be thought of as playlists in easier terms.
|
||||
|
||||
An actual DVD player relies on user GUI interaction via menus and an internal VM
|
||||
to drive the direction of demuxing. Generally, the user would either navigate (via menus)
|
||||
or automatically be redirected to the PGC of their choice. During this process and
|
||||
the subsequent playback, the DVD player's internal VM also maintains a state and
|
||||
executes instructions that can create jumps to different sectors during playback.
|
||||
This is why libdvdnav is involved, as a linear read of the MPEG-PS blobs on the
|
||||
disc (VOBs) is not enough to produce the right sequence in many cases.
|
||||
|
||||
There are many other DVD structures (a long subject) that will not be discussed here.
|
||||
NAV packets, in particular, are handled by this demuxer to build accurate timing
|
||||
but not emitted as a stream. For a good high-level understanding, refer to:
|
||||
@url{https://code.videolan.org/videolan/libdvdnav/-/blob/master/doc/dvd_structures}
|
||||
|
||||
@subsection Options
|
||||
|
||||
This demuxer accepts the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item title @var{int}
|
||||
The title number to play. Must be set if @option{pgc} and @option{pg} are not set.
|
||||
Not applicable to menus.
|
||||
Default is 0 (auto), which currently only selects the first available title (title 1)
|
||||
and notifies the user about the implications.
|
||||
|
||||
@item chapter_start @var{int}
|
||||
The chapter, or PTT (part-of-title), number to start at. Not applicable to menus.
|
||||
Default is 1.
|
||||
|
||||
@item chapter_end @var{int}
|
||||
The chapter, or PTT (part-of-title), number to end at. Not applicable to menus.
|
||||
Default is 0, which is a special value to signal end at the last possible chapter.
|
||||
|
||||
@item angle @var{int}
|
||||
The video angle number, referring to what is essentially an additional
|
||||
video stream that is composed from alternate frames interleaved in the VOBs.
|
||||
Not applicable to menus.
|
||||
Default is 1.
|
||||
|
||||
@item region @var{int}
|
||||
The region code to use for playback. Some discs may use this to select a default
playback angle for different regions. This option will not affect the region code
|
||||
of a real DVD drive, if used as an input. Not applicable to menus.
|
||||
Default is 0, "world".
|
||||
|
||||
@item menu @var{bool}
|
||||
Demux menu assets instead of navigating a title. Requires exact coordinates
|
||||
of the menu (@option{menu_lu}, @option{menu_vts}, @option{pgc}, @option{pg}).
|
||||
Default is false.
|
||||
|
||||
@item menu_lu @var{int}
|
||||
The menu language to demux. In DVD, menus are grouped by language.
|
||||
Default is 1, the first language unit.
|
||||
|
||||
@item menu_vts @var{int}
|
||||
The VTS where the menu lives, or 0 if it is a VMG menu (root-level).
|
||||
Default is 1, menu of the first VTS.
|
||||
|
||||
@item pgc @var{int}
|
||||
The entry PGC to start playback, in conjunction with @option{pg}.
|
||||
Alternative to setting @option{title}.
|
||||
Chapter markers are not supported at this time.
|
||||
Must be explicitly set for menus.
|
||||
Default is 0, automatically resolve from value of @option{title}.
|
||||
|
||||
@item pg @var{int}
|
||||
The entry PG to start playback, in conjunction with @option{pgc}.
|
||||
Alternative to setting @option{title}.
|
||||
Chapter markers are not supported at this time.
|
||||
Default is 1, the first PG of the PGC.
|
||||
|
||||
@item preindex @var{bool}
|
||||
Enable this to have accurate chapter (PTT) markers and duration measurement,
|
||||
which requires a slow second pass read in order to index the chapter marker
|
||||
timestamps from NAV packets. This is non-ideal extra work for real optical drives.
|
||||
It is recommended and faster to use this option with a backup of the DVD structure
|
||||
stored on a hard drive. Not compatible with @option{pgc} and @option{pg}.
|
||||
Default is 0, false.
|
||||
|
||||
@item trim @var{bool}
|
||||
Skip padding cells (i.e. cells shorter than 1 second) from the beginning.
|
||||
Many discs have filler segments at the beginning of the PGC,
often containing junk data intended to control a real DVD player's
buffering speed and carrying no other meaningful content.
|
||||
Not applicable to menus.
|
||||
Default is 1, true.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Open title 3 from a given DVD structure:
|
||||
@example
|
||||
ffmpeg -f dvdvideo -title 3 -i <path to DVD> ...
|
||||
@end example
|
||||
|
||||
@item
|
||||
Open chapters 3-6 from title 1 from a given DVD structure:
|
||||
@example
|
||||
ffmpeg -f dvdvideo -chapter_start 3 -chapter_end 6 -title 1 -i <path to DVD> ...
|
||||
@end example
|
||||
|
||||
@item
|
||||
Open only chapter 5 from title 1 from a given DVD structure:
|
||||
@example
|
||||
ffmpeg -f dvdvideo -chapter_start 5 -chapter_end 5 -title 1 -i <path to DVD> ...
|
||||
@end example
|
||||
|
||||
@item
|
||||
Demux menu with language 1 from VTS 1, PGC 1, starting at PG 1:
|
||||
@example
|
||||
ffmpeg -f dvdvideo -menu 1 -menu_lu 1 -menu_vts 1 -pgc 1 -pg 1 -i <path to DVD> ...
|
||||
@end example
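
@item
Use the slower second-pass preindex to get accurate chapter markers and duration,
recommended with a DVD structure backed up to a hard drive (an additional
illustrative example based on the @option{preindex} option described above):
@example
ffmpeg -f dvdvideo -preindex 1 -title 1 -i <path to DVD backup> ...
@end example

@item
Open a title by explicit PGC/PG coordinates instead of a title number
(the coordinates here are illustrative; obtain the real ones with a DVD tool):
@example
ffmpeg -f dvdvideo -pgc 1 -pg 1 -i <path to DVD> ...
@end example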
|
||||
@end itemize
|
||||
|
||||
@section ea
|
||||
|
||||
Electronic Arts Multimedia format demuxer.
|
||||
|
||||
This format is used by various Electronic Arts games.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item merge_alpha @var{bool}
|
||||
|
||||
Normally the VP6 alpha channel (if present) is returned as a secondary video
stream. By setting this option you can make the demuxer return a single video
stream which contains the alpha channel in addition to the ordinary video.
|
||||
|
||||
@end table
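
For example, to get a single video stream with the alpha channel merged in
(the input file name is just a placeholder):
@example
ffmpeg -f ea -merge_alpha 1 -i input.vp6 ...
@end example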
|
||||
|
||||
@section imf
|
||||
|
||||
Interoperable Master Format demuxer.
|
||||
|
||||
This demuxer presents audio and video streams found in an IMF Composition, as
|
||||
specified in @url{https://doi.org/10.5594/SMPTE.ST2067-2.2020, SMPTE ST 2067-2}.
|
||||
|
||||
@example
|
||||
ffmpeg [-assetmaps <path of ASSETMAP1>,<path of ASSETMAP2>,...] -i <path of CPL> ...
|
||||
@end example
|
||||
|
||||
If @code{-assetmaps} is not specified, the demuxer looks for a file called
|
||||
@file{ASSETMAP.xml} in the same directory as the CPL.
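
For example, with an explicit asset map (the paths are placeholders):
@example
ffmpeg -assetmaps ASSETMAP.xml -i CPL.xml ...
@end example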
|
||||
|
||||
@section flv, live_flv, kux
|
||||
|
||||
Adobe Flash Video Format demuxer.
|
||||
|
||||
This demuxer is used to demux FLV files and RTMP network streams. For live network streams, if you force the format, you may use the live_flv demuxer instead of flv to survive timestamp discontinuities.
KUX is an FLV variant used on the Youku platform.
|
||||
|
||||
@example
|
||||
ffmpeg -f flv -i myfile.flv ...
|
||||
@@ -558,19 +324,9 @@ It accepts the following options:
|
||||
@item live_start_index
|
||||
segment index to start live streams at (negative values are from the end).
|
||||
|
||||
@item prefer_x_start
|
||||
prefer to use #EXT-X-START if it's in playlist instead of live_start_index.
|
||||
|
||||
@item allowed_extensions
|
||||
',' separated list of file extensions that hls is allowed to access.
|
||||
|
||||
@item extension_picky
|
||||
This blocks disallowed extensions from probing.
It also requires all available segments to have extensions matching the format,
except mpegts, which is always allowed.
It is recommended to set the whitelists correctly instead of depending on extensions.
Enabled by default.
|
||||
|
||||
@item max_reload
|
||||
Maximum number of times an insufficient playlist is reloaded.
|
||||
Default value is 1000.
|
||||
@@ -590,13 +346,6 @@ Enabled by default for HTTP/1.1 servers.
|
||||
@item http_seekable
|
||||
Use HTTP partial requests for downloading HTTP segments.
|
||||
0 = disable, 1 = enable, -1 = auto, Default is auto.
|
||||
|
||||
@item seg_format_options
|
||||
Set options for the demuxer of media segments using a list of key=value pairs separated by @code{:}.
|
||||
|
||||
@item seg_max_retry
|
||||
Maximum number of times to reload a segment on error, useful when segment skip on network error is not desired.
|
||||
Default value is 0.
|
||||
@end table
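
For example, to start a live stream three segments from the end and retry
failed segments twice (the URL is a placeholder):
@example
ffmpeg -live_start_index -3 -seg_max_retry 2 -i https://example.com/live/playlist.m3u8 -c copy out.ts
@end example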
|
||||
|
||||
@section image2
|
||||
@@ -855,32 +604,6 @@ Set the sample rate for libopenmpt to output.
|
||||
Range is from 1000 to INT_MAX. The value default is 48000.
|
||||
@end table
|
||||
|
||||
@anchor{mccdec}
|
||||
@section mcc
|
||||
|
||||
Demuxer for MacCaption MCC files. It supports MCC versions 1.0 and 2.0.
|
||||
MCC files store VANC data, which can include closed captions (EIA-608 and CEA-708), ancillary time code, pan-scan data, etc.
|
||||
By default, for backward compatibility, the MCC demuxer extracts just the EIA-608 and CEA-708 closed captions and returns an @code{EIA_608} stream, ignoring all other VANC data.
You can change it to return all VANC data in a @code{SMPTE_436M_ANC} data stream by setting @option{-eia608_extract 0}.
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Convert an MCC file to Scenarist (SCC) format:
|
||||
@example
|
||||
ffmpeg -i CC.mcc -c:s copy CC.scc
|
||||
@end example
|
||||
Note that the SCC format only supports EIA-608, so this will discard all other data such as CEA-708 extensions.
|
||||
|
||||
@item
|
||||
Merge an MCC file into an MXF file:
|
||||
@example
|
||||
ffmpeg -i video_and_audio.mxf -eia608_extract 0 -i CC.mcc -c copy -map 0 -map 1 out.mxf
|
||||
@end example
|
||||
This retains all VANC data and inserts it into the output MXF file as a @code{SMPTE_436M_ANC} data stream.
|
||||
@end itemize
|
||||
|
||||
@section mov/mp4/3gp
|
||||
|
||||
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
|
||||
@@ -938,12 +661,6 @@ Set mfra timestamps as PTS
|
||||
Don't use mfra box to set timestamps
|
||||
@end table
|
||||
|
||||
@item use_tfdt
|
||||
For fragmented input, set fragment's starting timestamp to @code{baseMediaDecodeTime} from the @code{tfdt} box.
|
||||
Default is enabled, which will prefer to use the @code{tfdt} box to set DTS. Disable to use the @code{earliest_presentation_time} from the @code{sidx} box.
|
||||
In either case, the timestamp from the @code{mfra} box will be used if it's available and @code{use_mfra_for} is
|
||||
set to pts or dts.
|
||||
|
||||
@item export_all
|
||||
Export unrecognized boxes within the @var{udta} box as metadata entries. The first four
|
||||
characters of the box type are set as the key. Default is false.
|
||||
@@ -962,22 +679,6 @@ specify.
|
||||
|
||||
@item decryption_key
|
||||
16-byte key, in hex, to decrypt files encrypted using ISO Common Encryption (CENC/AES-128 CTR; ISO/IEC 23001-7).
|
||||
|
||||
@item max_stts_delta
|
||||
Very high sample deltas written in a trak's stts box may occasionally be intended, but usually they are written in
error or used to store a negative value for dts correction when treated as signed 32-bit integers. This option lets
the user set an upper limit, beyond which the delta is clamped to 1. Values greater than the limit that are negative when
cast to int32 are used to adjust onward dts.
|
||||
|
||||
Unit is the track time scale. Range is 0 to UINT_MAX. Default is @code{UINT_MAX - 48000*10} which allows up to
|
||||
a 10 second dts correction for 48 kHz audio streams while accommodating 99.9% of @code{uint32} range.
|
||||
|
||||
@item interleaved_read
|
||||
Interleave packets from multiple tracks at demuxer level. For badly interleaved files, this prevents playback issues
|
||||
caused by large gaps between packets in different tracks, as MOV/MP4 do not have packet placement requirements.
|
||||
However, this can cause excessive seeking on very badly interleaved files, due to seeking between tracks, so disabling
|
||||
it may prevent I/O issues, at the expense of playback.
|
||||
|
||||
@end table
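
For example, to decrypt a CENC-encrypted file with a known key
(the key and file names below are placeholders):
@example
ffmpeg -decryption_key 00112233445566778899aabbccddeeff -i encrypted.mp4 -c copy decrypted.mp4
@end example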
|
||||
|
||||
@subsection Audible AAX
|
||||
@@ -1016,12 +717,8 @@ to 1 (-1 means automatic setting, 1 means enabled, 0 means
|
||||
disabled). Default value is -1.
|
||||
|
||||
@item merge_pmt_versions
|
||||
Reuse existing streams when a PMT's version is updated and elementary
|
||||
streams move to different PIDs. Default value is 0.
|
||||
|
||||
@item max_packet_size
|
||||
Set maximum size, in bytes, of packet emitted by the demuxer. Payloads above this size
|
||||
are split across multiple packets. Range is 1 to INT_MAX/2. Default is 204800 bytes.
|
||||
@end table
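
For example, to raise the packet size limit when a transport stream carries
unusually large payloads (the file name is a placeholder):
@example
ffmpeg -max_packet_size 409600 -i input.ts ...
@end example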
|
||||
|
||||
@section mpjpeg
|
||||
@@ -1068,36 +765,6 @@ the command:
|
||||
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
|
||||
@end example
|
||||
|
||||
@anchor{rcwtdec}
|
||||
@section rcwt
|
||||
|
||||
RCWT (Raw Captions With Time) is a format native to ccextractor, a commonly
|
||||
used open source tool for processing 608/708 Closed Captions (CC) sources.
|
||||
For more information on the format, see @ref{rcwtenc,,,ffmpeg-formats}.
|
||||
|
||||
This demuxer implements the specification as of March 2024, which has
|
||||
been stable and unchanged since April 2014.
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Render CC to ASS using the built-in decoder:
|
||||
@example
|
||||
ffmpeg -i CC.rcwt.bin CC.ass
|
||||
@end example
|
||||
Note that if your output appears to be empty, you may have to manually
|
||||
set the decoder's @option{data_field} option to pick the desired CC substream.
|
||||
|
||||
@item
|
||||
Convert an RCWT backup to Scenarist (SCC) format:
|
||||
@example
|
||||
ffmpeg -i CC.rcwt.bin -c:s copy CC.scc
|
||||
@end example
|
||||
Note that the SCC format does not support all of the possible CC extensions
|
||||
that can be stored in RCWT (such as EIA-708).
|
||||
@end itemize
|
||||
|
||||
@section sbg
|
||||
|
||||
SBaGen script demuxer.
|
||||
@@ -1165,27 +832,4 @@ which in turn, acts as a ceiling for the size of scripts that can be read.
|
||||
Default is 1 MiB.
|
||||
@end table
|
||||
|
||||
@section w64
|
||||
|
||||
Sony Wave64 Audio demuxer.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item max_size
|
||||
See the same option for the @ref{wav} demuxer.
|
||||
@end table
|
||||
|
||||
@anchor{wav}
|
||||
@section wav
|
||||
|
||||
RIFF Wave Audio demuxer.
|
||||
|
||||
This demuxer accepts the following options:
|
||||
@table @option
|
||||
@item max_size
|
||||
Specify the maximum packet size in bytes for the demuxed packets. By default
|
||||
this is set to 0, which means that a sensible value is chosen based on the
|
||||
input format.
|
||||
@end table
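
For example, to cap demuxed packets at 64 KiB (the file name is a placeholder):
@example
ffmpeg -max_size 65536 -i input.wav ...
@end example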
|
||||
|
||||
@c man end DEMUXERS
|
||||
|
||||
doc/dev_community/community.md (new file, 79 lines)
@@ -0,0 +1,79 @@
|
||||
# FFmpeg project
|
||||
|
||||
## Organisation
|
||||
|
||||
The FFmpeg project is organized through a community working on global consensus.
|
||||
|
||||
Decisions are taken by the ensemble of active members through voting, and
are aided by two committees.
|
||||
|
||||
## General Assembly
|
||||
|
||||
The ensemble of active members is called the General Assembly (GA).
|
||||
|
||||
The General Assembly is sovereign and legitimate for all its decisions
|
||||
regarding the FFmpeg project.
|
||||
|
||||
The General Assembly is made up of active contributors.
|
||||
|
||||
Contributors are considered "active contributors" if they have pushed more
|
||||
than 20 patches in the last 36 months in the main FFmpeg repository, or
|
||||
if they have been voted in by the GA.
|
||||
|
||||
Additional members are added to the General Assembly through a vote after
|
||||
proposal by a member of the General Assembly.
|
||||
They are part of the GA for two years, after which they need a confirmation by
|
||||
the GA.
|
||||
|
||||
## Voting
|
||||
|
||||
Voting is done using a ranked voting system, currently running on https://vote.ffmpeg.org/ .
|
||||
|
||||
Majority vote means more than 50% of the expressed ballots.
|
||||
|
||||
## Technical Committee
|
||||
|
||||
The Technical Committee (TC) is here to arbitrate and make decisions when
|
||||
technical conflicts occur in the project.
|
||||
They will consider the merits of all the positions, judge them and make a
|
||||
decision.
|
||||
|
||||
The TC resolves technical conflicts but is not a technical steering committee.
|
||||
|
||||
Decisions by the TC are binding for all the contributors.
|
||||
|
||||
Decisions made by the TC can be re-opened after 1 year or by a majority vote
|
||||
of the General Assembly, requested by one of the members of the GA.
|
||||
|
||||
The TC is elected by the General Assembly for a duration of 1 year, and
|
||||
is composed of 5 members.
|
||||
Members can be re-elected if they wish. A majority vote in the General Assembly
|
||||
can trigger a new election of the TC.
|
||||
|
||||
The members of the TC can be elected from outside of the GA.
|
||||
Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The conflict resolution process is detailed in the [resolution process](resolution_process.md) document.
|
||||
|
||||
## Community committee
|
||||
|
||||
The Community Committee (CC) is here to arbitrate and make decisions when
|
||||
inter-personal conflicts occur in the project. It will decide quickly and
|
||||
take actions, for the sake of the project.
|
||||
|
||||
The CC can remove privileges of offending members, including removal of
|
||||
commit access and temporary ban from the community.
|
||||
|
||||
Decisions made by the CC can be re-opened after 1 year or by a majority vote
|
||||
of the General Assembly. Indefinite bans from the community must be confirmed
|
||||
by the General Assembly, in a majority vote.
|
||||
|
||||
The CC is elected by the General Assembly for a duration of 1 year, and is
|
||||
composed of 5 members.
|
||||
Members can be re-elected if they wish. A majority vote in the General Assembly
|
||||
can trigger a new election of the CC.
|
||||
|
||||
The members of the CC can be elected from outside of the GA.
|
||||
Candidates for election can either be suggested or self-nominated.
|
||||
|
||||
The CC is governed by and responsible for enforcing the Code of Conduct.
|
||||
doc/dev_community/resolution_process.md (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
# Technical Committee
|
||||
|
||||
_This document only makes sense with the rules from [the community document](community)_.
|
||||
|
||||
The Technical Committee (**TC**) is here to arbitrate and make decisions when
|
||||
technical conflicts occur in the project.
|
||||
|
||||
The TC's main role is to resolve technical conflicts.
|
||||
It is therefore not a technical steering committee, but it is understood that
|
||||
some decisions might impact the future of the project.
|
||||
|
||||
# Process
|
||||
|
||||
## Seizing
|
||||
|
||||
The TC can take possession of any technical matter that it sees fit.
|
||||
|
||||
To involve the TC in a matter, email tc@ or CC them on an ongoing discussion.
|
||||
|
||||
As members of TC are developers, they also can email tc@ to raise an issue.
|
||||
|
||||
## Announcement
|
||||
|
||||
The TC, once seized, must announce itself on the main mailing list, with a _[TC]_ tag.
|
||||
|
||||
The TC has two modes of operation: an RFC one and an internal one.
|
||||
|
||||
If the TC thinks it needs the input from the larger community, the TC can call
|
||||
for a RFC. Else, it can decide by itself.
|
||||
|
||||
If the disagreement involves a member of the TC, that member should recuse
|
||||
themselves from the decision.
|
||||
|
||||
The decision to use a RFC process or an internal discussion is a discretionary
|
||||
decision of the TC.
|
||||
|
||||
The TC can also reject a seizure for a few reasons such as:
|
||||
the matter was not discussed enough previously; it lacks expertise to reach a
|
||||
beneficial decision on the matter; or the matter is too trivial.
|
||||
|
||||
### RFC call
|
||||
|
||||
In the RFC mode, one person from the TC posts on the mailing list the
|
||||
technical question and will request input from the community.
|
||||
|
||||
The mail will have the following specification:
|
||||
* a precise title
|
||||
* a specific tag [TC RFC]
|
||||
* a top-level email
|
||||
* contain a precise question that does not exceed 100 words and that is answerable by developers
|
||||
* may have an extra description, or a link to a previous discussion, if deemed necessary,
|
||||
* contain a precise end date for the answers.
|
||||
|
||||
The answers from the community must be on the main mailing list and must have
|
||||
the following specification:
|
||||
* keep the tag and the title unchanged
|
||||
* limited to 400 words
|
||||
* a first-level reply, answering directly to the main email
* answering the question.
|
||||
|
||||
Further replies to answers are permitted, as long as they conform to the
|
||||
community standards of politeness, they are limited to 100 words, and are not
|
||||
nested more than once. (max-depth=2)
|
||||
|
||||
After the end-date, mails on the thread will be ignored.
|
||||
|
||||
Violations of those rules will be escalated through the Community Committee.
|
||||
|
||||
After all the emails are in, the TC has 96 hours to give its final decision.
|
||||
Exceptionally, the TC can request an extra delay, that will be notified on the
|
||||
mailing list.
|
||||
|
||||
### Within TC
|
||||
|
||||
In the internal case, the TC has 96 hours to give its final decision.
|
||||
Exceptionally, the TC can request an extra delay.
|
||||
|
||||
|
||||
## Decisions
|
||||
|
||||
The decisions from the TC will be sent on the mailing list, with the _[TC]_ tag.
|
||||
|
||||
Internally, the TC should take decisions with a majority, or using
|
||||
ranked-choice voting.
|
||||
|
||||
The decision from the TC should be published with a summary of the reasons that
|
||||
led to this decision.
|
||||
|
||||
The decisions from the TC are final, until the matters are reopened after
|
||||
no less than one year.
|
||||
|
||||
@@ -10,109 +10,44 @@
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
@chapter Notes for external developers
|
||||
|
||||
This text is concerned with the development @emph{of} FFmpeg itself. Information
|
||||
on using the FFmpeg libraries in other programs can be found elsewhere, e.g. in:
|
||||
@itemize @bullet
|
||||
@item
|
||||
the installed header files
|
||||
@item
|
||||
@url{http://ffmpeg.org/doxygen/trunk/index.html, the Doxygen documentation}
|
||||
generated from the headers
|
||||
@item
|
||||
the examples under @file{doc/examples}
|
||||
@end itemize
|
||||
This document is mostly useful for internal FFmpeg developers.
|
||||
External developers who need to use the API in their application should
|
||||
refer to the API doxygen documentation in the public headers, and
|
||||
check the examples in @file{doc/examples} and in the source code to
|
||||
see how the public API is employed.
|
||||
|
||||
You can use the FFmpeg libraries in your commercial program, but you
|
||||
are encouraged to @emph{publish any patch you make}. In this case the
|
||||
best way to proceed is to send your patches to the ffmpeg-devel
|
||||
mailing list following the guidelines illustrated in the remainder of
|
||||
this document.
|
||||
|
||||
For more detailed legal information about the use of FFmpeg in
|
||||
external programs read the @file{LICENSE} file in the source tree and
|
||||
consult @url{https://ffmpeg.org/legal.html}.
|
||||
|
||||
If you modify FFmpeg code for your own use case, you are highly encouraged to
|
||||
@emph{submit your changes back to us}, using this document as a guide. There are
|
||||
both pragmatic and ideological reasons to do so:
|
||||
@chapter Contributing
|
||||
|
||||
There are 2 ways by which code gets into FFmpeg:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Maintaining external changes to keep up with upstream development is
|
||||
time-consuming and error-prone. With your code in the main tree, it will be
|
||||
maintained by FFmpeg developers.
|
||||
@item
|
||||
FFmpeg developers include leading experts in the field who can find bugs or
|
||||
design flaws in your code.
|
||||
@item
|
||||
By supporting the project you find useful you ensure it continues to be
|
||||
maintained and developed.
|
||||
@item Submitting patches to the ffmpeg-devel mailing list.
|
||||
See @ref{Submitting patches} for details.
|
||||
@item Directly committing changes to the main tree.
|
||||
@end itemize
|
||||
|
||||
All proposed code changes should be submitted for review to
|
||||
@url{mailto:ffmpeg-devel@@ffmpeg.org, the development mailing list}, as
|
||||
described in more detail in the @ref{Submitting patches} chapter. The code
|
||||
should comply with the @ref{Development Policy} and follow the @ref{Coding Rules}.
|
||||
Whichever way, changes should be reviewed by the maintainer of the code
|
||||
before they are committed. And they should follow the @ref{Coding Rules}.
|
||||
The developer making the commit and the author are responsible for their changes
|
||||
and should try to fix issues their commit causes.
|
||||
|
||||
@anchor{Coding Rules}
|
||||
@chapter Coding Rules
|
||||
|
||||
@section Language
|
||||
|
||||
FFmpeg is mainly programmed in the ISO C11 language, except for the public
|
||||
headers which must stay C99 compatible.
|
||||
|
||||
Compiler-specific extensions may be used with good reason, but must not be
|
||||
depended on, i.e. the code must still compile and work with compilers lacking
|
||||
the extension.
|
||||
|
||||
The following C99 features must not be used anywhere in the codebase:
|
||||
@itemize @bullet
|
||||
@item
|
||||
variable-length arrays;
|
||||
|
||||
@item
|
||||
complex numbers;
|
||||
@end itemize
|
||||
|
||||
@subsection SIMD/DSP
|
||||
@anchor{SIMD/DSP}
|
||||
|
||||
As modern compilers are unable to generate efficient SIMD or other
|
||||
performance-critical DSP code from plain C, handwritten assembly is used.
|
||||
Usually such code is isolated in a separate function. Then the standard approach
|
||||
is writing multiple versions of this function – a plain C one that works
|
||||
everywhere and may also be useful for debugging, and potentially multiple
|
||||
architecture-specific optimized implementations. Initialization code then
|
||||
chooses the best available version at runtime and loads it into a function
|
||||
pointer; the function in question is then always called through this pointer.
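
As a rough sketch of this pattern (all identifiers below are made up for
illustration and are not real FFmpeg symbols):
@example
typedef struct MyDSPContext @{
    void (*add_samples)(int16_t *dst, const int16_t *src, int len);
@} MyDSPContext;

// plain C version: always available, works everywhere, handy for debugging
static void add_samples_c(int16_t *dst, const int16_t *src, int len)
@{
    for (int i = 0; i < len; i++)
        dst[i] += src[i];
@}

void ff_mydsp_init(MyDSPContext *c)
@{
    c->add_samples = add_samples_c;
#if ARCH_X86
    ff_mydsp_init_x86(c); // may overwrite the pointer with a SIMD version
#endif
@}
@end example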
|
||||
|
||||
The specific syntax used for writing assembly is:
|
||||
@itemize @bullet
|
||||
@item
|
||||
NASM on x86;
|
||||
|
||||
@item
|
||||
GAS on ARM and RISC-V.
|
||||
@end itemize
|
||||
|
||||
A unit testing framework for assembly called @code{checkasm} lives under
|
||||
@file{tests/checkasm}. All new assembly should come with @code{checkasm} tests;
|
||||
adding tests for existing assembly that lacks them is also strongly encouraged.
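
A typical local run might look roughly like this (the test name is a
placeholder; pick the one matching your DSP function):
@example
make checkasm
tests/checkasm/checkasm --test=<name>
@end example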
|
||||
|
||||
@subsection Other languages
|
||||
|
||||
Other languages than C may be used in special cases:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Compiler intrinsics or inline assembly when the code in question cannot be
|
||||
written in the standard way described in the @ref{SIMD/DSP} section. This
|
||||
typically applies to code that needs to be inlined.
|
||||
|
||||
@item
|
||||
Objective-C where required for interacting with macOS-specific interfaces.
|
||||
@end itemize
|
||||
|
||||
@section Code formatting conventions
|
||||
|
||||
There are the following guidelines regarding the code style in files:
|
||||
There are the following guidelines regarding the indentation in files:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
@@ -132,137 +67,8 @@ K&R coding style is used.
|
||||
@end itemize
|
||||
The presentation is one inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
@subsection Examples
|
||||
Some notable examples to illustrate common code style in FFmpeg:
|
||||
|
||||
@itemize @bullet
|
||||
|
||||
@item
|
||||
Space around assignments and after
|
||||
@code{if}/@code{do}/@code{while}/@code{for} keywords:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
if (condition)
|
||||
av_foo();
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
for (size_t i = 0; i < len; i++)
|
||||
av_bar(i);
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
size_t size = 0;
|
||||
@end example
|
||||
|
||||
However, no spaces should be placed between the parentheses and the condition,
unless it helps readability of complex conditions, so the following should not be done:
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
if ( condition )
|
||||
av_foo();
|
||||
@end example
|
||||
|
||||
@item
|
||||
No unnecessary parentheses, unless it helps readability:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
int fields = ilace ? 2 : 1;
|
||||
@end example
|
||||
|
||||
@item
|
||||
Don't wrap single-line blocks in braces. Use braces only if there is an accompanying else statement. This keeps future code changes easier to keep track of.
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
if (bits_pixel == 24) @{
|
||||
avctx->pix_fmt = AV_PIX_FMT_BGR24;
|
||||
@} else if (bits_pixel == 8) @{
|
||||
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
|
||||
@} else
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
@end example
|
||||
|
||||
@item
|
||||
Avoid assignments in conditions where it makes sense:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64);
|
||||
if (!video_enc->chroma_intra_matrix)
|
||||
return AVERROR(ENOMEM);
|
||||
@end example
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
if (!(video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64)))
|
||||
return AVERROR(ENOMEM);
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Ok
|
||||
while ((entry = av_dict_iterate(options, entry)))
|
||||
av_log(ctx, AV_LOG_INFO, "Item '%s': '%s'\n", entry->key, entry->value);
|
||||
@end example
|
||||
|
||||
@item
|
||||
When declaring a pointer variable, the @code{*} goes with the variable not the type:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
AVStream *stream;
|
||||
@end example
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
AVStream* stream;
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
If you work on a file that does not follow these guidelines consistently,
|
||||
change the parts that you are editing to follow these guidelines but do
|
||||
not make unrelated changes in the file to make it conform to these.
|
||||
|
||||
@subsection Vim configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
@subsection Emacs configuration
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
The main priority in FFmpeg is simplicity and small code size in order to
|
||||
minimize the bug count.
|
||||
|
||||
@section Comments
|
||||
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
|
||||
@@ -304,52 +110,92 @@ int myfunc(int my_parameter)
|
||||
...
|
||||
@end example
|
||||
|
||||
@anchor{Naming conventions}
|
||||
@section Naming conventions
|
||||
@section C language features
|
||||
|
||||
Names of functions, variables, and struct members must be lowercase, using
|
||||
underscores (_) to separate words. For example, @samp{avfilter_get_video_buffer}
|
||||
is an acceptable function name and @samp{AVFilterGetVideo} is not.
|
||||
FFmpeg is programmed in the ISO C90 language with a few additional
|
||||
features from ISO C99, namely:
|
||||
|
||||
Struct, union, enum, and typedeffed type names must use CamelCase. All structs
|
||||
and unions should be typedeffed to the same name as the struct/union tag, e.g.
|
||||
@code{typedef struct AVFoo @{ ... @} AVFoo;}. Enums are typically not
|
||||
typedeffed.
|
||||
|
||||
Enumeration constants and macros must be UPPERCASE, except for macros
|
||||
masquerading as functions, which should use the function naming convention.
|
||||
|
||||
All identifiers in the libraries should be namespaced as follows:
|
||||
@itemize @bullet
|
||||
@item
|
||||
No namespacing for identifiers with file and lower scope (e.g. local variables,
|
||||
static functions), and struct and union members,
|
||||
the @samp{inline} keyword;
|
||||
|
||||
@item
|
||||
The @code{ff_} prefix must be used for variables and functions visible outside
|
||||
of file scope, but only used internally within a single library, e.g.
|
||||
@samp{ff_w64_demuxer}. This prevents name collisions when FFmpeg is statically
|
||||
linked.
|
||||
@samp{//} comments;
|
||||
|
||||
@item
|
||||
designated struct initializers (@samp{struct s x = @{ .i = 17 @};});
|
||||
|
||||
@item
|
||||
compound literals (@samp{x = (struct s) @{ 17, 23 @};}).
|
||||
|
||||
@item
|
||||
for loops with variable definition (@samp{for (int i = 0; i < 8; i++)});
|
||||
|
||||
@item
|
||||
Variadic macros (@samp{#define ARRAY(nb, ...) (int[nb + 1])@{ nb, __VA_ARGS__ @}});
|
||||
|
||||
@item
|
||||
Implementation defined behavior for signed integers is assumed to match the
|
||||
expected behavior for two's complement. Non representable values in integer
|
||||
casts are binary truncated. Shift right of signed values uses sign extension.
|
||||
@end itemize
|
||||
|
||||
These features are supported by all compilers we care about, so we will not
|
||||
accept patches to remove their use unless they absolutely do not impair
|
||||
clarity and performance.
|
||||
|
||||
All code must compile with recent versions of GCC and a number of other
|
||||
currently supported compilers. To ensure compatibility, please do not use
|
||||
additional C99 features or GCC extensions. Especially watch out for:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
mixing statements and declarations;
|
||||
|
||||
@item
|
||||
@samp{long long} (use @samp{int64_t} instead);
|
||||
|
||||
@item
|
||||
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
|
||||
|
||||
@item
|
||||
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
|
||||
@end itemize
|
||||
|
||||
@section Naming conventions
|
||||
All names should be composed with underscores (_), not CamelCase. For example,
|
||||
@samp{avfilter_get_video_buffer} is an acceptable function name and
|
||||
@samp{AVFilterGetVideo} is not. The exceptions to this are type names, such as
structs and enums; they should always be in CamelCase.
|
||||
|
||||
There are the following conventions for naming variables and functions:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
For local variables no prefix is required.
|
||||
|
||||
@item
|
||||
For file-scope variables and functions declared as @code{static}, no prefix
|
||||
is required.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, but only used
|
||||
internally by a library, an @code{ff_} prefix should be used,
|
||||
e.g. @samp{ff_w64_demuxer}.
|
||||
|
||||
@item
|
||||
For variables and functions visible outside of file scope, used internally
|
||||
across multiple libraries, use @code{avpriv_} as prefix, for example,
|
||||
@samp{avpriv_report_missing_feature}.
|
||||
|
||||
@item
|
||||
All other internal identifiers, like private type or macro names, should be
|
||||
namespaced only to avoid possible internal conflicts. E.g. @code{H264_NAL_SPS}
|
||||
vs. @code{HEVC_NAL_SPS}.
|
||||
|
||||
@item
|
||||
Each library has its own prefix for public symbols, in addition to the
|
||||
commonly used @code{av_} (@code{avformat_} for libavformat,
|
||||
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
|
||||
Check the existing code and choose names accordingly.
|
||||
|
||||
@item
|
||||
Other public identifiers (struct, union, enum, macro, type names) must use their
|
||||
library's public prefix (@code{AV}, @code{Sws}, or @code{Swr}).
|
||||
Note that some symbols without these prefixes are also exported for
|
||||
retro-compatibility reasons. These exceptions are declared in the
|
||||
@code{lib<name>/lib<name>.v} files.
|
||||
@end itemize
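
A few illustrative declarations following these conventions (the identifiers
themselves are made up for the example):
@example
static int parse_chunk(void);                  // file scope: no prefix
int ff_bar_read_header(AVFormatContext *s);    // internal to one library: ff_
int avpriv_bar_report(void *log_ctx);          // shared across libraries: avpriv_
int avformat_bar_open(AVFormatContext **ps);   // public libavformat API: avformat_
@end example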
|
||||
|
||||
Furthermore, the namespace reserved for the system should not be invaded.
|
||||
@@ -363,50 +209,50 @@ symbols. If in doubt, just avoid names starting with @code{_} altogether.
|
||||
@section Miscellaneous conventions
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
fprintf and printf are forbidden in libavformat and libavcodec;
|
||||
please use av_log() instead.
|
||||
|
||||
@item
|
||||
Casts should be used only when necessary. Unneeded parentheses
|
||||
should also be avoided if they don't make the code easier to understand.
|
||||
@end itemize
|
||||
|
||||
@anchor{Development Policy}
|
||||
@section Editor configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
@example
|
||||
" indentation rules for FFmpeg: 4 spaces, no tabs
|
||||
set expandtab
|
||||
set shiftwidth=4
|
||||
set softtabstop=4
|
||||
set cindent
|
||||
set cinoptions=(0
|
||||
" Allow tabs in Makefiles.
|
||||
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
|
||||
" Trailing whitespace and tabs are forbidden, so highlight them.
|
||||
highlight ForbiddenWhitespace ctermbg=red guibg=red
|
||||
match ForbiddenWhitespace /\s\+$\|\t/
|
||||
" Do not highlight spaces at the end of line while typing on that line.
|
||||
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
|
||||
@end example
|
||||
|
||||
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
|
||||
@lisp
|
||||
(c-add-style "ffmpeg"
|
||||
'("k&r"
|
||||
(c-basic-offset . 4)
|
||||
(indent-tabs-mode . nil)
|
||||
(show-trailing-whitespace . t)
|
||||
(c-offsets-alist
|
||||
(statement-cont . (c-lineup-assignments +)))
|
||||
)
|
||||
)
|
||||
(setq c-default-style "ffmpeg")
|
||||
@end lisp
|
||||
|
||||
@chapter Development Policy
|
||||
|
||||
@section Code behaviour
|
||||
|
||||
@subheading Correctness
|
||||
The code must be valid. It must not crash, abort, access invalid pointers, leak
|
||||
memory, cause data races or signed integer overflow, or otherwise cause
|
||||
undefined behaviour. Error codes should be checked and, when applicable,
|
||||
forwarded to the caller.
|
||||
|
||||
@subheading Thread- and library-safety
|
||||
Our libraries may be called by multiple independent callers in the same process.
|
||||
These calls may happen from any number of threads and the different call sites
|
||||
may not be aware of each other - e.g. a user program may be calling our
|
||||
libraries directly, and use one or more libraries that also call our libraries.
|
||||
The code must behave correctly under such conditions.
|
||||
|
||||
@subheading Robustness
|
||||
The code must treat as untrusted any bytestream received from a caller or read
|
||||
from a file, network, etc. It must not misbehave when arbitrary data is sent to
|
||||
it - typically it should print an error message and return
|
||||
@code{AVERROR_INVALIDDATA} on encountering invalid input data.
|
||||
|
||||
@subheading Memory allocation
|
||||
The code must use the @code{av_malloc()} family of functions from
|
||||
@file{libavutil/mem.h} to perform all memory allocation, except in special cases
|
||||
(e.g. when interacting with an external library that requires a specific
|
||||
allocator to be used).
|
||||
|
||||
All allocations should be checked and @code{AVERROR(ENOMEM)} returned on
|
||||
failure. A common mistake is that error paths leak memory - make sure that does
|
||||
not happen.
|
||||
|
||||
@subheading stdio
|
||||
Our libraries must not access the stdio streams stdin/stdout/stderr directly
|
||||
(e.g. via @code{printf()} family of functions), as that is not library-safe. For
|
||||
logging, use @code{av_log()}.
|
||||
|
||||
@section Patches/Committing
|
||||
@subheading Licenses for patches must be compatible with FFmpeg.
|
||||
Contributions should be licensed under the
|
||||
@@ -429,24 +275,13 @@ missing samples or an implementation with a small subset of features.
|
||||
Always check the mailing list for any reviewers with issues and test
|
||||
FATE before you push.
|
||||
|
||||
@subheading Commit messages
|
||||
Commit messages are highly important tools for informing other developers on
|
||||
what a given change does and why. Every commit must always have a properly
|
||||
filled out commit message with the following format:
|
||||
@example
|
||||
area changed: short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
If the commit addresses a known bug on our bug tracker or other external issue
|
||||
(e.g. CVE), the commit message should include the relevant bug ID(s) or other
|
||||
external identifiers. Note that this should be done in addition to a proper
|
||||
explanation and not instead of it. Comments such as "fixed!" or "Changed it."
|
||||
are not acceptable.
|
||||
|
||||
When applying patches that have been discussed at length on the mailing list,
|
||||
reference the thread in the commit message.
|
||||
@subheading Keep the main commit message short with an extended description below.
|
||||
The commit message should have a short first line in the form of
|
||||
a @samp{topic: short description} as a header, separated by a newline
|
||||
from the body consisting of an explanation of why the change is necessary.
|
||||
If the commit fixes a known bug on the bug tracker, the commit message
|
||||
should include its bug ID. Referring to the issue on the bug tracker does
|
||||
not exempt you from writing an excerpt of the bug in the commit message.
|
||||
|
||||
@subheading Testing must be adequate but not excessive.
|
||||
If it works for you, others, and passes FATE then it should be OK to commit
|
||||
@@ -465,6 +300,15 @@ later on.
|
||||
Also if you have doubts about splitting or not splitting, do not hesitate to
|
||||
ask/discuss it on the developer mailing list.
|
||||
|
||||
@subheading Ask before you change the build system (configure, etc).
|
||||
Do not commit changes to the build system (Makefiles, configure script)
|
||||
which change behavior, defaults etc, without asking first. The same
|
||||
applies to compiler warning fixes, trivial looking fixes and to code
|
||||
maintained by other developers. We usually have a reason for doing things
|
||||
the way we do. Send your changes as patches to the ffmpeg-devel mailing
|
||||
list, and if the code maintainers say OK, you may commit. This does not
|
||||
apply to files you wrote and/or maintain.
|
||||
|
||||
@subheading Cosmetic changes should be kept in separate patches.
|
||||
We refuse source indentation and other cosmetic changes if they are mixed
|
||||
with functional changes, such commits will be rejected and removed. Every
|
||||
@@ -479,15 +323,27 @@ NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
|
||||
then either do NOT change the indentation of the inner part within (do not
move it to the right), or do so in a separate commit.
|
||||
|
||||
@subheading Commit messages should always be filled out properly.
|
||||
Always fill out the commit log message. Describe in a few lines what you
|
||||
changed and why. You can refer to mailing list postings if you fix a
|
||||
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
|
||||
Recommended format:
|
||||
|
||||
@example
|
||||
area changed: Short 1 line description
|
||||
|
||||
details describing what and why and giving references.
|
||||
@end example
|
||||
|
||||
@subheading Credit the author of the patch.
|
||||
Make sure the author of the commit is set correctly. (see git commit --author)
|
||||
If you apply a patch, send an
|
||||
answer to ffmpeg-devel (or wherever you got the patch from) saying that
|
||||
you applied the patch.
|
||||
|
||||
@subheading Credit any researchers
|
||||
If a commit/patch fixes an issue found by some researcher, always credit the
|
||||
researcher in the commit message for finding/reporting the issue.
|
||||
@subheading Complex patches should refer to discussion surrounding them.
|
||||
When applying patches that have been discussed (at length) on the mailing
|
||||
list, reference the thread in the log message.
|
||||
|
||||
@subheading Always wait long enough before pushing changes
|
||||
Do NOT commit to code actively maintained by others without permission.
|
||||
@@ -497,6 +353,22 @@ time-frame (12h for build failures and security fixes, 3 days small changes,
|
||||
Also note, the maintainer can simply ask for more time to review!
|
||||
|
||||
@section Code
|
||||
@subheading API/ABI changes should be discussed before they are made.
|
||||
Do not change behavior of the programs (renaming options etc) or public
|
||||
API or ABI without first discussing it on the ffmpeg-devel mailing list.
|
||||
Do not remove widely used functionality or features (redundant code can be removed).
|
||||
|
||||
@subheading Remember to check if you need to bump versions for libav*.
|
||||
Depending on the change, you may need to change the version integer.
|
||||
Incrementing the first component means no backward compatibility to
|
||||
previous versions (e.g. removal of a function from the public API).
|
||||
Incrementing the second component means backward compatible change
|
||||
(e.g. addition of a function to the public API or extension of an
|
||||
existing data structure).
|
||||
Incrementing the third component means a noteworthy binary compatible
|
||||
change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
@subheading Warnings for correct code may be disabled if there is no other option.
|
||||
Compiler warnings indicate potential bugs or code with bad style. If a type of
|
||||
warning always points to correct and clean code, that warning should
|
||||
@@ -506,150 +378,10 @@ If it is a bug, the bug has to be fixed. If it is not, the code should
|
||||
be changed to not generate a warning unless that causes a slowdown
|
||||
or obfuscates the code.
|
||||
|
||||
@section Library public interfaces
|
||||
Every library in FFmpeg provides a set of public APIs in its installed headers,
|
||||
which are those listed in the variable @code{HEADERS} in that library's
|
||||
@file{Makefile}. All identifiers defined in those headers (except for those
|
||||
explicitly documented otherwise), and corresponding symbols exported from
|
||||
compiled shared or static libraries are considered public interfaces and must
|
||||
comply with the API and ABI compatibility rules described in this section.
|
||||
|
||||
Public APIs must be backward compatible within a given major version. I.e. any
|
||||
valid user code that compiles and works with a given library version must still
|
||||
compile and work with any later version, as long as the major version number is
|
||||
unchanged. "Valid user code" here means code that is calling our APIs in a
|
||||
documented and/or intended manner and is not relying on any undefined behavior.
|
||||
Incrementing the major version may break backward compatibility, but only to the
|
||||
extent described in @ref{Major version bumps}.
|
||||
|
||||
We also guarantee backward ABI compatibility for shared and static libraries.
|
||||
I.e. it should be possible to replace a shared or static build of our library
|
||||
with a build of any later version (re-linking the user binary in the static
|
||||
case) without breaking any valid user binaries, as long as the major version
|
||||
number remains unchanged.
|
||||
|
||||
@subsection Adding new interfaces
|
||||
Any new public identifiers in installed headers are considered new API - this
|
||||
includes new functions, structs, macros, enum values, typedefs, new fields in
|
||||
existing structs, new installed headers, etc. Consider the following
|
||||
guidelines when adding new APIs.
|
||||
|
||||
@subsubheading Motivation
|
||||
While new APIs can be added relatively easily, changing or removing them is much
|
||||
harder due to the abovementioned compatibility requirements. You should then
|
||||
consider carefully whether the functionality you are adding really needs to be
|
||||
exposed to our callers as new public API.
|
||||
|
||||
Your new API should have at least one well-established use case outside of the
|
||||
library that cannot be easily achieved with existing APIs. Every library in
|
||||
FFmpeg also has a defined scope - your new API must fit within it.
|
||||
|
||||
@subsubheading Replacing existing APIs
|
||||
If your new API is replacing an existing one, it should be strictly superior to
|
||||
it, so that the advantages of using the new API outweigh the cost to the
|
||||
callers of changing their code. After adding the new API you should then
|
||||
deprecate the old one and schedule it for removal, as described in
|
||||
@ref{Removing interfaces}.
|
||||
|
||||
If you deem an existing API deficient and want to fix it, the preferred approach
|
||||
in most cases is to add a differently-named replacement and deprecate the
|
||||
existing API rather than modify it. It is important to make the changes visible
|
||||
to our callers (e.g. through compile- or run-time deprecation warnings) and make
|
||||
it clear how to transition to the new API (e.g. in the Doxygen documentation or
|
||||
on the wiki).
|
||||
|
||||
@subsubheading API design
|
||||
The FFmpeg libraries are used by a variety of callers to perform a wide range of
|
||||
multimedia-related processing tasks. You should therefore - within reason - try
|
||||
to design your new API for the broadest feasible set of use cases and avoid
|
||||
unnecessarily limiting it to a specific type of callers (e.g. just media
|
||||
playback or just transcoding).
|
||||
|
||||
@subsubheading Consistency
|
||||
Check whether similar APIs already exist in FFmpeg. If they do, try to model
|
||||
your new addition on them to achieve better overall consistency.
|
||||
|
||||
The naming of your new identifiers should follow the @ref{Naming conventions}
|
||||
and be aligned with other similar APIs, if applicable.
|
||||
|
||||
@subsubheading Extensibility
|
||||
You should also consider how your API might be extended in the future in a
|
||||
backward-compatible way. If you are adding a new struct @code{AVFoo}, the
|
||||
standard approach is requiring the caller to always allocate it through a
|
||||
constructor function, typically named @code{av_foo_alloc()}. This way new fields
|
||||
may be added to the end of the struct without breaking ABI compatibility.
|
||||
Typically you will also want a destructor - @code{av_foo_free(AVFoo**)} that
|
||||
frees the indirectly supplied object (and its contents, if applicable) and
|
||||
writes @code{NULL} to the supplied pointer, thus eliminating the potential
|
||||
dangling pointer in the caller's memory.
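
A minimal usage sketch of this pattern, reusing the placeholder @code{AVFoo}
name from above:
@example
AVFoo *foo = av_foo_alloc();
if (!foo)
    return AVERROR(ENOMEM);

/* ... use foo ... */

av_foo_free(&foo); // frees the object and sets foo to NULL
@end example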
|
||||
|
||||
If you are adding new functions, consider whether it might be desirable to tweak
|
||||
their behavior in the future - you may want to add a flags argument, even though
|
||||
it would be unused initially.
|
||||
|
||||
@subsubheading Documentation
|
||||
All new APIs must be documented as Doxygen-formatted comments above the
|
||||
identifiers you add to the public headers. You should also briefly mention the
|
||||
change in @file{doc/APIchanges}.
|
||||
|
||||
@subsubheading Bump the version
|
||||
Backward-incompatible API or ABI changes require incrementing (bumping) the
|
||||
major version number, as described in @ref{Major version bumps}. Major
|
||||
bumps are significant events that happen on a schedule - so if your change
|
||||
strictly requires one you should add it under @code{#if} preprocessor guards that
|
||||
disable it until the next major bump happens.
|
||||
|
||||
New APIs that can be added without breaking API or ABI compatibility require
|
||||
bumping the minor version number.
|
||||
|
||||
Incrementing the third (micro) version component means a noteworthy binary
|
||||
compatible change (e.g. encoder bug fix that matters for the decoder). The third
|
||||
component always starts at 100 to distinguish FFmpeg from Libav.
|
||||
|
||||
@anchor{Removing interfaces}
|
||||
@subsection Removing interfaces
|
||||
Due to the abovementioned compatibility guarantees, removing APIs is an involved
|
||||
process that should only be undertaken with good reason. Typically a deficient,
|
||||
restrictive, or otherwise inadequate API is replaced by a superior one, though
|
||||
it does at times happen that we remove an API without any replacement (e.g. when
|
||||
the feature it provides is deemed not worth the maintenance effort, out of scope
|
||||
of the project, fundamentally flawed, etc.).
|
||||
|
||||
The removal has two steps - first the API is deprecated and scheduled for
|
||||
removal, but remains present and functional. The second step is actually
|
||||
removing the API - this is described in @ref{Major version bumps}.
|
||||
|
||||
To deprecate an API you should signal to our users that they should stop using
|
||||
it. E.g. if you intend to remove struct members or functions, you should mark
|
||||
them with @code{attribute_deprecated}. When this cannot be done, it may be
|
||||
possible to detect the use of the deprecated API at runtime and print a warning
|
||||
(though take care not to print it too often). You should also document the
|
||||
deprecation (and the replacement, if applicable) in the relevant Doxygen
|
||||
documentation block.
|
||||
|
||||
Finally, you should define a deprecation guard along the lines of
|
||||
@code{#define FF_API_<FOO> (LIBAVBAR_VERSION_MAJOR < XX)} (where XX is the major
|
||||
version in which the API will be removed) in @file{libavbar/version_major.h}
|
||||
(@file{version.h} in case of @code{libavutil}). Then wrap all uses of the
|
||||
deprecated API in @code{#if FF_API_<FOO> .... #endif}, so that the code will
|
||||
automatically get disabled once the major version reaches XX. You can also use
|
||||
@code{FF_DISABLE_DEPRECATION_WARNINGS} and @code{FF_ENABLE_DEPRECATION_WARNINGS}
|
||||
to suppress compiler deprecation warnings inside these guards. You should test
|
||||
that the code compiles and works with the guard macro evaluating to both true
|
||||
and false.
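
A condensed sketch of the pieces described above (FF_API_OLD_THING, the target
major version 62, and av_old_thing() are placeholders for this example):
@example
/* libavbar/version_major.h */
#define FF_API_OLD_THING (LIBAVBAR_VERSION_MAJOR < 62)

/* at each use of the deprecated API */
#if FF_API_OLD_THING
FF_DISABLE_DEPRECATION_WARNINGS
    ret = av_old_thing(ctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
@end example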
|
||||
|
||||
@anchor{Major version bumps}
|
||||
@subsection Major version bumps
|
||||
A major version bump signifies an API and/or ABI compatibility break. To reduce
|
||||
the negative effects on our callers, who are required to adapt their code,
|
||||
backward-incompatible changes during a major bump should be limited to:
|
||||
@itemize @bullet
|
||||
@item
|
||||
Removing previously deprecated APIs.
|
||||
|
||||
@item
|
||||
Performing ABI- but not API-breaking changes, like reordering struct contents.
|
||||
@end itemize
|
||||
@subheading Check untrusted input properly.
|
||||
Never write to unallocated memory, never write over the end of arrays,
|
||||
always check values read from some untrusted source before using them
|
||||
as array index or other risky things.
|
||||
|
||||
@section Documentation/Other
|
||||
@subheading Subscribe to the ffmpeg-devel mailing list.
|
||||
@@ -693,6 +425,35 @@ finding a new maintainer and also don't forget to update the @file{MAINTAINERS}
|
||||
|
||||
We think our rules are not too hard. If you have comments, contact us.
|
||||
|
||||
@chapter Code of conduct
|
||||
|
||||
Be friendly and respectful towards others and third parties.
|
||||
Treat others the way you yourself want to be treated.
|
||||
|
||||
Be considerate. Not everyone shares the same viewpoint and priorities as you do.
|
||||
Different opinions and interpretations help the project.
|
||||
Looking at issues from a different perspective assists development.
|
||||
|
||||
Do not assume malice for things that can be attributed to incompetence. Even if
|
||||
it is malice, it's rarely good to start with that as initial assumption.
|
||||
|
||||
Stay friendly even if someone acts contrarily. Everyone has a bad day
|
||||
once in a while.
|
||||
If you yourself have a bad day or are angry then try to take a break and reply
|
||||
once you are calm and without anger if you have to.
|
||||
|
||||
Try to help other team members and cooperate if you can.
|
||||
|
||||
The goal of software development is to create technical excellence, not for any
|
||||
individual to be better and "win" against the others. Large software projects
|
||||
are only possible and successful through teamwork.
|
||||
|
||||
If someone struggles do not put them down. Give them a helping hand
|
||||
instead and point them in the right direction.
|
||||
|
||||
Finally, keep in mind the immortal words of Bill and Ted,
|
||||
"Be excellent to each other."
|
||||
|
||||
@anchor{Submitting patches}
@chapter Submitting patches

@@ -733,27 +494,6 @@ patch is inline or attached per mail.
You can check @url{https://patchwork.ffmpeg.org}; if your patch does not show up there, its MIME type
was likely wrong.

@subheading How to set up git send-email?

Please see @url{https://git-send-email.io/}.
For Gmail, additionally see @url{https://shallowsky.com/blog/tech/email/gmail-app-passwds.html}.

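As a rough illustration only (the SMTP server, port and addresses below are
placeholders, not values mandated by the project), a minimal setup and
submission might look like:

@verbatim
git config sendemail.smtpServer smtp.example.com
git config sendemail.smtpUser you@example.com
git config sendemail.smtpEncryption tls
git config sendemail.smtpServerPort 587

git send-email --to ffmpeg-devel@ffmpeg.org -1 HEAD
@end verbatim
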
@subheading Sending patches from email clients
Using @code{git send-email} might not be desirable for everyone. The
following trick allows sending patches via email clients in a safe
way. It has been tested with Outlook and Thunderbird (with the X-Unsent
extension) and might work with other applications.

Create your patch like this:

@verbatim
git format-patch -s -o "outputfolder" --add-header "X-Unsent: 1" --suffix .eml --to ffmpeg-devel@ffmpeg.org -1 1a2b3c4d
@end verbatim

Now you'll just need to open the .eml file with the email application
and execute 'Send'.

@subheading Reviews
Your patch will be reviewed on the mailing list. You will likely be asked
to make some changes and are expected to send in an improved version that
incorporates the requests from the review. This process may go through
@@ -782,7 +522,7 @@ number) in @file{libavcodec/version.h} or @file{libavformat/version.h}?
Did you register it in @file{allcodecs.c} or @file{allformats.c}?

@item
Did you add the AVCodecID to @file{codec_id.h}?
Did you add the AVCodecID to @file{avcodec.h}?
When adding new codec IDs, also add an entry to the codec descriptor
list in @file{libavcodec/codec_desc.c}.

@@ -797,7 +537,7 @@ already being compiled by some other rule, like a raw demuxer.

@item
Did you add an entry to the table of supported formats or codecs in
@file{doc/general_contents.texi}?
@file{doc/general.texi}?

@item
Did you add an entry in the Changelog?
@@ -914,13 +654,15 @@ Lines with similar content should be aligned vertically when doing so
improves readability.

@item
Consider adding a regression test for your code. All new modules
should be covered by tests. That includes demuxers, muxers, decoders, encoders,
filters, bitstream filters and parsers. If it's not possible to do that, add
an explanation why to your patchset; it's OK not to test if there's a reason.
Consider adding a regression test for your code.

@item
If you added NASM code, please check that things still work with --disable-x86asm.
If you added YASM code, please check that things still work with --disable-yasm.

@item
Make sure you check the return values of functions and return appropriate
error codes. Especially memory allocation functions like @code{av_malloc()}
are notoriously left unchecked, which is a serious problem.

@item
Test your code with valgrind and/or Address Sanitizer to ensure it's free
@@ -971,8 +713,6 @@ accordingly].

@section Adding files to the fate-suite dataset

If you need a sample uploaded, send a mail to samples-request.

When there is no muxer or encoder available to generate test media for a
specific test, then the media has to be included in the fate-suite.
First, please make sure that the sample file is as small as possible to test the
@@ -1022,25 +762,6 @@ In case you need finer control over how valgrind is invoked, use the
@code{--target-exec='valgrind <your_custom_valgrind_options>'} option in
your configure line instead.

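For example (an illustrative invocation only; the particular valgrind options
shown are just one common choice, not a project requirement):

@verbatim
./configure --target-exec='valgrind --error-exitcode=1 --leak-check=full'
@end verbatim
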
@anchor{Maintenance}
@chapter Maintenance process

@anchor{MAINTAINERS}
@section MAINTAINERS

The developers maintaining each part of the codebase are listed in @file{MAINTAINERS}.
Being listed in @file{MAINTAINERS} gives one the right to have git write access to
the specific repository.

@anchor{Becoming a maintainer}
@section Becoming a maintainer

People add themselves to @file{MAINTAINERS} by sending a patch like any other code
change. These get reviewed by the community like any other patch. It is expected
that, if someone has an objection to a new maintainer, she is willing to object
in public with her full name and is willing to take over maintainership for the area.


@anchor{Release process}
@chapter Release process


@@ -1,13 +1,10 @@
#!/bin/sh

OUT_DIR="${1}"
SRC_DIR="${2}"
DOXYFILE="${3}"
DOXYGEN="${4}"
DOXYFILE="${2}"
DOXYGEN="${3}"

shift 4

cd ${SRC_DIR}
shift 3

if [ -e "VERSION" ]; then
VERSION=`cat "VERSION"`

doc/encoders.texi (1382 changed lines): file diff suppressed because it is too large.

doc/examples/.gitignore (vendored, 1 changed line):
@@ -22,4 +22,3 @@
/transcoding
/vaapi_encode
/vaapi_transcode
/qsv_transcode

@@ -1,27 +1,26 @@
EXAMPLES-$(CONFIG_AVIO_HTTP_SERVE_FILES) += avio_http_serve_files
EXAMPLES-$(CONFIG_AVIO_LIST_DIR_EXAMPLE) += avio_list_dir
EXAMPLES-$(CONFIG_AVIO_READ_CALLBACK_EXAMPLE) += avio_read_callback
EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
EXAMPLES-$(CONFIG_DECODE_AUDIO_EXAMPLE) += decode_audio
EXAMPLES-$(CONFIG_DECODE_FILTER_AUDIO_EXAMPLE) += decode_filter_audio
EXAMPLES-$(CONFIG_DECODE_FILTER_VIDEO_EXAMPLE) += decode_filter_video
EXAMPLES-$(CONFIG_DECODE_VIDEO_EXAMPLE) += decode_video
EXAMPLES-$(CONFIG_DEMUX_DECODE_EXAMPLE) += demux_decode
EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
EXAMPLES-$(CONFIG_ENCODE_AUDIO_EXAMPLE) += encode_audio
EXAMPLES-$(CONFIG_ENCODE_VIDEO_EXAMPLE) += encode_video
EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
EXAMPLES-$(CONFIG_HTTP_MULTICLIENT_EXAMPLE) += http_multiclient
EXAMPLES-$(CONFIG_HW_DECODE_EXAMPLE) += hw_decode
EXAMPLES-$(CONFIG_MUX_EXAMPLE) += mux
EXAMPLES-$(CONFIG_QSV_DECODE_EXAMPLE) += qsv_decode
EXAMPLES-$(CONFIG_REMUX_EXAMPLE) += remux
EXAMPLES-$(CONFIG_RESAMPLE_AUDIO_EXAMPLE) += resample_audio
EXAMPLES-$(CONFIG_SCALE_VIDEO_EXAMPLE) += scale_video
EXAMPLES-$(CONFIG_SHOW_METADATA_EXAMPLE) += show_metadata
EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
EXAMPLES-$(CONFIG_TRANSCODE_EXAMPLE) += transcode
EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
EXAMPLES-$(CONFIG_VAAPI_ENCODE_EXAMPLE) += vaapi_encode
EXAMPLES-$(CONFIG_VAAPI_TRANSCODE_EXAMPLE) += vaapi_transcode
EXAMPLES-$(CONFIG_QSV_TRANSCODE_EXAMPLE) += qsv_transcode

EXAMPLES := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
EXAMPLES_G := $(EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))

@@ -11,40 +11,33 @@ CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)

# missing the following targets, since they need special options in the FFmpeg build:
# qsv_decode
# qsv_transcode
# vaapi_encode
# vaapi_transcode

EXAMPLES=\
avio_http_serve_files \
avio_list_dir \
avio_read_callback \
EXAMPLES= avio_list_dir \
avio_reading \
decode_audio \
decode_filter_audio \
decode_filter_video \
decode_video \
demux_decode \
demuxing_decoding \
encode_audio \
encode_video \
extract_mvs \
filtering_video \
filtering_audio \
http_multiclient \
hw_decode \
mux \
remux \
resample_audio \
scale_video \
show_metadata \
metadata \
muxing \
remuxing \
resampling_audio \
scaling_video \
transcode_aac \
transcode
transcoding \

OBJS=$(addsuffix .o,$(EXAMPLES))

# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
encode_audio: LDLIBS += -lm
mux: LDLIBS += -lm
resample_audio: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm

.phony: all clean-test clean


@@ -7,10 +7,8 @@ that you have them installed and working on your system.

Method 1: build the installed examples in a generic read/write user directory

Copy to a read/write user directory and run:
make -f Makefile.example

It will link to the libraries on your system, assuming the PKG_CONFIG_PATH is
Copy to a read/write user directory and just use "make", it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.

Method 2: build the examples in-tree
@@ -22,4 +20,4 @@ examples using "make examplesclean"

If you want to try the dedicated Makefile examples (to emulate the first
method), go into doc/examples and run a command such as
PKG_CONFIG_PATH=pc-uninstalled make -f Makefile.example
PKG_CONFIG_PATH=pc-uninstalled make.

@@ -1,155 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Stephan Holljes
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat multi-client network API usage example
|
||||
* @example avio_http_serve_files.c
|
||||
*
|
||||
* Serve a file without decoding or demuxing it over the HTTP protocol. Multiple
|
||||
* clients can connect and will receive the same file.
|
||||
*/
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <unistd.h>
|
||||
|
||||
static void process_client(AVIOContext *client, const char *in_uri)
|
||||
{
|
||||
AVIOContext *input = NULL;
|
||||
uint8_t buf[1024];
|
||||
int ret, n, reply_code;
|
||||
uint8_t *resource = NULL;
|
||||
while ((ret = avio_handshake(client)) > 0) {
|
||||
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
|
||||
// check for strlen(resource) is necessary, because av_opt_get()
|
||||
// may return empty string.
|
||||
if (resource && strlen(resource))
|
||||
break;
|
||||
av_freep(&resource);
|
||||
}
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
|
||||
if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
|
||||
reply_code = 200;
|
||||
} else {
|
||||
reply_code = AVERROR_HTTP_NOT_FOUND;
|
||||
}
|
||||
if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
|
||||
av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
|
||||
|
||||
while ((ret = avio_handshake(client)) > 0);
|
||||
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
fprintf(stderr, "Handshake performed.\n");
|
||||
if (reply_code != 200)
|
||||
goto end;
|
||||
fprintf(stderr, "Opening input file.\n");
|
||||
if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
|
||||
av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
for(;;) {
|
||||
n = avio_read(input, buf, sizeof(buf));
|
||||
if (n < 0) {
|
||||
if (n == AVERROR_EOF)
|
||||
break;
|
||||
av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
|
||||
av_err2str(n));
|
||||
break;
|
||||
}
|
||||
avio_write(client, buf, n);
|
||||
avio_flush(client);
|
||||
}
|
||||
end:
|
||||
fprintf(stderr, "Flushing client\n");
|
||||
avio_flush(client);
|
||||
fprintf(stderr, "Closing client\n");
|
||||
avio_close(client);
|
||||
fprintf(stderr, "Closing input\n");
|
||||
avio_close(input);
|
||||
av_freep(&resource);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVDictionary *options = NULL;
|
||||
AVIOContext *client = NULL, *server = NULL;
|
||||
const char *in_uri, *out_uri;
|
||||
int ret, pid;
|
||||
av_log_set_level(AV_LOG_TRACE);
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input http://hostname[:port]\n"
|
||||
"API example program to serve http to multiple clients.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_uri = argv[1];
|
||||
out_uri = argv[2];
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
|
||||
fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
|
||||
fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
fprintf(stderr, "Entering main loop.\n");
|
||||
for(;;) {
|
||||
if ((ret = avio_accept(server, &client)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Accepted client, forking process.\n");
|
||||
// XXX: Since we don't reap our children and don't ignore signals
|
||||
// this produces zombie processes.
|
||||
pid = fork();
|
||||
if (pid < 0) {
|
||||
perror("Fork failed");
|
||||
ret = AVERROR(errno);
|
||||
goto end;
|
||||
}
|
||||
if (pid == 0) {
|
||||
fprintf(stderr, "In child.\n");
|
||||
process_client(client, in_uri);
|
||||
avio_close(server);
|
||||
exit(0);
|
||||
}
|
||||
if (pid > 0)
|
||||
avio_close(client);
|
||||
}
|
||||
end:
|
||||
avio_close(server);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@@ -20,13 +20,6 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext list directory API usage example
|
||||
* @example avio_list_dir.c
|
||||
*
|
||||
* Show how to list directories through the libavformat AVIOContext API.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat AVIOContext read callback API usage example
|
||||
* @example avio_read_callback.c
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/file.h>
|
||||
#include <libavutil/mem.h>
|
||||
|
||||
struct buffer_data {
|
||||
uint8_t *ptr;
|
||||
size_t size; ///< size left in the buffer
|
||||
};
|
||||
|
||||
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct buffer_data *bd = (struct buffer_data *)opaque;
|
||||
buf_size = FFMIN(buf_size, bd->size);
|
||||
|
||||
if (!buf_size)
|
||||
return AVERROR_EOF;
|
||||
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
|
||||
|
||||
/* copy internal buffer data to buf */
|
||||
memcpy(buf, bd->ptr, buf_size);
|
||||
bd->ptr += buf_size;
|
||||
bd->size -= buf_size;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVIOContext *avio_ctx = NULL;
|
||||
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
|
||||
size_t buffer_size, avio_ctx_buffer_size = 4096;
|
||||
char *input_filename = NULL;
|
||||
int ret = 0;
|
||||
struct buffer_data bd = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "usage: %s input_file\n"
|
||||
"API example program to show how to read from a custom buffer "
|
||||
"accessed through AVIOContext.\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
input_filename = argv[1];
|
||||
|
||||
/* slurp file content into buffer */
|
||||
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
/* fill opaque structure used by the AVIOContext read callback */
|
||||
bd.ptr = buffer;
|
||||
bd.size = buffer_size;
|
||||
|
||||
if (!(fmt_ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
|
||||
if (!avio_ctx_buffer) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
av_freep(&avio_ctx_buffer);
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fmt_ctx->pb = avio_ctx;
|
||||
|
||||
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open input\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avformat_find_stream_info(fmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, input_filename, 0);
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx)
|
||||
av_freep(&avio_ctx->buffer);
|
||||
avio_context_free(&avio_ctx);
|
||||
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
134
doc/examples/avio_reading.c
Normal file
134
doc/examples/avio_reading.c
Normal file
@@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/file.h>
|
||||
|
||||
struct buffer_data {
|
||||
uint8_t *ptr;
|
||||
size_t size; ///< size left in the buffer
|
||||
};
|
||||
|
||||
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct buffer_data *bd = (struct buffer_data *)opaque;
|
||||
buf_size = FFMIN(buf_size, bd->size);
|
||||
|
||||
if (!buf_size)
|
||||
return AVERROR_EOF;
|
||||
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
|
||||
|
||||
/* copy internal buffer data to buf */
|
||||
memcpy(buf, bd->ptr, buf_size);
|
||||
bd->ptr += buf_size;
|
||||
bd->size -= buf_size;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVIOContext *avio_ctx = NULL;
|
||||
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
|
||||
size_t buffer_size, avio_ctx_buffer_size = 4096;
|
||||
char *input_filename = NULL;
|
||||
int ret = 0;
|
||||
struct buffer_data bd = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "usage: %s input_file\n"
|
||||
"API example program to show how to read from a custom buffer "
|
||||
"accessed through AVIOContext.\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
input_filename = argv[1];
|
||||
|
||||
/* slurp file content into buffer */
|
||||
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
/* fill opaque structure used by the AVIOContext read callback */
|
||||
bd.ptr = buffer;
|
||||
bd.size = buffer_size;
|
||||
|
||||
if (!(fmt_ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
|
||||
if (!avio_ctx_buffer) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fmt_ctx->pb = avio_ctx;
|
||||
|
||||
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open input\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avformat_find_stream_info(fmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, input_filename, 0);
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx)
|
||||
av_freep(&avio_ctx->buffer);
|
||||
avio_context_free(&avio_ctx);
|
||||
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec audio decoding API usage example
|
||||
* @example decode_audio.c
|
||||
* @file
|
||||
* audio decoding with libavcodec API example
|
||||
*
|
||||
* Decode data from an MP2 input file and generate a raw audio file to
|
||||
* be played with ffplay.
|
||||
* @example decode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -98,7 +97,7 @@ static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
|
||||
exit(1);
|
||||
}
|
||||
for (i = 0; i < frame->nb_samples; i++)
|
||||
for (ch = 0; ch < dec_ctx->ch_layout.nb_channels; ch++)
|
||||
for (ch = 0; ch < dec_ctx->channels; ch++)
|
||||
fwrite(frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
}
|
||||
@@ -128,10 +127,6 @@ int main(int argc, char **argv)
|
||||
outfilename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1); /* or proper cleanup and returning */
|
||||
}
|
||||
|
||||
/* find the MPEG audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
@@ -165,7 +160,7 @@ int main(int argc, char **argv)
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
fprintf(stderr, "Could not open %s\n", outfilename);
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
@@ -220,7 +215,7 @@ int main(int argc, char **argv)
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
}
|
||||
|
||||
n_channels = c->ch_layout.nb_channels;
|
||||
n_channels = c->channels;
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
|
||||
@@ -1,321 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio decoding and filtering usage example
|
||||
* @example decode_filter_audio.c
|
||||
*
|
||||
* Demux, decode and filter audio input file, generate a raw audio
|
||||
* file to be played with ffplay.
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
|
||||
/* create decoding context */
|
||||
dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
const AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const int out_sample_rate = 8000;
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
ret = snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=",
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt));
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, args + ret, sizeof(args) - ret);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "sample_formats", "s16",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "channel_layouts", "mono",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_array(buffersink_ctx, "samplerates", AV_OPT_SEARCH_CHILDREN,
|
||||
0, 1, AV_OPT_TYPE_INT, &out_sample_rate);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_channel_layout_describe(&outlink->ch_layout, args, sizeof(args));
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * frame->ch_layout.nb_channels;
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = av_packet_alloc();
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (!packet || !frame || !filt_frame) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
break;
|
||||
} else if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (ret >= 0) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
}
|
||||
if (ret == AVERROR_EOF) {
|
||||
/* signal EOF to the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, NULL, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while closing the filtergraph\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* pull remaining frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_packet_free(&packet);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
@@ -1,318 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example decode_filter_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24,transpose=cclock";
|
||||
/* other way:
|
||||
scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
|
||||
*/
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
const AVCodec *dec;
|
||||
int ret;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
|
||||
/* create decoding context */
|
||||
dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "pixel_formats", "gray8",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet;
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
packet = av_packet_alloc();
|
||||
if (!frame || !filt_frame || !packet) {
|
||||
fprintf(stderr, "Could not allocate frame or packet\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet->stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
break;
|
||||
} else if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
}
|
||||
if (ret == AVERROR_EOF) {
|
||||
/* signal EOF to the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, NULL, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while closing the filtergraph\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* pull remaining frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
av_packet_free(&packet);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
@@ -21,11 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec video decoding API usage example
|
||||
* @example decode_video.c *
|
||||
* @file
|
||||
* video decoding with libavcodec API example
|
||||
*
|
||||
* Read from an MPEG1 video file, decode frames, and generate PGM images as
|
||||
* output.
|
||||
* @example decode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -70,12 +69,12 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt,
|
||||
exit(1);
|
||||
}
|
||||
|
||||
printf("saving frame %3"PRId64"\n", dec_ctx->frame_num);
|
||||
printf("saving frame %3d\n", dec_ctx->frame_number);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder. no need to
|
||||
free it */
|
||||
snprintf(buf, sizeof(buf), "%s-%"PRId64, filename, dec_ctx->frame_num);
|
||||
snprintf(buf, sizeof(buf), "%s-%d", filename, dec_ctx->frame_number);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
}
|
||||
@@ -93,7 +92,6 @@ int main(int argc, char **argv)
|
||||
uint8_t *data;
|
||||
size_t data_size;
|
||||
int ret;
|
||||
int eof;
|
||||
AVPacket *pkt;
|
||||
|
||||
if (argc <= 2) {
|
||||
@@ -152,16 +150,15 @@ int main(int argc, char **argv)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
do {
|
||||
while (!feof(f)) {
|
||||
/* read raw data from the input file */
|
||||
data_size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (ferror(f))
|
||||
if (!data_size)
|
||||
break;
|
||||
eof = !data_size;
|
||||
|
||||
/* use the parser to split the data into frames */
|
||||
data = inbuf;
|
||||
while (data_size > 0 || eof) {
|
||||
while (data_size > 0) {
|
||||
ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
|
||||
data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
|
||||
if (ret < 0) {
|
||||
@@ -173,10 +170,8 @@ int main(int argc, char **argv)
|
||||
|
||||
if (pkt->size)
|
||||
decode(c, frame, pkt, outfilename);
|
||||
else if (eof)
|
||||
break;
|
||||
}
|
||||
} while (!eof);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
decode(c, frame, NULL, outfilename);
|
||||
|
||||
@@ -1,380 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat and libavcodec demuxing and decoding API usage example
|
||||
* @example demux_decode.c
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and decode audio
|
||||
* and video data. Write the output as raw audio and input files to be played by
|
||||
* ffplay.
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static int width, height;
|
||||
static enum AVPixelFormat pix_fmt;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket *pkt = NULL;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame n:%d\n",
|
||||
video_frame_count++);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy2(video_dst_data, video_dst_linesize,
|
||||
frame->data, frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
const AVCodec *dec = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
stream_index = ret;
|
||||
st = fmt_ctx->streams[stream_index];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec = avcodec_find_decoder(st->codecpar->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Allocate a codec context for the decoder */
|
||||
*dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!*dec_ctx) {
|
||||
fprintf(stderr, "Failed to allocate the %s codec context\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Copy codec parameters from input stream to output codec context */
|
||||
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
|
||||
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
*stream_idx = stream_index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
width = video_dec_ctx->width;
|
||||
height = video_dec_ctx->height;
|
||||
pix_fmt = video_dec_ctx->pix_fmt;
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
width, height, pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, pkt);
|
||||
else if (pkt->stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, pkt);
|
||||
av_packet_unref(pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(pix_fmt), width, height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->ch_layout.nb_channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avcodec_free_context(&audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_packet_free(&pkt);
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
||||
382
doc/examples/demuxing_decoding.c
Normal file
382
doc/examples/demuxing_decoding.c
Normal file
@@ -0,0 +1,382 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static int width, height;
|
||||
static enum AVPixelFormat pix_fmt;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket *pkt = NULL;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int output_video_frame(AVFrame *frame)
|
||||
{
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame n:%d coded_n:%d\n",
|
||||
video_frame_count++, frame->coded_picture_number);
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int output_audio_frame(AVFrame *frame)
|
||||
{
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame n:%d nb_samples:%d pts:%s\n",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
// submit the packet to the decoder
|
||||
ret = avcodec_send_packet(dec, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// get all the available frames from the decoder
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec, frame);
|
||||
if (ret < 0) {
|
||||
// those two return values are special and mean there is no output
|
||||
// frame available, but there were no errors during decoding
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
return 0;
|
||||
|
||||
fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
// write the frame data to output file
|
||||
if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
|
||||
ret = output_video_frame(frame);
|
||||
else
|
||||
ret = output_audio_frame(frame);
|
||||
|
||||
av_frame_unref(frame);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
stream_index = ret;
|
||||
st = fmt_ctx->streams[stream_index];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec = avcodec_find_decoder(st->codecpar->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Allocate a codec context for the decoder */
|
||||
*dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!*dec_ctx) {
|
||||
fprintf(stderr, "Failed to allocate the %s codec context\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Copy codec parameters from input stream to output codec context */
|
||||
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
|
||||
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Init the decoders */
|
||||
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
*stream_idx = stream_index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
width = video_dec_ctx->width;
|
||||
height = video_dec_ctx->height;
|
||||
pix_fmt = video_dec_ctx->pix_fmt;
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
width, height, pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate packet\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
// check if the packet belongs to a stream we are interested in, otherwise
|
||||
// skip it
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(video_dec_ctx, pkt);
|
||||
else if (pkt->stream_index == audio_stream_idx)
|
||||
ret = decode_packet(audio_dec_ctx, pkt);
|
||||
av_packet_unref(pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
/* flush the decoders */
|
||||
if (video_dec_ctx)
|
||||
decode_packet(video_dec_ctx, NULL);
|
||||
if (audio_dec_ctx)
|
||||
decode_packet(audio_dec_ctx, NULL);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(pix_fmt), width, height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avcodec_free_context(&audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_packet_free(&pkt);
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding audio API usage examples
|
||||
* @example encode_audio.c
|
||||
* @file
|
||||
* audio encoding with libavcodec API example.
|
||||
*
|
||||
* Generate a synthetic audio signal and encode it to an output MP2 file.
|
||||
* @example encode_audio.c
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
@@ -70,25 +70,26 @@ static int select_sample_rate(const AVCodec *codec)
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(const AVCodec *codec, AVChannelLayout *dst)
|
||||
static int select_channel_layout(const AVCodec *codec)
|
||||
{
|
||||
const AVChannelLayout *p, *best_ch_layout;
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->ch_layouts)
|
||||
return av_channel_layout_copy(dst, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->ch_layouts;
|
||||
while (p->nb_channels) {
|
||||
int nb_channels = p->nb_channels;
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = p;
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return av_channel_layout_copy(dst, best_ch_layout);
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt,
|
||||
@@ -163,9 +164,8 @@ int main(int argc, char **argv)
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
ret = select_channel_layout(codec, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
@@ -195,9 +195,7 @@ int main(int argc, char **argv)
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
ret = av_channel_layout_copy(&frame->ch_layout, &c->ch_layout);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* allocate the data buffers */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
@@ -220,7 +218,7 @@ int main(int argc, char **argv)
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->ch_layout.nb_channels; k++)
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
|
||||
@@ -21,10 +21,10 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec encoding video API usage example
|
||||
* @example encode_video.c
|
||||
* @file
|
||||
* video encoding with libavcodec API example
|
||||
*
|
||||
* Generate synthetic video data and encode it to an output file.
|
||||
* @example encode_video.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
@@ -155,25 +155,12 @@ int main(int argc, char **argv)
|
||||
for (i = 0; i < 25; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
/* Make sure the frame data is writable.
|
||||
On the first round, the frame is fresh from av_frame_get_buffer()
|
||||
and therefore we know it is writable.
|
||||
But on the next rounds, encode() will have called
|
||||
avcodec_send_frame(), and the codec may have kept a reference to
|
||||
the frame in its internal structures, that makes the frame
|
||||
unwritable.
|
||||
av_frame_make_writable() checks that and allocates a new buffer
|
||||
for the frame only if necessary.
|
||||
*/
|
||||
/* make sure the frame data is writable */
|
||||
ret = av_frame_make_writable(frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* Prepare a dummy image.
|
||||
In real code, this is where you would have your own logic for
|
||||
filling the frame. FFmpeg does not care what you put in the
|
||||
frame.
|
||||
*/
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
@@ -198,12 +185,7 @@ int main(int argc, char **argv)
|
||||
/* flush the encoder */
|
||||
encode(c, NULL, pkt, f);
|
||||
|
||||
/* Add sequence end code to have a real MPEG file.
|
||||
It makes only sense because this tiny examples writes packets
|
||||
directly. This is called "elementary stream" and only works for some
|
||||
codecs. To create a valid file, you usually need to write packets
|
||||
into a proper file format or protocol; see mux.c.
|
||||
*/
|
||||
/* add sequence end code to have a real MPEG file */
|
||||
if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO)
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
@@ -21,16 +21,7 @@
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavcodec motion vectors extraction API usage example
|
||||
* @example extract_mvs.c
|
||||
*
|
||||
* Read from input file, decode video stream and print a motion vectors
|
||||
* representation to stdout.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
@@ -69,11 +60,10 @@ static int decode_packet(const AVPacket *pkt)
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64",%4d,%4d,%4d\n",
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags,
|
||||
mv->motion_x, mv->motion_y, mv->motion_scale);
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
@@ -88,7 +78,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
const AVCodec *dec = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, &dec, 0);
|
||||
@@ -114,9 +104,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
ret = avcodec_open2(dec_ctx, dec, &opts);
|
||||
av_dict_free(&opts);
|
||||
if (ret < 0) {
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
@@ -133,7 +121,7 @@ static int open_codec_context(AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0;
|
||||
AVPacket *pkt = NULL;
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
@@ -168,20 +156,13 @@ int main(int argc, char **argv)
|
||||
goto end;
|
||||
}
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags,motion_x,motion_y,motion_scale\n");
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, pkt) >= 0) {
|
||||
if (pkt->stream_index == video_stream_idx)
|
||||
ret = decode_packet(pkt);
|
||||
av_packet_unref(pkt);
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
if (pkt.stream_index == video_stream_idx)
|
||||
ret = decode_packet(&pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
@@ -193,6 +174,5 @@ end:
|
||||
avcodec_free_context(&video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_packet_free(&pkt);
|
||||
return ret < 0;
|
||||
}
|
||||
|
||||
@@ -19,11 +19,13 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavfilter audio filtering API usage example
|
||||
* @example filter_audio.c
|
||||
* @file
|
||||
* libavfilter API usage example.
|
||||
*
|
||||
* This example will generate a sine wave audio, pass it through a simple filter
|
||||
* chain, and then compute the MD5 checksum of the output data.
|
||||
* @example filter_audio.c
|
||||
* This example will generate a sine wave audio,
|
||||
* pass it through a simple filter chain, and then compute the MD5 checksum of
|
||||
* the output data.
|
||||
*
|
||||
* The filter chain it uses is:
|
||||
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
|
||||
@@ -41,19 +43,19 @@
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/md5.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/md5.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#include <libavfilter/avfilter.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
#include "libavfilter/buffersrc.h"
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT (AVChannelLayout)AV_CHANNEL_LAYOUT_5POINT0
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
@@ -98,7 +100,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_channel_layout_describe(&INPUT_CHANNEL_LAYOUT, ch_layout, sizeof(ch_layout));
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
@@ -152,8 +154,9 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=stereo",
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100);
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
@@ -212,7 +215,7 @@ static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = frame->ch_layout.nb_channels;
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
@@ -245,7 +248,7 @@ static int get_input(AVFrame *frame, int frame_num)
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
av_channel_layout_copy(&frame->ch_layout, &INPUT_CHANNEL_LAYOUT);
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
@@ -270,6 +273,7 @@ int main(int argc, char *argv[])
|
||||
AVFilterGraph *graph;
|
||||
AVFilterContext *src, *sink;
|
||||
AVFrame *frame;
|
||||
uint8_t errstr[1024];
|
||||
float duration;
|
||||
int err, nb_frames, i;
|
||||
|
||||
@@ -294,7 +298,6 @@ int main(int argc, char *argv[])
|
||||
|
||||
md5 = av_md5_alloc();
|
||||
if (!md5) {
|
||||
av_frame_free(&frame);
|
||||
fprintf(stderr, "Error allocating the MD5 context\n");
|
||||
return 1;
|
||||
}
|
||||
@@ -302,10 +305,8 @@ int main(int argc, char *argv[])
|
||||
/* Set up the filtergraph. */
|
||||
err = init_filter_graph(&graph, &src, &sink);
|
||||
if (err < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
fprintf(stderr, "Unable to init filter graph:");
|
||||
return 1;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* the main filtering loop */
|
||||
@@ -356,10 +357,7 @@ int main(int argc, char *argv[])
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
avfilter_graph_free(&graph);
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
|
||||
fprintf(stderr, "%s\n", av_err2str(err));
|
||||
av_strerror(err, errstr, sizeof(errstr));
|
||||
fprintf(stderr, "%s\n", errstr);
|
||||
return 1;
|
||||
}
|
||||
|
||||
292
doc/examples/filtering_audio.c
Normal file
292
doc/examples/filtering_audio.c
Normal file
@@ -0,0 +1,292 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
|
||||
/* create decoding context */
|
||||
dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[audio_stream_index]->codecpar);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
const AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
break;
|
||||
} else if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (ret >= 0) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
291
doc/examples/filtering_video.c
Normal file
291
doc/examples/filtering_video.c
Normal file
@@ -0,0 +1,291 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24,transpose=cclock";
|
||||
/* other way:
|
||||
scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be input output pads respectively
|
||||
*/
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
|
||||
/* create decoding context */
|
||||
dec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!dec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
avcodec_parameters_to_context(dec_ctx, fmt_ctx->streams[video_stream_index]->codecpar);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
const AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
const AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
|
||||
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame;
|
||||
AVFrame *filt_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
filt_frame = av_frame_alloc();
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
ret = avcodec_send_packet(dec_ctx, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while sending a packet to the decoder\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(dec_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
break;
|
||||
} else if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while receiving a frame from the decoder\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame->pts = frame->best_effort_timestamp;
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_free_context(&dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
||||
156
doc/examples/http_multiclient.c
Normal file
156
doc/examples/http_multiclient.c
Normal file
@@ -0,0 +1,156 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Stephan Holljes
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat multi-client network API usage example.
|
||||
*
|
||||
* @example http_multiclient.c
|
||||
* This example will serve a file without decoding or demuxing it over http.
|
||||
* Multiple clients can connect and will receive the same file.
|
||||
*/
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <unistd.h>
|
||||
|
||||
static void process_client(AVIOContext *client, const char *in_uri)
|
||||
{
|
||||
AVIOContext *input = NULL;
|
||||
uint8_t buf[1024];
|
||||
int ret, n, reply_code;
|
||||
uint8_t *resource = NULL;
|
||||
while ((ret = avio_handshake(client)) > 0) {
|
||||
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
|
||||
// Checking for strlen(resource) is necessary because av_opt_get()
|
||||
// may return an empty string.
|
||||
if (resource && strlen(resource))
|
||||
break;
|
||||
av_freep(&resource);
|
||||
}
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
|
||||
if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
|
||||
reply_code = 200;
|
||||
} else {
|
||||
reply_code = AVERROR_HTTP_NOT_FOUND;
|
||||
}
|
||||
if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
|
||||
av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
|
||||
|
||||
while ((ret = avio_handshake(client)) > 0);
|
||||
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
fprintf(stderr, "Handshake performed.\n");
|
||||
if (reply_code != 200)
|
||||
goto end;
|
||||
fprintf(stderr, "Opening input file.\n");
|
||||
if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
|
||||
av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
for(;;) {
|
||||
n = avio_read(input, buf, sizeof(buf));
|
||||
if (n < 0) {
|
||||
if (n == AVERROR_EOF)
|
||||
break;
|
||||
av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
|
||||
av_err2str(n));
|
||||
break;
|
||||
}
|
||||
avio_write(client, buf, n);
|
||||
avio_flush(client);
|
||||
}
|
||||
end:
|
||||
fprintf(stderr, "Flushing client\n");
|
||||
avio_flush(client);
|
||||
fprintf(stderr, "Closing client\n");
|
||||
avio_close(client);
|
||||
fprintf(stderr, "Closing input\n");
|
||||
avio_close(input);
|
||||
av_freep(&resource);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVDictionary *options = NULL;
|
||||
AVIOContext *client = NULL, *server = NULL;
|
||||
const char *in_uri, *out_uri;
|
||||
int ret, pid;
|
||||
av_log_set_level(AV_LOG_TRACE);
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input http://hostname[:port]\n"
|
||||
"API example program to serve http to multiple clients.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_uri = argv[1];
|
||||
out_uri = argv[2];
|
||||
|
||||
avformat_network_init();
|
||||
|
||||
if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
|
||||
fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
|
||||
fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
fprintf(stderr, "Entering main loop.\n");
|
||||
for(;;) {
|
||||
if ((ret = avio_accept(server, &client)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Accepted client, forking process.\n");
|
||||
// XXX: Since we don't reap our children and don't ignore signals
|
||||
// this produces zombie processes.
|
||||
pid = fork();
|
||||
if (pid < 0) {
|
||||
perror("Fork failed");
|
||||
ret = AVERROR(errno);
|
||||
goto end;
|
||||
}
|
||||
if (pid == 0) {
|
||||
fprintf(stderr, "In child.\n");
|
||||
process_client(client, in_uri);
|
||||
avio_close(server);
|
||||
exit(0);
|
||||
}
|
||||
if (pid > 0)
|
||||
avio_close(client);
|
||||
}
|
||||
end:
|
||||
avio_close(server);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
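To exercise the server above end to end, any HTTP client works; the sketch below uses the same avio layer. The URL, port and file names are illustrative only and assume the server was started as "./http_multiclient input.mkv http://localhost:8080".

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>

/* Hypothetical companion client: fetch the served file and save a copy. */
int main(void)
{
    AVIOContext *in = NULL;
    unsigned char buf[1024];
    int n;
    FILE *out = fopen("copy.mkv", "wb");

    avformat_network_init();
    if (!out || avio_open2(&in, "http://localhost:8080/input.mkv",
                           AVIO_FLAG_READ, NULL, NULL) < 0)
        return 1;
    while ((n = avio_read(in, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, n, out);
    avio_closep(&in);
    fclose(out);
    return 0;
}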
||||
@@ -24,18 +24,18 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file HW-accelerated decoding API usage example
|
||||
* @example hw_decode.c
|
||||
* @file
|
||||
* HW-Accelerated decoding example.
|
||||
*
|
||||
* Perform HW-accelerated decoding with output frames from HW video
|
||||
* surfaces.
|
||||
* @example hw_decode.c
|
||||
* This example shows how to do HW-accelerated decoding with output
|
||||
* frames from the HW video surfaces.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/opt.h>
|
||||
@@ -152,8 +152,8 @@ int main(int argc, char *argv[])
|
||||
int video_stream, ret;
|
||||
AVStream *video = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVPacket *packet = NULL;
|
||||
AVCodec *decoder = NULL;
|
||||
AVPacket packet;
|
||||
enum AVHWDeviceType type;
|
||||
int i;
|
||||
|
||||
@@ -172,12 +172,6 @@ int main(int argc, char *argv[])
|
||||
return -1;
|
||||
}
|
||||
|
||||
packet = av_packet_alloc();
|
||||
if (!packet) {
|
||||
fprintf(stderr, "Failed to allocate AVPacket\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
if (avformat_open_input(&input_ctx, argv[2], NULL, NULL) != 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s'\n", argv[2]);
|
||||
@@ -233,21 +227,23 @@ int main(int argc, char *argv[])
|
||||
|
||||
/* actual decoding and dump the raw data */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(input_ctx, packet)) < 0)
|
||||
if ((ret = av_read_frame(input_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == packet->stream_index)
|
||||
ret = decode_write(decoder_ctx, packet);
|
||||
if (video_stream == packet.stream_index)
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
|
||||
av_packet_unref(packet);
|
||||
av_packet_unref(&packet);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_write(decoder_ctx, NULL);
|
||||
packet.data = NULL;
|
||||
packet.size = 0;
|
||||
ret = decode_write(decoder_ctx, &packet);
|
||||
av_packet_unref(&packet);
|
||||
|
||||
if (output_file)
|
||||
fclose(output_file);
|
||||
av_packet_free(&packet);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avformat_close_input(&input_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
|
||||
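Because the +/- gutter was lost in this view, the packet-handling hunk above is hard to read; spelled out, the stack-packet loop it restores amounts to the helper below (decode_write() is the function defined earlier in hw_decode.c; the wrapper itself is only a sketch, not part of the diff):

static int read_and_decode_all(AVFormatContext *input_ctx,
                               AVCodecContext *decoder_ctx, int video_stream)
{
    AVPacket packet;
    int ret = 0;

    while (ret >= 0) {
        if ((ret = av_read_frame(input_ctx, &packet)) < 0)
            break;
        if (video_stream == packet.stream_index)
            ret = decode_write(decoder_ctx, &packet);
        av_packet_unref(&packet);
    }

    /* flush: an empty packet tells the decoder that no more data follows */
    packet.data = NULL;
    packet.size = 0;
    ret = decode_write(decoder_ctx, &packet);
    av_packet_unref(&packet);
    return ret;
}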
60
doc/examples/metadata.c
Normal file
@@ -0,0 +1,60 @@
/*
 * Copyright (c) 2011 Reinhard Tartler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example metadata.c
 */

#include <stdio.h>

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main (int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;
    int ret;

    if (argc != 2) {
        printf("usage: %s <input_file>\n"
               "example program to demonstrate the use of the libavformat metadata API.\n"
               "\n", argv[0]);
        return 1;
    }

    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
        return ret;

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    return 0;
}
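metadata.c only reads tags; writing them goes through the same AVDictionary API on the output context before the header is written. A hypothetical helper, assuming an output AVFormatContext set up as in the muxing example further down:

#include <libavformat/avformat.h>

/* Illustration only: attach global tags to an output file; they are
 * written to the container by avformat_write_header(). */
static void set_output_tags(AVFormatContext *oc)
{
    av_dict_set(&oc->metadata, "title",  "Some title", 0);
    av_dict_set(&oc->metadata, "artist", "Some artist", 0);
}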
@@ -1,643 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat muxing API usage example
|
||||
* @example mux.c
|
||||
*
|
||||
* Generate a synthetic audio and video signal and mux them to a media file in
|
||||
* any supported libavformat format. The default codecs are used.
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
AVCodecContext *enc;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
AVPacket *tmp_pkt;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame, AVPacket *pkt)
|
||||
{
|
||||
int ret;
|
||||
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(c, pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, c->time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
const AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->tmp_pkt = av_packet_alloc();
|
||||
if (!ost->tmp_pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, NULL);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = avcodec_alloc_context3(*codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not alloc an encoding context\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->enc = c;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
av_channel_layout_copy(&c->ch_layout, &(AVChannelLayout)AV_CHANNEL_LAYOUT_STEREO);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B-frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
const AVChannelLayout *channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
av_channel_layout_copy(&frame->ch_layout, channel_layout);
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
if (av_frame_get_buffer(frame, 0) < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, &c->ch_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, &c->ch_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not copy the stream parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout (ost->swr_ctx, "in_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_chlayout (ost->swr_ctx, "out_chlayout", &c->ch_layout, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->enc->ch_layout.nb_channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int dst_nb_samples;
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples;
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_frame(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame)
|
||||
return NULL;
|
||||
|
||||
frame->format = pix_fmt;
|
||||
frame->width = width;
|
||||
frame->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
||||
OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->enc;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a reusable frame */
|
||||
ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_frame(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not copy the stream parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->enc;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally; make sure we do not overwrite it here */
|
||||
if (av_frame_make_writable(ost->frame) < 0)
|
||||
exit(1);
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
|
||||
ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
|
||||
ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost), ost->tmp_pkt);
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_free_context(&ost->enc);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
av_packet_free(&ost->tmp_pkt);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const AVOutputFormat *fmt;
|
||||
const char *filename;
|
||||
AVFormatContext *oc;
|
||||
const AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
int i;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
for (i = 2; i+1 < argc; i+=2) {
|
||||
if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
|
||||
av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
|
||||
audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
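Both versions of this example pick the next stream to encode with av_compare_ts(); a small worked call makes the interleaving rule concrete (the timestamps below are made up for illustration):

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* Video pts 50 in 1/25 s units and audio pts 88200 in 1/44100 s units
     * are both 2.0 s, so av_compare_ts() returns 0 and the "<= 0" test in
     * the write loop picks the video stream. */
    int cmp = av_compare_ts(50,    (AVRational){ 1, 25 },
                            88200, (AVRational){ 1, 44100 });
    printf("av_compare_ts -> %d\n", cmp);
    return 0;
}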
649
doc/examples/muxing.c
Normal file
@@ -0,0 +1,649 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Output a media file in any supported libavformat format. The default
|
||||
* codecs are used.
|
||||
* @example muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
AVCodecContext *enc;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, AVCodecContext *c,
|
||||
AVStream *st, AVFrame *frame)
|
||||
{
|
||||
int ret;
|
||||
|
||||
// send the frame to the encoder
|
||||
ret = avcodec_send_frame(c, frame);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error sending a frame to the encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
AVPacket pkt = { 0 };
|
||||
|
||||
ret = avcodec_receive_packet(c, &pkt);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
|
||||
pkt.stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, &pkt);
|
||||
ret = av_interleaved_write_frame(fmt_ctx, &pkt);
|
||||
av_packet_unref(&pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return ret == AVERROR_EOF ? 1 : 0;
|
||||
}
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, NULL);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = avcodec_alloc_context3(*codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not alloc an encoding context\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->enc = c;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B-frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not copy the stream parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->enc->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
for (j = 0; j <frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->enc->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int dst_nb_samples;
|
||||
|
||||
c = ost->enc;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
return write_frame(oc, c, ost->st, frame);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->enc;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* copy the stream parameters to the muxer */
|
||||
ret = avcodec_parameters_from_context(ost->st->codecpar, c);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not copy the stream parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->enc;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, c->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
|
||||
return NULL;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally; make sure we do not overwrite it here */
|
||||
if (av_frame_make_writable(ost->frame) < 0)
|
||||
exit(1);
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
|
||||
ost->tmp_frame->linesize, 0, c->height, ost->frame->data,
|
||||
ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
return write_frame(oc, ost->enc, ost->st, get_video_frame(ost));
|
||||
}
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_free_context(&ost->enc);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
int i;
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
for (i = 2; i+1 < argc; i+=2) {
|
||||
if (!strcmp(argv[i], "-flags") || !strcmp(argv[i], "-fflags"))
|
||||
av_dict_set(&opt, argv[i]+1, argv[i+1], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
|
||||
audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
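The audio setup is where muxing.c (above) and mux.c (removed earlier in this diff) differ most: the old channel_layout/channels fields versus the newer AVChannelLayout API. A sketch of the old-API form for comparison; the AVChannelLayout equivalent is the av_channel_layout_copy() call in mux.c:

#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>

/* Old-API stereo setup as used by muxing.c above; these fields were
 * deprecated and later removed from AVCodecContext in newer libavcodec. */
static void set_stereo_old_api(AVCodecContext *c)
{
    c->channel_layout = AV_CH_LAYOUT_STEREO;
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);
}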
@@ -1,238 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Anton Khirnov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated H.264 decoding API usage example
|
||||
* @example qsv_decode.c
|
||||
*
|
||||
* Perform QSV-accelerated H.264 decoding with output frames in the
|
||||
* GPU video surfaces, write the decoded frames to an output file.
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
|
||||
#include <libavutil/buffer.h>
|
||||
#include <libavutil/error.h>
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/hwcontext_qsv.h>
|
||||
#include <libavutil/mem.h>
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVFrame *sw_frame,
|
||||
AVPacket *pkt, AVIOContext *output_ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = avcodec_send_packet(decoder_ctx, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
int i, j;
|
||||
|
||||
ret = avcodec_receive_frame(decoder_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* A real program would do something useful with the decoded frame here.
|
||||
* We just retrieve the raw data and write it to a file, which is rather
|
||||
* useless but pedagogic. */
|
||||
ret = av_hwframe_transfer_data(sw_frame, frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error transferring the data to system memory\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sw_frame->data) && sw_frame->data[i]; i++)
|
||||
for (j = 0; j < (sw_frame->height >> (i > 0)); j++)
|
||||
avio_write(output_ctx, sw_frame->data[i] + j * sw_frame->linesize[i], sw_frame->width);
|
||||
|
||||
fail:
|
||||
av_frame_unref(sw_frame);
|
||||
av_frame_unref(frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_ctx = NULL;
|
||||
AVStream *video_st = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket *pkt = NULL;
|
||||
AVFrame *frame = NULL, *sw_frame = NULL;
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i;
|
||||
|
||||
AVBufferRef *device_ref = NULL;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* find the first H.264 video stream */
|
||||
for (i = 0; i < input_ctx->nb_streams; i++) {
|
||||
AVStream *st = input_ctx->streams[i];
|
||||
|
||||
if (st->codecpar->codec_id == AV_CODEC_ID_H264 && !video_st)
|
||||
video_st = st;
|
||||
else
|
||||
st->discard = AVDISCARD_ALL;
|
||||
}
|
||||
if (!video_st) {
|
||||
fprintf(stderr, "No H.264 video stream in the input file\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the hardware device */
|
||||
ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
"auto", NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open the hardware device\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* initialize the decoder */
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
if (!decoder) {
|
||||
fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
decoder_ctx = avcodec_alloc_context3(decoder);
|
||||
if (!decoder_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
decoder_ctx->codec_id = AV_CODEC_ID_H264;
|
||||
if (video_st->codecpar->extradata_size) {
|
||||
decoder_ctx->extradata = av_mallocz(video_st->codecpar->extradata_size +
|
||||
AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!decoder_ctx->extradata) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
memcpy(decoder_ctx->extradata, video_st->codecpar->extradata,
|
||||
video_st->codecpar->extradata_size);
|
||||
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
|
||||
}
|
||||
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(device_ref);
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the decoder: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the output stream */
|
||||
ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the output context: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
sw_frame = av_frame_alloc();
|
||||
pkt = av_packet_alloc();
|
||||
if (!frame || !sw_frame || !pkt) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt->stream_index == video_st->index)
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, pkt, output_ctx);
|
||||
|
||||
av_packet_unref(pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0)
|
||||
fprintf(stderr, "%s\n", av_err2str(ret));
|
||||
|
||||
avformat_close_input(&input_ctx);
|
||||
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&sw_frame);
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
av_buffer_unref(&device_ref);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1,436 +0,0 @@
|
||||
/*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file Intel QSV-accelerated video transcoding API usage example
|
||||
* @example qsv_transcode.c
|
||||
*
|
||||
* Perform QSV-accelerated transcoding and show to dynamically change
|
||||
* encoder's options.
|
||||
*
|
||||
* Usage: qsv_transcode input_stream codec output_stream initial option
|
||||
* { frame_number new_option }
|
||||
* e.g: - qsv_transcode input.mp4 h264_qsv output_h264.mp4 "g 60"
|
||||
* - qsv_transcode input.mp4 hevc_qsv output_hevc.mp4 "g 60 async_depth 1"
|
||||
* 100 "g 120"
|
||||
* (initialize codec with gop_size 60 and change it to 120 after 100
|
||||
* frames)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
|
||||
#include <libavutil/hwcontext.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
static AVBufferRef *hw_device_ctx = NULL;
|
||||
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
|
||||
static int video_stream = -1;
|
||||
|
||||
typedef struct DynamicSetting {
|
||||
int frame_number;
|
||||
char* optstr;
|
||||
} DynamicSetting;
|
||||
static DynamicSetting *dynamic_setting;
|
||||
static int setting_number;
|
||||
static int current_setting_number;
|
||||
|
||||
static int str_to_dict(char* optstr, AVDictionary **opt)
|
||||
{
|
||||
char *key, *value;
|
||||
if (strlen(optstr) == 0)
|
||||
return 0;
|
||||
key = strtok(optstr, " ");
|
||||
if (key == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
do {
|
||||
key = strtok(NULL, " ");
|
||||
if (key == NULL)
|
||||
return 0;
|
||||
value = strtok(NULL, " ");
|
||||
if (value == NULL)
|
||||
return AVERROR(EINVAL);
|
||||
av_dict_set(opt, key, value, 0);
|
||||
} while(1);
|
||||
}
|
||||
|
||||
static int dynamic_set_parameter(AVCodecContext *avctx)
|
||||
{
|
||||
AVDictionary *opts = NULL;
|
||||
int ret = 0;
|
||||
static int frame_number = 0;
|
||||
frame_number++;
|
||||
if (current_setting_number < setting_number &&
|
||||
frame_number == dynamic_setting[current_setting_number].frame_number) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
ret = str_to_dict(dynamic_setting[current_setting_number++].optstr, &opts);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "The dynamic parameter is wrong\n");
|
||||
goto fail;
|
||||
}
|
||||
/* Set common option. The dictionary will be freed and replaced
|
||||
* by a new one containing all options not found in common option list.
|
||||
* Then this new dictionary is used to set private option. */
|
||||
if ((ret = av_opt_set_dict(avctx, &opts)) < 0)
|
||||
goto fail;
|
||||
/* Set codec specific option */
|
||||
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
|
||||
goto fail;
|
||||
/* There is no "framerate" option in common option list. Use "-r" to set
|
||||
* framerate, which is compatible with ffmpeg commandline. The video is
|
||||
* assumed to be average frame rate, so set time_base to 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
avctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
}
|
||||
fail:
|
||||
av_dict_free(&opts);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int open_input_file(char *filename)
|
||||
{
|
||||
int ret;
|
||||
const AVCodec *decoder = NULL;
|
||||
AVStream *video = NULL;
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
|
||||
filename, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot find a video stream in the input file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
video_stream = ret;
|
||||
video = ifmt_ctx->streams[video_stream];
|
||||
|
||||
switch(video->codecpar->codec_id) {
|
||||
case AV_CODEC_ID_H264:
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_HEVC:
|
||||
decoder = avcodec_find_decoder_by_name("hevc_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP9:
|
||||
decoder = avcodec_find_decoder_by_name("vp9_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_VP8:
|
||||
decoder = avcodec_find_decoder_by_name("vp8_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_AV1:
|
||||
decoder = avcodec_find_decoder_by_name("av1_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MPEG2VIDEO:
|
||||
decoder = avcodec_find_decoder_by_name("mpeg2_qsv");
|
||||
break;
|
||||
case AV_CODEC_ID_MJPEG:
|
||||
decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Codec is not supported by qsv\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
|
||||
fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
decoder_ctx->framerate = av_guess_frame_rate(ifmt_ctx, video, NULL);
|
||||
|
||||
decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
|
||||
if (!decoder_ctx->hw_device_ctx) {
|
||||
fprintf(stderr, "A hardware device reference create failed.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
decoder_ctx->get_format = get_format;
|
||||
decoder_ctx->pkt_timebase = video->time_base;
|
||||
if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
|
||||
fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
av_packet_unref(enc_pkt);
|
||||
|
||||
if((ret = dynamic_set_parameter(encoder_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to set dynamic parameter. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
|
||||
fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
while (1) {
|
||||
if (ret = avcodec_receive_packet(encoder_ctx, enc_pkt))
|
||||
break;
|
||||
enc_pkt->stream_index = 0;
|
||||
av_packet_rescale_ts(enc_pkt, encoder_ctx->time_base,
|
||||
ofmt_ctx->streams[0]->time_base);
|
||||
if ((ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt)) < 0) {
|
||||
fprintf(stderr, "Error during writing data to output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
end:
|
||||
if (ret == AVERROR_EOF)
|
||||
return 0;
|
||||
ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
|
||||
{
|
||||
AVFrame *frame;
|
||||
int ret = 0;
|
||||
|
||||
ret = avcodec_send_packet(decoder_ctx, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding. Error code: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
if (!(frame = av_frame_alloc()))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = avcodec_receive_frame(decoder_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
|
||||
av_frame_free(&frame);
|
||||
return 0;
|
||||
} else if (ret < 0) {
|
||||
fprintf(stderr, "Error while decoding. Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
AVDictionaryEntry *e = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
AVStream *ost;
|
||||
/* we need to ref hw_frames_ctx of decoder to initialize encoder's codec.
|
||||
Only after we get a decoded frame, can we obtain its hw_frames_ctx */
|
||||
encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
|
||||
if (!encoder_ctx->hw_frames_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
/* set AVCodecContext Parameters for encoder, here we keep them stay
|
||||
* the same as decoder.
|
||||
*/
|
||||
encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
|
||||
encoder_ctx->pix_fmt = AV_PIX_FMT_QSV;
|
||||
encoder_ctx->width = decoder_ctx->width;
|
||||
encoder_ctx->height = decoder_ctx->height;
|
||||
if ((ret = str_to_dict(optstr, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to set encoding parameter.\n");
|
||||
goto fail;
|
||||
}
|
||||
/* There is no "framerate" option in common option list. Use "-r" to
|
||||
* set framerate, which is compatible with ffmpeg commandline. The
|
||||
* video is assumed to be average frame rate, so set time_base to
|
||||
* 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
if (e) {
|
||||
encoder_ctx->framerate = av_d2q(atof(e->value), INT_MAX);
|
||||
encoder_ctx->time_base = av_inv_q(encoder_ctx->framerate);
|
||||
}
|
||||
if ((ret = avcodec_open2(encoder_ctx, enc_codec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open encode codec. Error code: %s\n",
|
||||
av_err2str(ret));
|
||||
av_dict_free(&opts);
|
||||
goto fail;
|
||||
}
|
||||
av_dict_free(&opts);
|
||||
|
||||
if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
|
||||
fprintf(stderr, "Failed to allocate stream for output format.\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ost->time_base = encoder_ctx->time_base;
|
||||
ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy the stream parameters. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* write the stream header */
|
||||
if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
|
||||
fprintf(stderr, "Error while writing stream header. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
frame->pts = av_rescale_q(frame->pts, decoder_ctx->pkt_timebase,
|
||||
encoder_ctx->time_base);
|
||||
if ((ret = encode_write(pkt, frame)) < 0)
|
||||
fprintf(stderr, "Error during encoding and writing.\n");
|
||||
|
||||
fail:
|
||||
av_frame_free(&frame);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVCodec *enc_codec;
|
||||
int ret = 0;
|
||||
AVPacket *dec_pkt = NULL;
|
||||
|
||||
if (argc < 5 || (argc - 5) % 2) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <encoder> <output file>"
|
||||
" <\"encoding option set 0\"> [<frame_number> <\"encoding options set 1\">]...\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
setting_number = (argc - 5) / 2;
|
||||
dynamic_setting = av_malloc(setting_number * sizeof(*dynamic_setting));
|
||||
current_setting_number = 0;
|
||||
for (int i = 0; i < setting_number; i++) {
|
||||
dynamic_setting[i].frame_number = atoi(argv[i*2 + 5]);
|
||||
dynamic_setting[i].optstr = argv[i*2 + 6];
|
||||
}
|
||||
|
||||
ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to create a QSV device. Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
dec_pkt = av_packet_alloc();
|
||||
if (!dec_pkt) {
|
||||
fprintf(stderr, "Failed to allocate decode packet\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
|
||||
if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
|
||||
fprintf(stderr, "Could not find encoder '%s'\n", argv[2]);
|
||||
ret = -1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
|
||||
fprintf(stderr, "Failed to deduce output format from file extension. Error code: "
|
||||
"%s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open output file. "
|
||||
"Error code: %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* read all packets and only transcoding video */
|
||||
while (ret >= 0) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
|
||||
break;
|
||||
|
||||
if (video_stream == dec_pkt->stream_index)
|
||||
ret = dec_enc(dec_pkt, enc_codec, argv[4]);
|
||||
|
||||
av_packet_unref(dec_pkt);
|
||||
}
|
||||
|
||||
/* flush decoder */
|
||||
av_packet_unref(dec_pkt);
|
||||
if ((ret = dec_enc(dec_pkt, enc_codec, argv[4])) < 0) {
|
||||
fprintf(stderr, "Failed to flush decoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
if ((ret = encode_write(dec_pkt, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to flush encoder %s\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* write the trailer for output stream */
|
||||
if ((ret = av_write_trailer(ofmt_ctx)) < 0)
|
||||
fprintf(stderr, "Failed to write trailer %s\n", av_err2str(ret));
|
||||
|
||||
end:
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
avformat_close_input(&ofmt_ctx);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
avcodec_free_context(&encoder_ctx);
|
||||
av_buffer_unref(&hw_device_ctx);
|
||||
av_packet_free(&dec_pkt);
|
||||
av_freep(&dynamic_setting);
|
||||
return ret;
|
||||
}
|
||||
271
doc/examples/qsvdec.c
Normal file
271
doc/examples/qsvdec.c
Normal file
@@ -0,0 +1,271 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Anton Khirnov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the GPU video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavutil/buffer.h"
|
||||
#include "libavutil/error.h"
|
||||
#include "libavutil/hwcontext.h"
|
||||
#include "libavutil/hwcontext_qsv.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
AVBufferRef *hw_device_ref;
|
||||
} DecodeContext;
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVHWFramesContext *frames_ctx;
|
||||
AVQSVFramesContext *frames_hwctx;
|
||||
int ret;
|
||||
|
||||
/* create a pool of surfaces to be used by the decoder */
|
||||
avctx->hw_frames_ctx = av_hwframe_ctx_alloc(decode->hw_device_ref);
|
||||
if (!avctx->hw_frames_ctx)
|
||||
return AV_PIX_FMT_NONE;
|
||||
frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
|
||||
frames_hwctx = frames_ctx->hwctx;
|
||||
|
||||
frames_ctx->format = AV_PIX_FMT_QSV;
|
||||
frames_ctx->sw_format = avctx->sw_pix_fmt;
|
||||
frames_ctx->width = FFALIGN(avctx->coded_width, 32);
|
||||
frames_ctx->height = FFALIGN(avctx->coded_height, 32);
|
||||
frames_ctx->initial_pool_size = 32;
|
||||
|
||||
frames_hwctx->frame_type = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
|
||||
|
||||
ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
|
||||
if (ret < 0)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVFrame *sw_frame,
|
||||
AVPacket *pkt, AVIOContext *output_ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
ret = avcodec_send_packet(decoder_ctx, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
int i, j;
|
||||
|
||||
ret = avcodec_receive_frame(decoder_ctx, frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* A real program would do something useful with the decoded frame here.
|
||||
* We just retrieve the raw data and write it to a file, which is rather
|
||||
* useless but pedagogic. */
|
||||
ret = av_hwframe_transfer_data(sw_frame, frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error transferring the data to system memory\n");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sw_frame->data) && sw_frame->data[i]; i++)
|
||||
for (j = 0; j < (sw_frame->height >> (i > 0)); j++)
|
||||
avio_write(output_ctx, sw_frame->data[i] + j * sw_frame->linesize[i], sw_frame->width);
|
||||
|
||||
fail:
|
||||
av_frame_unref(sw_frame);
|
||||
av_frame_unref(frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_ctx = NULL;
|
||||
AVStream *video_st = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket pkt = { 0 };
|
||||
AVFrame *frame = NULL, *sw_frame = NULL;
|
||||
|
||||
DecodeContext decode = { NULL };
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i;
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* find the first H.264 video stream */
|
||||
for (i = 0; i < input_ctx->nb_streams; i++) {
|
||||
AVStream *st = input_ctx->streams[i];
|
||||
|
||||
if (st->codecpar->codec_id == AV_CODEC_ID_H264 && !video_st)
|
||||
video_st = st;
|
||||
else
|
||||
st->discard = AVDISCARD_ALL;
|
||||
}
|
||||
if (!video_st) {
|
||||
fprintf(stderr, "No H.264 video stream in the input file\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the hardware device */
|
||||
ret = av_hwdevice_ctx_create(&decode.hw_device_ref, AV_HWDEVICE_TYPE_QSV,
|
||||
"auto", NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open the hardware device\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* initialize the decoder */
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
if (!decoder) {
|
||||
fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
decoder_ctx = avcodec_alloc_context3(decoder);
|
||||
if (!decoder_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
decoder_ctx->codec_id = AV_CODEC_ID_H264;
|
||||
if (video_st->codecpar->extradata_size) {
|
||||
decoder_ctx->extradata = av_mallocz(video_st->codecpar->extradata_size +
|
||||
AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!decoder_ctx->extradata) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
memcpy(decoder_ctx->extradata, video_st->codecpar->extradata,
|
||||
video_st->codecpar->extradata_size);
|
||||
decoder_ctx->extradata_size = video_st->codecpar->extradata_size;
|
||||
}
|
||||
|
||||
decoder_ctx->opaque = &decode;
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the decoder: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the output stream */
|
||||
ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the output context: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
sw_frame = av_frame_alloc();
|
||||
if (!frame || !sw_frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt.stream_index == video_st->index)
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, sw_frame, &pkt, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "%s\n", buf);
|
||||
}
|
||||
|
||||
avformat_close_input(&input_ctx);
|
||||
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&sw_frame);
|
||||
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
av_buffer_unref(&decode.hw_device_ref);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -1,199 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libavformat/libavcodec demuxing and muxing API usage example
|
||||
* @example remux.c
|
||||
*
|
||||
* Remux streams from one container format to another. Data is copied from the
|
||||
* input to the output without transcoding.
|
||||
*/
|
||||
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
tag,
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket *pkt = NULL;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
int stream_index = 0;
|
||||
int *stream_mapping = NULL;
|
||||
int stream_mapping_size = 0;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input output\n"
|
||||
"API example program to remux a media file with libavformat and libavcodec.\n"
|
||||
"The output format is guessed according to the file extension.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
|
||||
fprintf(stderr, "Failed to retrieve input stream information");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, in_filename, 0);
|
||||
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
|
||||
if (!ofmt_ctx) {
|
||||
fprintf(stderr, "Could not create output context\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
stream_mapping_size = ifmt_ctx->nb_streams;
|
||||
stream_mapping = av_calloc(stream_mapping_size, sizeof(*stream_mapping));
|
||||
if (!stream_mapping) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ofmt = ofmt_ctx->oformat;
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream = ifmt_ctx->streams[i];
|
||||
AVCodecParameters *in_codecpar = in_stream->codecpar;
|
||||
|
||||
if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
|
||||
in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
|
||||
in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
|
||||
stream_mapping[i] = -1;
|
||||
continue;
|
||||
}
|
||||
|
||||
stream_mapping[i] = stream_index++;
|
||||
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
fprintf(stderr, "Failed allocating output stream\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy codec parameters\n");
|
||||
goto end;
|
||||
}
|
||||
out_stream->codecpar->codec_tag = 0;
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, out_filename, 1);
|
||||
|
||||
if (!(ofmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s'", out_filename);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt->stream_index];
|
||||
if (pkt->stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt->stream_index] < 0) {
|
||||
av_packet_unref(pkt);
|
||||
continue;
|
||||
}
|
||||
|
||||
pkt->stream_index = stream_mapping[pkt->stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt->stream_index];
|
||||
log_packet(ifmt_ctx, pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
av_packet_rescale_ts(pkt, in_stream->time_base, out_stream->time_base);
|
||||
pkt->pos = -1;
|
||||
log_packet(ofmt_ctx, pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, pkt);
|
||||
/* pkt is now blank (av_interleaved_write_frame() takes ownership of
|
||||
* its contents and resets pkt), so that no unreferencing is necessary.
|
||||
* This would be different if one used av_write_frame(). */
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&pkt);
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
/* close output */
|
||||
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
av_freep(&stream_mapping);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
191
doc/examples/remuxing.c
Normal file
191
doc/examples/remuxing.c
Normal file
@@ -0,0 +1,191 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat/libavcodec demuxing and muxing API example.
|
||||
*
|
||||
* Remux streams from one container format to another.
|
||||
* @example remuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
tag,
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket pkt;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
int stream_index = 0;
|
||||
int *stream_mapping = NULL;
|
||||
int stream_mapping_size = 0;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input output\n"
|
||||
"API example program to remux a media file with libavformat and libavcodec.\n"
|
||||
"The output format is guessed according to the file extension.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
|
||||
fprintf(stderr, "Failed to retrieve input stream information");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, in_filename, 0);
|
||||
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
|
||||
if (!ofmt_ctx) {
|
||||
fprintf(stderr, "Could not create output context\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
stream_mapping_size = ifmt_ctx->nb_streams;
|
||||
stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
|
||||
if (!stream_mapping) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ofmt = ofmt_ctx->oformat;
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream = ifmt_ctx->streams[i];
|
||||
AVCodecParameters *in_codecpar = in_stream->codecpar;
|
||||
|
||||
if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
|
||||
in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
|
||||
in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
|
||||
stream_mapping[i] = -1;
|
||||
continue;
|
||||
}
|
||||
|
||||
stream_mapping[i] = stream_index++;
|
||||
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
fprintf(stderr, "Failed allocating output stream\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy codec parameters\n");
|
||||
goto end;
|
||||
}
|
||||
out_stream->codecpar->codec_tag = 0;
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, out_filename, 1);
|
||||
|
||||
if (!(ofmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s'", out_filename);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt.stream_index];
|
||||
if (pkt.stream_index >= stream_mapping_size ||
|
||||
stream_mapping[pkt.stream_index] < 0) {
|
||||
av_packet_unref(&pkt);
|
||||
continue;
|
||||
}
|
||||
|
||||
pkt.stream_index = stream_mapping[pkt.stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt.stream_index];
|
||||
log_packet(ifmt_ctx, &pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
|
||||
pkt.pos = -1;
|
||||
log_packet(ofmt_ctx, &pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
/* close output */
|
||||
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
av_freep(&stream_mapping);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -1,220 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio resampling API usage example
|
||||
* @example resample_audio.c
|
||||
*
|
||||
* Generate a synthetic audio signal, and Use libswresample API to perform audio
|
||||
* resampling. The output is written to a raw audio file to be played with
|
||||
* ffplay.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate sin tone with 440Hz frequency and duplicated channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_STEREO, dst_ch_layout = AV_CHANNEL_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
char buf[64];
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_chlayout(swr_ctx, "in_chlayout", &src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_chlayout(swr_ctx, "out_chlayout", &dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = src_ch_layout.nb_channels;
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = dst_ch_layout.nb_channels;
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_freep(&dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
if (dst_bufsize < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
goto end;
|
||||
}
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
av_channel_layout_describe(&dst_ch_layout, buf, sizeof(buf));
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %s -channels %d -ar %d %s\n",
|
||||
fmt, buf, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
|
||||
214
doc/examples/resampling_audio.c
Normal file
214
doc/examples/resampling_audio.c
Normal file
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;

    for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        struct sample_fmt_entry *entry = &sample_fmt_entries[i];
        if (sample_fmt == entry->sample_fmt) {
            *fmt = AV_NE(entry->fmt_be, entry->fmt_le);
            return 0;
        }
    }

    fprintf(stderr,
            "Sample format %s not supported as output format\n",
            av_get_sample_fmt_name(sample_fmt));
    return AVERROR(EINVAL);
}

/**
 * Fill dst buffer with nb_samples, generated starting from t.
 */
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
    int i, j;
    double tincr = 1.0 / sample_rate, *dstp = dst;
    const double c = 2 * M_PI * 440.0;

    /* generate sin tone with 440Hz frequency and duplicated channels */
    for (i = 0; i < nb_samples; i++) {
        *dstp = sin(c * *t);
        for (j = 1; j < nb_channels; j++)
            dstp[j] = dstp[0];
        dstp += nb_channels;
        *t += tincr;
    }
}

int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
                argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout",    src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate",       src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout",    dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate",       dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
                                             src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                             dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_freep(&dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        if (dst_bufsize < 0) {
            fprintf(stderr, "Could not get sample buffer size\n");
            goto end;
        }
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
@@ -1,141 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file libswscale API usage example
|
||||
* @example scale_video.c
|
||||
*
|
||||
* Generate a synthetic video signal and use libswscale to perform rescaling.
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
|
||||
140 doc/examples/scaling_video.c Normal file
@@ -0,0 +1,140 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
@@ -1,61 +0,0 @@
/*
 * Copyright (c) 2011 Reinhard Tartler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file libavformat metadata extraction API usage example
 * @example show_metadata.c
 *
 * Show metadata from an input file.
 */

#include <stdio.h>

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main (int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    const AVDictionaryEntry *tag = NULL;
    int ret;

    if (argc != 2) {
        printf("usage: %s <input_file>\n"
               "example program to demonstrate the use of the libavformat metadata API.\n"
               "\n", argv[0]);
        return 1;
    }

    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
        return ret;

    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    while ((tag = av_dict_iterate(fmt_ctx->metadata, tag)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    return 0;
}
@@ -1,691 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2014 Andrey Utkin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file demuxing, decoding, filtering, encoding and muxing API usage example
|
||||
* @example transcode.c
|
||||
*
|
||||
* Convert input to output file, applying some hard-coded filter-graph on both
|
||||
* audio and video streams.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx;
|
||||
static AVFormatContext *ofmt_ctx;
|
||||
typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
|
||||
AVPacket *enc_pkt;
|
||||
AVFrame *filtered_frame;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
typedef struct StreamContext {
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
AVFrame *dec_frame;
|
||||
} StreamContext;
|
||||
static StreamContext *stream_ctx;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ifmt_ctx = NULL;
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
|
||||
if (!stream_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream = ifmt_ctx->streams[i];
|
||||
const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
|
||||
AVCodecContext *codec_ctx;
|
||||
if (!dec) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
|
||||
return AVERROR_DECODER_NOT_FOUND;
|
||||
}
|
||||
codec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!codec_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
|
||||
"for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Inform the decoder about the timebase for the packet timestamps.
|
||||
* This is highly recommended, but not mandatory. */
|
||||
codec_ctx->pkt_timebase = stream->time_base;
|
||||
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
|
||||
/* Open decoder */
|
||||
ret = avcodec_open2(codec_ctx, dec, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
stream_ctx[i].dec_ctx = codec_ctx;
|
||||
|
||||
stream_ctx[i].dec_frame = av_frame_alloc();
|
||||
if (!stream_ctx[i].dec_frame)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_output_file(const char *filename)
|
||||
{
|
||||
AVStream *out_stream;
|
||||
AVStream *in_stream;
|
||||
AVCodecContext *dec_ctx, *enc_ctx;
|
||||
const AVCodec *encoder;
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ofmt_ctx = NULL;
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
|
||||
if (!ofmt_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
out_stream = avformat_new_stream(ofmt_ctx, NULL);
|
||||
if (!out_stream) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
|
||||
return AVERROR_UNKNOWN;
|
||||
}
|
||||
|
||||
in_stream = ifmt_ctx->streams[i];
|
||||
dec_ctx = stream_ctx[i].dec_ctx;
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
/* in this example, we choose transcoding to same codec */
|
||||
encoder = avcodec_find_encoder(dec_ctx->codec_id);
|
||||
if (!encoder) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
|
||||
return AVERROR_INVALIDDATA;
|
||||
}
|
||||
enc_ctx = avcodec_alloc_context3(encoder);
|
||||
if (!enc_ctx) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* In this example, we transcode to same properties (picture size,
|
||||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
const enum AVPixelFormat *pix_fmts = NULL;
|
||||
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_PIX_FORMAT, 0,
|
||||
(const void**)&pix_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->pix_fmt = (ret >= 0 && pix_fmts) ?
|
||||
pix_fmts[0] : dec_ctx->pix_fmt;
|
||||
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
|
||||
} else {
|
||||
const enum AVSampleFormat *sample_fmts = NULL;
|
||||
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_SAMPLE_FORMAT, 0,
|
||||
(const void**)&sample_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = (ret >= 0 && sample_fmts) ?
|
||||
sample_fmts[0] : dec_ctx->sample_fmt;
|
||||
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
/* Third parameter can be used to pass settings to encoder */
|
||||
ret = avcodec_open2(enc_ctx, encoder, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open %s encoder for stream #%u\n", encoder->name, i);
|
||||
return ret;
|
||||
}
|
||||
ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
|
||||
out_stream->time_base = enc_ctx->time_base;
|
||||
stream_ctx[i].enc_ctx = enc_ctx;
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
|
||||
return AVERROR_INVALIDDATA;
|
||||
} else {
|
||||
/* if this stream must be remuxed */
|
||||
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
|
||||
return ret;
|
||||
}
|
||||
out_stream->time_base = in_stream->time_base;
|
||||
}
|
||||
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, filename, 1);
|
||||
|
||||
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* init muxer, write output file header */
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
||||
AVCodecContext *enc_ctx, const char *filter_spec)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
const AVFilter *buffersrc = NULL;
|
||||
const AVFilter *buffersink = NULL;
|
||||
AVFilterContext *buffersrc_ctx = NULL;
|
||||
AVFilterContext *buffersink_ctx = NULL;
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVFilterGraph *filter_graph = avfilter_graph_alloc();
|
||||
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
buffersrc = avfilter_get_by_name("buffer");
|
||||
buffersink = avfilter_get_by_name("buffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den,
|
||||
dec_ctx->sample_aspect_ratio.num,
|
||||
dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
|
||||
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
char buf[64];
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
buffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!buffersrc || !buffersink) {
|
||||
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
|
||||
av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
|
||||
av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf));
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s",
|
||||
dec_ctx->pkt_timebase.num, dec_ctx->pkt_timebase.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt),
|
||||
buf);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
|
||||
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf));
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts",
|
||||
buf, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
|
||||
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (enc_ctx->frame_size > 0)
|
||||
av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if (!outputs->name || !inputs->name) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Fill FilteringContext */
|
||||
fctx->buffersrc_ctx = buffersrc_ctx;
|
||||
fctx->buffersink_ctx = buffersink_ctx;
|
||||
fctx->filter_graph = filter_graph;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int init_filters(void)
|
||||
{
|
||||
const char *filter_spec;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
|
||||
if (!filter_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
filter_ctx[i].buffersrc_ctx = NULL;
|
||||
filter_ctx[i].buffersink_ctx = NULL;
|
||||
filter_ctx[i].filter_graph = NULL;
|
||||
if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
|
||||
|| ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
|
||||
continue;
|
||||
|
||||
|
||||
if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
filter_spec = "null"; /* passthrough (dummy) filter for video */
|
||||
else
|
||||
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
|
||||
ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
|
||||
stream_ctx[i].enc_ctx, filter_spec);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
filter_ctx[i].enc_pkt = av_packet_alloc();
|
||||
if (!filter_ctx[i].enc_pkt)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
filter_ctx[i].filtered_frame = av_frame_alloc();
|
||||
if (!filter_ctx[i].filtered_frame)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encode_write_frame(unsigned int stream_index, int flush)
|
||||
{
|
||||
StreamContext *stream = &stream_ctx[stream_index];
|
||||
FilteringContext *filter = &filter_ctx[stream_index];
|
||||
AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
|
||||
AVPacket *enc_pkt = filter->enc_pkt;
|
||||
int ret;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
|
||||
/* encode filtered frame */
|
||||
av_packet_unref(enc_pkt);
|
||||
|
||||
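/* Rescale the frame's pts from the filter output time base to the encoder time base. */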
if (filt_frame && filt_frame->pts != AV_NOPTS_VALUE)
|
||||
filt_frame->pts = av_rescale_q(filt_frame->pts, filt_frame->time_base,
|
||||
stream->enc_ctx->time_base);
|
||||
|
||||
ret = avcodec_send_frame(stream->enc_ctx, filt_frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);
|
||||
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
return 0;
|
||||
|
||||
/* prepare packet for muxing */
|
||||
enc_pkt->stream_index = stream_index;
|
||||
av_packet_rescale_ts(enc_pkt,
|
||||
stream->enc_ctx->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
|
||||
/* mux encoded frame */
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
|
||||
{
|
||||
FilteringContext *filter = &filter_ctx[stream_index];
|
||||
int ret;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
|
||||
/* push the decoded frame into the filtergraph */
|
||||
ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
|
||||
frame, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
|
||||
ret = av_buffersink_get_frame(filter->buffersink_ctx,
|
||||
filter->filtered_frame);
|
||||
if (ret < 0) {
|
||||
/* if no more frames for output - returns AVERROR(EAGAIN)
|
||||
* if flushed and no more frames for output - returns AVERROR_EOF
|
||||
* rewrite retcode to 0 to show it as normal procedure completion
|
||||
*/
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
filter->filtered_frame->time_base = av_buffersink_get_time_base(filter->buffersink_ctx);
|
||||
filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
|
||||
ret = encode_write_frame(stream_index, 0);
|
||||
av_frame_unref(filter->filtered_frame);
|
||||
if (ret < 0)
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int flush_encoder(unsigned int stream_index)
|
||||
{
|
||||
if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
|
||||
AV_CODEC_CAP_DELAY))
|
||||
return 0;
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
|
||||
return encode_write_frame(stream_index, 1);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket *packet = NULL;
|
||||
unsigned int stream_index;
|
||||
unsigned int i;
|
||||
|
||||
if (argc != 3) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = open_output_file(argv[2])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters()) < 0)
|
||||
goto end;
|
||||
if (!(packet = av_packet_alloc()))
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
|
||||
break;
|
||||
stream_index = packet->stream_index;
|
||||
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
|
||||
stream_index);
|
||||
|
||||
if (filter_ctx[stream_index].filter_graph) {
|
||||
StreamContext *stream = &stream_ctx[stream_index];
|
||||
|
||||
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
|
||||
|
||||
ret = avcodec_send_packet(stream->dec_ctx, packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
|
||||
break;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
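/* Use the decoder's best-effort timestamp estimate as the presentation timestamp before filtering. */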
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, stream_index);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
/* remux this frame without reencoding */
|
||||
av_packet_rescale_ts(packet,
|
||||
ifmt_ctx->streams[stream_index]->time_base,
|
||||
ofmt_ctx->streams[stream_index]->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, packet);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
av_packet_unref(packet);
|
||||
}
|
||||
|
||||
/* flush decoders, filters and encoders */
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
StreamContext *stream;
|
||||
|
||||
if (!filter_ctx[i].filter_graph)
|
||||
continue;
|
||||
|
||||
stream = &stream_ctx[i];
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, "Flushing stream %u decoder\n", i);
|
||||
|
||||
/* flush decoder */
|
||||
ret = avcodec_send_packet(stream->dec_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing decoding failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (ret >= 0) {
|
||||
ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
|
||||
if (ret == AVERROR_EOF)
|
||||
break;
|
||||
else if (ret < 0)
|
||||
goto end;
|
||||
|
||||
stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
|
||||
ret = filter_encode_write_frame(stream->dec_frame, i);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush filter */
|
||||
ret = filter_encode_write_frame(NULL, i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* flush encoder */
|
||||
ret = flush_encoder(i);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
av_packet_free(&packet);
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
avcodec_free_context(&stream_ctx[i].dec_ctx);
|
||||
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
|
||||
avcodec_free_context(&stream_ctx[i].enc_ctx);
|
||||
if (filter_ctx && filter_ctx[i].filter_graph) {
|
||||
avfilter_graph_free(&filter_ctx[i].filter_graph);
|
||||
av_packet_free(&filter_ctx[i].enc_pkt);
|
||||
av_frame_free(&filter_ctx[i].filtered_frame);
|
||||
}
|
||||
|
||||
av_frame_free(&stream_ctx[i].dec_frame);
|
||||
}
|
||||
av_free(filter_ctx);
|
||||
av_free(stream_ctx);
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
|
||||
|
||||
return ret ? 1 : 0;
|
||||
}
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2022 Andreas Unterweger
|
||||
* Copyright (c) 2013-2018 Andreas Unterweger
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
@@ -19,30 +19,29 @@
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file audio transcoding to MPEG/AAC API usage example
|
||||
* @example transcode_aac.c
|
||||
* @file
|
||||
* Simple audio converter
|
||||
*
|
||||
* Convert an input audio file to AAC in an MP4 container. Formats other than
|
||||
* MP4 are supported based on the output file extension.
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* Formats other than MP4 are supported based on the output file extension.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include <libavutil/audio_fifo.h>
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/avstring.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/frame.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include <libswresample/swresample.h>
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/* The output bit rate in bit/s */
|
||||
#define OUTPUT_BIT_RATE 96000
|
||||
@@ -61,8 +60,7 @@ static int open_input_file(const char *filename,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodecContext *avctx;
|
||||
const AVCodec *input_codec;
|
||||
const AVStream *stream;
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/* Open the input file to read from it. */
|
||||
@@ -90,10 +88,8 @@ static int open_input_file(const char *filename,
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
stream = (*input_format_context)->streams[0];
|
||||
|
||||
/* Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder(stream->codecpar->codec_id))) {
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codecpar->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
@@ -108,7 +104,7 @@ static int open_input_file(const char *filename,
|
||||
}
|
||||
|
||||
/* Initialize the stream parameters with demuxer information. */
|
||||
error = avcodec_parameters_to_context(avctx, stream->codecpar);
|
||||
error = avcodec_parameters_to_context(avctx, (*input_format_context)->streams[0]->codecpar);
|
||||
if (error < 0) {
|
||||
avformat_close_input(input_format_context);
|
||||
avcodec_free_context(&avctx);
|
||||
@@ -124,9 +120,6 @@ static int open_input_file(const char *filename,
|
||||
return error;
|
||||
}
|
||||
|
||||
/* Set the packet timebase for the decoder. */
|
||||
avctx->pkt_timebase = stream->time_base;
|
||||
|
||||
/* Save the decoder context for easier access later. */
|
||||
*input_codec_context = avctx;
|
||||
|
||||
@@ -151,7 +144,7 @@ static int open_output_file(const char *filename,
|
||||
AVCodecContext *avctx = NULL;
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
const AVCodec *output_codec = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/* Open the output file to write to it. */
|
||||
@@ -206,11 +199,15 @@ static int open_output_file(const char *filename,
|
||||
|
||||
/* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion. */
|
||||
av_channel_layout_default(&avctx->ch_layout, OUTPUT_CHANNELS);
|
||||
avctx->channels = OUTPUT_CHANNELS;
|
||||
avctx->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
avctx->sample_rate = input_codec_context->sample_rate;
|
||||
avctx->sample_fmt = output_codec->sample_fmts[0];
|
||||
avctx->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/* Allow the use of the experimental AAC encoder. */
|
||||
avctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
|
||||
|
||||
/* Set the sample rate for the container. */
|
||||
stream->time_base.den = input_codec_context->sample_rate;
|
||||
stream->time_base.num = 1;
|
||||
@@ -292,18 +289,21 @@ static int init_resampler(AVCodecContext *input_codec_context,
|
||||
/*
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
error = swr_alloc_set_opts2(resample_context,
|
||||
&output_codec_context->ch_layout,
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
&input_codec_context->ch_layout,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (error < 0) {
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return error;
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/*
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
@@ -331,7 +331,7 @@ static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
|
||||
{
|
||||
/* Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
|
||||
output_codec_context->ch_layout.nb_channels, 1))) {
|
||||
output_codec_context->channels, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
@@ -380,8 +380,6 @@ static int decode_audio_frame(AVFrame *frame,
|
||||
if (error < 0)
|
||||
return error;
|
||||
|
||||
*data_present = 0;
|
||||
*finished = 0;
|
||||
/* Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, input_packet)) < 0) {
|
||||
/* If we are at the end of the file, flush the decoder below. */
|
||||
@@ -448,17 +446,26 @@ static int init_converted_samples(uint8_t ***converted_input_samples,
|
||||
int error;
|
||||
|
||||
/* Allocate as many pointers as there are audio channels.
|
||||
* Each pointer will point to the audio samples of the corresponding
|
||||
* Each pointer will later point to the audio samples of the corresponding
|
||||
* channels (although it may be NULL for interleaved formats).
|
||||
* Allocate memory for the samples of all channels in one consecutive
|
||||
*/
|
||||
if (!(*converted_input_samples = calloc(output_codec_context->channels,
|
||||
sizeof(**converted_input_samples)))) {
|
||||
fprintf(stderr, "Could not allocate converted input sample pointers\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Allocate memory for the samples of all channels in one consecutive
|
||||
* block for convenience. */
|
||||
if ((error = av_samples_alloc_array_and_samples(converted_input_samples, NULL,
|
||||
output_codec_context->ch_layout.nb_channels,
|
||||
if ((error = av_samples_alloc(*converted_input_samples, NULL,
|
||||
output_codec_context->channels,
|
||||
frame_size,
|
||||
output_codec_context->sample_fmt, 0)) < 0) {
|
||||
fprintf(stderr,
|
||||
"Could not allocate converted input samples (error '%s')\n",
|
||||
av_err2str(error));
|
||||
av_freep(&(*converted_input_samples)[0]);
|
||||
free(*converted_input_samples);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
@@ -551,7 +558,7 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
AVFrame *input_frame = NULL;
|
||||
/* Temporary storage for the converted input samples. */
|
||||
uint8_t **converted_input_samples = NULL;
|
||||
int data_present;
|
||||
int data_present = 0;
|
||||
int ret = AVERROR_EXIT;
|
||||
|
||||
/* Initialize temporary storage for one input frame. */
|
||||
@@ -590,9 +597,10 @@ static int read_decode_convert_and_store(AVAudioFifo *fifo,
|
||||
ret = 0;
|
||||
|
||||
cleanup:
|
||||
if (converted_input_samples)
|
||||
if (converted_input_samples) {
|
||||
av_freep(&converted_input_samples[0]);
|
||||
av_freep(&converted_input_samples);
|
||||
free(converted_input_samples);
|
||||
}
|
||||
av_frame_free(&input_frame);
|
||||
|
||||
return ret;
|
||||
@@ -624,7 +632,7 @@ static int init_output_frame(AVFrame **frame,
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity. */
|
||||
(*frame)->nb_samples = frame_size;
|
||||
av_channel_layout_copy(&(*frame)->ch_layout, &output_codec_context->ch_layout);
|
||||
(*frame)->channel_layout = output_codec_context->channel_layout;
|
||||
(*frame)->format = output_codec_context->sample_fmt;
|
||||
(*frame)->sample_rate = output_codec_context->sample_rate;
|
||||
|
||||
@@ -671,16 +679,17 @@ static int encode_audio_frame(AVFrame *frame,
|
||||
pts += frame->nb_samples;
|
||||
}
|
||||
|
||||
*data_present = 0;
|
||||
/* Send the audio frame stored in the temporary packet to the encoder.
|
||||
* The output audio stream encoder is used to do this. */
|
||||
error = avcodec_send_frame(output_codec_context, frame);
|
||||
/* Check for errors, but proceed with fetching encoded samples if the
|
||||
* encoder signals that it has nothing more to encode. */
|
||||
if (error < 0 && error != AVERROR_EOF) {
|
||||
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
/* The encoder signals that it has nothing more to encode. */
|
||||
if (error == AVERROR_EOF) {
|
||||
error = 0;
|
||||
goto cleanup;
|
||||
} else if (error < 0) {
|
||||
fprintf(stderr, "Could not send packet for encoding (error '%s')\n",
|
||||
av_err2str(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/* Receive one encoded frame from the encoder. */
|
||||
@@ -851,6 +860,7 @@ int main(int argc, char **argv)
|
||||
int data_written;
|
||||
/* Flush the encoder as it may have delayed frames. */
|
||||
do {
|
||||
data_written = 0;
|
||||
if (encode_audio_frame(NULL, output_format_context,
|
||||
output_codec_context, &data_written))
|
||||
goto cleanup;
|
||||
|
||||
621 doc/examples/transcoding.c Normal file
@@ -0,0 +1,621 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2014 Andrey Utkin
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for demuxing, decoding, filtering, encoding and muxing
|
||||
* @example transcoding.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/pixdesc.h>
|
||||
|
||||
static AVFormatContext *ifmt_ctx;
|
||||
static AVFormatContext *ofmt_ctx;
|
||||
typedef struct FilteringContext {
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
|
||||
AVPacket *enc_pkt;
|
||||
AVFrame *filtered_frame;
|
||||
} FilteringContext;
|
||||
static FilteringContext *filter_ctx;
|
||||
|
||||
typedef struct StreamContext {
|
||||
AVCodecContext *dec_ctx;
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
AVFrame *dec_frame;
|
||||
} StreamContext;
|
||||
static StreamContext *stream_ctx;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
unsigned int i;
|
||||
|
||||
ifmt_ctx = NULL;
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
|
||||
if (!stream_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *stream = ifmt_ctx->streams[i];
|
||||
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
|
||||
AVCodecContext *codec_ctx;
|
||||
if (!dec) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
|
||||
return AVERROR_DECODER_NOT_FOUND;
|
||||
}
|
||||
codec_ctx = avcodec_alloc_context3(dec);
|
||||
if (!codec_ctx) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
|
||||
"for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
/* Reencode video & audio and remux subtitles etc. */
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|
||||
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
|
||||
codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
|
||||
/* Open decoder */
|
||||
ret = avcodec_open2(codec_ctx, dec, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
stream_ctx[i].dec_ctx = codec_ctx;
|
||||
|
||||
stream_ctx[i].dec_frame = av_frame_alloc();
|
||||
if (!stream_ctx[i].dec_frame)
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, filename, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

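/* Note: avformat_write_header() may adjust out_stream->time_base to what the
 * muxer requires, which is why encoded packets are later rescaled from the
 * encoder time base to the stream time base with av_packet_rescale_ts()
 * before muxing. */
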
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;

        filter_ctx[i].enc_pkt = av_packet_alloc();
        if (!filter_ctx[i].enc_pkt)
            return AVERROR(ENOMEM);

        filter_ctx[i].filtered_frame = av_frame_alloc();
        if (!filter_ctx[i].filtered_frame)
            return AVERROR(ENOMEM);
    }
    return 0;
}

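/* encode_write_frame() below drives the send/receive encoding API: when the
 * flush flag is set, a NULL frame is sent to drain the encoder.
 * AVERROR(EAGAIN) and AVERROR_EOF from avcodec_receive_packet() are normal
 * loop-exit conditions here, not errors. */
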
static int encode_write_frame(unsigned int stream_index, int flush)
{
    StreamContext *stream = &stream_ctx[stream_index];
    FilteringContext *filter = &filter_ctx[stream_index];
    AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
    AVPacket *enc_pkt = filter->enc_pkt;
    int ret;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    av_packet_unref(enc_pkt);

    ret = avcodec_send_frame(stream->enc_ctx, filt_frame);

    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);

        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;

        /* prepare packet for muxing */
        enc_pkt->stream_index = stream_index;
        av_packet_rescale_ts(enc_pkt,
                             stream->enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);

        av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
        /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
    }

    return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    FilteringContext *filter = &filter_ctx[stream_index];
    int ret;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
                                       frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter->buffersink_ctx,
                                      filter->filtered_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            break;
        }

        filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(stream_index, 0);
        av_frame_unref(filter->filtered_frame);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index)
{
    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
                AV_CODEC_CAP_DELAY))
        return 0;

    av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
    return encode_write_frame(stream_index, 1);
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket *packet = NULL;
    unsigned int stream_index;
    unsigned int i;

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;
    if (!(packet = av_packet_alloc()))
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
            break;
        stream_index = packet->stream_index;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            StreamContext *stream = &stream_ctx[stream_index];

            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");

            av_packet_rescale_ts(packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 stream->dec_ctx->time_base);
            ret = avcodec_send_packet(stream->dec_ctx, packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            while (ret >= 0) {
                ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
                if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                    break;
                else if (ret < 0)
                    goto end;

                stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
                ret = filter_encode_write_frame(stream->dec_frame, stream_index);
                if (ret < 0)
                    goto end;
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_free(&packet);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph) {
            avfilter_graph_free(&filter_ctx[i].filter_graph);
            av_packet_free(&filter_ctx[i].enc_pkt);
            av_frame_free(&filter_ctx[i].filtered_frame);
        }

        av_frame_free(&stream_ctx[i].dec_frame);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}

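/* Usage, as enforced by the argc check in main():
 *   transcoding <input file> <output file>
 * e.g. (hypothetical file names): ./transcoding in.mp4 out.mkv
 * The output container format is guessed from the output file name by
 * avformat_alloc_output_context2(). */
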
@@ -1,4 +1,6 @@
/*
 * Video Acceleration API (video encoding) encode sample
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
@@ -19,12 +21,13 @@
 */

/**
 * @file Intel VAAPI-accelerated encoding API usage example
 * @example vaapi_encode.c
 * @file
 * Intel VAAPI-accelerated encoding example.
 *
 * @example vaapi_encode.c
 * This example shows how to do VAAPI-accelerated encoding. now only support NV12
 * raw file, usage like: vaapi_encode 1920 1080 input.yuv output.h264
 *
 * Perform VAAPI-accelerated encoding. Read input from an NV12 raw
 * file, and write the H.264 encoded data to an output raw file.
 * Usage: vaapi_encode 1920 1080 input.yuv output.h264
 */

#include <stdio.h>
@@ -88,10 +91,6 @@ static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout)
        enc_pkt->stream_index = 0;
        ret = fwrite(enc_pkt->data, enc_pkt->size, 1, fout);
        av_packet_unref(enc_pkt);
        if (!ret) {
            ret = AVERROR(errno);
            break;
        }
    }

end:
@@ -106,7 +105,7 @@ int main(int argc, char *argv[])
    FILE *fin = NULL, *fout = NULL;
    AVFrame *sw_frame = NULL, *hw_frame = NULL;
    AVCodecContext *avctx = NULL;
    const AVCodec *codec = NULL;
    AVCodec *codec = NULL;
    const char *enc_name = "h264_vaapi";

    if (argc < 5) {

@@ -1,4 +1,6 @@
/*
 * Video Acceleration API (video transcoding) transcode sample
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
@@ -19,10 +21,11 @@
 */

/**
 * @file Intel VAAPI-accelerated transcoding API usage example
 * @example vaapi_transcode.c
 * @file
 * Intel VAAPI-accelerated transcoding example.
 *
 * Perform VAAPI-accelerated transcoding.
 * @example vaapi_transcode.c
 * This example shows how to do VAAPI-accelerated transcoding.
 * Usage: vaapi_transcode input_stream codec output_stream
 * e.g: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
 *      - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
@@ -59,7 +62,7 @@ static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
static int open_input_file(const char *filename)
{
    int ret;
    const AVCodec *decoder = NULL;
    AVCodec *decoder = NULL;
    AVStream *video = NULL;

    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
@@ -139,7 +142,7 @@ end:
    return ret;
}

static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)
static int dec_enc(AVPacket *pkt, AVCodec *enc_codec)
{
    AVFrame *frame;
    int ret = 0;
@@ -215,15 +218,17 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec)

fail:
    av_frame_free(&frame);
    if (ret < 0)
        return ret;
    }
    return ret;
    return 0;
}

int main(int argc, char **argv)
{
    const AVCodec *enc_codec;
    int ret = 0;
    AVPacket *dec_pkt;
    AVCodec *enc_codec;

    if (argc != 4) {
        fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"

@@ -450,7 +450,7 @@ work with streams that were detected during the initial scan; streams that
are detected later are ignored.

The size of the initial scan is controlled by two options: @code{probesize}
(default ~5@tie{}Mo) and @code{analyzeduration} (default 5,000,000@tie{}µs = 5@tie{}s). For
(default ~5 Mo) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.

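For example, both limits can be raised on the command line (the file names and
values here are only illustrative):
@example
ffmpeg -probesize 50M -analyzeduration 10M -i input.ts output.mkv
@end example
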
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
@@ -467,7 +467,7 @@ point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.

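For instance, with an encoder that honours @option{-qscale} one might try
(illustrative values and file names):
@example
ffmpeg -i input.avi -qscale:v 2 output.mp4
@end example
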
@section I have a stretched video, why does scaling not fix it?
@section I have a stretched video, why does scaling does not fix it?

A lot of video codecs and formats can store the @emph{aspect ratio} of the
video: this is the ratio between the width and the height of either the full

@@ -79,29 +79,6 @@ Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
@end float

Beware that some assertions are disabled by default, so mind setting
@option{--assert-level=<level>} at configuration time, e.g. when seeking
the highest possible test coverage:
@example
./configure --assert-level=2
@end example
Note that raising the assert level could have a performance impact.

To get the complete list of tests, run the command:
@example
make fate-list
@end example

You can specify a subset of tests to run by specifying the
corresponding elements from the list with the @code{fate-} prefix,
e.g. as in:
@example
make fate-ffprobe_compact fate-ffprobe_xml
@end example

This makes it easier to run a few tests in case of failure without
running the complete test suite.

To use a custom wrapper to run the test, pass @option{--target-exec} to
@command{configure} or set the @var{TARGET_EXEC} Make variable.

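For instance, when cross-testing under an emulator (the wrapper name is only an
example):
@example
make fate TARGET_EXEC="qemu-aarch64"
@end example
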
@@ -208,13 +185,6 @@ Download/synchronize sample files to the configured samples directory.
@item fate-list
Will list all fate/regression test targets.

@item fate-list-failing
List the fate tests that failed the last time they were executed.

@item fate-clear-reports
Remove the test reports from previous test executions (getting rid of
potentially stale results from fate-list-failing).

@item fate
Run the FATE test suite (requires the fate-suite dataset).
@end table
@@ -238,14 +208,6 @@ meaning only while running the regression tests.
Specify how many threads to use while running regression tests, it is
quite useful to detect thread-related regressions.

This variable may be set to the string "random", optionally followed by a
number, like "random99", This will cause each test to use a random number of
threads. If a number is specified, it is used as a maximum number of threads,
otherwise 16 is the maximum.

In case a test fails, the thread count used for it will be written into the
errfile.

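For example (illustrative invocations):
@example
make fate THREADS=4
make fate THREADS=random99
@end example
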
@item THREAD_TYPE
Specify which threading strategy test, either @samp{slice} or @samp{frame},
by default @samp{slice+frame}

@@ -1,5 +1,5 @@
slot=          # some unique identifier
repo=https://git.ffmpeg.org/ffmpeg.git  # the source repository
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
#branch=release/2.6    # the branch to test
samples=       # path to samples directory
workdir=       # directory in which to do all the work
@@ -31,25 +31,3 @@ makeopts=      # extra options passed to 'make'
               # defaulting to makeopts above if this is not set
#tar=          # command to create a tar archive from its arguments on stdout,
               # defaults to 'tar c'
#fate_targets= # targets to make when running fate; defaults to "fate",
               # can be set to run a subset of tests, e.g. "fate-checkasm".

#fate_environments= # a list of names of configurations to run tests for;
                    # each round is run with variables from ${${name}_env} set.

# One example of using fate_environments:

# target_exec="qemu-aarch64-static"
# fate_targets="fate-checkasm fate-cpu"
# fate_environments="sve128 sve256"
# sve128_env="QEMU_CPU=max,sve128=on"
# sve256_env="QEMU_CPU=max,sve256=on"

# The variables set by fate_environments can also be used explicitly
# by target_exec, e.g. like this:

# target_exec="qemu-aarch64-static -cpu \$(MY_CPU)"
# fate_targets="fate-checkasm fate-cpu"
# fate_environments="sve128 sve256"
# sve128_env="MY_CPU=max,sve128=on"
# sve256_env="MY_CPU=max,sve256=on"

doc/ffmpeg.texi: 1810 changed lines, diff suppressed because it is too large.

@@ -34,6 +34,10 @@ various FFmpeg APIs.
Force displayed width.
@item -y @var{height}
Force displayed height.
@item -s @var{size}
Set frame size (WxH or abbreviation), needed for videos which do
not contain a header with the frame size like raw YUV. This option
has been deprecated in favor of private options, try -video_size.
@item -fs
Start in fullscreen mode.
@item -an
@@ -122,6 +126,10 @@ Read @var{input_url}.

@section Advanced options
@table @option
@item -pix_fmt @var{format}
Set pixel format.
This option has been deprecated in favor of private options, try -pixel_format.

@item -stats
Print several playback statistics, in particular show the stream
duration, the codec parameters, the current position in the stream and
@@ -196,18 +204,6 @@ will produce a thread pool with this many threads available for parallel
processing. The default is 0 which means that the thread count will be
determined by the number of available CPUs.

@item -enable_vulkan
Use vulkan renderer rather than SDL builtin renderer. Depends on libplacebo.

@item -vulkan_params

Vulkan configuration using a list of @var{key}=@var{value} pairs separated by
":".

@item -hwaccel
Use HW accelerated decoding. Enable this option will enable vulkan renderer
automatically.

@end table

@section While playing
@@ -226,6 +222,8 @@ Pause.
Toggle mute.

@item 9, 0
Decrease and increase volume respectively.

@item /, *
Decrease and increase volume respectively.

@@ -12,7 +12,7 @@

@chapter Synopsis

ffprobe [@var{options}] @file{input_url}
ffprobe [@var{options}] [@file{input_url}]

@chapter Description
@c man begin DESCRIPTION
@@ -28,9 +28,6 @@ If a url is specified in input, ffprobe will try to open and
probe the url content. If the url cannot be opened or recognized as
a multimedia file, a positive exit code is returned.

If no output is specified as output with @option{o} ffprobe will write
to stdout.

ffprobe may be employed both as a standalone application or in
combination with a textual filter, which may perform more
sophisticated processing, e.g. statistical processing or plotting.
@@ -41,15 +38,15 @@ ffprobe will show it.

ffprobe output is designed to be easily parsable by a textual filter,
and consists of one or more sections of a form defined by the selected
writer, which is specified by the @option{output_format} option.
writer, which is specified by the @option{print_format} option.

Sections may contain other nested sections, and are identified by a
name (which may be shared by other sections), and an unique
name. See the output of @option{sections}.

Metadata tags stored in the container or in the streams are recognized
and printed in the corresponding "FORMAT", "STREAM", "STREAM_GROUP_STREAM"
or "PROGRAM_STREAM" section.
and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
section.

@c man end

@@ -83,7 +80,7 @@ Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
Prettify the format of the displayed values, it corresponds to the
options "-unit -prefix -byte_binary_prefix -sexagesimal".

@item -output_format, -of, -print_format @var{writer_name}[=@var{writer_options}]
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
Set the output printing format.

@var{writer_name} specifies the name of the writer, and
@@ -91,7 +88,7 @@ Set the output printing format.

For example for printing the output in JSON format, specify:
@example
-output_format json
-print_format json
@end example

For more details on the available output printing formats, see the
@@ -139,6 +136,13 @@ stream.

All the container format information is printed within a section with
name "FORMAT".

@item -show_format_entry @var{name}
Like @option{-show_format}, but only prints the specified entry of the
container format information, rather than all. This option may be given more
than once, then all specified entries will be shown.

This option is deprecated, use @code{show_entries} instead.

@item -show_entries @var{section_entries}
Set list of entries to show.

@@ -225,13 +229,6 @@ multimedia stream.
Each media stream information is printed within a dedicated section
with name "PROGRAM_STREAM".

@item -show_stream_groups
Show information about stream groups and their streams contained in the
input multimedia stream.

Each media stream information is printed within a dedicated section
with name "STREAM_GROUP_STREAM".

@item -show_chapters
Show information about chapters stored in the format.

@@ -338,25 +335,6 @@ Show information about all pixel formats supported by FFmpeg.
Pixel format information for each format is printed within a section
with name "PIXEL_FORMAT".

@item -show_optional_fields @var{value}
Some writers viz. JSON and XML, omit the printing of fields with invalid or non-applicable values,
while other writers always print them. This option enables one to control this behaviour.
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
Default is @var{auto}.

@item -analyze_frames
Analyze frames and/or their side data up to the provided read interval,
providing additional information that may be useful at a stream level.
Must be paired with the @option{-show_streams} option or it will have no effect.

Currently, the additional fields provided by this option when enabled are the
@code{closed_captions} and @code{film_grain} fields.

For example, to analyze the first 20 seconds and populate these fields:
@example
ffprobe -show_streams -analyze_frames -read_intervals "%+20" INPUT
@end example

@item -bitexact
Force bitexact output, useful to produce output which is not dependent
on the specific build.
@@ -364,10 +342,6 @@ on the specific build.
@item -i @var{input_url}
Read @var{input_url}.

@item -o @var{output_url}
Write output to @var{output_url}. If not specified, the output is sent
to stdout.

@end table
@c man end

@@ -428,9 +402,8 @@ keyN=valN
[/SECTION]
@end example

Metadata tags are printed as a line in the corresponding FORMAT, STREAM,
STREAM_GROUP_STREAM or PROGRAM_STREAM section, and are prefixed by the
string "TAG:".
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
PROGRAM_STREAM section, and are prefixed by the string "TAG:".

A description of the accepted options follows.

doc/ffprobe.xsd
@@ -1,529 +1,389 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="stream_groups" type="ffprobe:StreamGroupsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsAndFramesType">
|
||||
<xsd:choice minOccurs="0" maxOccurs="unbounded">
|
||||
<xsd:element name="packet" type="ffprobe:packetType"/>
|
||||
<xsd:element name="frame" type="ffprobe:frameType"/>
|
||||
<xsd:element name="subtitle" type="ffprobe:subtitleType"/>
|
||||
</xsd:choice>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
<xsd:attribute name="data_hash" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_datum" type="ffprobe:packetSideDatumType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="type" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetSideDatumType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="stream_index" type="xsd:int" />
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
|
||||
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="crop_top" type="xsd:long" />
|
||||
<xsd:attribute name="crop_bottom" type="xsd:long" />
|
||||
<xsd:attribute name="crop_left" type="xsd:long" />
|
||||
<xsd:attribute name="crop_right" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="lossless" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="logsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="logType">
|
||||
<xsd:attribute name="context" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int" />
|
||||
<xsd:attribute name="category" type="xsd:int" />
|
||||
<xsd:attribute name="parent_context" type="xsd:string"/>
|
||||
<xsd:attribute name="parent_category" type="xsd:int" />
|
||||
<xsd:attribute name="message" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataListType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
<xsd:complexType name="frameSideDataType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecodes" type="ffprobe:frameSideDataTimecodeList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:frameSideDataComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="side_data_type" type="xsd:string"/>
|
||||
<xsd:attribute name="side_data_size" type="xsd:int" />
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDatumType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="timecode" type="ffprobe:frameSideDataTimecodeType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataTimecodeType">
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:frameSideDataComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pieces" type="ffprobe:frameSideDataPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="piece" type="ffprobe:frameSideDataPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameSideDataPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="side_datum" type="ffprobe:frameSideDatumType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="subtitleType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="format" type="xsd:int" />
|
||||
<xsd:attribute name="start_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="end_display_time" type="xsd:int" />
|
||||
<xsd:attribute name="num_rects" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="StreamGroupsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream_group" type="ffprobe:streamGroupType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="non_diegetic" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="captions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="descriptions" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="metadata" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dependent" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="still_image" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="multilayer" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
<xsd:attribute name="extradata_size" type="xsd:int" />
|
||||
<xsd:attribute name="extradata_hash" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_width" type="xsd:int"/>
|
||||
<xsd:attribute name="coded_height" type="xsd:int"/>
|
||||
<xsd:attribute name="closed_captions" type="xsd:boolean"/>
|
||||
<xsd:attribute name="film_grain" type="xsd:boolean"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
<xsd:attribute name="color_transfer" type="xsd:string"/>
|
||||
<xsd:attribute name="color_primaries" type="xsd:string"/>
|
||||
<xsd:attribute name="chroma_location" type="xsd:string"/>
|
||||
<xsd:attribute name="field_order" type="xsd:string"/>
|
||||
<xsd:attribute name="refs" type="xsd:int"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="initial_padding" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:streamGroupComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="type" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:streamGroupComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subcomponents" type="ffprobe:streamGroupSubComponentList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="component_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubComponentList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subcomponent" type="ffprobe:streamGroupSubComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubComponentType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pieces" type="ffprobe:streamGroupPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="subcomponent_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="piece" type="ffprobe:streamGroupPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subpieces" type="ffprobe:streamGroupSubPieceList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="piece_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubPieceList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="subpiece" type="ffprobe:streamGroupSubPieceType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupSubPieceType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="blocks" type="ffprobe:streamGroupBlockList" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="subpiece_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupBlockList">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="block" type="ffprobe:streamGroupBlockType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupBlockType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="block_entry" type="ffprobe:streamGroupEntryType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamGroupEntryType">
|
||||
<xsd:attribute name="key" type="xsd:string"/>
|
||||
<xsd:attribute name="value" type="xsd:string"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_stream_groups" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string"/>
|
||||
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tags" type="ffprobe:tagsType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatFlagsType">
|
||||
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="palette" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="planar" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentType">
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatComponentsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
|
||||
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
|
||||
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="pixelFormatsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
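As a reading aid for the pixel format types defined above, the fragment below is a hand-written illustration of the kind of element they describe; it is not captured ffprobe output, and the values shown are simply those of the common yuv420p format:

@example
<pixel_format name="yuv420p" nb_components="3" log2_chroma_w="1" log2_chroma_h="1" bits_per_pixel="12">
    <flags big_endian="0" palette="0" bitstream="0" hwaccel="0" planar="1" rgb="0" alpha="0"/>
    <components>
        <component index="1" bit_depth="8"/>
        <component index="2" bit_depth="8"/>
        <component index="3" bit_depth="8"/>
    </components>
</pixel_format>
@end example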
            targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
            xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">

    <xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>

    <xsd:complexType name="ffprobeType">
        <xsd:sequence>
            <xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
            <xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="packetsType">
        <xsd:sequence>
            <xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="framesType">
        <xsd:sequence>
            <xsd:choice minOccurs="0" maxOccurs="unbounded">
                <xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
                <xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
            </xsd:choice>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="packetsAndFramesType">
        <xsd:sequence>
            <xsd:choice minOccurs="0" maxOccurs="unbounded">
                <xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
                <xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
                <xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
            </xsd:choice>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="packetType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
            <xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
        </xsd:sequence>

        <xsd:attribute name="codec_type" type="xsd:string" use="required" />
        <xsd:attribute name="stream_index" type="xsd:int" use="required" />
        <xsd:attribute name="pts" type="xsd:long" />
        <xsd:attribute name="pts_time" type="xsd:float" />
        <xsd:attribute name="dts" type="xsd:long" />
        <xsd:attribute name="dts_time" type="xsd:float" />
        <xsd:attribute name="duration" type="xsd:long" />
        <xsd:attribute name="duration_time" type="xsd:float" />
        <xsd:attribute name="size" type="xsd:long" use="required" />
        <xsd:attribute name="pos" type="xsd:long" />
        <xsd:attribute name="flags" type="xsd:string" use="required" />
        <xsd:attribute name="data" type="xsd:string" />
        <xsd:attribute name="data_hash" type="xsd:string" />
    </xsd:complexType>

    <xsd:complexType name="packetSideDataListType">
        <xsd:sequence>
            <xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="packetSideDataType">
        <xsd:attribute name="side_data_type" type="xsd:string"/>
        <xsd:attribute name="side_data_size" type="xsd:int" />
    </xsd:complexType>

    <xsd:complexType name="frameType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
            <xsd:element name="logs" type="ffprobe:logsType" minOccurs="0" maxOccurs="1"/>
            <xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
        </xsd:sequence>

        <xsd:attribute name="media_type" type="xsd:string" use="required"/>
        <xsd:attribute name="stream_index" type="xsd:int" />
        <xsd:attribute name="key_frame" type="xsd:int" use="required"/>
        <xsd:attribute name="pts" type="xsd:long" />
        <xsd:attribute name="pts_time" type="xsd:float"/>
        <xsd:attribute name="pkt_pts" type="xsd:long" />
        <xsd:attribute name="pkt_pts_time" type="xsd:float"/>
        <xsd:attribute name="pkt_dts" type="xsd:long" />
        <xsd:attribute name="pkt_dts_time" type="xsd:float"/>
        <xsd:attribute name="best_effort_timestamp" type="xsd:long" />
        <xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
        <xsd:attribute name="pkt_duration" type="xsd:long" />
        <xsd:attribute name="pkt_duration_time" type="xsd:float"/>
        <xsd:attribute name="pkt_pos" type="xsd:long" />
        <xsd:attribute name="pkt_size" type="xsd:int" />

        <!-- audio attributes -->
        <xsd:attribute name="sample_fmt" type="xsd:string"/>
        <xsd:attribute name="nb_samples" type="xsd:long" />
        <xsd:attribute name="channels" type="xsd:int" />
        <xsd:attribute name="channel_layout" type="xsd:string"/>

        <!-- video attributes -->
        <xsd:attribute name="width" type="xsd:long" />
        <xsd:attribute name="height" type="xsd:long" />
        <xsd:attribute name="pix_fmt" type="xsd:string"/>
        <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="pict_type" type="xsd:string"/>
        <xsd:attribute name="coded_picture_number" type="xsd:long" />
        <xsd:attribute name="display_picture_number" type="xsd:long" />
        <xsd:attribute name="interlaced_frame" type="xsd:int" />
        <xsd:attribute name="top_field_first" type="xsd:int" />
        <xsd:attribute name="repeat_pict" type="xsd:int" />
        <xsd:attribute name="color_range" type="xsd:string"/>
        <xsd:attribute name="color_space" type="xsd:string"/>
        <xsd:attribute name="color_primaries" type="xsd:string"/>
        <xsd:attribute name="color_transfer" type="xsd:string"/>
        <xsd:attribute name="chroma_location" type="xsd:string"/>
    </xsd:complexType>

    <xsd:complexType name="logsType">
        <xsd:sequence>
            <xsd:element name="log" type="ffprobe:logType" minOccurs="1" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="logType">
        <xsd:attribute name="context" type="xsd:string"/>
        <xsd:attribute name="level" type="xsd:int" />
        <xsd:attribute name="category" type="xsd:int" />
        <xsd:attribute name="parent_context" type="xsd:string"/>
        <xsd:attribute name="parent_category" type="xsd:int" />
        <xsd:attribute name="message" type="xsd:string"/>
    </xsd:complexType>

    <xsd:complexType name="frameSideDataListType">
        <xsd:sequence>
            <xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
    <xsd:complexType name="frameSideDataType">
        <xsd:sequence>
            <xsd:element name="timecodes" type="ffprobe:frameSideDataTimecodeList" minOccurs="0" maxOccurs="1"/>
        </xsd:sequence>

        <xsd:attribute name="side_data_type" type="xsd:string"/>
        <xsd:attribute name="side_data_size" type="xsd:int" />
        <xsd:attribute name="timecode" type="xsd:string"/>
    </xsd:complexType>

    <xsd:complexType name="frameSideDataTimecodeList">
        <xsd:sequence>
            <xsd:element name="timecode" type="ffprobe:frameSideDataTimecodeType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="frameSideDataTimecodeType">
        <xsd:attribute name="value" type="xsd:string"/>
    </xsd:complexType>

    <xsd:complexType name="subtitleType">
        <xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
        <xsd:attribute name="pts" type="xsd:long" />
        <xsd:attribute name="pts_time" type="xsd:float"/>
        <xsd:attribute name="format" type="xsd:int" />
        <xsd:attribute name="start_display_time" type="xsd:int" />
        <xsd:attribute name="end_display_time" type="xsd:int" />
        <xsd:attribute name="num_rects" type="xsd:int" />
    </xsd:complexType>

    <xsd:complexType name="streamsType">
        <xsd:sequence>
            <xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="programsType">
        <xsd:sequence>
            <xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="streamDispositionType">
        <xsd:attribute name="default" type="xsd:int" use="required" />
        <xsd:attribute name="dub" type="xsd:int" use="required" />
        <xsd:attribute name="original" type="xsd:int" use="required" />
        <xsd:attribute name="comment" type="xsd:int" use="required" />
        <xsd:attribute name="lyrics" type="xsd:int" use="required" />
        <xsd:attribute name="karaoke" type="xsd:int" use="required" />
        <xsd:attribute name="forced" type="xsd:int" use="required" />
        <xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
        <xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
        <xsd:attribute name="clean_effects" type="xsd:int" use="required" />
        <xsd:attribute name="attached_pic" type="xsd:int" use="required" />
        <xsd:attribute name="timed_thumbnails" type="xsd:int" use="required" />
    </xsd:complexType>

    <xsd:complexType name="streamType">
        <xsd:sequence>
            <xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
            <xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
        </xsd:sequence>

        <xsd:attribute name="index" type="xsd:int" use="required"/>
        <xsd:attribute name="codec_name" type="xsd:string" />
        <xsd:attribute name="codec_long_name" type="xsd:string" />
        <xsd:attribute name="profile" type="xsd:string" />
        <xsd:attribute name="codec_type" type="xsd:string" />
        <xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
        <xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
        <xsd:attribute name="extradata" type="xsd:string" />
        <xsd:attribute name="extradata_hash" type="xsd:string" />

        <!-- video attributes -->
        <xsd:attribute name="width" type="xsd:int"/>
        <xsd:attribute name="height" type="xsd:int"/>
        <xsd:attribute name="coded_width" type="xsd:int"/>
        <xsd:attribute name="coded_height" type="xsd:int"/>
        <xsd:attribute name="closed_captions" type="xsd:boolean"/>
        <xsd:attribute name="has_b_frames" type="xsd:int"/>
        <xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
        <xsd:attribute name="pix_fmt" type="xsd:string"/>
        <xsd:attribute name="level" type="xsd:int"/>
        <xsd:attribute name="color_range" type="xsd:string"/>
        <xsd:attribute name="color_space" type="xsd:string"/>
        <xsd:attribute name="color_transfer" type="xsd:string"/>
        <xsd:attribute name="color_primaries" type="xsd:string"/>
        <xsd:attribute name="chroma_location" type="xsd:string"/>
        <xsd:attribute name="field_order" type="xsd:string"/>
        <xsd:attribute name="refs" type="xsd:int"/>

        <!-- audio attributes -->
        <xsd:attribute name="sample_fmt" type="xsd:string"/>
        <xsd:attribute name="sample_rate" type="xsd:int"/>
        <xsd:attribute name="channels" type="xsd:int"/>
        <xsd:attribute name="channel_layout" type="xsd:string"/>
        <xsd:attribute name="bits_per_sample" type="xsd:int"/>

        <xsd:attribute name="id" type="xsd:string"/>
        <xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
        <xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
        <xsd:attribute name="time_base" type="xsd:string" use="required"/>
        <xsd:attribute name="start_pts" type="xsd:long"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="duration_ts" type="xsd:long"/>
        <xsd:attribute name="duration" type="xsd:float"/>
        <xsd:attribute name="bit_rate" type="xsd:int"/>
        <xsd:attribute name="max_bit_rate" type="xsd:int"/>
        <xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
        <xsd:attribute name="nb_frames" type="xsd:int"/>
        <xsd:attribute name="nb_read_frames" type="xsd:int"/>
        <xsd:attribute name="nb_read_packets" type="xsd:int"/>
    </xsd:complexType>

    <xsd:complexType name="programType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
            <xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
        </xsd:sequence>

        <xsd:attribute name="program_id" type="xsd:int" use="required"/>
        <xsd:attribute name="program_num" type="xsd:int" use="required"/>
        <xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="start_pts" type="xsd:long"/>
        <xsd:attribute name="end_time" type="xsd:float"/>
        <xsd:attribute name="end_pts" type="xsd:long"/>
        <xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
        <xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="formatType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>

        <xsd:attribute name="filename" type="xsd:string" use="required"/>
        <xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
        <xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
        <xsd:attribute name="format_name" type="xsd:string" use="required"/>
        <xsd:attribute name="format_long_name" type="xsd:string"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="duration" type="xsd:float"/>
        <xsd:attribute name="size" type="xsd:long"/>
        <xsd:attribute name="bit_rate" type="xsd:long"/>
        <xsd:attribute name="probe_score" type="xsd:int"/>
    </xsd:complexType>

    <xsd:complexType name="tagType">
        <xsd:attribute name="key" type="xsd:string" use="required"/>
        <xsd:attribute name="value" type="xsd:string" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="errorType">
        <xsd:attribute name="code" type="xsd:int" use="required"/>
        <xsd:attribute name="string" type="xsd:string" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="programVersionType">
        <xsd:attribute name="version" type="xsd:string" use="required"/>
        <xsd:attribute name="copyright" type="xsd:string" use="required"/>
        <xsd:attribute name="build_date" type="xsd:string"/>
        <xsd:attribute name="build_time" type="xsd:string"/>
        <xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
        <xsd:attribute name="configuration" type="xsd:string" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="chaptersType">
        <xsd:sequence>
            <xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="chapterType">
        <xsd:sequence>
            <xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>

        <xsd:attribute name="id" type="xsd:int" use="required"/>
        <xsd:attribute name="time_base" type="xsd:string" use="required"/>
        <xsd:attribute name="start" type="xsd:int" use="required"/>
        <xsd:attribute name="start_time" type="xsd:float"/>
        <xsd:attribute name="end" type="xsd:int" use="required"/>
        <xsd:attribute name="end_time" type="xsd:float" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="libraryVersionType">
        <xsd:attribute name="name" type="xsd:string" use="required"/>
        <xsd:attribute name="major" type="xsd:int" use="required"/>
        <xsd:attribute name="minor" type="xsd:int" use="required"/>
        <xsd:attribute name="micro" type="xsd:int" use="required"/>
        <xsd:attribute name="version" type="xsd:int" use="required"/>
        <xsd:attribute name="ident" type="xsd:string" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="libraryVersionsType">
        <xsd:sequence>
            <xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="pixelFormatFlagsType">
        <xsd:attribute name="big_endian" type="xsd:int" use="required"/>
        <xsd:attribute name="palette" type="xsd:int" use="required"/>
        <xsd:attribute name="bitstream" type="xsd:int" use="required"/>
        <xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
        <xsd:attribute name="planar" type="xsd:int" use="required"/>
        <xsd:attribute name="rgb" type="xsd:int" use="required"/>
        <xsd:attribute name="alpha" type="xsd:int" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="pixelFormatComponentType">
        <xsd:attribute name="index" type="xsd:int" use="required"/>
        <xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
    </xsd:complexType>

    <xsd:complexType name="pixelFormatComponentsType">
        <xsd:sequence>
            <xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>

    <xsd:complexType name="pixelFormatType">
        <xsd:sequence>
            <xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
            <xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
        </xsd:sequence>

        <xsd:attribute name="name" type="xsd:string" use="required"/>
        <xsd:attribute name="nb_components" type="xsd:int" use="required"/>
        <xsd:attribute name="log2_chroma_w" type="xsd:int"/>
        <xsd:attribute name="log2_chroma_h" type="xsd:int"/>
        <xsd:attribute name="bits_per_pixel" type="xsd:int"/>
    </xsd:complexType>

    <xsd:complexType name="pixelFormatsType">
        <xsd:sequence>
            <xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
        </xsd:sequence>
    </xsd:complexType>
</xsd:schema>
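One practical way to exercise this schema is to generate fully qualified XML output and validate it. This is an illustrative sketch, not part of the change itself: it assumes xmllint (from libxml2) is installed, that the schema is available as doc/ffprobe.xsd as in the source tree, and that the XML writer's xsd_strict option is available; if xsd_strict does not already imply fully qualified output on your build, add fully_qualified=1 as well.

@example
ffprobe -output_format xml=xsd_strict=1 -show_format -show_streams INPUT > probe.xml
xmllint --noout --schema doc/ffprobe.xsd probe.xml
@end example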

@@ -13,15 +13,6 @@ corresponding value to true. They can be set to false by prefixing
the option name with "no". For example using "-nofoo"
will set the boolean option with name "foo" to false.

Options that take arguments support a special syntax where the argument given on
the command line is interpreted as a path to the file from which the actual
argument value is loaded. To use this feature, add a forward slash '/'
immediately before the option name (after the leading dash). E.g.
@example
ffmpeg -i INPUT -/filter:v filter.script OUTPUT
@end example
will load a filtergraph description from the file named @file{filter.script}.

@anchor{Stream specifiers}
@section Stream specifiers
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
@@ -46,9 +37,9 @@ Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4. If @var{stream_index} is used as an
additional stream specifier (see below), then it selects stream number
@var{stream_index} from the matching streams. Stream numbering is based on the
order of the streams as detected by libavformat except when a stream group
specifier or program ID is also specified. In this case it is based on the
ordering of the streams in the group or program.
order of the streams as detected by libavformat except when a program ID is
also specified. In this case it is based on the ordering of the streams in the
program.
@item @var{stream_type}[:@var{additional_stream_specifier}]
@var{stream_type} is one of following: 'v' or 'V' for video, 'a' for audio, 's'
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
@@ -57,17 +48,6 @@ thumbnails or cover arts. If @var{additional_stream_specifier} is used, then
it matches streams which both have this type and match the
@var{additional_stream_specifier}. Otherwise, it matches all streams of the
specified type.
@item g:@var{group_specifier}[:@var{additional_stream_specifier}]
Matches streams which are in the group with the specifier @var{group_specifier}.
if @var{additional_stream_specifier} is used, then it matches streams which both
are part of the group and match the @var{additional_stream_specifier}.
@var{group_specifier} may be one of the following:
@table @option
@item @var{group_index}
Match the stream with this group index.
@item #@var{group_id} or i:@var{group_id}
Match the stream with this group id.
@end table
@item p:@var{program_id}[:@var{additional_stream_specifier}]
Matches streams which are in the program with the id @var{program_id}. If
@var{additional_stream_specifier} is used, then it matches streams which both
@@ -78,12 +58,7 @@ Match the stream by stream id (e.g. PID in MPEG-TS container).
@item m:@var{key}[:@var{value}]
Matches streams with the metadata tag @var{key} having the specified value. If
@var{value} is not given, matches streams that contain the given tag with any
value. The colon character ':' in @var{key} or @var{value} needs to be
backslash-escaped.
@item disp:@var{dispositions}[:@var{additional_stream_specifier}]
Matches streams with the given disposition(s). @var{dispositions} is a list of
one or more dispositions (as printed by the @option{-dispositions} option)
joined with '+'.
value.
@item u
Matches streams with usable configuration, the codec must be defined and the
essential information such as video dimension or audio sample rate must be present.
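To make the specifier syntax in the hunks above concrete, here are two illustrative invocations; INPUT and OUTPUT are placeholder file names and the stream layout is assumed:

@example
ffmpeg -i INPUT -map 0:a:1 OUTPUT
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
@end example

The first selects the second audio stream of the first input (stream type plus index as an additional specifier); the second selects streams whose "language" metadata tag is set to "eng".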
@@ -192,9 +167,6 @@ Show available sample formats.
@item -layouts
Show channel names and standard channel layouts.

@item -dispositions
Show stream dispositions.

@item -colors
Show recognized color names.

@@ -226,10 +198,6 @@ and the "Last message repeated n times" line will be omitted.
Indicates that log output should add a @code{[level]} prefix to each message
line. This can be used as an alternative to log coloring, e.g. when dumping the
log to file.
@item time
Indicates that log lines should be prefixed with time information.
@item datetime
Indicates that log lines should be prefixed with date and time information.
@end table
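As a minimal illustration of how these flags combine on the command line (a sketch, assuming a build that supports the time flag documented in this hunk; INPUT and OUTPUT are placeholders), flags are joined with '+' and may be followed by a log level:

@example
ffmpeg -loglevel level+time+info -i INPUT OUTPUT
@end example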
Flags can also be used alone by adding a '+'/'-' prefix to set/reset a single
flag without affecting other @var{flags} or changing @var{loglevel}. When
@@ -385,13 +353,6 @@ Possible flags for this option are:
@end table
@end table

@item -cpucount @var{count} (@emph{global})
Override detection of CPU count. This option is intended
for testing. Do not use it unless you know what you're doing.
@example
ffmpeg -cpucount 2
@end example

@item -max_alloc @var{bytes}
Set the maximum size limit for allocating a block on the heap by ffmpeg's
family of malloc functions. Exercise @strong{extreme caution} when using
Some files were not shown because too many files have changed in this diff.