Compare commits
235 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
db69d06eee | ||
|
|
68af2cc3fe | ||
|
|
a8a3fc84af | ||
|
|
71889a8437 | ||
|
|
3e16202c39 | ||
|
|
d392931391 | ||
|
|
6646dd2825 | ||
|
|
bf8c0be971 | ||
|
|
0e5b6a7156 | ||
|
|
2e7364727c | ||
|
|
6acfaa1084 | ||
|
|
c388bc1774 | ||
|
|
43ba995a31 | ||
|
|
4018b915a6 | ||
|
|
6c4e56f07d | ||
|
|
880007e75c | ||
|
|
c74740fcd5 | ||
|
|
9dea077922 | ||
|
|
b753bac08f | ||
|
|
9abf2aef37 | ||
|
|
b06845c672 | ||
|
|
04fd3f69b3 | ||
|
|
8cabfd922a | ||
|
|
745e70f1d5 | ||
|
|
283e5a4fa0 | ||
|
|
f2b85c8aa1 | ||
|
|
ac494b8325 | ||
|
|
31f0084cbb | ||
|
|
e0f3c1018a | ||
|
|
8accee8b56 | ||
|
|
57a53adf2b | ||
|
|
8a72775d5d | ||
|
|
a4bf50b5c0 | ||
|
|
dc07f98934 | ||
|
|
a7a6d92e84 | ||
|
|
293e4ddd98 | ||
|
|
5f337bd798 | ||
|
|
43eaca54bc | ||
|
|
1646840988 | ||
|
|
f710d5af0a | ||
|
|
c82813c966 | ||
|
|
30bef79c69 | ||
|
|
5ee56ebd30 | ||
|
|
c3c7ecfe48 | ||
|
|
ac9c710e18 | ||
|
|
75385a729f | ||
|
|
ece76d7dd4 | ||
|
|
5fed7010f8 | ||
|
|
0bb8a38c10 | ||
|
|
41f8052284 | ||
|
|
5e02e1ba96 | ||
|
|
73fd378995 | ||
|
|
585c799b10 | ||
|
|
da1e316a1f | ||
|
|
658029334e | ||
|
|
b827ac49b7 | ||
|
|
601616f2de | ||
|
|
553517868d | ||
|
|
5129ae45bf | ||
|
|
d322ffdef5 | ||
|
|
d32543f520 | ||
|
|
f44a7207f0 | ||
|
|
0da3deb079 | ||
|
|
14af28c2f5 | ||
|
|
aeb8631048 | ||
|
|
d72536008a | ||
|
|
5b2ee11a60 | ||
|
|
9328e07ad5 | ||
|
|
60551696c0 | ||
|
|
dd78b9ed7c | ||
|
|
ae7332065b | ||
|
|
ba94588afa | ||
|
|
9d9b87be2b | ||
|
|
0a51afd21d | ||
|
|
3a375e3dcf | ||
|
|
0ccb070863 | ||
|
|
a6d4bac44d | ||
|
|
7264127438 | ||
|
|
7fc55c38c7 | ||
|
|
a626a08c45 | ||
|
|
53e66c7161 | ||
|
|
19740bcbf8 | ||
|
|
09e08d790e | ||
|
|
340795adc9 | ||
|
|
212546e9c6 | ||
|
|
a61eb0c2c0 | ||
|
|
e46e1c3368 | ||
|
|
75c1943e30 | ||
|
|
ece2a9a275 | ||
|
|
6f4ae5f183 | ||
|
|
b108d17c56 | ||
|
|
a5c48cc7a1 | ||
|
|
5d37c7cf08 | ||
|
|
aebce8b1b8 | ||
|
|
15684239ca | ||
|
|
54331d4305 | ||
|
|
89bc70ddd8 | ||
|
|
0151f1daab | ||
|
|
7d1532f752 | ||
|
|
535a8262cc | ||
|
|
1a254c5354 | ||
|
|
615f29e301 | ||
|
|
cac9112bf3 | ||
|
|
a82139d0e6 | ||
|
|
a5ce143895 | ||
|
|
8ae93fdc42 | ||
|
|
1cb5caeb5b | ||
|
|
5e17ff811a | ||
|
|
364eb21d2a | ||
|
|
6350530806 | ||
|
|
52461e7e8b | ||
|
|
b444880422 | ||
|
|
828569c0d0 | ||
|
|
bc8248d07a | ||
|
|
886dd058fe | ||
|
|
028391aa58 | ||
|
|
82d45cb004 | ||
|
|
9285b93143 | ||
|
|
048a545e31 | ||
|
|
851bc9927d | ||
|
|
f9f4835731 | ||
|
|
173a978b9d | ||
|
|
3c8b588f3c | ||
|
|
8ac2375b71 | ||
|
|
49e4c1717f | ||
|
|
3417e955c3 | ||
|
|
d9687e6156 | ||
|
|
f9901306ba | ||
|
|
4930dd91c6 | ||
|
|
c7aa0c4ecd | ||
|
|
445065e23a | ||
|
|
1f1b309f9e | ||
|
|
e2394166bf | ||
|
|
241e87afa4 | ||
|
|
ded98a0919 | ||
|
|
0288fedf18 | ||
|
|
6b02047811 | ||
|
|
562af93025 | ||
|
|
2ab2803944 | ||
|
|
a2666675bf | ||
|
|
9f8bd56e4f | ||
|
|
7043ef6828 | ||
|
|
4ca3f5102f | ||
|
|
26fc4bf42c | ||
|
|
2a39eeb8de | ||
|
|
d870febf88 | ||
|
|
631976108c | ||
|
|
ba89c5d1eb | ||
|
|
3f76a33317 | ||
|
|
76ddc3f1e1 | ||
|
|
811fce437e | ||
|
|
e4538bc888 | ||
|
|
145a3a8455 | ||
|
|
be26ee23ab | ||
|
|
4f5769e052 | ||
|
|
b168ed9b14 | ||
|
|
871c69a28b | ||
|
|
888a6b54bc | ||
|
|
88126fc4ec | ||
|
|
10aaf84f85 | ||
|
|
364f6a5f11 | ||
|
|
f265f9c9d0 | ||
|
|
4b801baa46 | ||
|
|
26bc7827e4 | ||
|
|
c13a0a2f59 | ||
|
|
f7d59ab17a | ||
|
|
07a92c1a64 | ||
|
|
302f1b3882 | ||
|
|
1b5bdce86a | ||
|
|
075c6d325a | ||
|
|
dc9d9b8636 | ||
|
|
f1592a7ff1 | ||
|
|
13282e61b8 | ||
|
|
33588a3749 | ||
|
|
e07d7c75db | ||
|
|
8fa5fa1463 | ||
|
|
cb997ccd09 | ||
|
|
3513ccccb6 | ||
|
|
64e2864cb9 | ||
|
|
0c641ee661 | ||
|
|
ac4c725944 | ||
|
|
20c8a3f5ff | ||
|
|
b016aae662 | ||
|
|
0a58588ffb | ||
|
|
99f6adce60 | ||
|
|
5b461ffb04 | ||
|
|
85f389520d | ||
|
|
fa15e3839d | ||
|
|
b9abdd9eaa | ||
|
|
5f8b02a9ff | ||
|
|
f8fcebae95 | ||
|
|
23697c3f02 | ||
|
|
779b0fe015 | ||
|
|
f686cf77db | ||
|
|
4ea558152f | ||
|
|
11e8319b8e | ||
|
|
03ffd4b3b3 | ||
|
|
1b1ba7d4f2 | ||
|
|
baef55364f | ||
|
|
5120d30890 | ||
|
|
367b7b546f | ||
|
|
15035aaec0 | ||
|
|
9fbbd924f2 | ||
|
|
87ccf995cb | ||
|
|
020d9f2b48 | ||
|
|
32d3e67717 | ||
|
|
5c59d97e8a | ||
|
|
0b5559378b | ||
|
|
c0d19c640a | ||
|
|
794245fa71 | ||
|
|
63f5c007a7 | ||
|
|
9b061291ad | ||
|
|
e14a3a4b11 | ||
|
|
4c0ef3bfae | ||
|
|
8be6e56a53 | ||
|
|
ff93c1898d | ||
|
|
12682eba2e | ||
|
|
b08d7969c5 | ||
|
|
69cf68f5a0 | ||
|
|
0bad953cc5 | ||
|
|
09e9d61d0a | ||
|
|
ebd0ca9fee | ||
|
|
963145483c | ||
|
|
05ff3eb9f2 | ||
|
|
d5c308fcc5 | ||
|
|
55d0c353fe | ||
|
|
9ada04faa3 | ||
|
|
0482410818 | ||
|
|
c21472ec31 | ||
|
|
cb23275623 | ||
|
|
f7643454aa | ||
|
|
2db706374e | ||
|
|
a6a346e154 | ||
|
|
00ce36d037 | ||
|
|
f459c56b0f |
3800 changed files with 64961 additions and 134344 deletions
|
|
@ -1,96 +0,0 @@
|
|||
# This file describes the expected reviewers for a PR based on the changed
|
||||
# files. Unlike what the name of the file suggests they don't own the code, but
|
||||
# merely have a good understanding of that area of the codebase and therefore
|
||||
# are usually suited as a reviewer.
|
||||
|
||||
# Lines in this file match changed paths via Go-Style regular expressions:
|
||||
# https://pkg.go.dev/regexp/syntax
|
||||
|
||||
# Mind the alphabetical order
|
||||
|
||||
# avcodec
|
||||
# =======
|
||||
libavcodec/.*aac.* @lynne
|
||||
libavcodec/.*ac3.* @lynne
|
||||
libavcodec/.*atrac9.* @lynne
|
||||
libavcodec/.*bitpacked.* @lynne
|
||||
libavcodec/.*d3d12va.* @jianhuaw
|
||||
libavcodec/.*dirac.* @lynne
|
||||
libavcodec/.*ffv1.* @lynne @michaelni
|
||||
libavcodec/golomb.* @michaelni
|
||||
libavcodec/.*h266.* @frankplow @NuoMi @jianhuaw
|
||||
libavcodec/h26x/.* @frankplow @NuoMi @jianhuaw
|
||||
libavcodec/.*jpegxl.* @lynne @Traneptora
|
||||
libavcodec/.*jxl.* @lynne @Traneptora
|
||||
libavcodec/.*opus.* @lynne
|
||||
libavcodec/.*png.* @Traneptora
|
||||
libavcodec/.*prores.* @lynne
|
||||
libavcodec/rangecoder.* @michaelni
|
||||
libavcodec/ratecontrol.* @michaelni
|
||||
libavcodec/.*siren.* @lynne
|
||||
libavcodec/.*vc2.* @lynne
|
||||
libavcodec/.*vvc.* @frankplow @NuoMi @jianhuaw
|
||||
|
||||
libavcodec/aarch64/.* @lynne @mstorsjo
|
||||
libavcodec/arm/.* @mstorsjo
|
||||
libavcodec/ppc/.* @sean_mcg
|
||||
libavcodec/x86/.* @lynne
|
||||
|
||||
# avfilter
|
||||
# =======
|
||||
libavfilter/aarch64/.* @mstorsjo
|
||||
libavfilter/af_whisper.* @vpalmisano
|
||||
libavfilter/vf_yadif.* @michaelni
|
||||
libavfilter/vsrc_mandelbrot.* @michaelni
|
||||
|
||||
# avformat
|
||||
# =======
|
||||
libavformat/iamf.* @jamrial
|
||||
libavformat/.*jpegxl.* @Traneptora
|
||||
libavformat/.*jxl.* @Traneptora
|
||||
|
||||
# avutil
|
||||
# ======
|
||||
libavutil/.*crc.* @lynne @michaelni
|
||||
libavutil/.*d3d12va.* @jianhuaw
|
||||
libavutil/eval.* @michaelni
|
||||
libavutil/iamf.* @jamrial
|
||||
libavutil/integer.* @michaelni
|
||||
libavutil/lfg.* @michaelni
|
||||
libavutil/lls.* @michaelni
|
||||
libavutil/md5.* @michaelni
|
||||
libavutil/mathematics.* @michaelni
|
||||
libavutil/mem.* @michaelni
|
||||
libavutil/qsort.* @michaelni
|
||||
libavutil/random_seed.* @michaelni
|
||||
libavutil/rational.* @michaelni
|
||||
libavutil/sfc.* @michaelni
|
||||
libavutil/softfloat.* @michaelni
|
||||
libavutil/tree.* @michaelni
|
||||
libavutil/tx.* @lynne
|
||||
|
||||
libavutil/aarch64/.* @lynne @mstorsjo
|
||||
libavutil/arm/.* @mstorsjo
|
||||
libavutil/ppc/.* @sean_mcg
|
||||
libavutil/x86/.* @lynne
|
||||
|
||||
# swresample
|
||||
# =======
|
||||
libswresample/aarch64/.* @mstorsjo
|
||||
libswresample/arm/.* @mstorsjo
|
||||
libswresample/.* @michaelni
|
||||
|
||||
# swscale
|
||||
# =======
|
||||
libswscale/aarch64/.* @mstorsjo
|
||||
libswscale/arm/.* @mstorsjo
|
||||
libswscale/ppc/.* @sean_mcg
|
||||
|
||||
# doc
|
||||
# ===
|
||||
doc/.* @GyanD
|
||||
|
||||
# Frameworks
|
||||
# ==========
|
||||
.*d3d12va.* @jianhuaw
|
||||
.*vulkan.* @lynne
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
# Summary of the bug
|
||||
|
||||
Briefly describe the issue you're experiencing. Include any error messages, unexpected behavior, or relevant observations.
|
||||
|
||||
# Steps to reproduce
|
||||
|
||||
List the steps required to trigger the bug.
|
||||
Include the exact CLI command used, if any.
|
||||
Provide sample input files, logs, or scripts if available.
|
||||
|
|
@ -1,72 +0,0 @@
|
|||
module.exports = async ({github, context}) => {
|
||||
const title = (context.payload.pull_request?.title || context.payload.issue?.title || '').toLowerCase();
|
||||
const labels = [];
|
||||
const issueNumber = context.payload.pull_request?.number || context.payload.issue?.number;
|
||||
|
||||
const kwmap = {
|
||||
'avcodec': 'avcodec',
|
||||
'avdevice': 'avdevice',
|
||||
'avfilter': 'avfilter',
|
||||
'avformat': 'avformat',
|
||||
'avutil': 'avutil',
|
||||
'swresample': 'swresample',
|
||||
'swscale': 'swscale',
|
||||
'fftools': 'CLI'
|
||||
};
|
||||
|
||||
async function isOrgMember(username) {
|
||||
try {
|
||||
const response = await github.rest.orgs.checkMembershipForUser({
|
||||
org: context.repo.owner,
|
||||
username: username
|
||||
});
|
||||
return response.status === 204;
|
||||
} catch (error) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (context.payload.action === 'closed' ||
|
||||
(context.payload.action !== 'opened' && (
|
||||
context.payload.action === 'assigned' ||
|
||||
context.payload.action === 'label_updated' ||
|
||||
context.payload.comment) &&
|
||||
await isOrgMember(context.payload.sender.login))
|
||||
) {
|
||||
try {
|
||||
await github.rest.issues.removeLabel({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issueNumber,
|
||||
// this should say 'new', but forgejo deviates from GitHub API here and expects the ID
|
||||
name: '41'
|
||||
});
|
||||
console.log('Removed "new" label');
|
||||
} catch (error) {
|
||||
if (error.status !== 404 && error.status !== 410) {
|
||||
console.log('Could not remove "new" label');
|
||||
}
|
||||
}
|
||||
} else if (context.payload.action === 'opened') {
|
||||
labels.push('new');
|
||||
console.log('Detected label: new');
|
||||
}
|
||||
|
||||
if ((context.payload.action === 'opened' || context.payload.action === 'edited') && context.eventName !== 'issue_comment') {
|
||||
for (const [kw, label] of Object.entries(kwmap)) {
|
||||
if (title.includes(kw)) {
|
||||
labels.push(label);
|
||||
console.log('Detected label: ' + label);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (labels.length > 0) {
|
||||
await github.rest.issues.addLabels({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: issueNumber,
|
||||
labels: labels,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
avcodec:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libavcodec/**
|
||||
|
||||
avdevice:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libavdevice/**
|
||||
|
||||
avfilter:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libavfilter/**
|
||||
|
||||
avformat:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libavformat/**
|
||||
|
||||
avutil:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libavutil/**
|
||||
|
||||
swresample:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libswresample/**
|
||||
|
||||
swscale:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: libswscale/**
|
||||
|
||||
CLI:
|
||||
- changed-files:
|
||||
- any-glob-to-any-file: fftools/**
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
exclude: ^tests/ref/
|
||||
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v5.0.0
|
||||
hooks:
|
||||
- id: check-case-conflict
|
||||
- id: check-executables-have-shebangs
|
||||
- id: check-illegal-windows-names
|
||||
- id: check-shebang-scripts-are-executable
|
||||
- id: check-yaml
|
||||
- id: end-of-file-fixer
|
||||
- id: file-contents-sorter
|
||||
files:
|
||||
.forgejo/pre-commit/ignored-words.txt
|
||||
args:
|
||||
- --ignore-case
|
||||
- id: fix-byte-order-marker
|
||||
- id: mixed-line-ending
|
||||
- id: trailing-whitespace
|
||||
- repo: https://github.com/codespell-project/codespell
|
||||
rev: v2.4.1
|
||||
hooks:
|
||||
- id: codespell
|
||||
args:
|
||||
- --ignore-words=.forgejo/pre-commit/ignored-words.txt
|
||||
- --ignore-multiline-regex=codespell:off.*?(codespell:on|\Z)
|
||||
exclude: ^tools/(patcheck|clean-diff)$
|
||||
|
|
@ -1,119 +0,0 @@
|
|||
abl
|
||||
ACN
|
||||
acount
|
||||
addin
|
||||
alis
|
||||
alls
|
||||
ALOG
|
||||
ALS
|
||||
als
|
||||
ANC
|
||||
anc
|
||||
ANS
|
||||
ans
|
||||
anull
|
||||
basf
|
||||
bloc
|
||||
brane
|
||||
BREIF
|
||||
BU
|
||||
bu
|
||||
bufer
|
||||
CAF
|
||||
caf
|
||||
clen
|
||||
clens
|
||||
Collet
|
||||
compre
|
||||
dum
|
||||
endin
|
||||
erro
|
||||
FIEL
|
||||
fiel
|
||||
filp
|
||||
fils
|
||||
FILTERD
|
||||
filterd
|
||||
fle
|
||||
fo
|
||||
FPR
|
||||
fro
|
||||
Hald
|
||||
indx
|
||||
ine
|
||||
inh
|
||||
inout
|
||||
inouts
|
||||
inport
|
||||
ist
|
||||
LAF
|
||||
laf
|
||||
lastr
|
||||
LinS
|
||||
mapp
|
||||
mis
|
||||
mot
|
||||
nd
|
||||
nIn
|
||||
offsetp
|
||||
orderd
|
||||
ot
|
||||
outout
|
||||
padd
|
||||
PAETH
|
||||
paeth
|
||||
PARM
|
||||
parm
|
||||
parms
|
||||
pEvents
|
||||
PixelX
|
||||
Psot
|
||||
quater
|
||||
readd
|
||||
recuse
|
||||
redY
|
||||
Reencode
|
||||
reencode
|
||||
remaind
|
||||
renderD
|
||||
rin
|
||||
SAV
|
||||
SEH
|
||||
SER
|
||||
ser
|
||||
setts
|
||||
shft
|
||||
SIZ
|
||||
siz
|
||||
skipd
|
||||
sme
|
||||
som
|
||||
sover
|
||||
STAP
|
||||
startd
|
||||
statics
|
||||
struc
|
||||
suble
|
||||
TE
|
||||
tE
|
||||
te
|
||||
tha
|
||||
tne
|
||||
tolen
|
||||
tpye
|
||||
tre
|
||||
TRUN
|
||||
trun
|
||||
truns
|
||||
Tung
|
||||
TYE
|
||||
ue
|
||||
UES
|
||||
ues
|
||||
vai
|
||||
vas
|
||||
vie
|
||||
VILL
|
||||
vor
|
||||
wel
|
||||
wih
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
on:
|
||||
pull_request_target:
|
||||
types: [opened, edited, synchronize, closed, assigned, labeled, unlabeled]
|
||||
issues:
|
||||
types: [opened, edited, closed, assigned, labeled, unlabeled]
|
||||
issue_comment:
|
||||
types: [created]
|
||||
|
||||
jobs:
|
||||
pr_labeler:
|
||||
runs-on: utilities
|
||||
if: ${{ github.event.sender.login != 'ffmpeg-devel' }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Label by file-changes
|
||||
uses: https://github.com/actions/labeler@v5
|
||||
if: ${{ forge.event_name == 'pull_request_target' }}
|
||||
with:
|
||||
configuration-path: .forgejo/labeler/labeler.yml
|
||||
repo-token: ${{ secrets.AUTOLABELER_TOKEN }}
|
||||
- name: Label by title-match
|
||||
uses: https://github.com/actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const script = require('.forgejo/labeler/labeler.js')
|
||||
await script({github, context})
|
||||
github-token: ${{ secrets.AUTOLABELER_TOKEN }}
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: utilities
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Install pre-commit CI
|
||||
id: install
|
||||
run: |
|
||||
python3 -m venv ~/pre-commit
|
||||
~/pre-commit/bin/pip install --upgrade pip setuptools
|
||||
~/pre-commit/bin/pip install pre-commit
|
||||
echo "envhash=$({ python3 --version && cat .forgejo/pre-commit/config.yaml; } | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
|
||||
- name: Cache
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.cache/pre-commit
|
||||
key: pre-commit-${{ steps.install.outputs.envhash }}
|
||||
- name: Run pre-commit CI
|
||||
run: ~/pre-commit/bin/pre-commit run -c .forgejo/pre-commit/config.yaml --show-diff-on-failure --color=always --all-files
|
||||
|
|
@ -1,59 +0,0 @@
|
|||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
run_fate:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
runner: [linux-amd64,linux-aarch64]
|
||||
runs-on: ${{ matrix.runner }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Configure
|
||||
run: ./configure --enable-gpl --enable-nonfree --enable-memory-poisoning --assert-level=2
|
||||
- name: Build
|
||||
run: make -j$(nproc)
|
||||
- name: Restore Cached Fate-Suite
|
||||
id: cache
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: fate-suite
|
||||
key: fate-suite
|
||||
restore-keys: |
|
||||
fate-suite-
|
||||
- name: Sync Fate-Suite
|
||||
id: fate
|
||||
run: |
|
||||
make fate-rsync SAMPLES=$PWD/fate-suite
|
||||
echo "hash=$(find fate-suite -type f | sort | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
|
||||
- name: Cache Fate-Suite
|
||||
uses: actions/cache/save@v4
|
||||
if: ${{ format('fate-suite-{0}', steps.fate.outputs.hash) != steps.cache.outputs.cache-matched-key }}
|
||||
with:
|
||||
path: fate-suite
|
||||
key: fate-suite-${{ steps.fate.outputs.hash }}
|
||||
- name: Run Fate
|
||||
run: make fate SAMPLES=$PWD/fate-suite -j$(nproc)
|
||||
compile_only:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
image: ["ghcr.io/btbn/ffmpeg-builds/win64-gpl:latest"]
|
||||
runs-on: linux-amd64
|
||||
container: ${{ matrix.image }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Configure
|
||||
run: |
|
||||
./configure --pkg-config-flags="--static" $FFBUILD_TARGET_FLAGS $FF_CONFIGURE \
|
||||
--cc="$CC" --cxx="$CXX" --ar="$AR" --ranlib="$RANLIB" --nm="$NM" \
|
||||
--extra-cflags="$FF_CFLAGS" --extra-cxxflags="$FF_CXXFLAGS" \
|
||||
--extra-libs="$FF_LIBS" --extra-ldflags="$FF_LDFLAGS" --extra-ldexeflags="$FF_LDEXEFLAGS"
|
||||
- name: Build
|
||||
run: make -j$(nproc)
|
||||
1
.gitattributes
vendored
1
.gitattributes
vendored
|
|
@ -1,2 +1 @@
|
|||
*.pnm -diff -text
|
||||
Changelog merge=union
|
||||
|
|
|
|||
5
.gitignore
vendored
5
.gitignore
vendored
|
|
@ -1,6 +1,5 @@
|
|||
*.a
|
||||
*.o
|
||||
*.objs
|
||||
*.o.*
|
||||
*.d
|
||||
*.def
|
||||
|
|
@ -42,7 +41,3 @@
|
|||
/src
|
||||
/mapfile
|
||||
/tools/python/__pycache__/
|
||||
/libavcodec/vulkan/*.c
|
||||
/libavfilter/vulkan/*.c
|
||||
/.*/
|
||||
!/.forgejo/
|
||||
|
|
|
|||
2
.mailmap
2
.mailmap
|
|
@ -26,5 +26,3 @@ rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
|
|||
Cosmin Stejerean <cosmin@cosmin.at> Cosmin Stejerean via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
|
||||
<wutong1208@outlook.com> <tong1.wu-at-intel.com@ffmpeg.org>
|
||||
<wutong1208@outlook.com> <tong1.wu@intel.com>
|
||||
<toqsxw@outlook.com> <jianhua.wu-at-intel.com@ffmpeg.org>
|
||||
<toqsxw@outlook.com> <jianhua.wu@intel.com>
|
||||
|
|
|
|||
|
|
@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know
|
|||
that what they have is not the original version, so that the original
|
||||
author's reputation will not be affected by problems that might be
|
||||
introduced by others.
|
||||
|
||||
|
||||
Finally, software patents pose a constant threat to the existence of
|
||||
any free program. We wish to make sure that a company cannot
|
||||
effectively restrict the users of a free program by obtaining a
|
||||
|
|
@ -111,7 +111,7 @@ modification follow. Pay close attention to the difference between a
|
|||
"work based on the library" and a "work that uses the library". The
|
||||
former contains code derived from the library, whereas the latter must
|
||||
be combined with the library in order to run.
|
||||
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
|
|
@ -158,7 +158,7 @@ Library.
|
|||
You may charge a fee for the physical act of transferring a copy,
|
||||
and you may at your option offer warranty protection in exchange for a
|
||||
fee.
|
||||
|
||||
|
||||
2. You may modify your copy or copies of the Library or any portion
|
||||
of it, thus forming a work based on the Library, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
|
|
@ -216,7 +216,7 @@ instead of to this License. (If a newer version than version 2 of the
|
|||
ordinary GNU General Public License has appeared, then you can specify
|
||||
that version instead if you wish.) Do not make any other change in
|
||||
these notices.
|
||||
|
||||
|
||||
Once this change is made in a given copy, it is irreversible for
|
||||
that copy, so the ordinary GNU General Public License applies to all
|
||||
subsequent copies and derivative works made from that copy.
|
||||
|
|
@ -267,7 +267,7 @@ Library will still fall under Section 6.)
|
|||
distribute the object code for the work under the terms of Section 6.
|
||||
Any executables containing that work also fall under Section 6,
|
||||
whether or not they are linked directly with the Library itself.
|
||||
|
||||
|
||||
6. As an exception to the Sections above, you may also combine or
|
||||
link a "work that uses the Library" with the Library to produce a
|
||||
work containing portions of the Library, and distribute that work
|
||||
|
|
@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally
|
|||
accompany the operating system. Such a contradiction means you cannot
|
||||
use both them and the Library together in an executable that you
|
||||
distribute.
|
||||
|
||||
|
||||
7. You may place library facilities that are a work based on the
|
||||
Library side-by-side in a single library together with other library
|
||||
facilities not covered by this License, and distribute such a combined
|
||||
|
|
@ -370,7 +370,7 @@ subject to these terms and conditions. You may not impose any further
|
|||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties with
|
||||
this License.
|
||||
|
||||
|
||||
11. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
|
|
@ -422,7 +422,7 @@ conditions either of that version or of any later version published by
|
|||
the Free Software Foundation. If the Library does not specify a
|
||||
license version number, you may choose any version ever published by
|
||||
the Free Software Foundation.
|
||||
|
||||
|
||||
14. If you wish to incorporate parts of the Library into other free
|
||||
programs whose distribution conditions are incompatible with these,
|
||||
write to the author to ask for permission. For software which is
|
||||
|
|
@ -456,7 +456,7 @@ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
|
|||
DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
|
||||
How to Apply These Terms to Your New Libraries
|
||||
|
||||
If you develop a new library, and you want it to be of the greatest
|
||||
|
|
|
|||
258
Changelog
258
Changelog
|
|
@ -1,43 +1,221 @@
|
|||
Entries are sorted chronologically from oldest to youngest within each release,
|
||||
releases are sorted from youngest to oldest.
|
||||
|
||||
version <next>:
|
||||
|
||||
|
||||
version 8.0:
|
||||
- Whisper filter
|
||||
- Drop support for OpenSSL < 1.1.0
|
||||
- Enable TLS peer certificate verification by default (on next major version bump)
|
||||
- Drop support for OpenSSL < 1.1.1
|
||||
- yasm support dropped, users need to use nasm
|
||||
- VVC VAAPI decoder
|
||||
- RealVideo 6.0 decoder
|
||||
- OpenMAX encoders deprecated
|
||||
- libx265 alpha layer encoding
|
||||
- ADPCM IMA Xbox decoder
|
||||
- Enhanced FLV v2: Multitrack audio/video, modern codec support
|
||||
- Animated JPEG XL encoding (via libjxl)
|
||||
- VVC in Matroska
|
||||
- CENC AV1 support in MP4 muxer
|
||||
- pngenc: set default prediction method to PAETH
|
||||
- APV decoder and APV raw bitstream muxing and demuxing
|
||||
- APV parser
|
||||
- APV encoding support through a libopenapv wrapper
|
||||
- VVC decoder supports all content of SCC (Screen Content Coding):
|
||||
IBC (Inter Block Copy), Palette Mode and ACT (Adaptive Color Transform
|
||||
- G.728 decoder
|
||||
- pad_cuda filter
|
||||
- Sanyo LD-ADPCM decoder
|
||||
- APV in MP4/ISOBMFF muxing and demuxing
|
||||
- OpenHarmony hardware decoder/encoder
|
||||
- Colordetect filter
|
||||
- Add vf_scale_d3d11 filter
|
||||
- No longer disabling GCC autovectorization, on X86, ARM and AArch64
|
||||
- VP9 Vulkan hwaccel
|
||||
- AV1 Vulkan encoder
|
||||
- ProRes RAW decoder
|
||||
- ProRes RAW Vulkan hwaccel
|
||||
- ffprobe -codec option
|
||||
version 7.1.1:
|
||||
avformat/hls: Partially revert "reduce default max reload to 3"
|
||||
avformat/mov: (v4) fix get_eia608_packet
|
||||
avformat/iff: Check that we have a stream in read_dst_frame()
|
||||
avcodec/aac/aacdec_lpd: Limit get_unary()
|
||||
avcodec/aac/aacdec_usac: Simplify decode_usac_scale_factors()
|
||||
avcodec/aac/aacdec: Clear SFO on error
|
||||
avformat/mlvdec: fix size checks
|
||||
avformat/wavdec: Fix overflow of intermediate in block_align check
|
||||
avformat/mxfdec: Check edit unit for overflow in mxf_set_current_edit_unit()
|
||||
avformat/hls: Fix twitter
|
||||
avcodec/vvc/refs: fix negative pps_scaling_win offsets
|
||||
libavformat/hls: Be more restrictive on mpegts extensions
|
||||
avformat/hls: .ts is always ok even if its a mov/mp4
|
||||
avcodec/h263dec: Check against previous dimensions instead of coded
|
||||
avformat/hls: Print input format in error message
|
||||
avformat/hls: Be more picky on extensions
|
||||
avformat/iamf_parse: ensure there's at most one of each parameter types in audio elements
|
||||
avformat/iamf_parse: add missing constrains for num_parameters in audio_element_oub()
|
||||
avformat/iamf_parse: add missing av_free() call on failure path
|
||||
lavc/hevcdec: unbreak WPP/progress2 code
|
||||
fate: Add a dependency on ffprobe for fate-flcl1905
|
||||
checkasm: aacencdsp: Actually test nonzero values in quant_bands
|
||||
x86: aacencdsp: Fix negating signed values in aac_quantize_bands
|
||||
rtmpproto: Avoid rare crashes in the fail: codepath in rtmp_open
|
||||
configure: Improve the check for the rsync --contimeout option
|
||||
avutil/downmix_info: add missing semicolon
|
||||
doc/t2h: Support texinfo 7.1 and 7.2 pretest
|
||||
avfilter/drawtext: fix memory leak when using "reinit" runtime command
|
||||
avutil/downmix_info: zero the allocated buffer
|
||||
avformat/mov: fix overflow in drift timestamp calculation
|
||||
Changelog: update
|
||||
avformat/mxfdec: Check avio_read() success in mxf_decrypt_triplet()
|
||||
avcodec/huffyuvdec: Initialize whole output for decode_gray_bitstream()
|
||||
avformat/iamf_reader: Initialize padding and check read in ff_iamf_read_packet()
|
||||
avformat/ipmovie: Check signature_buffer read
|
||||
avformat/wtvdec: Initialize buf
|
||||
avcodec/cbs_vp9: Initialize VP9RawSuperframeIndex
|
||||
avformat/vqf: Propagate errors from add_metadata()
|
||||
avformat/vqf: Check avio_read() in add_metadata()
|
||||
avcodec/ffv1enc: Fix RCT for GBR colorspace
|
||||
avformat/dashdec: Check whitelist
|
||||
avutil/avstring: dont mess with NULL pointers in av_match_list()
|
||||
avfilter/vf_v360: Fix NULL pointer use
|
||||
avcodec/mpegvideo_enc: Check FLV1 resolution limits
|
||||
avcodec/ffv1enc: Fix handling of 32bit unsigned symbols
|
||||
avformat/mov: perform sanity checks for heif before index building
|
||||
avformat/mov: Factorize sanity check out
|
||||
avcodec/vc1dec: Clear block_index in vc1_decode_reset()
|
||||
avcodec/aacsbr_template: Clear n_q on error
|
||||
avformat/iamf_parse: Check output_channel_count
|
||||
avcodec/osq: Fixes several undefined overflows in do_decode()
|
||||
swscale/output: Fix undefined overflow in yuv2rgba64_full_X_c_template()
|
||||
avfilter/af_pan: Fix sscanf() use
|
||||
avfilter/vf_grayworld: Use the correct pointer for av_log()
|
||||
avfilter/vf_addroi: Add missing NULL termination to addroi_var_names[]()
|
||||
avcodec/get_buffer: Use av_buffer_mallocz() for audio same as its done for video
|
||||
avformat/jpegxl_anim_dec: clear buffer padding
|
||||
avformat/rmdec: check that buf if completely filled
|
||||
avcodec/cfhdenc: Clear dwt_tmp
|
||||
avcodec/hapdec: Clear tex buffer
|
||||
avformat/mxfdec: Check that key was read sucessfull
|
||||
avformat/hevc: fix writing hvcC when no arrays are provided in hvcC-formatted input
|
||||
avformat/rtpdec: int overflow in start_time_realtime
|
||||
avcodec/decode: Fix incorrect enum type used in side_data_map()
|
||||
avformat/mov: fix crash when trying to get a fragment time for a non-existing fragment
|
||||
avformat/libssh: fix credential variables typo
|
||||
avformat/hlsenc: check return value of avcodec_parameters_copy()
|
||||
avformat/dashdec: format open_demux_for_component()
|
||||
avformat/dashdec: check return code of avcodec_parameters_copy()
|
||||
avformat/dashdec: return ret directly in open_demux_for_component()
|
||||
avformat/smoothstreamingenc: check return value of avcodec_parameters_copy()
|
||||
avcodec/cbs_av1: fix variable shadowing in cbs_av1_split_fragment()
|
||||
doc/demuxers/dvdvideo: seeking is supported, remove outdated statement
|
||||
avformat/dvdvideodec: check return code of ff_dvdclut_yuv_to_rgb()
|
||||
avformat/dvdvideodec: fix missing last chapter marker due to off-by-one
|
||||
avformat/dvdvideodec: don't allow seeking beyond dvdnav reported duration
|
||||
avformat/dvdvideodec: discard duplicate or partial AC3 samples
|
||||
avformat/dvdvideodec: drop packets with unset PTS or DTS
|
||||
avformat/dvdvideodec: remove unnecessary need_parsing argument
|
||||
avformat/dvdvideodec: open subdemuxer after initializing IFO headers
|
||||
avformat/dvdvideodec: remove auto value for menu_lu option
|
||||
avformat/dvdvideodec: default menu_vts option to 1 and clarify description
|
||||
avformat/dvdvideodec: check the length of a NAV packet when reading titles
|
||||
avformat/dvdvideodec: reset the subdemuxer on discontinuity instead of flushing
|
||||
avformat/dvdvideodec: simplify dvdvideo_read_packet()
|
||||
avformat/dvdvideodec: enable chapter calculation for menus
|
||||
avformat/dvdvideodec: standardize the NAV packet event signal
|
||||
avformat/dvdvideodec: move memcpy below missed NAV packet warning
|
||||
avformat/dvdvideodec: remove "auto" value for -pg option, default to 1
|
||||
avformat/dvdvideodec: measure duration of the current menu VOBU in state
|
||||
avformat/dvdvideodec: fix menu PGC number off-by-one in state
|
||||
avformat/dvdvideodec: remove unused headers
|
||||
lavc/aarch64: Fix ff_pred16x16_plane_neon_10
|
||||
lavc/aarch64: Fix ff_pred8x8_plane_neon_10
|
||||
aarch64/vvc: Fix clip in alf
|
||||
vp9: recon: Use emulated edge to prevent buffer overflows
|
||||
arm: vp9mc: Load only 12 pixels in the 4 pixel wide horizontal filter
|
||||
aarch64: vp9mc: Load only 12 pixels in the 4 pixel wide horizontal filter
|
||||
avformat/rpl: Fix check for negative values
|
||||
avformat/mlvdec: Check avio_read()
|
||||
avcodec/aac/aacdec: Free channel layout
|
||||
avformat/mov: dereference pointer after null check
|
||||
avcodec/utils: Fix block align overflow for ADPCM_IMA_WAV
|
||||
avformat/matroskadec: Check pre_ns for overflow
|
||||
tools/target_dec_fuzzer: Adjust threshold for EACMV
|
||||
tools/target_dec_fuzzer: Adjust threshold for MVC1
|
||||
tools/target_dec_fuzzer: Adjust Threshold for indeo5
|
||||
avutil/timecode: Avoid fps overflow in av_timecode_get_smpte_from_framenum()
|
||||
avcodec/aac/aacdec_usac: Dont leave type at a invalid value
|
||||
avcodec/aac/aacdec_usac: Clean ics2->max_sfb when first SCE fails
|
||||
avcodec/webp: Check ref_x/y
|
||||
avcodec/ilbcdec: Initialize tempbuff2
|
||||
swscale/swscale_unscaled: Fix odd height with nv24_to_yuv420p_chroma()
|
||||
avcodec/hevc/hevcdec: initialize qp_y_tab
|
||||
avformat/qcp: Check for read failure in header
|
||||
avcodec/eatgq: Check bytestream2_get_buffer() for failure
|
||||
avformat/dxa: check bpc
|
||||
swscale/slice: clear allocated memory in alloc_lines()
|
||||
avcodec/h2645_parse: Ignore NAL with nuh_layer_id == 63
|
||||
MAINTAINERS: Lauri is still available but is really low on time nowadays
|
||||
MAINTAINERS: Remove Guillaume Poirier and Romain Dolbeau
|
||||
avcodec/mjpegdec: Disallow progressive bayer images
|
||||
avformat/icodec: fix integer overflow with nb_pal
|
||||
doc/developer: Document relationship between git accounts and MAINTAINERS
|
||||
doc/infra: Document trac backup system
|
||||
doc/infra: Document gitolite
|
||||
avformat/vividas: Check avio_read() for failure
|
||||
avformat/ilbc: Check avio_read() for failure
|
||||
avformat/nistspheredec: Clear buffer
|
||||
avformat/mccdec: Initialize and check rate.den
|
||||
avformat/rpl: check channels
|
||||
INSTALL: explain the circular dependency issue and solution
|
||||
avformat/mpegts: Initialize predefined_SLConfigDescriptor_seen
|
||||
avformat/mxfdec: Fix overflow in midpoint computation
|
||||
swscale/output: used unsigned for bit accumulation
|
||||
swscale/rgb2rgb_template: Fix ff_rgb24toyv12_c() with odd height
|
||||
avcodec/rangecoder: only perform renorm check/loop for callers that need it
|
||||
avcodec/ffv1: add a named constant for the quant table size
|
||||
avcodec/ffv1: RCT is only possible with RGB
|
||||
avcodec/ffv1enc: Fix RCT with RGB64
|
||||
avcodec/ffv1dec: Fix end computation with ec=2
|
||||
avcodec/ffv1enc: Move slice termination into threads
|
||||
avcodec/ffv1enc: Prevent generation of files with broken slices
|
||||
avformat/matroskadec: Check desc_bytes so bits fit in 64bit
|
||||
avformat/mov: Avoid overflow in dts
|
||||
avcodec/ffv1enc: Correct error message about unsupported version
|
||||
avcodec/ffv1: Store and reuse sx/sy
|
||||
avcodec/ffv1enc: Slice combination is unsupported
|
||||
avcodec/ffv1enc: 2Pass mode is not possible with golomb coding
|
||||
avfilter/buffersrc: check for valid sample rate
|
||||
avcodec/libdav1d: clear the buffered Dav1dData on decoding failure
|
||||
avformat/iamf_writer: ensure the stream groups are not empty
|
||||
avformat/iamf_writer: fix setting num_samples_per_frame for OPUS
|
||||
avformat/iamf_parse: fix setting duration for the last subblock in a parameter definition
|
||||
avformat/iamf_parse: add checks to parameter definition durations
|
||||
avformat/iamf_parse: reject ambisonics mode > 1
|
||||
checkasm: Print benchmarks of C-only functions
|
||||
avcodec/ac3dec: fix downmix logic for eac3
|
||||
avcodec/codec_desc: remove Intra Only prop for AAC
|
||||
avcodec/mediacodecdec: set set keyframe flag in output frames
|
||||
avcodec/libfdk-aacenc: set keyframe in output packets
|
||||
avcodec/libfdk-aacdec: set keyframe flag and profile in output frames
|
||||
avcodec/audiotoolboxnec: set set keyframe flag in output packets
|
||||
avcodec/audiotoolboxdec: set set keyframe flag in output frames
|
||||
avcodec/aacenc: set keyframe flag in output packets
|
||||
avcodec/aac/aacdec: set keyframe flag in output frames
|
||||
avcodec/aac_parser: set key_frame and profile
|
||||
avformat/mov: don't unconditionally set all audio packets in fragments as key frames
|
||||
avformat/matroskadec: set all frames in a keyframe simple box as keyframes
|
||||
avformat/test/movenc: set audio packets as key frames
|
||||
avformat/movenc: write stss boxes for xHE-AAC
|
||||
avformat/spdifdec: parse headers for audio codecs
|
||||
avformat/movenc: don't disable edit lists when writing CMAF output
|
||||
avcodec/libfdk-aacenc: export CPB properties
|
||||
avformat/movenc: don't write a calculated avgBitrate when the provided one is unset
|
||||
libavutil/riscv: Make use of elf_aux_info() on FreeBSD / OpenBSD riscv
|
||||
libavutil/ppc: defines involving bit shifts should be unsigned
|
||||
libavutil/ppc: Include the hardware feature flags like the other archs
|
||||
lavu/riscv: fix compilation without Vector support
|
||||
avfilter/f_loop: fix aloop activate logic
|
||||
avfilter/f_loop: fix length of aloop leftover buffer
|
||||
avfilter/vf_zscale: align the frame buffers
|
||||
lavfi/vf_zscale: fix call to av_pix_fmt_count_planes
|
||||
lavfi/vf_zscale: fix tmp buffer ptr alignment for zimg_filter_graph_process
|
||||
avfilter/framepool: align the frame buffers
|
||||
avcodec/h2645_sei: use the RefStruct API for film_grain_characteristics
|
||||
avcodec/aom_film_grain: allocate film grain metadata dynamically
|
||||
avformat/mov: use an array of pointers for heif_item
|
||||
avformat/mov: split off heif item initialization to its own function
|
||||
avformat/mov: factorize getting the current item
|
||||
lavc/h264idct: fix RISC-V group multiplier
|
||||
lavc/h264dsp: move RISC-V fn pointers to .data.rel.ro
|
||||
avcodec/jpegxl_parser: fix reading lz77-pair as initial entropy symbol
|
||||
avcodec/jpegxl_parser: check entropy_decoder_read_symbol return value
|
||||
avcodec/cbs_h266: Fix regression in DVB clip introduced by 93281630a71c06642adfebebb0d4b105a4e02e91
|
||||
avcodec/x86/vvc: add prototypes for OF functions
|
||||
Document stream specifier syntax change from 46cbe4ab5c
|
||||
fftools/ffplay: fix crash when vk renderer is null
|
||||
avutil/wchar_filename: re-introduce explicit cast of void* to char*
|
||||
fate/ffmpeg: add samples dependency to fate-ffmpeg-spec-disposition
|
||||
fftools/ffmpeg_filter: treat apad filter as a source
|
||||
lavc/avcodec: fix global/private option precendence
|
||||
avfilter/framesync: fix forward EOF pts
|
||||
avcodec/vaapi_encode: fix compilation without CONFIG_VAAPI_1
|
||||
libavcodec: x86: Remove an explicit include of config.asm
|
||||
checkasm: lls: Use relative tolerances rather than absolute ones
|
||||
arm: Consistently use proper interworking function returns
|
||||
avcodec/libx265: unbreak build for X265_BUILD >= 213
|
||||
fftools: log unconnected filter output label
|
||||
fftools: do not access out of bounds filtergraph
|
||||
avcodec/mediacodecenc: Fix access of uninitialized value
|
||||
avformat/img2enc: Fix integer truncation when frame_pts is enabled
|
||||
avformat/internal: Add ff_get_frame_filename
|
||||
avformat/mov: don't return the latest stream when an item stream is expected
|
||||
|
||||
|
||||
version 7.1:
|
||||
|
|
@ -75,11 +253,6 @@ version 7.1:
|
|||
question mark is a part of <val> or not)
|
||||
- multiple stream types in a single specifier (e.g. :s:s:0) now cause an
|
||||
error, as such a specifier makes no sense
|
||||
- Mastering Display and Content Light Level metadata support in hevc_nvenc
|
||||
and av1_nvenc encoders
|
||||
- libswresample now accepts custom order channel layouts as input, with some
|
||||
constrains
|
||||
- FFV1 parser
|
||||
|
||||
|
||||
version 7.0:
|
||||
|
|
@ -168,7 +341,6 @@ version 6.1:
|
|||
variable-fields elements within the same parent element
|
||||
- ffprobe -output_format option added as an alias of -of
|
||||
|
||||
# codespell:off
|
||||
|
||||
version 6.0:
|
||||
- Radiance HDR image support
|
||||
|
|
|
|||
|
|
@ -1,7 +0,0 @@
|
|||
{
|
||||
"drips": {
|
||||
"ethereum": {
|
||||
"ownedBy": "0x2f3900e7064eE63D30d749971265858612AA7139"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,8 +1,5 @@
|
|||
## Installing FFmpeg
|
||||
|
||||
0. If you like to include source plugins, merge them before configure
|
||||
for example run tools/merge-all-source-plugins
|
||||
|
||||
1. Type `./configure` to create the configuration. A list of configure
|
||||
options is printed by running `configure --help`.
|
||||
|
||||
|
|
@ -24,5 +21,5 @@ NOTICE for Package Maintainers
|
|||
|
||||
- It is recommended to build FFmpeg twice, first with minimal external dependencies so
|
||||
that 3rd party packages, which depend on FFmpegs libavutil/libavfilter/libavcodec/libavformat
|
||||
can then be built. And last build FFmpeg with full dependencies (which may in turn depend on
|
||||
can then be built. And last build FFmpeg with full dependancies (which may in turn depend on
|
||||
some of these 3rd party packages). This avoids circular dependencies during build.
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
|
|||
|
||||
Specifically, the GPL parts of FFmpeg are:
|
||||
|
||||
- libpostproc
|
||||
- optional x86 optimization in the files
|
||||
- `libavcodec/x86/flac_dsp_gpl.asm`
|
||||
- `libavcodec/x86/idct_mmx.c`
|
||||
|
|
@ -44,6 +45,7 @@ Specifically, the GPL parts of FFmpeg are:
|
|||
- `vf_owdenoise.c`
|
||||
- `vf_perspective.c`
|
||||
- `vf_phase.c`
|
||||
- `vf_pp.c`
|
||||
- `vf_pp7.c`
|
||||
- `vf_pullup.c`
|
||||
- `vf_repeatfields.c`
|
||||
|
|
|
|||
26
MAINTAINERS
26
MAINTAINERS
|
|
@ -45,20 +45,17 @@ Commandline utility code:
|
|||
QuickTime faststart:
|
||||
tools/qt-faststart.c Baptiste Coudurier
|
||||
|
||||
Execution Graph Printing
|
||||
fftools/graph, fftools/resources [2] softworkz
|
||||
|
||||
Miscellaneous Areas
|
||||
===================
|
||||
|
||||
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
|
||||
project server day to day operations (L: root@ffmpeg.org) Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
project server emergencies (L: root@ffmpeg.org) Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
project server day to day operations (L: root@ffmpeg.org) Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
project server emergencies (L: root@ffmpeg.org) Árpád Gereöffy, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
|
||||
presets [0]
|
||||
metadata subsystem Aurelien Jacobs
|
||||
release management Michael Niedermayer
|
||||
API tests [0]
|
||||
samples-request [2] Thilo Borgmann, James Almer, Ben Littler
|
||||
|
||||
|
||||
Communication
|
||||
|
|
@ -85,14 +82,12 @@ Other:
|
|||
aes_ctr.c, aes_ctr.h Eran Kornblau
|
||||
bprint Nicolas George
|
||||
bswap.h
|
||||
csp.c, csp.h Leo Izen, Ronald S. Bultje
|
||||
des Reimar Doeffinger
|
||||
dynarray.h Nicolas George
|
||||
eval.c, eval.h [2] Michael Niedermayer
|
||||
float_dsp Loren Merritt
|
||||
hash Reimar Doeffinger
|
||||
hwcontext_cuda* Timo Rothenpieler
|
||||
hwcontext_d3d12va* Wu Jianhua
|
||||
hwcontext_vulkan* [2] Lynne
|
||||
intfloat* Michael Niedermayer
|
||||
integer.c, integer.h Michael Niedermayer
|
||||
|
|
@ -138,6 +133,8 @@ Generic Parts:
|
|||
ratecontrol.c [2] Michael Niedermayer
|
||||
simple IDCT:
|
||||
simple_idct.c, simple_idct.h [2] Michael Niedermayer
|
||||
postprocessing:
|
||||
libpostproc/* [2] Michael Niedermayer
|
||||
table generation:
|
||||
tableprint.c, tableprint.h Reimar Doeffinger
|
||||
fixed point FFT:
|
||||
|
|
@ -179,7 +176,6 @@ Codecs:
|
|||
dss_sp.c Oleksij Rempel
|
||||
dv.c Roman Shaposhnik
|
||||
dvbsubdec.c Anshul Maheshwari
|
||||
dxv.*, dxvenc.* Emma Worley
|
||||
eacmv*, eaidct*, eat* Peter Ross
|
||||
exif.c, exif.h Thilo Borgmann
|
||||
ffv1* [2] Michael Niedermayer
|
||||
|
|
@ -210,6 +206,7 @@ Codecs:
|
|||
libgsm.c Michel Bardiaux
|
||||
libkvazaar.c Arttu Ylä-Outinen
|
||||
libopenh264enc.c Martin Storsjo, Linjie Fu
|
||||
libopenjpeg.c Jaikrishnan Menon
|
||||
libopenjpegenc.c Michael Bradshaw
|
||||
libtheoraenc.c David Conrad
|
||||
libvorbis.c David Conrad
|
||||
|
|
@ -247,7 +244,6 @@ Codecs:
|
|||
rpza.c Roberto Togni
|
||||
rtjpeg.c, rtjpeg.h Reimar Doeffinger
|
||||
rv10.c Michael Niedermayer
|
||||
sanm.c Manuel Lauss
|
||||
smc.c Mike Melanson
|
||||
snow* Michael Niedermayer, Loren Merritt
|
||||
sonic.c Alex Beregszaszi
|
||||
|
|
@ -270,7 +266,7 @@ Codecs:
|
|||
vp8 David Conrad, Ronald Bultje
|
||||
vp9 Ronald Bultje
|
||||
vqavideo.c Mike Melanson
|
||||
vvc [2] Nuo Mi, Wu Jianhua, Frank Plowman
|
||||
vvc [2] Nuo Mi
|
||||
wmaprodec.c Sascha Sommer
|
||||
wmavoice.c Ronald S. Bultje
|
||||
wmv2.c Michael Niedermayer
|
||||
|
|
@ -280,7 +276,6 @@ Codecs:
|
|||
Hardware acceleration:
|
||||
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
|
||||
d3d11va* Steve Lhomme
|
||||
d3d12va* Wu Jianhua
|
||||
d3d12va_encode* Tong Wu
|
||||
mediacodec* Matthieu Bouron, Aman Gupta, Zhao Zhili
|
||||
vaapi* Haihao Xiang
|
||||
|
|
@ -396,7 +391,7 @@ Muxers/Demuxers:
|
|||
dss.c Oleksij Rempel
|
||||
dtsdec.c foo86
|
||||
dv.c Roman Shaposhnik
|
||||
dvdvideodec.c [2] Marth64
|
||||
dvdvideodec.c Marth64
|
||||
electronicarts.c Peter Ross
|
||||
evc* Samsung (Dawid Kozinski)
|
||||
ffm* Baptiste Coudurier
|
||||
|
|
@ -450,8 +445,7 @@ Muxers/Demuxers:
|
|||
pva.c Ivo van Poorten
|
||||
r3d.c Baptiste Coudurier
|
||||
raw.c Michael Niedermayer
|
||||
rcwtdec.c [2] Marth64
|
||||
rcwtenc.c [2] Marth64
|
||||
rcwtenc.c Marth64
|
||||
rdt.c Ronald S. Bultje
|
||||
rl2.c Sascha Sommer
|
||||
rmdec.c, rmenc.c Ronald S. Bultje
|
||||
|
|
@ -470,7 +464,6 @@ Muxers/Demuxers:
|
|||
sdp.c Martin Storsjo
|
||||
segafilm.c Mike Melanson
|
||||
segment.c Stefano Sabatini
|
||||
smush.c Manuel Lauss
|
||||
spdif* Anssi Hannula
|
||||
srtdec.c Aurelien Jacobs
|
||||
swf.c Baptiste Coudurier
|
||||
|
|
@ -518,7 +511,6 @@ Operating systems / CPU architectures
|
|||
Alpha [0]
|
||||
MIPS Manojkumar Bhosale, Shiyou Yin
|
||||
LoongArch [2] Shiyou Yin
|
||||
Darwin (macOS, iOS) [2] Marvin Scholz
|
||||
Mac OS X / PowerPC [0]
|
||||
Amiga / PowerPC Colin Ward
|
||||
Linux / PowerPC [1] Lauri Kasanen
|
||||
|
|
@ -549,7 +541,6 @@ James Darnley
|
|||
Jan Ekström
|
||||
Joakim Plate
|
||||
Jun Zhao
|
||||
Kacper Michajłow
|
||||
Kieran Kunhya
|
||||
Kirill Gavrilov
|
||||
Limin Wang
|
||||
|
|
@ -591,7 +582,6 @@ Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
|
|||
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
|
||||
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
|
||||
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
|
||||
Frank Plowman 34E2 48D6 B7DF 4769 70C7 3304 03A8 4C6A 098F 2C6B
|
||||
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
|
||||
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
|
||||
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F
|
||||
|
|
|
|||
12
Makefile
12
Makefile
|
|
@ -19,20 +19,14 @@ vpath %/fate_config.sh.template $(SRC_PATH)
|
|||
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
|
||||
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
|
||||
|
||||
ALLFFLIBS = \
|
||||
avcodec \
|
||||
avdevice \
|
||||
avfilter \
|
||||
avformat \
|
||||
avutil \
|
||||
swscale \
|
||||
swresample \
|
||||
ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
|
||||
|
||||
# $(FFLIBS-yes) needs to be in linking order
|
||||
FFLIBS-$(CONFIG_AVDEVICE) += avdevice
|
||||
FFLIBS-$(CONFIG_AVFILTER) += avfilter
|
||||
FFLIBS-$(CONFIG_AVFORMAT) += avformat
|
||||
FFLIBS-$(CONFIG_AVCODEC) += avcodec
|
||||
FFLIBS-$(CONFIG_POSTPROC) += postproc
|
||||
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
|
||||
FFLIBS-$(CONFIG_SWSCALE) += swscale
|
||||
|
||||
|
|
@ -110,7 +104,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
|
|||
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
|
||||
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
|
||||
MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS RVVB-OBJS \
|
||||
OBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS SIMD128-OBJS
|
||||
OBJS SLIBOBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS
|
||||
|
||||
define RESET
|
||||
$(1) :=
|
||||
|
|
|
|||
2
RELEASE
2
RELEASE
|
|
@ -1 +1 @@
|
|||
8.0.git
|
||||
7.1.1
|
||||
|
|
|
|||
15
RELEASE_NOTES
Normal file
15
RELEASE_NOTES
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
|
||||
┌──────────────────────────────────────┐
|
||||
│ RELEASE NOTES for FFmpeg 7.1 "Péter" │
|
||||
└──────────────────────────────────────┘
|
||||
|
||||
The FFmpeg Project proudly presents FFmpeg 7.1 "Péter", about 6
|
||||
months after the release of FFmpeg 7.0.
|
||||
|
||||
A complete Changelog is available at the root of the project, and the
|
||||
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git
|
||||
|
||||
We hope you will like this release as much as we enjoyed working on it, and
|
||||
as usual, if you have any questions about it, or any FFmpeg related topic,
|
||||
feel free to join us on the #ffmpeg IRC channel (on irc.libera.chat) or ask
|
||||
on the mailing-lists.
|
||||
|
|
@ -189,7 +189,4 @@ static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a);
|
|||
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
|
||||
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }
|
||||
|
||||
// Misc helper functions
|
||||
extern "C" __device__ int printf(const char*, ...);
|
||||
|
||||
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ static int optind = 1;
|
|||
static int optopt;
|
||||
static char *optarg;
|
||||
|
||||
static int getopt(int argc, char *argv[], const char *opts)
|
||||
static int getopt(int argc, char *argv[], char *opts)
|
||||
{
|
||||
static int sp = 1;
|
||||
int c;
|
||||
|
|
|
|||
|
|
@ -218,7 +218,7 @@ while (<F>) {
|
|||
# Lines of the form '} SOME_VERSION_NAME_1.0;'
|
||||
if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) {
|
||||
$glob = 'glob';
|
||||
# We tried to match symbols against this version, but none matched.
|
||||
# We tried to match symbols agains this version, but none matched.
|
||||
# Emit dummy hidden symbol to avoid marking this version WEAK.
|
||||
if ($matches_attempted && $matched_symbols == 0) {
|
||||
print " hidden:\n";
|
||||
|
|
|
|||
|
|
@ -44,14 +44,13 @@
|
|||
#include "libavutil/internal.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/time.h"
|
||||
#include "libavutil/wchar_filename.h"
|
||||
|
||||
typedef struct pthread_t {
|
||||
void *handle;
|
||||
void *(*func)(void* arg);
|
||||
void *arg;
|
||||
void *ret;
|
||||
} *pthread_t;
|
||||
} pthread_t;
|
||||
|
||||
/* use light weight mutex/condition variable API for Windows Vista and later */
|
||||
typedef SRWLOCK pthread_mutex_t;
|
||||
|
|
@ -75,7 +74,7 @@ typedef CONDITION_VARIABLE pthread_cond_t;
|
|||
static av_unused THREADFUNC_RETTYPE
|
||||
__stdcall attribute_align_arg win32thread_worker(void *arg)
|
||||
{
|
||||
pthread_t h = (pthread_t)arg;
|
||||
pthread_t *h = (pthread_t*)arg;
|
||||
h->ret = h->func(h->arg);
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -83,35 +82,21 @@ __stdcall attribute_align_arg win32thread_worker(void *arg)
|
|||
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
|
||||
void *(*start_routine)(void*), void *arg)
|
||||
{
|
||||
pthread_t ret;
|
||||
|
||||
ret = av_mallocz(sizeof(*ret));
|
||||
if (!ret)
|
||||
return EAGAIN;
|
||||
|
||||
ret->func = start_routine;
|
||||
ret->arg = arg;
|
||||
thread->func = start_routine;
|
||||
thread->arg = arg;
|
||||
#if HAVE_WINRT
|
||||
ret->handle = (void*)CreateThread(NULL, 0, win32thread_worker, ret,
|
||||
0, NULL);
|
||||
thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread,
|
||||
0, NULL);
|
||||
#else
|
||||
ret->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, ret,
|
||||
0, NULL);
|
||||
thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
|
||||
0, NULL);
|
||||
#endif
|
||||
|
||||
if (!ret->handle) {
|
||||
av_free(ret);
|
||||
return EAGAIN;
|
||||
}
|
||||
|
||||
*thread = ret;
|
||||
|
||||
return 0;
|
||||
return !thread->handle;
|
||||
}
|
||||
|
||||
static av_unused int pthread_join(pthread_t thread, void **value_ptr)
|
||||
{
|
||||
DWORD ret = WaitForSingleObject(thread->handle, INFINITE);
|
||||
DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
|
||||
if (ret != WAIT_OBJECT_0) {
|
||||
if (ret == WAIT_ABANDONED)
|
||||
return EINVAL;
|
||||
|
|
@ -119,9 +104,8 @@ static av_unused int pthread_join(pthread_t thread, void **value_ptr)
|
|||
return EDEADLK;
|
||||
}
|
||||
if (value_ptr)
|
||||
*value_ptr = thread->ret;
|
||||
CloseHandle(thread->handle);
|
||||
av_free(thread);
|
||||
*value_ptr = thread.ret;
|
||||
CloseHandle(thread.handle);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
@ -210,38 +194,4 @@ static inline int pthread_setcancelstate(int state, int *oldstate)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline int win32_thread_setname(const char *name)
|
||||
{
|
||||
#if !HAVE_UWP
|
||||
typedef HRESULT (WINAPI *SetThreadDescriptionFn)(HANDLE, PCWSTR);
|
||||
|
||||
// Although SetThreadDescription lives in kernel32.dll, on Windows Server 2016,
|
||||
// Windows 10 LTSB 2016 and Windows 10 version 1607, it was only available in
|
||||
// kernelbase.dll. So, load it from there for maximum coverage.
|
||||
HMODULE kernelbase = GetModuleHandleW(L"kernelbase.dll");
|
||||
if (!kernelbase)
|
||||
return AVERROR(ENOSYS);
|
||||
|
||||
SetThreadDescriptionFn pSetThreadDescription =
|
||||
(SetThreadDescriptionFn)GetProcAddress(kernelbase, "SetThreadDescription");
|
||||
if (!pSetThreadDescription)
|
||||
return AVERROR(ENOSYS);
|
||||
|
||||
wchar_t *wname;
|
||||
if (utf8towchar(name, &wname) < 0)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
HRESULT hr = pSetThreadDescription(GetCurrentThread(), wname);
|
||||
av_free(wname);
|
||||
return SUCCEEDED(hr) ? 0 : AVERROR(EINVAL);
|
||||
#else
|
||||
// UWP is not supported because we cannot use LoadLibrary/GetProcAddress to
|
||||
// detect the availability of the SetThreadDescription API. There is a small
|
||||
// gap in Windows builds 1507-1607 where it was not available. UWP allows
|
||||
// querying the availability of APIs with QueryOptionalDelayLoadedAPI, but it
|
||||
// requires /DELAYLOAD:kernel32.dll during linking, and we cannot enforce that.
|
||||
return AVERROR(ENOSYS);
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* COMPAT_W32PTHREADS_H */
|
||||
|
|
|
|||
221
doc/APIchanges
221
doc/APIchanges
|
|
@ -1,222 +1,7 @@
|
|||
The last version increases of all libraries were on 2025-03-28
|
||||
The last version increases of all libraries were on 2024-03-07
|
||||
|
||||
API changes, most recent first:
|
||||
|
||||
2025-07-29 - 1c85a3832af - lavc 62.10.100 - smpte_436m.h
|
||||
Add a new public header smpte_436m.h with API for
|
||||
manipulating AV_CODEC_ID_SMPTE_436M_ANC data.
|
||||
|
||||
2025-07-10 - a566fcb9dc0 - lavf 62.2.100
|
||||
mxf [de]muxer now uses AV_CODEC_ID_SMPTE_436M_ANC for
|
||||
the vbi_vanc_smpte_436M streams instead of AV_CODEC_ID_NONE.
|
||||
|
||||
2025-07-10 - f4ff379baea - lavc 62.10.100 - codec_id.h
|
||||
Add AV_CODEC_ID_SMPTE_436M_ANC.
|
||||
|
||||
2025-08-08 - 83b36f54108 - lavc 62.9.100 - codec_id.h
|
||||
Add AV_CODEC_ID_PRORES_RAW.
|
||||
|
||||
2025-07-31 - 119d127d05c - lavu 60.7.100 - spherical.h
|
||||
Add AV_SPHERICAL_PARAMETRIC_IMMERSIVE.
|
||||
|
||||
2025-07-20 - 157d3b007e9 - lavu 60.6.100 - attributes.h, avstring.h
|
||||
Add av_scanf_format() and use it on av_sscanf().
|
||||
|
||||
2025-07-18 - fbda5ffb953 - lavu 60.5.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_OHCODEC.
|
||||
|
||||
2025-07-18 - fbda5ffb953 - lavu 60.5.100 - hwcontext.h
|
||||
Add AV_HWDEVICE_TYPE_OHCODEC and AVOHCodecDeviceContext.
|
||||
|
||||
2025-07-14 - b24155cae11 - lavfi 11.2.100 - avfilter.h
|
||||
Add AVFilterGraph->max_buffered_frames.
|
||||
|
||||
2025-07-07 - eca477da52 - lavc 62.6.100 - packet.h
|
||||
Add AV_PKT_DATA_RTCP_SR.
|
||||
|
||||
2025-07-01 - 39d5a998bd - lavc 62.4.101 - packet.h
|
||||
Add AV_PKT_DATA_3D_REFERENCE_DISPLAYS.
|
||||
|
||||
2025-07-01 - b2e4b0e282 - lavu 60.4.101 - frame.h
|
||||
Add AV_FRAME_DATA_3D_REFERENCE_DISPLAYS.
|
||||
|
||||
2025-07-01 - 80a05bea4f - lavu 60.4.100 - tdrdi.h
|
||||
Add AV3DReferenceDisplaysInfo and AV3DReferenceDisplay structs.
|
||||
Add av_tdrdi_alloc() and av_tdrdi_get_display().
|
||||
|
||||
2025-05-21 - 004cc60f0e3 - lavu 60.3.100 - avassert.h
|
||||
Add av_unreachable() and av_assume() macros.
|
||||
|
||||
2025-02-15 - e2f39671ae2 - lavfi 10.10.100 - avfilter.h
|
||||
Add avfilter_link_get_hw_frames_ctx().
|
||||
|
||||
2025-04-21 - bf1579c904a - lavu 60.2.100 - log.h
|
||||
Add AV_CLASS_CATEGORY_HWDEVICE.
|
||||
|
||||
2025-04-16 - c818c67991 - libpostproc 59.1.100 - postprocess.h
|
||||
Deprecate PP_CPU_CAPS_3DNOW.
|
||||
|
||||
2025-04-07 - 19e9a203b7 - lavu 60.01.100 - dict.h
|
||||
Add AV_DICT_DEDUP.
|
||||
|
||||
2025-03-17 - 49af9746e8f - lavu 59.60.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_GBRAP32BE and AV_PIX_FMT_GBRAP32LE.
|
||||
|
||||
2025-03-10 - 61fc9b6fee1 - lavu 59.59.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_YAF16BE, AV_PIX_FMT_YAF16LE, AV_PIX_FMT_YAF32BE,
|
||||
and AV_PIX_FMT_YAF32LE.
|
||||
|
||||
2025-03-01 - 0245e9382c7 - lavu 59.58.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_GRAY32BE and AV_PIX_FMT_GRAY32LE.
|
||||
|
||||
2025-02-04 - 0ef678f5c50 - lavu 59.56.000 - pixfmt.h
|
||||
Add AV_PIX_FMT_AMF_SURFACE.
|
||||
|
||||
2025-01-09 - a73760da537 - lavu 59.55.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_GBRPF16BE, AV_PIX_FMT_GBRPF16LE, AV_PIX_FMT_GBRAPF16BE,
|
||||
AV_PIX_FMT_GBRAPF16LE, AV_PIX_FMT_GRAYF16BE, and AV_PIX_FMT_GRAYF16LE.
|
||||
|
||||
2025-02-16 - c79cdae3777 - lavu 59.57.100 - log.h
|
||||
Add flags AV_LOG_PRINT_TIME and AV_LOG_PRINT_DATETIME.
|
||||
|
||||
2025-02-09 - 9fb806fa577 - lavc 61.32.100 - codec_id.h
|
||||
Add AV_CODEC_ID_IVTV_VBI.
|
||||
|
||||
2025-01-25 - ea3c3b42dff - lavu 59.56.100 - frame.h
|
||||
Add AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT.
|
||||
|
||||
2025-01-25 - 6707d970c04 - lavfi 10.9.100 - buffersink.h
|
||||
Add av_buffersink_get_side_data().
|
||||
|
||||
2025-01-25 - 7a025e1cb5f - lavfi 10.8.100 - buffersrc.h
|
||||
Add AVBufferSrcParameters.side_data and AVBufferSrcParameters.nb_side_data
|
||||
|
||||
2025-01-25 - ef1cb1c9c81 - lavfi 10.7.100 - avfilter.h
|
||||
Add AVFilterLink.side_data and AVFilterLink.nb_side_data
|
||||
|
||||
2025-01-05 - 42e72d5c8b5 - lavu 59.55.100 - frame.h
|
||||
Add AV_FRAME_SIDE_DATA_FLAG_NEW_REF.
|
||||
|
||||
2025-01-05 - 19c95ecbff8 - lavc 61.31.100 - avcodec.h
|
||||
Deprecate AVCodecContext->properties.
|
||||
|
||||
2025-01-05 - 2d91f89445d - lavc 61.30.100 - frame.h
|
||||
Add AV_FRAME_FLAG_LOSSLESS.
|
||||
|
||||
2025-01-03 - f3c40826455 - lavc 61.29.100 - codec_id.h
|
||||
Add AV_CODEC_ID_JPEGXL_ANIM.
|
||||
|
||||
2025-01-03 - da9dcaba69d - lavu 59.54.100 - frame.h
|
||||
Add AV_CH_LAYOUT_5POINT1POINT2 and AV_CHANNEL_LAYOUT_5POINT1POINT2.
|
||||
|
||||
2024-12-23 - b88944a8aa5 - lavu 59.53.100 - frame.h
|
||||
Add av_frame_side_data_remove_by_props().
|
||||
|
||||
2024-12-23 - 3428a8d8303 - lavu 59.52.100 - frame.h
|
||||
Add AV_SIDE_DATA_PROP_SIZE_DEPENDENT and AV_FRAME_DATA_PROP_COLOR_DEPENDENT.
|
||||
|
||||
2024-12-23 - 45f0a7ad338 - lsws 8.13.100 - swscale.h
|
||||
Add enum SwsIntent and SwsContext.intent.
|
||||
|
||||
2024-12-15 - 2ac34d08542 - lavc 61.27.100 packet.h
|
||||
Add av_container_fifo_alloc_avpacket().
|
||||
|
||||
2024-12-15 - 56ba57b6725 - lavu 59.51.100 - refstruct.h container_fifo.h
|
||||
Add a new public header refstruct.h with new API for
|
||||
reference-counted objects.
|
||||
|
||||
Add a new public header container_fifo.h with new API for
|
||||
a FIFO of container objects (e.g. AVFrame or AVPacket).
|
||||
|
||||
2024-12-13 - 6eb4bf04e92 - lavu 59.50.100 - channel_layout.h
|
||||
Add AV_CH_LAYOUT_9POINT1POINT6 and AV_CHANNEL_LAYOUT_9POINT1POINT6.
|
||||
|
||||
2024-12-05 - 06f084468e0 - lavu 59.49.100 - csp.h
|
||||
Add av_csp_itu_eotf() and av_csp_itu_eotf_inv().
|
||||
|
||||
2024-12-05 - bf0a6c41111 - lavu 59.48.100 - csp.h
|
||||
Add av_csp_trc_func_inv_from_id().
|
||||
|
||||
2024-11-25 - 2a091d4f2ee - lsws 8.12.100 - swscale.h
|
||||
Allow using sws_frame_scale() dynamically, without first initializing the
|
||||
SwsContext. Deprecate sws_init_context(). Add sws_frame_setup() instead.
|
||||
|
||||
2024-11-25 - fb169640092 - lsws 8.11.100 - swscale.h
|
||||
Replace #define-based SWS_* flags by enum SwsFlags.
|
||||
|
||||
2024-11-25 - ed5dd675624 - lsws 8.10.100 - swscale.h
|
||||
Publicly expose struct SwsContext, enum SwsDither, and enum SwsAlphaBlend.
|
||||
|
||||
2024-11-16 - 46cb7b8d9dc - lavu 59.47.101 - frame.h
|
||||
av_frame_get_buffer() now also aligns the data pointers according to
|
||||
the requested alignment.
|
||||
|
||||
2024-11-13 - 20af68b63a4 - lavu 59.47.100 - channel_layout.h
|
||||
Add AV_CHAN_BINAURAL_LEFT, AV_CHAN_BINAURAL_RIGHT
|
||||
Add AV_CH_BINAURAL_LEFT, AV_CH_BINAURAL_RIGHT
|
||||
Add AV_CH_LAYOUT_BINAURAL, AV_CHANNEL_LAYOUT_BINAURAL
|
||||
|
||||
2024-10-26 - e02a3b40a5e - lavu 59.46.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_XV48.
|
||||
|
||||
2024-10-23 - b03c758600f - lsws 8.9.100 - swscale.h
|
||||
Add sws_is_noop().
|
||||
|
||||
2024-10-23 - 5e50a56b9c4 - lsws 8.8.100 - swscale.h
|
||||
Add frame property testing API:
|
||||
- sws_test_format()
|
||||
- sws_test_colorspace()
|
||||
- sws_test_primaries()
|
||||
- sws_test_transfer()
|
||||
- sws_test_frame()
|
||||
|
||||
2024-10-23 - 87baf9ab2c2 - lsws 8.7.100 - swscale.h
|
||||
Add sws_free_context().
|
||||
|
||||
2024-10-23 - f462ba05f54 - lavu 59.45.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_Y216.
|
||||
|
||||
2024-10-15 - 2336e685657 - lavu 59.44.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGB96 and AV_PIX_FMT_RGBA128.
|
||||
|
||||
2024-10-14 - c993a91bea - lavu 59.43.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_RGBF16.
|
||||
|
||||
2024-10-08 - 29ea34728f1 - lavu 59.42.100 - pixfmt.h
|
||||
Add AV_PIX_FMT_AYUV, AV_PIX_FMT_UYVA, AV_PIX_FMT_VYU444,
|
||||
and AV_PIX_FMT_V30X.
|
||||
|
||||
2024-10-01 - 0548ab2e425 - lavu 59.41.100 - log.h
|
||||
Add AVClass.state_flags_offset and AV_CLASS_STATE_INITIALIZED.
|
||||
|
||||
2024-09-30 - 50d1b89fa0d - lavf 61.9.100 - avformat.h
|
||||
Add {nb_}coded_side_data to AVStreamGroupTileGrid.
|
||||
|
||||
2024-09-30 - df9b80d21a2 - lavu 59
|
||||
Deprecate av_int_list_length_for_size(), av_int_list_length(), and
|
||||
av_opt_set_int_list() without replacement. All AVOptions using these
|
||||
should be replaced with AV_OPT_TYPE_FLAG_ARRAY.
|
||||
|
||||
2024-09-30 - 1efcdbc54d9 - lavfi 10.6.100
|
||||
Buffersink now has array-type options
|
||||
- pixel_formats
|
||||
- colorspaces
|
||||
- colorranges
|
||||
replacing the int-list options
|
||||
- pix_fmts
|
||||
- color_spaces
|
||||
- color_ranges
|
||||
abuffersink now has array-type options
|
||||
- sample_formats
|
||||
- samplerates
|
||||
- channel_layouts
|
||||
replacing the int-list/string options
|
||||
- sample_fmts
|
||||
- sample_rates
|
||||
- ch_layouts
|
||||
|
||||
-------- 8< --------- FFmpeg 7.1 was cut here -------- 8< ---------
|
||||
|
||||
2024-09-23 - 6940a6de2f0 - lavu 59.38.100 - frame.h
|
||||
Add AV_FRAME_DATA_VIEW_ID.
|
||||
|
||||
|
|
@ -627,7 +412,7 @@ API changes, most recent first:
|
|||
Deprecate AVFrame.palette_has_changed without replacement.
|
||||
|
||||
2023-05-15 - 7d1d61cc5f5 - lavc 60 - avcodec.h
|
||||
Deprecate AVCodecContext.ticks_per_frame in favor of
|
||||
Depreate AVCodecContext.ticks_per_frame in favor of
|
||||
AVCodecContext.framerate (encoding) and
|
||||
AV_CODEC_PROP_FIELDS (decoding).
|
||||
|
||||
|
|
@ -635,7 +420,7 @@ API changes, most recent first:
|
|||
Add AV_CODEC_PROP_FIELDS.
|
||||
|
||||
2023-05-15 - 8b20d0dcb5c - lavc 60 - codec.h
|
||||
Deprecate AV_CODEC_CAP_SUBFRAMES without replacement.
|
||||
Depreate AV_CODEC_CAP_SUBFRAMES without replacement.
|
||||
|
||||
2023-05-07 - c2ae8e30b7f - lavc 60.11.100 - codec_par.h
|
||||
Add AVCodecParameters.framerate.
|
||||
|
|
|
|||
|
|
@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
|
|||
# could be handy for archiving the generated documentation or if some version
|
||||
# control system is used.
|
||||
|
||||
PROJECT_NUMBER =
|
||||
PROJECT_NUMBER = 7.1.1
|
||||
|
||||
# Using the PROJECT_BRIEF tag one can provide an optional one line description
|
||||
# for a project that appears at the top of each page and should give viewer a
|
||||
|
|
@ -1093,7 +1093,7 @@ HTML_STYLESHEET =
|
|||
# cascading style sheets that are included after the standard style sheets
|
||||
# created by doxygen. Using this option one can overrule certain style aspects.
|
||||
# This is preferred over using HTML_STYLESHEET since it does not replace the
|
||||
# standard style sheet and is therefore more robust against future updates.
|
||||
# standard style sheet and is therefor more robust against future updates.
|
||||
# Doxygen will copy the style sheet files to the output directory.
|
||||
# Note: The order of the extra stylesheet files is of importance (e.g. the last
|
||||
# stylesheet in the list overrules the setting of the previous ones in the
|
||||
|
|
@ -1636,7 +1636,7 @@ EXTRA_PACKAGES =
|
|||
# Note: Only use a user-defined header if you know what you are doing! The
|
||||
# following commands have a special meaning inside the header: $title,
|
||||
# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
|
||||
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string,
|
||||
# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string,
|
||||
# for the replacement values of the other commands the user is referred to
|
||||
# HTML_HEADER.
|
||||
# This tag requires that the tag GENERATE_LATEX is set to YES.
|
||||
|
|
|
|||
|
|
@ -189,52 +189,6 @@ see page 44-46 or section 5.5 of
|
|||
|
||||
Extract the core from a E-AC-3 stream, dropping extra channels.
|
||||
|
||||
@section eia608_to_smpte436m
|
||||
|
||||
Convert from a @code{EIA_608} stream to a @code{SMPTE_436M_ANC} data stream, wrapping the closed captions in CTA-708 CDP VANC packets.
|
||||
|
||||
@table @option
|
||||
@item line_number
|
||||
Choose which line number the generated VANC packets should go on. You generally want either line 9 (the default) or 11.
|
||||
@item wrapping_type
|
||||
Choose the SMPTE 436M wrapping type, defaults to @samp{vanc_frame}.
|
||||
It accepts the values:
|
||||
@table @samp
|
||||
@item vanc_frame
|
||||
VANC frame (interlaced or segmented progressive frame)
|
||||
@item vanc_field_1
|
||||
@item vanc_field_2
|
||||
@item vanc_progressive_frame
|
||||
@end table
|
||||
@item sample_coding
|
||||
Choose the SMPTE 436M sample coding, defaults to @samp{8bit_luma}.
|
||||
It accepts the values:
|
||||
@table @samp
|
||||
@item 8bit_luma
|
||||
8-bit component luma samples
|
||||
@item 8bit_color_diff
|
||||
8-bit component color difference samples
|
||||
@item 8bit_luma_and_color_diff
|
||||
8-bit component luma and color difference samples
|
||||
@item 10bit_luma
|
||||
10-bit component luma samples
|
||||
@item 10bit_color_diff
|
||||
10-bit component color difference samples
|
||||
@item 10bit_luma_and_color_diff
|
||||
10-bit component luma and color difference samples
|
||||
@item 8bit_luma_parity_error
|
||||
8-bit component luma samples with parity error
|
||||
@item 8bit_color_diff_parity_error
|
||||
8-bit component color difference samples with parity error
|
||||
@item 8bit_luma_and_color_diff_parity_error
|
||||
8-bit component luma and color difference samples with parity error
|
||||
@end table
|
||||
@item initial_cdp_sequence_cntr
|
||||
The initial value of the CDP's 16-bit unsigned integer @code{cdp_hdr_sequence_cntr} and @code{cdp_ftr_sequence_cntr} fields. Defaults to 0.
|
||||
@item cdp_frame_rate
|
||||
Set the CDP's @code{cdp_frame_rate} field. This doesn't actually change the timing of the data stream, it just changes the values inserted in that field in the generated CDP packets. Defaults to @samp{30000/1001}.
|
||||
@end table
|
||||
|
||||
@section extract_extradata
|
||||
|
||||
Extract the in-band extradata.
|
||||
|
|
@ -469,21 +423,9 @@ Please note that this filter is auto-inserted for MPEG-TS (muxer
|
|||
|
||||
@section h264_redundant_pps
|
||||
|
||||
This applies a specific fixup to some Blu-ray BDMV H264 streams
|
||||
which contain redundant PPSs. The PPSs modify irrelevant parameters
|
||||
of the stream, confusing other transformations which require
|
||||
the correct extradata.
|
||||
|
||||
The encoder used on these impacted streams adds extra PPSs throughout
|
||||
the stream, varying the initial QP and whether weighted prediction
|
||||
was enabled. This causes issues after copying the stream into
|
||||
a global header container, as the starting PPS is not suitable
|
||||
for the rest of the stream. One side effect, for example,
|
||||
is seeking will return garbled output until a new PPS appears.
|
||||
|
||||
This BSF removes the extra PPSs and rewrites the slice headers
|
||||
such that the stream uses a single leading PPS in the global header,
|
||||
which resolves the issue.
|
||||
This applies a specific fixup to some Blu-ray streams which contain
|
||||
redundant PPSs modifying irrelevant parameters of the stream which
|
||||
confuse other transformations which require correct extradata.
|
||||
|
||||
@section hevc_metadata
|
||||
|
||||
|
|
@ -754,12 +696,12 @@ ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
|
|||
Drop every video packet not marked as a keyframe after timestamp 30s but do not
|
||||
modify any of the remaining packets.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(pts*tb\,30)*not(key)' output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
|
||||
@end example
|
||||
|
||||
Drop one second of audio every 10 seconds and add some random noise to the rest.
|
||||
@example
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(pts*tb\,10)\,9\,10)' output.mkv
|
||||
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
|
||||
@end example
|
||||
|
||||
@section null
|
||||
|
|
@ -994,11 +936,6 @@ ffmpeg -i INPUT -c:a copy -bsf:a setts=pts=DTS out.mkv
|
|||
Log basic packet information. Mainly useful for testing, debugging,
|
||||
and development.
|
||||
|
||||
@section smpte436m_to_eia608
|
||||
|
||||
Convert from a @code{SMPTE_436M_ANC} data stream to a @code{EIA_608} stream,
|
||||
extracting the closed captions from CTA-708 CDP VANC packets, and ignoring all other data.
|
||||
|
||||
@anchor{text2movsub}
|
||||
@section text2movsub
|
||||
|
||||
|
|
|
|||
|
|
@ -30,13 +30,6 @@ fate
|
|||
fate-list
|
||||
List all fate/regression test targets.
|
||||
|
||||
fate-list-failing
|
||||
List the fate tests that failed the last time they were executed.
|
||||
|
||||
fate-clear-reports
|
||||
Remove the test reports from previous test executions (getting rid of
|
||||
potentially stale results from fate-list-failing).
|
||||
|
||||
install
|
||||
Install headers, libraries and programs.
|
||||
|
||||
|
|
@ -70,3 +63,4 @@ make -j<num>
|
|||
make -k
|
||||
Continue build in case of errors, this is useful for the regression tests
|
||||
sometimes but note that it will still not run all reg tests.
|
||||
|
||||
|
|
|
|||
|
|
@ -664,8 +664,6 @@ for codecs that support it. At present, those are H.264 and VP9.
|
|||
@item film_grain
|
||||
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
|
||||
Supported at present by AV1 decoders.
|
||||
@item enhancements
|
||||
Export picture enhancement metadata through frame side data, e.g. LCEVC (see @code{AV_FRAME_DATA_LCEVC}).
|
||||
@end table
|
||||
|
||||
@item threads @var{integer} (@emph{decoding/encoding,video})
|
||||
|
|
|
|||
|
|
@ -395,7 +395,7 @@ without this library.
|
|||
@c man end AUDIO DECODERS
|
||||
|
||||
@chapter Subtitles Decoders
|
||||
@c man begin SUBTITLES DECODERS
|
||||
@c man begin SUBTILES DECODERS
|
||||
|
||||
@section libaribb24
|
||||
|
||||
|
|
@ -427,7 +427,7 @@ Enabled by default.
|
|||
Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption}
|
||||
library.
|
||||
|
||||
Implements profiles A and C of the Japanese ARIB STD-B24 standard,
|
||||
Implements profiles A and C of the Japanse ARIB STD-B24 standard,
|
||||
Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T.
|
||||
|
||||
Requires the presence of the libaribcaption headers and library
|
||||
|
|
@ -477,7 +477,7 @@ Specify comma-separated list of font family names to be used for @dfn{bitmap}
|
|||
or @dfn{ass} type subtitle rendering.
|
||||
Only first font name is used for @dfn{ass} type subtitle.
|
||||
|
||||
If not specified, use internally defined default font family.
|
||||
If not specified, use internaly defined default font family.
|
||||
|
||||
@item -ass_single_rect @var{boolean}
|
||||
ARIB STD-B24 specifies that some captions may be displayed at different
|
||||
|
|
@ -495,7 +495,7 @@ default behavior at compilation.
|
|||
|
||||
@item -force_outline_text @var{boolean}
|
||||
Specify whether always render outline text for all characters regardless of
|
||||
the indication by character style.
|
||||
the indication by charactor style.
|
||||
|
||||
The default is @var{false}.
|
||||
|
||||
|
|
@ -696,4 +696,4 @@ box and an end box, typically subtitles. Default value is 0 if
|
|||
|
||||
@end table
|
||||
|
||||
@c man end SUBTITLES DECODERS
|
||||
@c man end SUBTILES DECODERS
|
||||
|
|
|
|||
|
|
@ -855,32 +855,6 @@ Set the sample rate for libopenmpt to output.
|
|||
Range is from 1000 to INT_MAX. The value default is 48000.
|
||||
@end table
|
||||
|
||||
@anchor{mccdec}
|
||||
@section mcc
|
||||
|
||||
Demuxer for MacCaption MCC files, it supports MCC versions 1.0 and 2.0.
|
||||
MCC files store VANC data, which can include closed captions (EIA-608 and CEA-708), ancillary time code, pan-scan data, etc.
|
||||
By default, for backward compatibility, the MCC demuxer extracts just the EIA-608 and CEA-708 closed captions and returns a @code{EIA_608} stream, ignoring all other VANC data.
|
||||
You can change it to return all VANC data in a @code{SMPTE_436M_ANC} data stream by setting @option{-eia608_extract 0}
|
||||
|
||||
@subsection Examples
|
||||
|
||||
@itemize
|
||||
@item
|
||||
Convert a MCC file to Scenarist (SCC) format:
|
||||
@example
|
||||
ffmpeg -i CC.mcc -c:s copy CC.scc
|
||||
@end example
|
||||
Note that the SCC format only supports EIA-608, so this will discard all other data such as CEA-708 extensions.
|
||||
|
||||
@item
|
||||
Merge a MCC file into a MXF file:
|
||||
@example
|
||||
ffmpeg -i video_and_audio.mxf -eia608_extract 0 -i CC.mcc -c copy -map 0 -map 1 out.mxf
|
||||
@end example
|
||||
This retains all VANC data and inserts it into the output MXF file as a @code{SMPTE_436M_ANC} data stream.
|
||||
@end itemize
|
||||
|
||||
@section mov/mp4/3gp
|
||||
|
||||
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
|
||||
|
|
@ -1016,7 +990,7 @@ to 1 (-1 means automatic setting, 1 means enabled, 0 means
|
|||
disabled). Default value is -1.
|
||||
|
||||
@item merge_pmt_versions
|
||||
Reuse existing streams when a PMT's version is updated and elementary
|
||||
Re-use existing streams when a PMT's version is updated and elementary
|
||||
streams move to different PIDs. Default value is 0.
|
||||
|
||||
@item max_packet_size
|
||||
|
|
|
|||
|
|
@ -70,6 +70,9 @@ variable-length arrays;
|
|||
|
||||
@item
|
||||
complex numbers;
|
||||
|
||||
@item
|
||||
mixed statements and declarations.
|
||||
@end itemize
|
||||
|
||||
@subsection SIMD/DSP
|
||||
|
|
@ -112,7 +115,7 @@ Objective-C where required for interacting with macOS-specific interfaces.
|
|||
|
||||
@section Code formatting conventions
|
||||
|
||||
There are the following guidelines regarding the code style in files:
|
||||
There are the following guidelines regarding the indentation in files:
|
||||
|
||||
@itemize @bullet
|
||||
@item
|
||||
|
|
@ -132,104 +135,6 @@ K&R coding style is used.
|
|||
@end itemize
|
||||
The presentation is one inspired by 'indent -i4 -kr -nut'.
|
||||
|
||||
@subsection Examples
|
||||
Some notable examples to illustrate common code style in FFmpeg:
|
||||
|
||||
@itemize @bullet
|
||||
|
||||
@item
|
||||
Space around assignments and after
|
||||
@code{if}/@code{do}/@code{while}/@code{for} keywords:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
if (condition)
|
||||
av_foo();
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
for (size_t i = 0; i < len; i++)
|
||||
av_bar(i);
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
size_t size = 0;
|
||||
@end example
|
||||
|
||||
However no spaces between the parentheses and condition, unless it helps
|
||||
readability of complex conditions, so the following should not be done:
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
if ( condition )
|
||||
av_foo();
|
||||
@end example
|
||||
|
||||
@item
|
||||
No unnecessary parentheses, unless it helps readability:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
int fields = ilace ? 2 : 1;
|
||||
@end example
|
||||
|
||||
@item
|
||||
Don't wrap single-line blocks in braces. Use braces only if there is an accompanying else statement. This keeps future code changes easier to keep track of.
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
if (bits_pixel == 24) @{
|
||||
avctx->pix_fmt = AV_PIX_FMT_BGR24;
|
||||
@} else if (bits_pixel == 8) @{
|
||||
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
|
||||
@} else
|
||||
return AVERROR_INVALIDDATA;
|
||||
|
||||
@end example
|
||||
|
||||
@item
|
||||
Avoid assignments in conditions where it makes sense:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64)
|
||||
if (!video_enc->chroma_intra_matrix)
|
||||
return AVERROR(ENOMEM);
|
||||
@end example
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
if (!(video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64)))
|
||||
return AVERROR(ENOMEM);
|
||||
@end example
|
||||
|
||||
@example c, good
|
||||
// Ok
|
||||
while ((entry = av_dict_iterate(options, entry)))
|
||||
av_log(ctx, AV_LOG_INFO, "Item '%s': '%s'\n", entry->key, entry->value);
|
||||
@end example
|
||||
|
||||
@item
|
||||
When declaring a pointer variable, the @code{*} goes with the variable not the type:
|
||||
|
||||
@example c, good
|
||||
// Good
|
||||
AVStream *stream;
|
||||
@end example
|
||||
|
||||
@example c, bad
|
||||
// Bad style
|
||||
AVStream* stream;
|
||||
@end example
|
||||
|
||||
@end itemize
|
||||
|
||||
If you work on a file that does not follow these guidelines consistently,
|
||||
change the parts that you are editing to follow these guidelines but do
|
||||
not make unrelated changes in the file to make it conform to these.
|
||||
|
||||
@subsection Vim configuration
|
||||
In order to configure Vim to follow FFmpeg formatting conventions, paste
|
||||
the following snippet into your @file{.vimrc}:
|
||||
|
|
@ -546,7 +451,7 @@ FFmpeg also has a defined scope - your new API must fit within it.
|
|||
|
||||
@subsubheading Replacing existing APIs
|
||||
If your new API is replacing an existing one, it should be strictly superior to
|
||||
it, so that the advantages of using the new API outweigh the cost to the
|
||||
it, so that the advantages of using the new API outweight the cost to the
|
||||
callers of changing their code. After adding the new API you should then
|
||||
deprecate the old one and schedule it for removal, as described in
|
||||
@ref{Removing interfaces}.
|
||||
|
|
@ -596,7 +501,7 @@ change in @file{doc/APIchanges}.
|
|||
Backward-incompatible API or ABI changes require incrementing (bumping) the
|
||||
major version number, as described in @ref{Major version bumps}. Major
|
||||
bumps are significant events that happen on a schedule - so if your change
|
||||
strictly requires one you should add it under @code{#if} preprocessor guards that
|
||||
strictly requires one you should add it under @code{#if} preprocesor guards that
|
||||
disable it until the next major bump happens.
|
||||
|
||||
New APIs that can be added without breaking API or ABI compatibility require
|
||||
|
|
@ -917,10 +822,10 @@ improves readability.
|
|||
Consider adding a regression test for your code. All new modules
|
||||
should be covered by tests. That includes demuxers, muxers, decoders, encoders
|
||||
filters, bitstream filters, parsers. If its not possible to do that, add
|
||||
an explanation why to your patchset, its ok to not test if there's a reason.
|
||||
an explanation why to your patchset, its ok to not test if theres a reason.
|
||||
|
||||
@item
|
||||
If you added NASM code please check that things still work with --disable-x86asm.
|
||||
If you added YASM code please check that things still work with --disable-yasm.
|
||||
|
||||
@item
|
||||
Test your code with valgrind and or Address Sanitizer to ensure it's free
|
||||
|
|
|
|||
|
|
@ -106,8 +106,15 @@ debugging by setting the option to "disable".
|
|||
Enables the use of the long term prediction extension which increases coding
|
||||
efficiency in very low bandwidth situations such as encoding of voice or
|
||||
solo piano music by extending constant harmonic peaks in bands throughout
|
||||
frames. This option is implied by profile:a aac_low.
|
||||
Use in conjunction with @option{-ar} to decrease the samplerate.
|
||||
frames. This option is implied by profile:a aac_low and is incompatible with
|
||||
aac_pred. Use in conjunction with @option{-ar} to decrease the samplerate.
|
||||
|
||||
@item aac_pred
|
||||
Enables the use of a more traditional style of prediction where the spectral
|
||||
coefficients transmitted are replaced by the difference of the current
|
||||
coefficients minus the previous "predicted" coefficients. In theory and sometimes
|
||||
in practice this can improve quality for low to mid bitrate audio.
|
||||
This option implies the aac_main profile and is incompatible with aac_ltp.
|
||||
|
||||
@item profile
|
||||
Sets the encoding profile, possible values:
|
||||
|
|
@ -125,6 +132,10 @@ MPEG4 specifications.
|
|||
Long term prediction profile, is enabled by and will enable the @option{aac_ltp}
|
||||
option. Introduced in MPEG4.
|
||||
|
||||
@item aac_main
|
||||
Main-type prediction profile, is enabled by and will enable the @option{aac_pred}
|
||||
option. Introduced in MPEG2.
|
||||
|
||||
@end table
|
||||
If this option is unspecified it is set to @samp{aac_low}.
|
||||
@end table
|
||||
|
|
@ -1038,7 +1049,7 @@ forces a wideband cutoff for bitrates < 15 kbps, unless CELT-only
|
|||
Set channel mapping family to be used by the encoder. The default value of -1
|
||||
uses mapping family 0 for mono and stereo inputs, and mapping family 1
|
||||
otherwise. The default also disables the surround masking and LFE bandwidth
|
||||
optimizations in libopus, and requires that the input contains 8 channels or
|
||||
optimzations in libopus, and requires that the input contains 8 channels or
|
||||
fewer.
|
||||
|
||||
Other values include 0 for mono and stereo, 1 for surround sound with masking
|
||||
|
|
@ -1380,48 +1391,6 @@ Higher is better but slower.
|
|||
|
||||
@end table
|
||||
|
||||
@anchor{ffv1}
|
||||
@section ffv1
|
||||
|
||||
FFv1 Encoder
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by FFmpeg's FFv1 encoder.
|
||||
|
||||
@table @option
|
||||
@item context
|
||||
Sets the context size, 0 (default) is small, 1 is big.
|
||||
|
||||
@item coder
|
||||
Set the coder,
|
||||
@table @samp
|
||||
@item rice
|
||||
Golomb rice coder
|
||||
@item range_def
|
||||
Range coder with default table
|
||||
@item range_tab
|
||||
Range coder with custom table
|
||||
@end table
|
||||
|
||||
@item slicecrc
|
||||
-1 (default, automatic), 1 use crc with zero initial and final state, 2 use crc with non zero initial and final state
|
||||
|
||||
@item qtable
|
||||
@table @samp
|
||||
@item default
|
||||
default, automatic
|
||||
@item 8bit
|
||||
use 8bit default
|
||||
@item greater8bit
|
||||
use >8bit default
|
||||
@end table
|
||||
|
||||
@item remap_optimizer
|
||||
0 - 5, default 3, how much effort the encoder puts into optimizing the remap table.
|
||||
|
||||
@end table
|
||||
|
||||
@section GIF
|
||||
|
||||
GIF image/animation encoder.
|
||||
|
|
@ -1889,42 +1858,6 @@ ffmpeg -i input -c:v libaom-av1 -b:v 500K -aom-params tune=psnr:enable-tpl-model
|
|||
|
||||
@end table
|
||||
|
||||
@section liboapv
|
||||
|
||||
Advanced Professional Video codec encoder wrapper.
|
||||
|
||||
This encoder requires the presence of the liboapv headers and library
|
||||
during configuration. You need to explicitly configure the build with
|
||||
@option{--enable-liboapv}.
|
||||
|
||||
@float NOTE
|
||||
Many liboapv encoder options are mapped to FFmpeg global codec options,
|
||||
while unique encoder options are provided through private options.
|
||||
@end float
|
||||
|
||||
The apv project website is at @url{https://github.com/AcademySoftwareFoundation/openapv}.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The following options are supported by the liboapv wrapper.
|
||||
|
||||
@float NOTE
|
||||
To get a more extensive documentation of the liboapv options, consult the
|
||||
liboapv documentation.
|
||||
@end float
|
||||
|
||||
@table @option
|
||||
@item preset
|
||||
Set the quality-speed tradeoff [fastest, fast, medium, slow, placebo, default]
|
||||
|
||||
@item qp
|
||||
Set the quantization parameter value for CQP rate control mode.
|
||||
|
||||
@item oapv-params (@emph{parse_apv_params})
|
||||
Set liboapvenc options using a list of @var{key}=@var{value} pairs separated
|
||||
by ":". See the liboapv encoder user guide for a list of accepted parameters.
|
||||
@end table
|
||||
|
||||
@section libsvtav1
|
||||
|
||||
SVT-AV1 encoder wrapper.
|
||||
|
|
@ -3342,75 +3275,6 @@ fastest.
|
|||
|
||||
@end table
|
||||
|
||||
@section MediaCodec
|
||||
|
||||
MediaCodec encoder wrapper enables hardware-accelerated video encoding on
|
||||
Android device. It supports H.264, H.265 (HEVC), VP8, VP9, MPEG-4, and AV1
|
||||
encoding (whether works or not is device dependent).
|
||||
|
||||
Android provides two sets of APIs: Java MediaCodec and NDK MediaCodec. The
|
||||
MediaCodec encoder wrapper supports both. Note that the NDK MediaCodec API
|
||||
operates without requiring JVM, but may fail to function outside the JVM
|
||||
environment due to dependencies on system framework services, particularly
|
||||
after Android 15.
|
||||
|
||||
@table @option
|
||||
@item ndk_codec @var{boolean}
|
||||
Use the NDK-based MediaCodec API instead of the Java API. Enabled by default
|
||||
if @code{av_jni_get_java_vm()} return NULL.
|
||||
|
||||
@item ndk_async @var{boolean}
|
||||
Use NDK MediaCodec in async mode. Async mode has less overhead than poll in a
|
||||
loop in sync mode. The drawback of async mode is AV_CODEC_FLAG_GLOBAL_HEADER
|
||||
doesn't work (use extract_extradata bsf when necessary). It doesn't work and
|
||||
will be disabled automatically on devices below Android 8.0.
|
||||
|
||||
@item codec_name @var{string}
|
||||
A codec type can have multiple implementations on a single device, this option
|
||||
specify which backend to use (via MediaCodec createCodecByName API). It's NULL
|
||||
by default, and encoder is created by createEncoderByType.
|
||||
|
||||
@item bitrate_mode @var{integer}
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item cq
|
||||
Constant quality mode
|
||||
@item vbr
|
||||
Variable bitrate mode
|
||||
@item cbr
|
||||
Constant bitrate mode
|
||||
@item cbr_fd
|
||||
Constant bitrate mode with frame drops
|
||||
@end table
|
||||
|
||||
@item pts_as_dts @var{boolean}
|
||||
Use PTS as DTS. This is a workaround since MediaCodec API doesn't provide
|
||||
decoding timestamp. It is enabled automatically if B frame is 0.
|
||||
|
||||
@item operating_rate @var{integer}
|
||||
The desired operating rate that the codec will need to operate at, zero for
|
||||
unspecified. This is used for cases like high-speed/slow-motion video capture,
|
||||
where the video encoder format contains the target playback rate (e.g. 30fps),
|
||||
but the component must be able to handle the high operating capture rate (e.g.
|
||||
240fps). This rate will be used by codec for resource planning and setting the
|
||||
operating points.
|
||||
|
||||
@item qp_i_min @var{integer}
|
||||
Minimum quantization parameter for I frame.
|
||||
@item qp_p_min @var{integer}
|
||||
Minimum quantization parameter for P frame.
|
||||
@item qp_b_min @var{integer}
|
||||
Minimum quantization parameter for B frame.
|
||||
@item qp_i_max @var{integer}
|
||||
Maximum quantization parameter for I frame.
|
||||
@item qp_p_max @var{integer}
|
||||
Maximum quantization parameter for P frame.
|
||||
@item qp_b_max @var{integer}
|
||||
Maximum quantization parameter for B frame.
|
||||
|
||||
@end table
|
||||
|
||||
@section MediaFoundation
|
||||
|
||||
This provides wrappers to encoders (both audio and video) in the
|
||||
|
|
@ -3493,13 +3357,6 @@ Default is 1 (on).
|
|||
|
||||
PNG image encoder.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item compression_level
|
||||
Sets the compression level, from 0 to 9(default)
|
||||
@end table
|
||||
|
||||
@subsection Private options
|
||||
|
||||
@table @option
|
||||
|
|
@ -3507,8 +3364,6 @@ Sets the compression level, from 0 to 9(default)
|
|||
Set physical density of pixels, in dots per inch, unset by default
|
||||
@item dpm @var{integer}
|
||||
Set physical density of pixels, in dots per meter, unset by default
|
||||
@item pred @var{method}
|
||||
Set prediction method (none, sub, up, avg, paeth, mixed), default is paeth
|
||||
@end table
|
||||
|
||||
@section ProRes
|
||||
|
|
@ -3703,7 +3558,7 @@ For encoders set this flag to ON to reduce power consumption and GPU usage.
|
|||
@end table
|
||||
|
||||
@subsection Runtime Options
|
||||
Following options can be used during qsv encoding.
|
||||
Following options can be used durning qsv encoding.
|
||||
|
||||
@table @option
|
||||
@item @var{global_quality}
|
||||
|
|
@ -3813,7 +3668,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
|
|||
on performance and objective visual quality metric.
|
||||
|
||||
@item @var{low_delay_brc}
|
||||
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
|
||||
Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
|
||||
more accurate bitrate control to minimize the variance of bitstream size frame
|
||||
by frame. Value: -1-default 0-off 1-on
|
||||
|
||||
|
|
@ -4012,7 +3867,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
|
|||
on performance and objective visual quality metric.
|
||||
|
||||
@item @var{low_delay_brc}
|
||||
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
|
||||
Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
|
||||
more accurate bitrate control to minimize the variance of bitstream size frame
|
||||
by frame. Value: -1-default 0-off 1-on
|
||||
|
||||
|
|
@ -4246,7 +4101,7 @@ Extended bitrate control.
|
|||
Depth of look ahead in number frames, available when extbrc option is enabled.
|
||||
|
||||
@item @var{low_delay_brc}
|
||||
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides
|
||||
Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
|
||||
more accurate bitrate control to minimize the variance of bitstream size frame
|
||||
by frame. Value: -1-default 0-off 1-on
|
||||
|
||||
|
|
@ -4602,25 +4457,6 @@ Reduces detail but attempts to preserve color at extremely low bitrates.
|
|||
@chapter Subtitles Encoders
|
||||
@c man begin SUBTITLES ENCODERS
|
||||
|
||||
@section dvbsub
|
||||
|
||||
This codec encodes the bitmap subtitle format that is used in DVB
|
||||
broadcasts and recordings. The bitmaps are typically embedded in a
|
||||
container such as MPEG-TS as a separate stream.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item min_bpp @var{integer (2, 4, or 8)}
|
||||
Set a minimum bits-per-pixel value for the subtitle color lookup tables.
|
||||
|
||||
DVB supports 2, 4, and 8 bits-per-pixel color lookup tables. This
|
||||
option enables forcing a particular bits-per-pixel value regardless of
|
||||
the number of colors. Since not all players support or properly
|
||||
support 2 bits-per-pixel, this value defaults to 4.
|
||||
|
||||
@end table
|
||||
|
||||
@section dvdsub
|
||||
|
||||
This codec encodes the bitmap subtitle format that is used in DVDs.
|
||||
|
|
@ -4648,18 +4484,4 @@ one byte per subtitle on average.
|
|||
By default, this work-around is disabled.
|
||||
@end table
|
||||
|
||||
@section lrc
|
||||
|
||||
This codec encodes the LRC lyrics format.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
@item precision
|
||||
Specify the precision of the fractional part of the timestamp. Time base is
|
||||
determined based on this value.
|
||||
|
||||
Defaults to 2 for centiseconds.
|
||||
@end table
|
||||
|
||||
@c man end SUBTITLES ENCODERS
|
||||
|
|
|
|||
|
|
@ -96,7 +96,6 @@ int main(int argc, char *argv[])
|
|||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
av_freep(&avio_ctx_buffer);
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -128,10 +128,6 @@ int main(int argc, char **argv)
|
|||
outfilename = argv[2];
|
||||
|
||||
pkt = av_packet_alloc();
|
||||
if (!pkt) {
|
||||
fprintf(stderr, "Could not allocate AVPacket\n");
|
||||
exit(1); /* or proper cleanup and returning */
|
||||
}
|
||||
|
||||
/* find the MPEG audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
|
|
@ -165,7 +161,7 @@ int main(int argc, char **argv)
|
|||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
fprintf(stderr, "Could not open %s\n", outfilename);
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,8 @@
|
|||
* file to be played with ffplay.
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
|
|
@ -94,7 +96,8 @@ static int init_filters(const char *filters_descr)
|
|||
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const int out_sample_rate = 8000;
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
|
|
@ -120,40 +123,34 @@ static int init_filters(const char *filters_descr)
|
|||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "sample_formats", "s16",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "channel_layouts", "mono",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_array(buffersink_ctx, "samplerates", AV_OPT_SEARCH_CHILDREN,
|
||||
0, 1, AV_OPT_TYPE_INT, &out_sample_rate);
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
|
|
|
|||
|
|
@ -27,6 +27,8 @@
|
|||
* @example decode_filter_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
|
|
@ -36,7 +38,6 @@
|
|||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/mem.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/time.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24,transpose=cclock";
|
||||
/* other way:
|
||||
|
|
@ -98,6 +99,7 @@ static int init_filters(const char *filters_descr)
|
|||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
|
|
@ -120,26 +122,20 @@ static int init_filters(const char *filters_descr)
|
|||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set(buffersink_ctx, "pixel_formats", "gray8",
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
|
||||
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
|
|
@ -194,7 +190,7 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
|
|||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
av_usleep(delay);
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -270,6 +270,7 @@ int main(int argc, char *argv[])
|
|||
AVFilterGraph *graph;
|
||||
AVFilterContext *src, *sink;
|
||||
AVFrame *frame;
|
||||
uint8_t errstr[1024];
|
||||
float duration;
|
||||
int err, nb_frames, i;
|
||||
|
||||
|
|
@ -294,7 +295,6 @@ int main(int argc, char *argv[])
|
|||
|
||||
md5 = av_md5_alloc();
|
||||
if (!md5) {
|
||||
av_frame_free(&frame);
|
||||
fprintf(stderr, "Error allocating the MD5 context\n");
|
||||
return 1;
|
||||
}
|
||||
|
|
@ -302,10 +302,8 @@ int main(int argc, char *argv[])
|
|||
/* Set up the filtergraph. */
|
||||
err = init_filter_graph(&graph, &src, &sink);
|
||||
if (err < 0) {
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
fprintf(stderr, "Unable to init filter graph:");
|
||||
return 1;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* the main filtering loop */
|
||||
|
|
@ -356,10 +354,7 @@ int main(int argc, char *argv[])
|
|||
return 0;
|
||||
|
||||
fail:
|
||||
avfilter_graph_free(&graph);
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
|
||||
fprintf(stderr, "%s\n", av_err2str(err));
|
||||
av_strerror(err, errstr, sizeof(errstr));
|
||||
fprintf(stderr, "%s\n", errstr);
|
||||
return 1;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -418,7 +418,7 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
|
|||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a reusable frame */
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
|
|
|
|||
|
|
@ -219,8 +219,11 @@ int main(int argc, char **argv)
|
|||
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0)
|
||||
fprintf(stderr, "%s\n", av_err2str(ret));
|
||||
if (ret < 0) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "%s\n", buf);
|
||||
}
|
||||
|
||||
avformat_close_input(&input_ctx);
|
||||
|
||||
|
|
|
|||
|
|
@ -101,7 +101,7 @@ static int dynamic_set_parameter(AVCodecContext *avctx)
|
|||
/* Set codec specific option */
|
||||
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
|
||||
goto fail;
|
||||
/* There is no "framerate" option in common option list. Use "-r" to set
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to set
|
||||
* framerate, which is compatible with ffmpeg commandline. The video is
|
||||
* assumed to be average frame rate, so set time_base to 1/framerate. */
|
||||
e = av_dict_get(opts, "r", NULL, 0);
|
||||
|
|
@ -180,7 +180,7 @@ static int open_input_file(char *filename)
|
|||
decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "Codec is not supported by qsv\n");
|
||||
fprintf(stderr, "Codec is not supportted by qsv\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
|
|
@ -289,7 +289,7 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
|
|||
fprintf(stderr, "Failed to set encoding parameter.\n");
|
||||
goto fail;
|
||||
}
|
||||
/* There is no "framerate" option in common option list. Use "-r" to
|
||||
/* There is no "framerate" option in commom option list. Use "-r" to
|
||||
* set framerate, which is compatible with ffmpeg commandline. The
|
||||
* video is assumed to be average frame rate, so set time_base to
|
||||
* 1/framerate. */
|
||||
|
|
|
|||
|
|
@ -171,38 +171,23 @@ static int open_output_file(const char *filename)
|
|||
* sample rate etc.). These properties can be changed for output
|
||||
* streams easily using filters */
|
||||
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
|
||||
const enum AVPixelFormat *pix_fmts = NULL;
|
||||
|
||||
enc_ctx->height = dec_ctx->height;
|
||||
enc_ctx->width = dec_ctx->width;
|
||||
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_PIX_FORMAT, 0,
|
||||
(const void**)&pix_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->pix_fmt = (ret >= 0 && pix_fmts) ?
|
||||
pix_fmts[0] : dec_ctx->pix_fmt;
|
||||
|
||||
if (encoder->pix_fmts)
|
||||
enc_ctx->pix_fmt = encoder->pix_fmts[0];
|
||||
else
|
||||
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
|
||||
/* video time_base can be set to whatever is handy and supported by encoder */
|
||||
enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
|
||||
} else {
|
||||
const enum AVSampleFormat *sample_fmts = NULL;
|
||||
|
||||
enc_ctx->sample_rate = dec_ctx->sample_rate;
|
||||
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = avcodec_get_supported_config(dec_ctx, NULL,
|
||||
AV_CODEC_CONFIG_SAMPLE_FORMAT, 0,
|
||||
(const void**)&sample_fmts, NULL);
|
||||
|
||||
/* take first format from list of supported formats */
|
||||
enc_ctx->sample_fmt = (ret >= 0 && sample_fmts) ?
|
||||
sample_fmts[0] : dec_ctx->sample_fmt;
|
||||
|
||||
enc_ctx->sample_fmt = encoder->sample_fmts[0];
|
||||
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
|
||||
}
|
||||
|
||||
|
|
@ -298,10 +283,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
|||
goto end;
|
||||
}
|
||||
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
|
@ -312,12 +297,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
|||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
|
||||
char buf[64];
|
||||
buffersrc = avfilter_get_by_name("abuffer");
|
||||
|
|
@ -343,10 +322,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
|||
goto end;
|
||||
}
|
||||
|
||||
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out");
|
||||
if (!buffersink_ctx) {
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
|
@ -373,15 +352,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
|
|||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
if (enc_ctx->frame_size > 0)
|
||||
av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);
|
||||
|
||||
ret = avfilter_init_dict(buffersink_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
} else {
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
|
|
|
|||
|
|
@ -208,13 +208,6 @@ Download/synchronize sample files to the configured samples directory.
|
|||
@item fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
@item fate-list-failing
|
||||
List the fate tests that failed the last time they were executed.
|
||||
|
||||
@item fate-clear-reports
|
||||
Remove the test reports from previous test executions (getting rid of
|
||||
potentially stale results from fate-list-failing).
|
||||
|
||||
@item fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
@end table
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
slot= # some unique identifier
|
||||
repo=https://git.ffmpeg.org/ffmpeg.git # the source repository
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
#branch=release/2.6 # the branch to test
|
||||
samples= # path to samples directory
|
||||
workdir= # directory in which to do all the work
|
||||
|
|
|
|||
649
doc/ffmpeg.texi
649
doc/ffmpeg.texi
|
|
@ -21,24 +21,22 @@ ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_url}@}
|
|||
inputs - including live grabbing/recording devices - filter, and transcode them
|
||||
into a plethora of output formats.
|
||||
|
||||
@command{ffmpeg} reads from an arbitrary number of inputs (which can be regular
|
||||
@command{ffmpeg} reads from an arbitrary number of input "files" (which can be regular
|
||||
files, pipes, network streams, grabbing devices, etc.), specified by the
|
||||
@code{-i} option, and writes to an arbitrary number of outputs, which are
|
||||
specified by a plain output url. Anything found on the command line which cannot
|
||||
be interpreted as an option is considered to be an output url.
|
||||
@code{-i} option, and writes to an arbitrary number of output "files", which are
|
||||
specified by a plain output url. Anything found on the command line which
|
||||
cannot be interpreted as an option is considered to be an output url.
|
||||
|
||||
Each input or output can, in principle, contain any number of elementary streams
|
||||
of different types (video/audio/subtitle/attachment/data), though the allowed
|
||||
stream counts and/or types may be limited by the container format. Selecting
|
||||
which streams from which inputs will go into which output is either done
|
||||
automatically or with the @code{-map} option (see the @ref{Stream selection}
|
||||
chapter).
|
||||
Each input or output url can, in principle, contain any number of streams of
|
||||
different types (video/audio/subtitle/attachment/data). The allowed number and/or
|
||||
types of streams may be limited by the container format. Selecting which
|
||||
streams from which inputs will go into which output is either done automatically
|
||||
or with the @code{-map} option (see the Stream selection chapter).
|
||||
|
||||
To refer to inputs/outputs in options, you must use their indices (0-based).
|
||||
E.g. the first input is @code{0}, the second is @code{1}, etc. Similarly,
|
||||
streams within an input/output are referred to by their indices. E.g. @code{2:3}
|
||||
refers to the fourth stream in the third input or output. Also see the
|
||||
@ref{Stream specifiers} chapter.
|
||||
To refer to input files in options, you must use their indices (0-based). E.g.
|
||||
the first input file is @code{0}, the second is @code{1}, etc. Similarly, streams
|
||||
within a file are referred to by their indices. E.g. @code{2:3} refers to the
|
||||
fourth stream in the third input file. Also see the Stream specifiers chapter.
|
||||
|
||||
As a general rule, options are applied to the next specified
|
||||
file. Therefore, order is important, and you can have the same
|
||||
|
|
@ -87,405 +85,140 @@ The format option may be needed for raw input files.
|
|||
@chapter Detailed description
|
||||
@c man begin DETAILED DESCRIPTION
|
||||
|
||||
@command{ffmpeg} builds a transcoding pipeline out of the components listed
|
||||
below. The program's operation then consists of input data chunks flowing from
|
||||
the sources down the pipes towards the sinks, while being transformed by the
|
||||
components they encounter along the way.
|
||||
The transcoding process in @command{ffmpeg} for each output can be described by
|
||||
the following diagram:
|
||||
|
||||
The following kinds of components are available:
|
||||
@itemize
|
||||
@item
|
||||
@emph{Demuxers} (short for "demultiplexers") read an input source in order to
|
||||
extract
|
||||
|
||||
@itemize
|
||||
@item
|
||||
global properties such as metadata or chapters;
|
||||
@item
|
||||
list of input elementary streams and their properties
|
||||
@end itemize
|
||||
|
||||
One demuxer instance is created for each @option{-i} option, and sends encoded
|
||||
@emph{packets} to @emph{decoders} or @emph{muxers}.
|
||||
|
||||
In other literature, demuxers are sometimes called @emph{splitters}, because
|
||||
their main function is splitting a file into elementary streams (though some
|
||||
files only contain one elementary stream).
|
||||
|
||||
A schematic representation of a demuxer looks like this:
|
||||
@verbatim
|
||||
┌──────────┬───────────────────────┐
|
||||
│ demuxer │ │ packets for stream 0
|
||||
╞══════════╡ elementary stream 0 ├──────────────────────►
|
||||
│ │ │
|
||||
│ global ├───────────────────────┤
|
||||
│properties│ │ packets for stream 1
|
||||
│ and │ elementary stream 1 ├──────────────────────►
|
||||
│ metadata │ │
|
||||
│ ├───────────────────────┤
|
||||
│ │ │
|
||||
│ │ ........... │
|
||||
│ │ │
|
||||
│ ├───────────────────────┤
|
||||
│ │ │ packets for stream N
|
||||
│ │ elementary stream N ├──────────────────────►
|
||||
│ │ │
|
||||
└──────────┴───────────────────────┘
|
||||
▲
|
||||
│
|
||||
│ read from file, network stream,
|
||||
│ grabbing device, etc.
|
||||
│
|
||||
_______ ______________
|
||||
| | | |
|
||||
| input | demuxer | encoded data | decoder
|
||||
| file | ---------> | packets | -----+
|
||||
|_______| |______________| |
|
||||
v
|
||||
_________
|
||||
| |
|
||||
| decoded |
|
||||
| frames |
|
||||
|_________|
|
||||
________ ______________ |
|
||||
| | | | |
|
||||
| output | <-------- | encoded data | <----+
|
||||
| file | muxer | packets | encoder
|
||||
|________| |______________|
|
||||
|
||||
|
||||
@end verbatim
|
||||
|
||||
@item
|
||||
@emph{Decoders} receive encoded (compressed) @emph{packets} for an audio, video,
|
||||
or subtitle elementary stream, and decode them into raw @emph{frames} (arrays of
|
||||
pixels for video, PCM for audio). A decoder is typically associated with (and
|
||||
receives its input from) an elementary stream in a @emph{demuxer}, but sometimes
|
||||
may also exist on its own (see @ref{Loopback decoders}).
|
||||
|
||||
A schematic representation of a decoder looks like this:
|
||||
@verbatim
|
||||
┌─────────┐
|
||||
packets │ │ raw frames
|
||||
─────────►│ decoder ├────────────►
|
||||
│ │
|
||||
└─────────┘
|
||||
@end verbatim
|
||||
|
||||
@item
|
||||
@emph{Filtergraphs} process and transform raw audio or video @emph{frames}. A
|
||||
filtergraph consists of one or more individual @emph{filters} linked into a
|
||||
graph. Filtergraphs come in two flavors - @emph{simple} and @emph{complex},
|
||||
configured with the @option{-filter} and @option{-filter_complex} options,
|
||||
respectively.
|
||||
|
||||
A simple filtergraph is associated with an @emph{output elementary stream}; it
|
||||
receives the input to be filtered from a @emph{decoder} and sends filtered
|
||||
output to that output stream's @emph{encoder}.
|
||||
|
||||
A simple video filtergraph that performs deinterlacing (using the @code{yadif}
|
||||
deinterlacer) followed by resizing (using the @code{scale} filter) can look like
|
||||
this:
|
||||
@verbatim
|
||||
|
||||
┌────────────────────────┐
|
||||
│ simple filtergraph │
|
||||
frames from ╞════════════════════════╡ frames for
|
||||
a decoder │ ┌───────┐ ┌───────┐ │ an encoder
|
||||
────────────►├─►│ yadif ├─►│ scale ├─►│────────────►
|
||||
│ └───────┘ └───────┘ │
|
||||
└────────────────────────┘
|
||||
@end verbatim
|
||||
|
||||
A complex filtergraph is standalone and not associated with any specific stream.
|
||||
It may have multiple (or zero) inputs, potentially of different types (audio or
|
||||
video), each of which receiving data either from a decoder or another complex
|
||||
filtergraph's output. It also has one or more outputs that feed either an
|
||||
encoder or another complex filtergraph's input.
|
||||
|
||||
The following example diagram represents a complex filtergraph with 3 inputs and
|
||||
2 outputs (all video):
|
||||
@verbatim
|
||||
┌─────────────────────────────────────────────────┐
|
||||
│ complex filtergraph │
|
||||
╞═════════════════════════════════════════════════╡
|
||||
frames ├───────┐ ┌─────────┐ ┌─────────┐ ┌────────┤ frames
|
||||
─────────►│input 0├─►│ overlay ├─────►│ overlay ├─►│output 0├────────►
|
||||
├───────┘ │ │ │ │ └────────┤
|
||||
frames ├───────┐╭►│ │ ╭►│ │ │
|
||||
─────────►│input 1├╯ └─────────┘ │ └─────────┘ │
|
||||
├───────┘ │ │
|
||||
frames ├───────┐ ┌─────┐ ┌─────┬─╯ ┌────────┤ frames
|
||||
─────────►│input 2├►│scale├►│split├───────────────►│output 1├────────►
|
||||
├───────┘ └─────┘ └─────┘ └────────┤
|
||||
└─────────────────────────────────────────────────┘
|
||||
@end verbatim
|
||||
Frames from second input are overlaid over those from the first. Frames from the
|
||||
third input are rescaled, then the duplicated into two identical streams. One of
|
||||
them is overlaid over the combined first two inputs, with the result exposed as
|
||||
the filtergraph's first output. The other duplicate ends up being the
|
||||
filtergraph's second output.
|
||||
|
||||
@item
|
||||
@emph{Encoders} receive raw audio, video, or subtitle @emph{frames} and encode
|
||||
them into encoded @emph{packets}. The encoding (compression) process is
|
||||
typically @emph{lossy} - it degrades stream quality to make the output smaller;
|
||||
some encoders are @emph{lossless}, but at the cost of much higher output size. A
|
||||
video or audio encoder receives its input from some filtergraph's output,
|
||||
subtitle encoders receive input from a decoder (since subtitle filtering is not
|
||||
supported yet). Every encoder is associated with some muxer's @emph{output
|
||||
elementary stream} and sends its output to that muxer.
|
||||
|
||||
A schematic representation of an encoder looks like this:
|
||||
@verbatim
|
||||
┌─────────┐
|
||||
raw frames │ │ packets
|
||||
────────────►│ encoder ├─────────►
|
||||
│ │
|
||||
└─────────┘
|
||||
@end verbatim
|
||||
|
||||
@item
|
||||
@emph{Muxers} (short for "multiplexers") receive encoded @emph{packets} for
|
||||
their elementary streams from encoders (the @emph{transcoding} path) or directly
|
||||
from demuxers (the @emph{streamcopy} path), interleave them (when there is more
|
||||
than one elementary stream), and write the resulting bytes into the output file
|
||||
(or pipe, network stream, etc.).
|
||||
|
||||
A schematic representation of a muxer looks like this:
|
||||
@verbatim
|
||||
┌──────────────────────┬───────────┐
|
||||
packets for stream 0 │ │ muxer │
|
||||
──────────────────────►│ elementary stream 0 ╞═══════════╡
|
||||
│ │ │
|
||||
├──────────────────────┤ global │
|
||||
packets for stream 1 │ │properties │
|
||||
──────────────────────►│ elementary stream 1 │ and │
|
||||
│ │ metadata │
|
||||
├──────────────────────┤ │
|
||||
│ │ │
|
||||
│ ........... │ │
|
||||
│ │ │
|
||||
├──────────────────────┤ │
|
||||
packets for stream N │ │ │
|
||||
──────────────────────►│ elementary stream N │ │
|
||||
│ │ │
|
||||
└──────────────────────┴─────┬─────┘
|
||||
│
|
||||
write to file, network stream, │
|
||||
grabbing device, etc. │
|
||||
│
|
||||
▼
|
||||
@end verbatim
|
||||
|
||||
@end itemize
|
||||
|
||||
@section Streamcopy
|
||||
The simplest pipeline in @command{ffmpeg} is single-stream
|
||||
@emph{streamcopy}, that is copying one @emph{input elementary stream}'s packets
|
||||
without decoding, filtering, or encoding them. As an example, consider an input
|
||||
file called @file{INPUT.mkv} with 3 elementary streams, from which we take the
|
||||
second and write it to file @file{OUTPUT.mp4}. A schematic representation of
|
||||
such a pipeline looks like this:
|
||||
@verbatim
|
||||
┌──────────┬─────────────────────┐
|
||||
│ demuxer │ │ unused
|
||||
╞══════════╡ elementary stream 0 ├────────╳
|
||||
│ │ │
|
||||
│INPUT.mkv ├─────────────────────┤ ┌──────────────────────┬───────────┐
|
||||
│ │ │ packets │ │ muxer │
|
||||
│ │ elementary stream 1 ├─────────►│ elementary stream 0 ╞═══════════╡
|
||||
│ │ │ │ │OUTPUT.mp4 │
|
||||
│ ├─────────────────────┤ └──────────────────────┴───────────┘
|
||||
│ │ │ unused
|
||||
│ │ elementary stream 2 ├────────╳
|
||||
│ │ │
|
||||
└──────────┴─────────────────────┘
|
||||
@end verbatim
|
||||
|
||||
The above pipeline can be constructed with the following commandline:
|
||||
@example
|
||||
ffmpeg -i INPUT.mkv -map 0:1 -c copy OUTPUT.mp4
|
||||
@end example
|
||||
|
||||
In this commandline
|
||||
@itemize
|
||||
|
||||
@item
|
||||
there is a single input @file{INPUT.mkv};
|
||||
|
||||
@item
|
||||
there are no input options for this input;
|
||||
|
||||
@item
|
||||
there is a single output @file{OUTPUT.mp4};
|
||||
|
||||
@item
|
||||
there are two output options for this output:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
@code{-map 0:1} selects the input stream to be used - from input with index 0
|
||||
(i.e. the first one) the stream with index 1 (i.e. the second one);
|
||||
|
||||
@item
|
||||
@code{-c copy} selects the @code{copy} encoder, i.e. streamcopy with no decoding
|
||||
or encoding.
|
||||
@end itemize
|
||||
|
||||
@end itemize
|
||||
|
||||
Streamcopy is useful for changing the elementary stream count, container format,
|
||||
or modifying container-level metadata. Since there is no decoding or encoding,
|
||||
it is very fast and there is no quality loss. However, it might not work in some
|
||||
cases because of a variety of factors (e.g. certain information required by the
|
||||
target container is not available in the source). Applying filters is obviously
|
||||
also impossible, since filters work on decoded frames.
|
||||
|
||||
More complex streamcopy scenarios can be constructed - e.g. combining streams
|
||||
from two input files into a single output:
|
||||
@verbatim
|
||||
┌──────────┬────────────────────┐ ┌────────────────────┬───────────┐
|
||||
│ demuxer 0│ │ packets │ │ muxer │
|
||||
╞══════════╡elementary stream 0 ├────────►│elementary stream 0 ╞═══════════╡
|
||||
│INPUT0.mkv│ │ │ │OUTPUT.mp4 │
|
||||
└──────────┴────────────────────┘ ├────────────────────┤ │
|
||||
┌──────────┬────────────────────┐ │ │ │
|
||||
│ demuxer 1│ │ packets │elementary stream 1 │ │
|
||||
╞══════════╡elementary stream 0 ├────────►│ │ │
|
||||
│INPUT1.aac│ │ └────────────────────┴───────────┘
|
||||
└──────────┴────────────────────┘
|
||||
@end verbatim
|
||||
that can be built by the commandline
|
||||
@example
|
||||
ffmpeg -i INPUT0.mkv -i INPUT1.aac -map 0:0 -map 1:0 -c copy OUTPUT.mp4
|
||||
@end example
|
||||
|
||||
The output @option{-map} option is used twice here, creating two streams in the
|
||||
output file - one fed by the first input and one by the second. The single
|
||||
instance of the @option{-c} option selects streamcopy for both of those streams.
|
||||
You could also use multiple instances of this option together with
|
||||
@ref{Stream specifiers} to apply different values to each stream, as will be
|
||||
demonstrated in following sections.
|
||||
|
||||
A converse scenario is splitting multiple streams from a single input into
|
||||
multiple outputs:
|
||||
@verbatim
|
||||
┌──────────┬─────────────────────┐ ┌───────────────────┬───────────┐
|
||||
│ demuxer │ │ packets │ │ muxer 0 │
|
||||
╞══════════╡ elementary stream 0 ├─────────►│elementary stream 0╞═══════════╡
|
||||
│ │ │ │ │OUTPUT0.mp4│
|
||||
│INPUT.mkv ├─────────────────────┤ └───────────────────┴───────────┘
|
||||
│ │ │ packets ┌───────────────────┬───────────┐
|
||||
│ │ elementary stream 1 ├─────────►│ │ muxer 1 │
|
||||
│ │ │ │elementary stream 0╞═══════════╡
|
||||
└──────────┴─────────────────────┘ │ │OUTPUT1.mp4│
|
||||
└───────────────────┴───────────┘
|
||||
@end verbatim
|
||||
built with
|
||||
@example
|
||||
ffmpeg -i INPUT.mkv -map 0:0 -c copy OUTPUT0.mp4 -map 0:1 -c copy OUTPUT1.mp4
|
||||
@end example
|
||||
Note how a separate instance of the @option{-c} option is needed for every
|
||||
output file even though their values are the same. This is because non-global
|
||||
options (which is most of them) only apply in the context of the file before
|
||||
which they are placed.
|
||||
|
||||
These examples can of course be further generalized into arbitrary remappings
|
||||
of any number of inputs into any number of outputs.
|
||||
|
||||
@section Transcoding
|
||||
@emph{Transcoding} is the process of decoding a stream and then encoding it
|
||||
again. Since encoding tends to be computationally expensive and in most cases
|
||||
degrades the stream quality (i.e. it is @emph{lossy}), you should only transcode
|
||||
when you need to and perform streamcopy otherwise. Typical reasons to transcode
|
||||
are:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
applying filters - e.g. resizing, deinterlacing, or overlaying video; resampling
|
||||
or mixing audio;
|
||||
|
||||
@item
|
||||
you want to feed the stream to something that cannot decode the original codec.
|
||||
@end itemize
|
||||
Note that @command{ffmpeg} will transcode all audio, video, and subtitle streams
|
||||
unless you specify @option{-c copy} for them.
|
||||
|
||||
Consider an example pipeline that reads an input file with one audio and one
|
||||
video stream, transcodes the video and copies the audio into a single output
|
||||
file. This can be schematically represented as follows
|
||||
@verbatim
|
||||
┌──────────┬─────────────────────┐
|
||||
│ demuxer │ │ audio packets
|
||||
╞══════════╡ stream 0 (audio) ├─────────────────────────────────────╮
|
||||
│ │ │ │
|
||||
│INPUT.mkv ├─────────────────────┤ video ┌─────────┐ raw │
|
||||
│ │ │ packets │ video │ video frames │
|
||||
│ │ stream 1 (video) ├─────────►│ decoder ├──────────────╮ │
|
||||
│ │ │ │ │ │ │
|
||||
└──────────┴─────────────────────┘ └─────────┘ │ │
|
||||
▼ ▼
|
||||
│ │
|
||||
┌──────────┬─────────────────────┐ video ┌─────────┐ │ │
|
||||
│ muxer │ │ packets │ video │ │ │
|
||||
╞══════════╡ stream 0 (video) │◄─────────┤ encoder ├──────────────╯ │
|
||||
│ │ │ │(libx264)│ │
|
||||
│OUTPUT.mp4├─────────────────────┤ └─────────┘ │
|
||||
│ │ │ │
|
||||
│ │ stream 1 (audio) │◄────────────────────────────────────╯
|
||||
│ │ │
|
||||
└──────────┴─────────────────────┘
|
||||
@end verbatim
|
||||
and implemented with the following commandline:
|
||||
@example
|
||||
ffmpeg -i INPUT.mkv -map 0:v -map 0:a -c:v libx264 -c:a copy OUTPUT.mp4
|
||||
@end example
|
||||
Note how it uses stream specifiers @code{:v} and @code{:a} to select input
|
||||
streams and apply different values of the @option{-c} option to them; see the
|
||||
@ref{Stream specifiers} section for more details.
|
||||
@command{ffmpeg} calls the libavformat library (containing demuxers) to read
|
||||
input files and get packets containing encoded data from them. When there are
|
||||
multiple input files, @command{ffmpeg} tries to keep them synchronized by
|
||||
tracking lowest timestamp on any active input stream.
|
||||
|
||||
Encoded packets are then passed to the decoder (unless streamcopy is selected
|
||||
for the stream, see further for a description). The decoder produces
|
||||
uncompressed frames (raw video/PCM audio/...) which can be processed further by
|
||||
filtering (see next section). After filtering, the frames are passed to the
|
||||
encoder, which encodes them and outputs encoded packets. Finally, those are
|
||||
passed to the muxer, which writes the encoded packets to the output file.
|
||||
|
||||
@section Filtering
|
||||
|
||||
When transcoding, audio and video streams can be filtered before encoding, with
|
||||
either a @emph{simple} or @emph{complex} filtergraph.
|
||||
Before encoding, @command{ffmpeg} can process raw audio and video frames using
|
||||
filters from the libavfilter library. Several chained filters form a filter
|
||||
graph. @command{ffmpeg} distinguishes between two types of filtergraphs:
|
||||
simple and complex.
|
||||
|
||||
@subsection Simple filtergraphs
|
||||
|
||||
Simple filtergraphs are those that have exactly one input and output, both of
|
||||
the same type (audio or video). They are configured with the per-stream
|
||||
@option{-filter} option (with @option{-vf} and @option{-af} aliases for
|
||||
@option{-filter:v} (video) and @option{-filter:a} (audio) respectively). Note
|
||||
that simple filtergraphs are tied to their output stream, so e.g. if you have
|
||||
multiple audio streams, @option{-af} will create a separate filtergraph for each
|
||||
one.
|
||||
the same type. In the above diagram they can be represented by simply inserting
|
||||
an additional step between decoding and encoding:
|
||||
|
||||
Taking the transcoding example from above, adding filtering (and omitting audio,
|
||||
for clarity) makes it look like this:
|
||||
@verbatim
|
||||
┌──────────┬───────────────┐
|
||||
│ demuxer │ │ ┌─────────┐
|
||||
╞══════════╡ video stream │ packets │ video │ frames
|
||||
│INPUT.mkv │ ├─────────►│ decoder ├─────►───╮
|
||||
│ │ │ └─────────┘ │
|
||||
└──────────┴───────────────┘ │
|
||||
╭───────────◄───────────╯
|
||||
│ ┌────────────────────────┐
|
||||
│ │ simple filtergraph │
|
||||
│ ╞════════════════════════╡
|
||||
│ │ ┌───────┐ ┌───────┐ │
|
||||
╰──►├─►│ yadif ├─►│ scale ├─►├╮
|
||||
│ └───────┘ └───────┘ ││
|
||||
└────────────────────────┘│
|
||||
│
|
||||
│
|
||||
┌──────────┬───────────────┐ video ┌─────────┐ │
|
||||
│ muxer │ │ packets │ video │ │
|
||||
╞══════════╡ video stream │◄─────────┤ encoder ├───────◄───────╯
|
||||
│OUTPUT.mp4│ │ │ │
|
||||
│ │ │ └─────────┘
|
||||
└──────────┴───────────────┘
|
||||
_________ ______________
|
||||
| | | |
|
||||
| decoded | | encoded data |
|
||||
| frames |\ _ | packets |
|
||||
|_________| \ /||______________|
|
||||
\ __________ /
|
||||
simple _\|| | / encoder
|
||||
filtergraph | filtered |/
|
||||
| frames |
|
||||
|__________|
|
||||
|
||||
@end verbatim
|
||||
|
||||
@subsection Complex filtergraphs
|
||||
Simple filtergraphs are configured with the per-stream @option{-filter} option
|
||||
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
|
||||
A simple filtergraph for video can look for example like this:
|
||||
|
||||
@verbatim
|
||||
_______ _____________ _______ ________
|
||||
| | | | | | | |
|
||||
| input | ---> | deinterlace | ---> | scale | ---> | output |
|
||||
|_______| |_____________| |_______| |________|
|
||||
|
||||
@end verbatim
|
||||
|
||||
Note that some filters change frame properties but not frame contents. E.g. the
|
||||
@code{fps} filter in the example above changes number of frames, but does not
|
||||
touch the frame contents. Another example is the @code{setpts} filter, which
|
||||
only sets timestamps and otherwise passes the frames unchanged.
|
||||
|
||||
@subsection Complex filtergraphs
|
||||
Complex filtergraphs are those which cannot be described as simply a linear
|
||||
processing chain applied to one stream. This is the case, for example, when the
|
||||
graph has more than one input and/or output, or when output stream type is
|
||||
different from input. Complex filtergraphs are configured with the
|
||||
@option{-filter_complex} option. Note that this option is global, since a
|
||||
complex filtergraph, by its nature, cannot be unambiguously associated with a
|
||||
single stream or file. Each instance of @option{-filter_complex} creates a new
|
||||
complex filtergraph, and there can be any number of them.
|
||||
processing chain applied to one stream. This is the case, for example, when the graph has
|
||||
more than one input and/or output, or when output stream type is different from
|
||||
input. They can be represented with the following diagram:
|
||||
|
||||
@verbatim
|
||||
_________
|
||||
| |
|
||||
| input 0 |\ __________
|
||||
|_________| \ | |
|
||||
\ _________ /| output 0 |
|
||||
\ | | / |__________|
|
||||
_________ \| complex | /
|
||||
| | | |/
|
||||
| input 1 |---->| filter |\
|
||||
|_________| | | \ __________
|
||||
/| graph | \ | |
|
||||
/ | | \| output 1 |
|
||||
_________ / |_________| |__________|
|
||||
| | /
|
||||
| input 2 |/
|
||||
|_________|
|
||||
|
||||
@end verbatim
|
||||
|
||||
Complex filtergraphs are configured with the @option{-filter_complex} option.
|
||||
Note that this option is global, since a complex filtergraph, by its nature,
|
||||
cannot be unambiguously associated with a single stream or file.
|
||||
|
||||
The @option{-lavfi} option is equivalent to @option{-filter_complex}.
|
||||
|
||||
A trivial example of a complex filtergraph is the @code{overlay} filter, which
|
||||
has two video inputs and one video output, containing one video overlaid on top
|
||||
of the other. Its audio counterpart is the @code{amix} filter.
|
||||
|
||||
@anchor{Loopback decoders}
|
||||
@section Stream copy
|
||||
Stream copy is a mode selected by supplying the @code{copy} parameter to the
|
||||
@option{-codec} option. It makes @command{ffmpeg} omit the decoding and encoding
|
||||
step for the specified stream, so it does only demuxing and muxing. It is useful
|
||||
for changing the container format or modifying container-level metadata. The
|
||||
diagram above will, in this case, simplify to this:
|
||||
|
||||
@verbatim
|
||||
_______ ______________ ________
|
||||
| | | | | |
|
||||
| input | demuxer | encoded data | muxer | output |
|
||||
| file | ---------> | packets | -------> | file |
|
||||
|_______| |______________| |________|
|
||||
|
||||
@end verbatim
|
||||
|
||||
Since there is no decoding or encoding, it is very fast and there is no quality
|
||||
loss. However, it might not work in some cases because of many factors. Applying
|
||||
filters is obviously also impossible, since filters work on uncompressed data.
|
||||
|
||||
@section Loopback decoders
|
||||
While decoders are normally associated with demuxer streams, it is also possible
|
||||
to create "loopback" decoders that decode the output from some encoder and allow
|
||||
|
|
@ -526,41 +259,8 @@ reads an input video and
|
|||
|
||||
@end itemize
|
||||
|
||||
Such a transcoding pipeline can be represented with the following diagram:
|
||||
@verbatim
|
||||
┌──────────┬───────────────┐
|
||||
│ demuxer │ │ ┌─────────┐ ┌─────────┐ ┌────────────────────┐
|
||||
╞══════════╡ video stream │ │ video │ │ video │ │ null muxer │
|
||||
│ INPUT │ ├──►│ decoder ├──┬────────►│ encoder ├─┬─►│(discards its input)│
|
||||
│ │ │ └─────────┘ │ │(libx264)│ │ └────────────────────┘
|
||||
└──────────┴───────────────┘ │ └─────────┘ │
|
||||
╭───────◄──╯ ┌─────────┐ │
|
||||
│ │loopback │ │
|
||||
│ ╭─────◄──────┤ decoder ├────◄──╯
|
||||
│ │ └─────────┘
|
||||
│ │
|
||||
│ │
|
||||
│ │ ┌───────────────────┐
|
||||
│ │ │complex filtergraph│
|
||||
│ │ ╞═══════════════════╡
|
||||
│ │ │ ┌─────────────┐ │
|
||||
╰─╫─►├─►│ hstack ├─►├╮
|
||||
╰─►├─►│ │ ││
|
||||
│ └─────────────┘ ││
|
||||
└───────────────────┘│
|
||||
│
|
||||
┌──────────┬───────────────┐ ┌─────────┐ │
|
||||
│ muxer │ │ │ video │ │
|
||||
╞══════════╡ video stream │◄─┤ encoder ├───────◄──────────╯
|
||||
│ OUTPUT │ │ │ (ffv1) │
|
||||
│ │ │ └─────────┘
|
||||
└──────────┴───────────────┘
|
||||
@end verbatim
|
||||
|
||||
|
||||
@c man end DETAILED DESCRIPTION
|
||||
|
||||
@anchor{Stream selection}
|
||||
@chapter Stream selection
|
||||
@c man begin STREAM SELECTION
|
||||
|
||||
|
|
@ -921,25 +621,24 @@ ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
|
|||
@end example
|
||||
|
||||
@item -disposition[:stream_specifier] @var{value} (@emph{output,per-stream})
|
||||
Sets the disposition flags for a stream.
|
||||
Sets the disposition for a stream.
|
||||
|
||||
Default value: by default, all disposition flags are copied from the input stream,
|
||||
unless the output stream this option applies to is fed by a complex filtergraph
|
||||
- in that case no disposition flags are set by default.
|
||||
By default, the disposition is copied from the input stream, unless the output
|
||||
stream this option applies to is fed by a complex filtergraph - in that case the
|
||||
disposition is unset by default.
|
||||
|
||||
@var{value} is a sequence of disposition flags separated by '+' or '-'. A '+'
|
||||
prefix adds the given disposition, '-' removes it. If the first flag is also
|
||||
prefixed with '+' or '-', the resulting disposition is the default value
|
||||
updated by @var{value}. If the first flag is not prefixed, the resulting
|
||||
disposition is @var{value}. It is also possible to clear the disposition by
|
||||
setting it to 0.
|
||||
@var{value} is a sequence of items separated by '+' or '-'. The first item may
|
||||
also be prefixed with '+' or '-', in which case this option modifies the default
|
||||
value. Otherwise (the first item is not prefixed) this option overrides the
|
||||
default value. A '+' prefix adds the given disposition, '-' removes it. It is
|
||||
also possible to clear the disposition by setting it to 0.
|
||||
|
||||
If no @code{-disposition} options were specified for an output file, ffmpeg will
|
||||
automatically set the 'default' disposition flag on the first stream of each type,
|
||||
automatically set the 'default' disposition on the first stream of each type,
|
||||
when there are multiple streams of this type in the output file and no stream of
|
||||
that type is already marked as default.
|
||||
|
||||
The @code{-dispositions} option lists the known disposition flags.
|
||||
The @code{-dispositions} option lists the known dispositions.
|
||||
|
||||
For example, to make the second audio stream the default stream:
|
||||
@example
|
||||
|
|
@ -957,29 +656,6 @@ To add an embedded cover/thumbnail:
|
|||
ffmpeg -i in.mp4 -i IMAGE -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic out.mp4
|
||||
@end example
|
||||
|
||||
To add the 'original' and remove the 'comment' disposition flag from the first
|
||||
audio stream without removing its other disposition flags:
|
||||
@example
|
||||
ffmpeg -i in.mkv -c copy -disposition:a:0 +original-comment out.mkv
|
||||
@end example
|
||||
|
||||
To remove the 'original' and add the 'comment' disposition flag to the first
|
||||
audio stream without removing its other disposition flags:
|
||||
@example
|
||||
ffmpeg -i in.mkv -c copy -disposition:a:0 -original+comment out.mkv
|
||||
@end example
|
||||
|
||||
To set only the 'original' and 'comment' disposition flags on the first audio
|
||||
stream (and remove its other disposition flags):
|
||||
@example
|
||||
ffmpeg -i in.mkv -c copy -disposition:a:0 original+comment out.mkv
|
||||
@end example
|
||||
|
||||
To remove all disposition flags from the first audio stream:
|
||||
@example
|
||||
ffmpeg -i in.mkv -c copy -disposition:a:0 0 out.mkv
|
||||
@end example
|
||||
|
||||
Not all muxers support embedded thumbnails, and those who do, only support a few formats, like JPEG or PNG.
|
||||
|
||||
@item -program [title=@var{title}:][program_num=@var{program_num}:]st=@var{stream}[:st=@var{stream}...] (@emph{output})
|
||||
|
|
@ -1048,7 +724,7 @@ The following flags are available:
|
|||
|
||||
@table @option
|
||||
@item recon_gain
|
||||
Whether to signal if recon_gain is present as metadata in parameter blocks within frames
|
||||
Whether to signal if recon_gain is present as metadata in parameter blocks within frames
|
||||
@end table
|
||||
|
||||
@item output_gain
|
||||
|
|
@ -1196,9 +872,9 @@ ffmpeg -i front.wav -i back.wav -i center.wav -i lfe.wav
|
|||
demixing=parameter_id=998,
|
||||
recon_gain=parameter_id=101,
|
||||
layer=ch_layout=stereo,
|
||||
layer=ch_layout=5.1(side),
|
||||
layer=ch_layout=5.1,
|
||||
-stream_group type=iamf_mix_presentation:id=2:stg=0:annotations=en-us=Mix_Presentation,
|
||||
submix=parameter_id=100:parameter_rate=48000|element=stg=0:parameter_id=100:annotations=en-us=Scalable_Submix|layout=sound_system=stereo|layout=sound_system=5.1(side)
|
||||
submix=parameter_id=100:parameter_rate=48000|element=stg=0:parameter_id=100:annotations=en-us=Scalable_Submix|layout=sound_system=stereo|layout=sound_system=5.1
|
||||
-streamid 0:0 -streamid 1:1 -streamid 2:2 -streamid 3:3 output.iamf
|
||||
@end example
|
||||
|
||||
|
|
@ -1373,62 +1049,31 @@ The properties where a change triggers reinitialization are,
|
|||
for video, frame resolution or pixel format;
|
||||
for audio, sample format, sample rate, channel count or channel layout.
|
||||
|
||||
@item -drop_changed[:@var{stream_specifier}] @var{integer} (@emph{input,per-stream})
|
||||
This boolean option determines whether a frame with differing frame parameters mid-stream
|
||||
gets dropped instead of leading to filtergraph reinitialization, as that would lead to loss
|
||||
of filter state. Generally useful to avoid corrupted yet decodable packets in live streaming
|
||||
inputs. Default is false.
|
||||
|
||||
@item -filter_threads @var{nb_threads} (@emph{global})
|
||||
Defines how many threads are used to process a filter pipeline. Each pipeline
|
||||
will produce a thread pool with this many threads available for parallel processing.
|
||||
The default is the number of available CPUs.
|
||||
|
||||
@item -filter_buffered_frames @var{nb_frames} (@emph{global})
|
||||
Defines the maximum number of buffered frames allowed in a filtergraph. Under
|
||||
normal circumstances, a filtergraph should not buffer more than a few frames,
|
||||
especially if frames are being fed to it and read from it in a balanced way
|
||||
(which is the intended behavior in ffmpeg). That said, this option allows you
|
||||
to limit the total number of frames buffered across all links in a filtergraph.
|
||||
If more frames are generated, filtering is aborted and an error is returned.
|
||||
The default value is 0, which means no limit.
|
||||
|
||||
@item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream})
|
||||
Specify the preset for matching stream(s).
|
||||
|
||||
@item -stats (@emph{global})
|
||||
Log encoding progress/statistics as "info"-level log (see @code{-loglevel}).
|
||||
It is on by default, to explicitly disable it you need to specify @code{-nostats}.
|
||||
Print encoding progress/statistics. It is on by default, to explicitly
|
||||
disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -stats_period @var{time} (@emph{global})
|
||||
Set period at which encoding progress/statistics are updated. Default is 0.5 seconds.
|
||||
|
||||
@item -print_graphs (@emph{global})
|
||||
Prints execution graph details to stderr in the format set via -print_graphs_format.
|
||||
|
||||
@item -print_graphs_file @var{filename} (@emph{global})
|
||||
Writes execution graph details to the specified file in the format set via -print_graphs_format.
|
||||
|
||||
@item -print_graphs_format @var{format} (@emph{global})
|
||||
Sets the output format (available formats are: default, compact, csv, flat, ini, json, xml, mermaid, mermaidhtml)
|
||||
The default format is json.
|
||||
|
||||
@item -progress @var{url} (@emph{global})
|
||||
Send program-friendly progress information to @var{url}.
|
||||
|
||||
Progress information is written periodically and at the end of
|
||||
the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key}
|
||||
consists of only alphanumeric characters. The last key of a sequence of
|
||||
progress information is always "progress" with the value "continue" or "end".
|
||||
progress information is always "progress".
|
||||
|
||||
The update period is set using @code{-stats_period}.
|
||||
|
||||
For example, log progress information to stdout:
|
||||
|
||||
@example
|
||||
ffmpeg -progress pipe:1 -i in.mkv out.mkv
|
||||
@end example
|
||||
|
||||
@anchor{stdin option}
|
||||
@item -stdin
|
||||
Enable interaction on standard input. On by default unless standard input is
|
||||
|
|
@ -2010,9 +1655,6 @@ transcoding, without copying the frames into the system memory.
|
|||
|
||||
For it to work, both the decoder and the encoder must support QSV acceleration
|
||||
and no filters must be used.
|
||||
|
||||
@item videotoolbox
|
||||
Use Video Toolbox hardware acceleration.
|
||||
@end table
|
||||
|
||||
This option has no effect if the selected hwaccel is not available or not
|
||||
|
|
@ -2353,11 +1995,6 @@ Read input at native frame rate. This is equivalent to setting @code{-readrate 1
|
|||
@item -readrate_initial_burst @var{seconds}
|
||||
Set an initial read burst time, in seconds, after which @option{-re/-readrate}
|
||||
will be enforced.
|
||||
@item -readrate_catchup @var{speed} (@emph{input})
|
||||
If either the input or output is blocked leading to actual read speed falling behind the
|
||||
specified readrate, then this rate takes effect till the input catches up with the
|
||||
specified readrate. Must not be lower than the primary readrate.
|
||||
|
||||
@item -vsync @var{parameter} (@emph{global})
|
||||
@itemx -fps_mode[:@var{stream_specifier}] @var{parameter} (@emph{output,per-stream})
|
||||
Set video sync method / framerate mode. vsync is applied to all output video streams
|
||||
|
|
|
|||
|
|
@ -139,6 +139,13 @@ stream.
|
|||
All the container format information is printed within a section with
|
||||
name "FORMAT".
|
||||
|
||||
@item -show_format_entry @var{name}
|
||||
Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
|
||||
than once, then all specified entries will be shown.
|
||||
|
||||
This option is deprecated, use @code{show_entries} instead.
|
||||
|
||||
@item -show_entries @var{section_entries}
|
||||
Set list of entries to show.
|
||||
|
||||
|
|
@ -344,19 +351,6 @@ while other writers always print them. This option enables one to control this b
|
|||
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
|
||||
Default is @var{auto}.
|
||||
|
||||
@item -analyze_frames
|
||||
Analyze frames and/or their side data up to the provided read interval,
|
||||
providing additional information that may be useful at a stream level.
|
||||
Must be paired with the @option{-show_streams} option or it will have no effect.
|
||||
|
||||
Currently, the additional fields provided by this option when enabled are the
|
||||
@code{closed_captions} and @code{film_grain} fields.
|
||||
|
||||
For example, to analyze the first 20 seconds and populate these fields:
|
||||
@example
|
||||
ffprobe -show_streams -analyze_frames -read_intervals "%+20" INPUT
|
||||
@end example
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
|
@ -368,12 +362,6 @@ Read @var{input_url}.
|
|||
Write output to @var{output_url}. If not specified, the output is sent
|
||||
to stdout.
|
||||
|
||||
@item -c:@var{media_specifier} @var{codec_name}
|
||||
@itemx -codec:@var{media_specifier} @var{codec_name}
|
||||
Force a specific decoder implementation for the stream identified by
|
||||
@var{media_specifier}, which can assume the values @code{a} (audio),
|
||||
@code{v} (video), @code{s} (subtitle), and @code{d} (data).
|
||||
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
|
|
|
|||
|
|
@ -129,7 +129,6 @@
|
|||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="lossless" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
<xsd:attribute name="color_range" type="xsd:string"/>
|
||||
<xsd:attribute name="color_space" type="xsd:string"/>
|
||||
|
|
|
|||
|
|
@ -226,10 +226,6 @@ and the "Last message repeated n times" line will be omitted.
|
|||
Indicates that log output should add a @code{[level]} prefix to each message
|
||||
line. This can be used as an alternative to log coloring, e.g. when dumping the
|
||||
log to file.
|
||||
@item time
|
||||
Indicates that log lines should be prefixed with time information.
|
||||
@item datetime
|
||||
Indicates that log lines should be prefixed with date and time information.
|
||||
@end table
|
||||
Flags can also be used alone by adding a '+'/'-' prefix to set/reset a single
|
||||
flag without affecting other @var{flags} or changing @var{loglevel}. When
|
||||
|
|
|
|||
|
|
@ -214,7 +214,6 @@ Frame scheduling
|
|||
FF_FILTER_FORWARD_STATUS(inlink, outlink);
|
||||
FF_FILTER_FORWARD_STATUS_ALL(inlink, filter);
|
||||
FF_FILTER_FORWARD_WANTED(outlink, inlink);
|
||||
FF_FILTER_FORWARD_WANTED_ANY(filter, inlink);
|
||||
|
||||
filter_frame
|
||||
------------
|
||||
|
|
|
|||
2222
doc/filters.texi
2222
doc/filters.texi
File diff suppressed because it is too large
Load diff
|
|
@ -162,7 +162,7 @@ Then pass @code{--enable-libmp3lame} to configure to enable it.
|
|||
|
||||
@section LCEVCdec
|
||||
|
||||
FFmpeg can make use of the liblcevc_dec library for LCEVC enhancement layer
|
||||
FFmpeg can make use of the liblcevc_dec library for LCEVC enhancement layer
|
||||
decoding on supported bitstreams.
|
||||
|
||||
Go to @url{https://github.com/v-novaltd/LCEVCdec} and follow the instructions
|
||||
|
|
@ -625,7 +625,6 @@ library:
|
|||
@item raw AMR-NB @tab @tab X
|
||||
@item raw AMR-WB @tab @tab X
|
||||
@item raw APAC @tab @tab X
|
||||
@item raw APV @tab X @tab X
|
||||
@item raw aptX @tab X @tab X
|
||||
@item raw aptX HD @tab X @tab X
|
||||
@item raw Bonk @tab @tab X
|
||||
|
|
@ -638,7 +637,6 @@ library:
|
|||
@item raw E-AC-3 @tab X @tab X
|
||||
@item raw EVC @tab X @tab X
|
||||
@item raw FLAC @tab X @tab X
|
||||
@item raw G.728 @tab @tab X
|
||||
@item raw GSM @tab @tab X
|
||||
@item raw H.261 @tab X @tab X
|
||||
@item raw H.263 @tab X @tab X
|
||||
|
|
@ -897,7 +895,6 @@ following image formats are supported:
|
|||
@tab fourcc: apch,apcn,apcs,apco,ap4h,ap4x
|
||||
@item Apple QuickDraw @tab @tab X
|
||||
@tab fourcc: qdrw
|
||||
@item APV @tab @tab X
|
||||
@item Argonaut Video @tab @tab X
|
||||
@tab Used in some Argonaut games.
|
||||
@item Asus v1 @tab X @tab X
|
||||
|
|
@ -1112,7 +1109,6 @@ following image formats are supported:
|
|||
@item RealVideo 3.0 @tab @tab X
|
||||
@tab still far from ideal
|
||||
@item RealVideo 4.0 @tab @tab X
|
||||
@item RealVideo 6.0 @tab @tab X
|
||||
@item Renderware TXD (TeXture Dictionary) @tab @tab X
|
||||
@tab Texture dictionaries used by the Renderware Engine.
|
||||
@item RivaTuner Video @tab @tab X
|
||||
|
|
@ -1236,7 +1232,6 @@ following image formats are supported:
|
|||
@item ADPCM IMA Duck DK4 @tab @tab X
|
||||
@tab Used in some Sega Saturn console games.
|
||||
@item ADPCM IMA Radical @tab @tab X
|
||||
@item ADPCM IMA Xbox @tab @tab X
|
||||
@item ADPCM Microsoft @tab X @tab X
|
||||
@item ADPCM MS IMA @tab X @tab X
|
||||
@item ADPCM Nintendo Gamecube AFC @tab @tab X
|
||||
|
|
@ -1244,7 +1239,6 @@ following image formats are supported:
|
|||
@item ADPCM Nintendo THP @tab @tab X
|
||||
@item ADPCM Playstation @tab @tab X
|
||||
@item ADPCM QT IMA @tab X @tab X
|
||||
@item ADPCM Sanyo @tab @tab X
|
||||
@item ADPCM SEGA CRI ADX @tab X @tab X
|
||||
@tab Used in Sega Dreamcast games.
|
||||
@item ADPCM Shockwave Flash @tab X @tab X
|
||||
|
|
@ -1319,7 +1313,6 @@ following image formats are supported:
|
|||
@item FLAC (Free Lossless Audio Codec) @tab X @tab IX
|
||||
@item FTR Voice @tab @tab X
|
||||
@item G.723.1 @tab X @tab X
|
||||
@item G.728 @tab @tab X
|
||||
@item G.729 @tab @tab X
|
||||
@item GSM @tab E @tab X
|
||||
@tab encoding supported through external library libgsm
|
||||
|
|
|
|||
|
|
@ -71,6 +71,7 @@ git clone git@@ffmpeg.org:ffmpeg-web <target>
|
|||
|
||||
This will put the source of the FFmpeg website into the directory
|
||||
@var{<target>} and let you push back your changes to the remote repository.
|
||||
(Note that @var{gil} stands for GItoLite and is not a typo of @var{git}.)
|
||||
|
||||
If you don't have write-access to the ffmpeg-web repository, you can
|
||||
create patches after making a read-only ffmpeg-web clone:
|
||||
|
|
@ -142,7 +143,7 @@ git log <filename(s)>
|
|||
@end example
|
||||
|
||||
You may also use the graphical tools like @command{gitview} or @command{gitk}
|
||||
or the web interface available at @url{https://git.ffmpeg.org/ffmpeg.git}.
|
||||
or the web interface available at @url{http://source.ffmpeg.org/}.
|
||||
|
||||
@section Checking source tree status
|
||||
|
||||
|
|
|
|||
|
|
@ -1,6 +0,0 @@
|
|||
ffmpeg mono ./ffmpeg.html
|
||||
ffmpeg-filters mono ./ffmpeg-filters.html
|
||||
ffmpeg-formats mono ./ffmpeg-formats.html
|
||||
ffmpeg-resampler mono ./ffmpeg-resampler.html
|
||||
ffmpeg-scaler mono ./ffmpeg-scaler.html
|
||||
ffmpeg-utils mono ./ffmpeg-utils.html
|
||||
|
|
@ -220,6 +220,41 @@ $ ffmpeg -f avfoundation -capture_raw_data true -i "zr100:none" out.dv
|
|||
|
||||
@end itemize
|
||||
|
||||
@section bktr
|
||||
|
||||
BSD video input device. Deprecated and will be removed - please contact
|
||||
the developers if you are interested in maintaining it.
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item framerate
|
||||
Set the frame rate.
|
||||
|
||||
@item video_size
|
||||
Set the video frame size. Default is @code{vga}.
|
||||
|
||||
@item standard
|
||||
|
||||
Available values are:
|
||||
@table @samp
|
||||
@item pal
|
||||
|
||||
@item ntsc
|
||||
|
||||
@item secam
|
||||
|
||||
@item paln
|
||||
|
||||
@item palm
|
||||
|
||||
@item ntscj
|
||||
|
||||
@end table
|
||||
|
||||
@end table
|
||||
|
||||
@section decklink
|
||||
|
||||
The decklink input device provides capture capabilities for Blackmagic
|
||||
|
|
@ -704,7 +739,7 @@ Win32 GDI-based screen capture device.
|
|||
|
||||
This device allows you to capture a region of the display on Windows.
|
||||
|
||||
Amongst options for the input filenames are such elements as:
|
||||
Amongst options for the input filenames are such elements as:
|
||||
@example
|
||||
desktop
|
||||
@end example
|
||||
|
|
|
|||
|
|
@ -1,18 +1,8 @@
|
|||
FFmpeg Infrastructure:
|
||||
======================
|
||||
|
||||
Trademark:
|
||||
~~~~~~~~~~
|
||||
ffmpeg trademark registered in france by ffmpeg creator.
|
||||
|
||||
|
||||
Domain + NS:
|
||||
~~~~~~~~~~~~
|
||||
ffmpeg.org domain name
|
||||
ns1.avcodec.org Primary Name server (provided by Telepoint, hosted at Telepoint in bulgaria)
|
||||
ns2.avcodec.org Replica Name server (provided by an ffmpeg developer, hosted at Hetzner in germany)
|
||||
ns3.avcodec.org Replica Name server (provided by an ffmpeg developer, hosted at Prometeus Cdlan in italy)
|
||||
|
||||
|
||||
Servers:
|
||||
~~~~~~~~
|
||||
|
|
@ -102,47 +92,8 @@ You need a VM, docker container for FFmpeg? contact root at ffmpeg.org
|
|||
|
||||
|
||||
|
||||
Multimedia Wiki:
|
||||
~~~~~~~~~~~~~~~~
|
||||
The Multimedia Wiki http://wiki.multimedia.cx is run by Mike Melanson.
|
||||
While not directly part of FFmpeg infrastructure, technical codec and format
|
||||
information written by FFmpeg developers can be found within.
|
||||
It is our unofficial official tech wiki. For access contact Mike.
|
||||
|
||||
|
||||
|
||||
IRC:
|
||||
~~~~
|
||||
irc channels are at https://libera.chat/
|
||||
irc channel archives are at https://libera.irclog.whitequark.org
|
||||
|
||||
#ffmpeg and #ffmpeg-devel founder/admins: BtbN, Michael, Compn
|
||||
#ffmpeg-meeting founder/admins: BtbN, Michael
|
||||
|
||||
|
||||
Twitter aka X:
|
||||
~~~~~~~~~~~~~~
|
||||
https://twitter.com/FFmpeg or https://x.com/FFmpeg
|
||||
|
||||
If you would like to post to twitter please contact twitter MAINTAINERS
|
||||
for access. We want more developers posting to twitter!
|
||||
|
||||
|
||||
|
||||
Reddit:
|
||||
~~~~~~~
|
||||
https://www.reddit.com/r/ffmpeg/
|
||||
moderated by Gyan
|
||||
|
||||
|
||||
|
||||
Facebook:
|
||||
~~~~~~~~~
|
||||
https://www.facebook.com/ffmpeg
|
||||
???
|
||||
|
||||
|
||||
|
||||
Wikipedia entry:
|
||||
~~~~~~~~~~~~~~~~
|
||||
https://en.wikipedia.org/wiki/FFmpeg
|
||||
|
|
|
|||
115
doc/libav-merge.txt
Normal file
115
doc/libav-merge.txt
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
CONTEXT
|
||||
=======
|
||||
|
||||
The FFmpeg project merges all the changes from the Libav project
|
||||
(https://libav.org) since the origin of the fork (around 2011).
|
||||
|
||||
With the exceptions of some commits due to technical/political disagreements or
|
||||
issues, the changes are merged on a more or less regular schedule (daily for
|
||||
years thanks to Michael, but more sparse nowadays).
|
||||
|
||||
WHY
|
||||
===
|
||||
|
||||
The majority of the active developers believe the project needs to keep this
|
||||
policy for various reasons.
|
||||
|
||||
The most important one is that we don't want our users to have to choose
|
||||
between two distributors of libraries of the exact same name in order to have a
|
||||
different set of features and bugfixes. By taking the responsibility of
|
||||
unifying the two codebases, we allow users to benefit from the changes from the
|
||||
two teams.
|
||||
|
||||
Today, FFmpeg has a much larger user database (we are distributed by every
|
||||
major distribution), so we consider this mission a priority.
|
||||
|
||||
A different approach to the merge could have been to pick the changes we are
|
||||
interested in and drop most of the cosmetics and other less important changes.
|
||||
Unfortunately, this makes the following picks much harder, especially since the
|
||||
Libav project is involved in various deep API changes. As a result, we decide
|
||||
to virtually take everything done there.
|
||||
|
||||
Any Libav developer is of course welcome anytime to contribute directly to the
|
||||
FFmpeg tree. Of course, we fully understand and are forced to accept that very
|
||||
few Libav developers are interested in doing so, but we still want to recognize
|
||||
their work. This leads us to create merge commits for every single one from
|
||||
Libav. The original commit appears totally unchanged with full authorship in
|
||||
our history (and the conflict are solved in the merge one). That way, not a
|
||||
single thing from Libav will be lost in the future in case some reunification
|
||||
happens, or that project disappears one way or another.
|
||||
|
||||
DOWNSIDES
|
||||
=========
|
||||
|
||||
Of course, there are many downsides to this approach.
|
||||
|
||||
- It causes a non negligible merge commits pollution. We make sure there are
|
||||
not several level of merges entangled (we do a 1:1 merge/commit), but it's
|
||||
still a non-linear history.
|
||||
|
||||
- Much duplicated work. For instance, we added libavresample in our tree to
|
||||
keep compatibility with Libav when our libswresample was already covering the
|
||||
exact same purpose. The same thing happened for various elements such as the
|
||||
ProRes support (but differences in features, bugs, licenses, ...). There are
|
||||
much work to do to unify them, and any help is very much welcome.
|
||||
|
||||
- So much manpower from both FFmpeg and Libav is lost because of this mess. We
|
||||
know it, and we don't know how to fix it. It takes incredible time to do
|
||||
these merges, so we have even less time to work on things we personally care
|
||||
about. The bad vibes also do not help with keeping our developers motivated.
|
||||
|
||||
- There is a growing technical risk factor with the merges due to the codebase
|
||||
differing more and more.
|
||||
|
||||
MERGE GUIDELINES
|
||||
================
|
||||
|
||||
The following gives developer guidelines on how to proceed when merging Libav commits.
|
||||
|
||||
Before starting, you can reduce the risk of errors on merge conflicts by using
|
||||
a different merge conflict style:
|
||||
|
||||
$ git config --global merge.conflictstyle diff3
|
||||
|
||||
tools/libav-merge-next-commit is a script to help merging the next commit in
|
||||
the queue. It assumes a remote named libav. It has two modes: merge, and noop.
|
||||
The noop mode creates a merge with no change to the HEAD. You can pass a hash
|
||||
as extra argument to reference a justification (it is common that we already
|
||||
have the change done in FFmpeg).
|
||||
|
||||
Also see tools/murge, you can copy and paste a 3 way conflict into its stdin
|
||||
and it will display colored diffs. Any arguments to murge (like ones to suppress
|
||||
whitespace differences) are passed into colordiff.
|
||||
|
||||
TODO/FIXME/UNMERGED
|
||||
===================
|
||||
|
||||
Stuff that didn't reach the codebase:
|
||||
-------------------------------------
|
||||
|
||||
- HEVC DSP and x86 MC SIMD improvements from Libav (see https://ffmpeg.org/pipermail/ffmpeg-devel/2015-December/184777.html)
|
||||
- 1f821750f hevcdsp: split the qpel functions by width instead of by the subpixel fraction
|
||||
- 818bfe7f0 hevcdsp: split the epel functions by width
|
||||
- 688417399 hevcdsp: split the pred functions by width
|
||||
- a853388d2 hevc: change the stride of the MC buffer to be in bytes instead of elements
|
||||
- 0cef06df0 checkasm: add HEVC MC tests
|
||||
- e7078e842 hevcdsp: add x86 SIMD for MC
|
||||
- 7993ec19a hevc: Add hevc_get_pixel_4/8/12/16/24/32/48/64
|
||||
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
|
||||
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
|
||||
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
|
||||
- Support recovery from an already present HLS playlist (see 16cb06bb30)
|
||||
- Remove all output devices (see 8e7e042d41, 8d3db95f20, 6ce13070bd, d46cd24986 and https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/216904.html)
|
||||
- avcodec/libaomenc: export the Sequence Header OBU as extradata (See a024c3ce9a)
|
||||
|
||||
Collateral damage that needs work locally:
|
||||
------------------------------------------
|
||||
|
||||
- Merge proresenc_anatoliy.c and proresenc_kostya.c
|
||||
- Fix MIPS AC3 downmix
|
||||
|
||||
Extra changes needed to be aligned with Libav:
|
||||
----------------------------------------------
|
||||
|
||||
- Switching our examples to the new encode/decode API (see 67d28f4a0f)
|
||||
- HEVC IDCT bit depth 12-bit support (Libav added 8 and 10 but doesn't have 12)
|
||||
|
|
@ -60,5 +60,6 @@ Files that have MIPS copyright notice in them:
|
|||
compute_antialias_float.h
|
||||
lsp_mips.h
|
||||
fmtconvert_mips.c
|
||||
iirfilter_mips.c
|
||||
mpegaudiodsp_mips_fixed.c
|
||||
mpegaudiodsp_mips_float.c
|
||||
|
|
|
|||
102
doc/muxers.texi
102
doc/muxers.texi
|
|
@ -319,7 +319,7 @@ This is the same as the @samp{vob} muxer with a few differences.
|
|||
@table @option
|
||||
@item muxrate @var{rate}
|
||||
Set user-defined mux rate expressed as a number of bits/s. If not
|
||||
specified the automatically computed mux rate is employed. Default value
|
||||
specified the automatically computed mux rate is employed. Default value
|
||||
is @code{0}.
|
||||
|
||||
@item preload @var{delay}
|
||||
|
|
@ -772,7 +772,7 @@ Force a delay expressed in seconds after the last frame of each
|
|||
repetition. Default value is @code{0.0}.
|
||||
|
||||
@item plays @var{repetitions}
|
||||
specify how many times to play the content, @code{0} causes an infinite
|
||||
specify how many times to play the content, @code{0} causes an infinite
|
||||
loop, with @code{1} there is no loop
|
||||
@end table
|
||||
|
||||
|
|
@ -1770,7 +1770,7 @@ for looping indefinitely (default).
|
|||
@item final_delay @var{delay}
|
||||
Force the delay (expressed in centiseconds) after the last frame. Each frame
|
||||
ends with a delay until the next frame. The default is @code{-1}, which is a
|
||||
special value to tell the muxer to reuse the previous delay. In case of a
|
||||
special value to tell the muxer to re-use the previous delay. In case of a
|
||||
loop, you might want to customize this value to mark a pause for instance.
|
||||
@end table
|
||||
|
||||
|
|
@ -1856,7 +1856,7 @@ This muxer creates an .f4m (Adobe Flash Media Manifest File) manifest, an .abst
|
|||
(Adobe Bootstrap File) for each stream, and segment files in a directory
|
||||
specified as the output.
|
||||
|
||||
These need to be accessed by an HDS player through HTTPS for it to be able to
|
||||
These need to be accessed by an HDS player through HTTPS for it to be able to
|
||||
perform playback on the generated stream.
|
||||
|
||||
@subsection Options
|
||||
|
|
@ -2436,14 +2436,13 @@ ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \
|
|||
@item
|
||||
Create a single variant stream. Add the @code{#EXT-X-MEDIA} tag with
|
||||
@code{TYPE=SUBTITLES} in the master playlist with webvtt subtitle group name
|
||||
'subtitle' and optional subtitle name, e.g. 'English'. Make sure the input
|
||||
file has one text subtitle stream at least.
|
||||
'subtitle'. Make sure the input file has one text subtitle stream at least.
|
||||
@example
|
||||
ffmpeg -y -i input_with_subtitle.mkv \
|
||||
-b:v:0 5250k -c:v h264 -pix_fmt yuv420p -profile:v main -level 4.1 \
|
||||
-b:a:0 256k \
|
||||
-c:s webvtt -c:a mp2 -ar 48000 -ac 2 -map 0:v -map 0:a:0 -map 0:s:0 \
|
||||
-f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle,sname:English" \
|
||||
-f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle" \
|
||||
-master_pl_name master.m3u8 -t 300 -hls_time 10 -hls_init_time 4 -hls_list_size \
|
||||
10 -master_pl_publish_rate 10 -hls_flags \
|
||||
delete_segments+discont_start+split_by_time ./tmp/video.m3u8
|
||||
|
|
@ -2538,7 +2537,7 @@ these applications, audio may be played back on a wide range of devices, e.g.,
|
|||
headphones, mobile phones, tablets, TVs, sound bars, home theater systems, and
|
||||
big screens.
|
||||
|
||||
This format was promoted and designed by Alliance for Open Media.
|
||||
This format was promoted and designed by Alliance for Open Media.
|
||||
|
||||
For more information about this format, see @url{https://aomedia.org/iamf/}.
|
||||
|
||||
|
|
@ -2940,44 +2939,6 @@ ffmpeg -i INPUT -f md5 -
|
|||
@end example
|
||||
@end itemize
|
||||
|
||||
@anchor{mccenc}
|
||||
@section mcc
|
||||
Muxer for MacCaption MCC files, it supports MCC versions 1.0 and 2.0.
|
||||
MCC files store VANC data, which can include closed captions (EIA-608 and CEA-708), ancillary time code, pan-scan data, etc.
|
||||
|
||||
@subsection Options
|
||||
|
||||
The muxer options are:
|
||||
|
||||
@table @option
|
||||
@item override_time_code_rate
|
||||
Override the @code{Time Code Rate} value in the output. Defaults to trying to deduce from the stream's @code{time_base}, which often doesn't work.
|
||||
@item use_u_alias
|
||||
Use the @code{U} alias for the byte sequence @code{E1h 00h 00h 00h}.
|
||||
Disabled by default because some @file{.mcc} files disagree on whether it has 2 or 3 zero bytes.
|
||||
@item mcc_version
|
||||
The MCC file format version. Must be either 1 or 2, defaults to 2.
|
||||
@item creation_program
|
||||
The creation program. Defaults to this version of FFmpeg.
|
||||
@item creation_time
|
||||
The creation time. Defaults to the current time.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
@itemize
|
||||
@item
|
||||
Extract a MXF @code{SMPTE_436M_ANC} stream from a MXF file and write it to a MCC file at 30 fps.
|
||||
@example
|
||||
ffmpeg -i input.mxf -c copy -map 0:d -override_time_code_rate 30 out.mcc
|
||||
@end example
|
||||
|
||||
@item
|
||||
Extract EIA-608/CTA-708 closed captions from a @file{.mp4} file and write them to a MCC file at 29.97 fps.
|
||||
@example
|
||||
ffmpeg -f lavfi -i "movie=input.mp4[out+subcc]" -c:s copy -map 0:s -override_time_code_rate 30000/1001 out.mcc
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@section microdvd
|
||||
MicroDVD subtitle format muxer.
|
||||
|
||||
|
|
@ -3917,53 +3878,4 @@ ffmpeg -f webm_dash_manifest -i video1.webm \
|
|||
manifest.xml
|
||||
@end example
|
||||
|
||||
@anchor{whip}
|
||||
@section whip
|
||||
|
||||
WebRTC (Real-Time Communication) muxer that supports sub-second latency streaming according to
|
||||
the WHIP (WebRTC-HTTP ingestion protocol) specification.
|
||||
|
||||
This is an experimental feature.
|
||||
|
||||
It uses HTTP as a signaling protocol to exchange SDP capabilities and ICE lite candidates. Then,
|
||||
it uses STUN binding requests and responses to establish a session over UDP. Subsequently, it
|
||||
initiates a DTLS handshake to exchange the SRTP encryption keys. Lastly, it splits video and
|
||||
audio frames into RTP packets and encrypts them using SRTP.
|
||||
|
||||
Ensure that you use H.264 without B frames and Opus for the audio codec. For example, to convert
|
||||
an input file with @command{ffmpeg} to WebRTC:
|
||||
@example
|
||||
ffmpeg -re -i input.mp4 -acodec libopus -ar 48000 -ac 2 \
|
||||
-vcodec libx264 -profile:v baseline -tune zerolatency -threads 1 -bf 0 \
|
||||
-f whip "http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream"
|
||||
@end example
|
||||
|
||||
For this example, we have employed low latency options, resulting in an end-to-end latency of
|
||||
approximately 150ms.
|
||||
|
||||
@subsection Options
|
||||
|
||||
This muxer supports the following options:
|
||||
|
||||
@table @option
|
||||
|
||||
@item handshake_timeout @var{integer}
|
||||
Set the timeout in milliseconds for ICE and DTLS handshake.
|
||||
Default value is 5000.
|
||||
|
||||
@item pkt_size @var{integer}
|
||||
Set the maximum size, in bytes, of RTP packets that are sent out.
|
||||
Default value is 1500.
|
||||
|
||||
@item authorization @var{string}
|
||||
The optional Bearer token for WHIP Authorization.
|
||||
|
||||
@item cert_file @var{string}
|
||||
The optional certificate file path for DTLS.
|
||||
|
||||
@item key_file @var{string}
|
||||
The optional private key file path for DTLS.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end MUXERS
|
||||
|
|
|
|||
|
|
@ -157,3 +157,4 @@ PFD[32] would for example be signed 32 bit little-endian IEEE float
|
|||
@item XVID @tab non-compliant MPEG-4 generated by old Xvid
|
||||
@item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug
|
||||
@end multitable
|
||||
|
||||
|
|
|
|||
|
|
@ -188,7 +188,7 @@ Code that depends on data in registries being untouched, should be written as
|
|||
a single __asm__() statement. Ideally, a single function contains only one
|
||||
__asm__() block.
|
||||
|
||||
Use external asm (nasm) or inline asm (__asm__()), do not use intrinsics.
|
||||
Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
|
||||
The latter requires a good optimizing compiler which gcc is not.
|
||||
|
||||
When debugging a x86 external asm compilation issue, if lost in the macro
|
||||
|
|
@ -199,7 +199,7 @@ actual lines causing issues.
|
|||
Inline asm vs. external asm
|
||||
---------------------------
|
||||
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
|
||||
and external asm (.s or .asm files, handled by an assembler such as nasm)
|
||||
and external asm (.s or .asm files, handled by an assembler such as nasm/yasm)
|
||||
are accepted in FFmpeg. Which one to use differs per specific case.
|
||||
|
||||
- if your code is intended to be inlined in a C function, inline asm is always
|
||||
|
|
|
|||
111
doc/outdevs.texi
111
doc/outdevs.texi
|
|
@ -301,6 +301,45 @@ ffmpeg -re -i INPUT -c:v rawvideo -pix_fmt bgra -f fbdev /dev/fb0
|
|||
|
||||
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
|
||||
|
||||
@section opengl
|
||||
OpenGL output device. Deprecated and will be removed.
|
||||
|
||||
To enable this output device you need to configure FFmpeg with @code{--enable-opengl}.
|
||||
|
||||
This output device allows one to render to OpenGL context.
|
||||
Context may be provided by application or default SDL window is created.
|
||||
|
||||
When device renders to external context, application must implement handlers for following messages:
|
||||
@code{AV_DEV_TO_APP_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
|
||||
@code{AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
|
||||
@code{AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER} - swap buffers.
|
||||
@code{AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
|
||||
Application is also required to inform a device about current resolution by sending @code{AV_APP_TO_DEV_WINDOW_SIZE} message.
|
||||
|
||||
@subsection Options
|
||||
@table @option
|
||||
|
||||
@item background
|
||||
Set background color. Black is a default.
|
||||
@item no_window
|
||||
Disables default SDL window when set to non-zero value.
|
||||
Application must provide OpenGL context and both @code{window_size_cb} and @code{window_swap_buffers_cb} callbacks when set.
|
||||
@item window_title
|
||||
Set the SDL window title, if not specified default to the filename specified for the output device.
|
||||
Ignored when @option{no_window} is set.
|
||||
@item window_size
|
||||
Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
|
||||
If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
|
||||
Mostly usable when @option{no_window} is not set.
|
||||
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
Play a file on SDL window using OpenGL rendering:
|
||||
@example
|
||||
ffmpeg -i INPUT -f opengl "window title"
|
||||
@end example
|
||||
|
||||
@section oss
|
||||
|
||||
OSS (Open Sound System) output device.
|
||||
|
|
@ -367,6 +406,78 @@ Play a file on default device on default server:
|
|||
ffmpeg -i INPUT -f pulse "stream name"
|
||||
@end example
|
||||
|
||||
@section sdl
|
||||
|
||||
SDL (Simple DirectMedia Layer) output device. Deprecated and will be removed.
|
||||
|
||||
For monitoring purposes in FFmpeg, pipes and a video player such as ffplay can be used:
|
||||
|
||||
@example
|
||||
ffmpeg -i INPUT -f nut -c:v rawvideo - | ffplay -
|
||||
@end example
|
||||
|
||||
"sdl2" can be used as alias for "sdl".
|
||||
|
||||
This output device allows one to show a video stream in an SDL
|
||||
window. Only one SDL window is allowed per application, so you can
|
||||
have only one instance of this output device in an application.
|
||||
|
||||
To enable this output device you need libsdl installed on your system
|
||||
when configuring your build.
|
||||
|
||||
For more information about SDL, check:
|
||||
@url{http://www.libsdl.org/}
|
||||
|
||||
@subsection Options
|
||||
|
||||
@table @option
|
||||
|
||||
@item window_borderless
|
||||
Set SDL window border off.
|
||||
Default value is 0 (enable window border).
|
||||
|
||||
@item window_enable_quit
|
||||
Enable quit action (using window button or keyboard key)
|
||||
when non-zero value is provided.
|
||||
Default value is 1 (enable quit action).
|
||||
|
||||
@item window_fullscreen
|
||||
Set fullscreen mode when non-zero value is provided.
|
||||
Default value is zero.
|
||||
|
||||
@item window_size
|
||||
Set the SDL window size, can be a string of the form
|
||||
@var{width}x@var{height} or a video size abbreviation.
|
||||
If not specified it defaults to the size of the input video,
|
||||
downscaled according to the aspect ratio.
|
||||
|
||||
@item window_title
|
||||
Set the SDL window title, if not specified default to the filename
|
||||
specified for the output device.
|
||||
|
||||
@item window_x
|
||||
@item window_y
|
||||
Set the position of the window on the screen.
|
||||
@end table
|
||||
|
||||
@subsection Interactive commands
|
||||
|
||||
The window created by the device can be controlled through the
|
||||
following interactive commands.
|
||||
|
||||
@table @key
|
||||
@item q, ESC
|
||||
Quit the device immediately.
|
||||
@end table
|
||||
|
||||
@subsection Examples
|
||||
|
||||
The following command shows the @command{ffmpeg} output is an
|
||||
SDL window, forcing its size to the qcif format:
|
||||
@example
|
||||
ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
|
||||
@end example
|
||||
|
||||
@section sndio
|
||||
|
||||
sndio audio output device.
|
||||
|
|
|
|||
|
|
@ -158,7 +158,7 @@ You will need the following prerequisites:
|
|||
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
|
||||
the Visual Studio or Intel Compiler command prompt.
|
||||
|
||||
Place @code{nasm.exe} somewhere in your @code{PATH}.
|
||||
Place @code{yasm.exe} somewhere in your @code{PATH}.
|
||||
|
||||
Next, make sure any other headers and libs you want to use, such as zlib, are
|
||||
located in a spot that the compiler can see. Do so by modifying the @code{LIB}
|
||||
|
|
@ -301,7 +301,7 @@ These library packages are only available from
|
|||
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
|
||||
|
||||
@example
|
||||
libSDL-devel, libgsm-devel, libmp3lame-devel,
|
||||
yasm, libSDL-devel, libgsm-devel, libmp3lame-devel,
|
||||
speex-devel, libtheora-devel, libxvidcore-devel
|
||||
@end example
|
||||
|
||||
|
|
|
|||
|
|
@ -71,7 +71,7 @@ client may also set a user/password for authentication. The default for both
|
|||
fields is "guest". Name of virtual host on broker can be set with vhost. The
|
||||
default value is "/".
|
||||
|
||||
Multiple subscribers may stream from the broker using the command:
|
||||
Multiple subscribers may stream from the broker using the command:
|
||||
@example
|
||||
ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost]
|
||||
@end example
|
||||
|
|
@ -607,7 +607,7 @@ The resource requested by a client, when the experimental HTTP server is in use.
|
|||
The HTTP code returned to the client, when the experimental HTTP server is in use.
|
||||
|
||||
@item short_seek_size
|
||||
Set the threshold, in bytes, for when a readahead should be preferred over a seek and
|
||||
Set the threshold, in bytes, for when a readahead should be preferred over a seek and
|
||||
new HTTP request. This is useful, for example, to make sure the same connection
|
||||
is used for reading large video packets with small audio packets in between.
|
||||
|
||||
|
|
@ -1150,15 +1150,10 @@ ffplay "rtmp://myserver/live/mystream live=1"
|
|||
Real-time Transport Protocol.
|
||||
|
||||
The required syntax for an RTP URL is:
|
||||
@example
|
||||
rtp://@var{hostname}[:@var{port}][?@var{options}]
|
||||
@end example
|
||||
rtp://@var{hostname}[:@var{port}][?@var{option}=@var{val}...]
|
||||
|
||||
@var{port} specifies the RTP port to use.
|
||||
|
||||
@var{options} contains a list of &-separated options of the form
|
||||
@var{key}=@var{val}.
|
||||
|
||||
The following URL options are supported:
|
||||
|
||||
@table @option
|
||||
|
|
@ -1198,15 +1193,16 @@ set to 1) or to a default remote address (if set to 0).
|
|||
@item localport=@var{n}
|
||||
Set the local RTP port to @var{n}.
|
||||
|
||||
This is a deprecated option. Instead, @option{localrtpport} should be
|
||||
used.
|
||||
|
||||
@item localaddr=@var{addr}
|
||||
Local IP address of a network interface used for sending packets or joining
|
||||
multicast groups.
|
||||
|
||||
@item timeout=@var{n}
|
||||
Set timeout (in microseconds) of socket I/O operations to @var{n}.
|
||||
|
||||
This is a deprecated option. Instead, @option{localrtpport} should be
|
||||
used.
|
||||
|
||||
@end table
|
||||
|
||||
Important notes:
|
||||
|
|
@ -2028,87 +2024,6 @@ To play back a stream from the TLS/SSL server using @command{ffplay}:
|
|||
ffplay tls://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
@section dtls
|
||||
|
||||
Datagram Transport Layer Security (DTLS)
|
||||
|
||||
The required syntax for a DTLS URL is:
|
||||
@example
|
||||
dtls://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
DTLS shares most options with TLS, but operates over UDP instead of TCP.
|
||||
The following parameters can be set via command line options
|
||||
(or in code via @code{AVOption}s):
|
||||
|
||||
@table @option
|
||||
|
||||
@item ca_file, cafile=@var{filename}
|
||||
A file containing certificate authority (CA) root certificates to treat
|
||||
as trusted. If the linked TLS library contains a default this might not
|
||||
need to be specified for verification to work, but not all libraries and
|
||||
setups have defaults built in.
|
||||
The file must be in OpenSSL PEM format.
|
||||
|
||||
@item tls_verify=@var{1|0}
|
||||
If enabled, try to verify the peer that we are communicating with.
|
||||
Note, if using OpenSSL, this currently only makes sure that the
|
||||
peer certificate is signed by one of the root certificates in the CA
|
||||
database, but it does not validate that the certificate actually
|
||||
matches the host name we are trying to connect to.
|
||||
|
||||
This is disabled by default since it requires a CA database to be
|
||||
provided by the caller in many cases.
|
||||
|
||||
@item cert_file, cert=@var{filename}
|
||||
A file containing a certificate to use in the handshake with the peer.
|
||||
(When operating as server, in listen mode, this is more often required
|
||||
by the peer, while client certificates only are mandated in certain
|
||||
setups.)
|
||||
|
||||
@item key_file, key=@var{filename}
|
||||
A file containing the private key for the certificate.
|
||||
|
||||
@item cert_pem=@var{string}
|
||||
Certificate PEM string
|
||||
|
||||
@item key_pem=@var{string}
|
||||
Private key PEM string
|
||||
|
||||
@item listen=@var{1|0}
|
||||
If enabled, listen for connections on the provided port, and assume
|
||||
the server role in the handshake instead of the client role.
|
||||
|
||||
@item mtu=@var{size}
|
||||
Set the Maximum Transmission Unit (MTU) for DTLS packets.
|
||||
|
||||
@item use_srtp=@var{1|0}
|
||||
Enable the use_srtp DTLS extension.
|
||||
This is used in WebRTC applications to establish SRTP encryption keys
|
||||
through the DTLS handshake. Default is disabled.
|
||||
|
||||
@item external_sock=@var{1|0}
|
||||
Use an external socket instead of creating a new one.
|
||||
This option only makes sense to pass when interacting with the code via
|
||||
API, enabling this from CLI will cause immediate failure.
|
||||
Default is disabled.
|
||||
|
||||
@end table
|
||||
|
||||
Example command lines:
|
||||
|
||||
To create a DTLS server:
|
||||
|
||||
@example
|
||||
ffmpeg -listen 1 -i dtls://@var{hostname}:@var{port} @var{output}
|
||||
@end example
|
||||
|
||||
To create a DTLS client and send data to server:
|
||||
|
||||
@example
|
||||
ffmpeg -i @var{input} -f @var{format} dtls://@var{hostname}:@var{port}
|
||||
@end example
|
||||
|
||||
@section udp
|
||||
|
||||
User Datagram Protocol.
|
||||
|
|
|
|||
|
|
@ -96,9 +96,6 @@ If value is set to @code{1}, indicates source is full range. Default value is
|
|||
If value is set to @code{1}, enable full range for destination. Default value
|
||||
is @code{0}, which enables limited range.
|
||||
|
||||
@item gamma @var{(boolean)}
|
||||
If value is set to @code{1}, enable gamma correct scaling. Default value is @code{0}.
|
||||
|
||||
@anchor{sws_params}
|
||||
@item param0, param1
|
||||
Set scaling algorithm parameters. The specified values are specific of
|
||||
|
|
|
|||
2
doc/style.min.css
vendored
2
doc/style.min.css
vendored
File diff suppressed because one or more lines are too long
|
|
@ -1,344 +0,0 @@
|
|||
New swscale design to change everything (tm)
|
||||
============================================
|
||||
|
||||
SwsGraph
|
||||
--------
|
||||
|
||||
The entry point to the new architecture, SwsGraph is what coordinates
|
||||
multiple "passes". These can include cascaded scaling passes, error diffusion
|
||||
dithering, and so on. Or we could have separate passes for the vertical and
|
||||
horizontal scaling. In between each SwsPass lies a fully allocated image buffer.
|
||||
Graph passes may have different levels of threading, e.g. we can have a single
|
||||
threaded error diffusion pass following a multi-threaded scaling pass.
|
||||
|
||||
SwsGraph is internally recreated whenever the image format, dimensions or
|
||||
settings change in any way. sws_scale_frame() is itself just a light-weight
|
||||
wrapper that runs ff_sws_graph_create() whenever the format changes, splits
|
||||
interlaced images into separate fields, and calls ff_sws_graph_run() on each.
|
||||
|
||||
From the point of view of SwsGraph itself, all inputs are progressive.
|
||||
|
||||
SwsOp / SwsOpList
|
||||
-----------------
|
||||
|
||||
This is the newly introduced abstraction layer between the high-level format
|
||||
handling logic and the low-level backing implementation. Each SwsOp is designed
|
||||
to be as small and atomic as possible, with the possible exception of the
|
||||
read / write operations due to their numerous variants.
|
||||
|
||||
The basic idea is to split logic between three major components:
|
||||
|
||||
1. The high-level format "business logic", which generates in a very
|
||||
naive way a sequence of operations guaranteed to get you from point A
|
||||
to point B. This logic is written with correctness in mind only, and
|
||||
ignoring any performance concerns or low-level implementation decisions.
|
||||
Semantically, everything is always decoded from the input format to
|
||||
normalized (real valued) RGB, and then encoded back to output format.
|
||||
|
||||
This code lives in libswscale/format.c
|
||||
|
||||
2. The optimizer. This is where the "magic" happens, so to speak. The
|
||||
optimizer's job is to take the abstract sequence of operations
|
||||
produced by the high-level format analysis code and incrementally
|
||||
optimize it. Each optimization step is designed to be minute and provably
|
||||
lossless, or otherwise guarded behind the BITEXACT flag. This ensures that
|
||||
the resulting output is always identical, no matter how many layers of
|
||||
optimization we add.
|
||||
|
||||
This code lives in libswscale/ops.c
|
||||
|
||||
3. The compiler. Once we have a sequence of operations as output by the
|
||||
optimizer, we "compile" this down to a callable function. This is then
|
||||
applied by the dispatch wrapper by striping it over the input image.
|
||||
|
||||
See libswscale/ops_backend.c for the reference backend, or
|
||||
libswscale/x86/ops.c for a more complex SIMD example.
|
||||
|
||||
This overall approach has a considerable number of benefits:
|
||||
|
||||
1. It allows us to verify correctness of logic and spot semantic errors at a
|
||||
very high level, by simply looking at the sequence of operations (available
|
||||
by default at debug / verbose log level), without having to dig through the
|
||||
multiple levels of complicated, interwoven format handling code that is
|
||||
legacy swscale.
|
||||
|
||||
2. Because most of the brains live inside the powerful optimizer, we get
|
||||
fast paths "for free" for any suitable format conversion, rather than having
|
||||
to enumerate them one by one. SIMD code itself can be written in a very
|
||||
general way and does not need to be tied to specific pixel formats - subsequent
|
||||
low-level implementations can be strung together without much overhead.
|
||||
|
||||
3. We can in the future, with relative ease, compile these operations
|
||||
down to SPIR-V (or even LLVM IR) and generate efficient GPU or
|
||||
target-machine specific implementations. This also opens the window for
|
||||
adding hardware frame support to libswscale, and even transparently using
|
||||
GPU acceleration for CPU frames.
|
||||
|
||||
4. Platform-specific SIMD can be reduced down to a comparatively small set of
|
||||
optimized routines, while still providing 100% coverage for all possible
|
||||
pixel formats and operations. (As of writing, the x86 example backend has
|
||||
about 60 unique implementations, of which 20 are trivial swizzles, 10 are
|
||||
read/write ops, 10 are pixel type conversions and the remaining 20 are the
|
||||
various logic/arithmetic ops).
|
||||
|
||||
5. Backends hide behind a layer of abstraction offering them a considerable
|
||||
deal of flexibility in how they want to implement their operations. For
|
||||
example, the x86 backend has a dedicated function for compiling compatible
|
||||
operations down to a single in-place pshufb instruction.
|
||||
|
||||
Platform specific low level data is self-contained within its own setup()
|
||||
function and private data structure, eliminating all reads into SwsContext
|
||||
or the possibility of conflicts between platforms.
|
||||
|
||||
6. We can compute an exact reference result for each operation with fixed
|
||||
precision (ff_sws_op_apply_q), and use that to e.g. measure the amount of
|
||||
error introduced by dithering, or even catch bugs in the reference C
|
||||
implementation. (In theory - currently checkasm just compares against C)
|
||||
|
||||
Examples of SwsOp in action
|
||||
---------------------------
|
||||
|
||||
For illustration, here is the sequence of operations currently generated by
|
||||
my prototype, for a conversion from RGB24 to YUV444P:
|
||||
|
||||
Unoptimized operation list:
|
||||
[ u8 .... -> ....] SWS_OP_READ : 3 elem(s) packed >> 0
|
||||
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
|
||||
[ u8 .... -> ....] SWS_OP_RSHIFT : >> 0
|
||||
[ u8 .... -> ....] SWS_OP_CLEAR : {_ _ _ 0}
|
||||
[ u8 .... -> ....] SWS_OP_CONVERT : u8 -> f32
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : diag3+alpha [[1/255 0 0 0 0] [0 1/255 0 0 0] [0 0 1/255 0 0] [0 0 0 1 1]]
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : matrix3 [[0.299000 0.587000 0.114000 0 0] [-0.168736 -0.331264 1/2 0 0] [1/2 -0.418688 -57/701 0 0] [0 0 0 1 0]]
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : diag3+off3 [[219 0 0 0 16] [0 224 0 0 128] [0 0 224 0 128] [0 0 0 1 0]]
|
||||
[f32 .... -> ....] SWS_OP_DITHER : 16x16 matrix
|
||||
[f32 .... -> ....] SWS_OP_MAX : {0 0 0 0} <= x
|
||||
[f32 .... -> ....] SWS_OP_MIN : x <= {255 255 255 _}
|
||||
[f32 .... -> ....] SWS_OP_CONVERT : f32 -> u8
|
||||
[ u8 .... -> ....] SWS_OP_LSHIFT : << 0
|
||||
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
|
||||
[ u8 .... -> ....] SWS_OP_WRITE : 3 elem(s) planar >> 0
|
||||
|
||||
This is optimized into the following sequence:
|
||||
|
||||
Optimized operation list:
|
||||
[ u8 XXXX -> +++X] SWS_OP_READ : 3 elem(s) packed >> 0
|
||||
[ u8 ...X -> +++X] SWS_OP_CONVERT : u8 -> f32
|
||||
[f32 ...X -> ...X] SWS_OP_LINEAR : matrix3+off3 [[0.256788 0.504129 0.097906 0 16] [-0.148223 -0.290993 112/255 0 128] [112/255 -0.367788 -0.071427 0 128] [0 0 0 1 0]]
|
||||
[f32 ...X -> ...X] SWS_OP_DITHER : 16x16 matrix
|
||||
[f32 ...X -> +++X] SWS_OP_CONVERT : f32 -> u8
|
||||
[ u8 ...X -> +++X] SWS_OP_WRITE : 3 elem(s) planar >> 0
|
||||
(X = unused, + = exact, 0 = zero)
|
||||
|
||||
The extra metadata on the left of the operation list is just a dump of the
|
||||
internal state used by the optimizer during optimization. It keeps track of
|
||||
knowledge about the pixel values, such as their value range, whether or not
|
||||
they're exact integers, and so on.
|
||||
|
||||
In this example, you can see that the input values are exact (except for
|
||||
the alpha channel, which is undefined), until the first SWS_OP_LINEAR
|
||||
multiplies them by a noninteger constant. They regain their exact integer
|
||||
status only after the (truncating) conversion to U8 in the output step.
|
||||
|
||||
Example of more aggressive optimization
|
||||
---------------------------------------
|
||||
|
||||
Conversion pass for gray -> rgb48:
|
||||
Unoptimized operation list:
|
||||
[ u8 .... -> ....] SWS_OP_READ : 1 elem(s) planar >> 0
|
||||
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
|
||||
[ u8 .... -> ....] SWS_OP_RSHIFT : >> 0
|
||||
[ u8 .... -> ....] SWS_OP_CLEAR : {_ 0 0 0}
|
||||
[ u8 .... -> ....] SWS_OP_CONVERT : u8 -> f32
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : luma+alpha [[1/255 0 0 0 0] [0 1 0 0 0] [0 0 1 0 0] [0 0 0 1 1]]
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : matrix3 [[1 0 701/500 0 0] [1 -0.344136 -0.714136 0 0] [1 443/250 0 0 0] [0 0 0 1 0]]
|
||||
[f32 .... -> ....] SWS_OP_LINEAR : diag3 [[65535 0 0 0 0] [0 65535 0 0 0] [0 0 65535 0 0] [0 0 0 1 0]]
|
||||
[f32 .... -> ....] SWS_OP_MAX : {0 0 0 0} <= x
|
||||
[f32 .... -> ....] SWS_OP_MIN : x <= {65535 65535 65535 _}
|
||||
[f32 .... -> ....] SWS_OP_CONVERT : f32 -> u16
|
||||
[u16 .... -> ....] SWS_OP_LSHIFT : << 0
|
||||
[u16 .... -> ....] SWS_OP_SWIZZLE : 0123
|
||||
[u16 .... -> ....] SWS_OP_WRITE : 3 elem(s) packed >> 0
|
||||
|
||||
Optimized operation list:
|
||||
[ u8 XXXX -> +XXX] SWS_OP_READ : 1 elem(s) planar >> 0
|
||||
[ u8 .XXX -> +XXX] SWS_OP_CONVERT : u8 -> u16 (expand)
|
||||
[u16 .XXX -> +++X] SWS_OP_SWIZZLE : 0003
|
||||
[u16 ...X -> +++X] SWS_OP_WRITE : 3 elem(s) packed >> 0
|
||||
(X = unused, + = exact, 0 = zero)
|
||||
|
||||
Here, the optimizer has managed to eliminate all of the unnecessary linear
|
||||
operations on previously zero'd values, turn the resulting column matrix into
|
||||
a swizzle operation, avoid the unnecessary dither (and round trip via float)
|
||||
because the pixel values are guaranteed to be bit exact, and finally, turns
|
||||
the multiplication by 65535 / 255 = 257 into a simple integer expand operation.
|
||||
|
||||
As a final bonus, the x86 backend further optimizes this into a 12-byte shuffle:
|
||||
pshufb = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1}
|
||||
|
||||
time=208 us, ref=4212 us, speedup=20.236x faster (single thread)
|
||||
time=57 us, ref=472 us, speedup=8.160x faster (multi thread)
|
||||
|
||||
Compiler and underlying implementation layer (SwsOpChain)
|
||||
---------------------------------------------------------
|
||||
|
||||
While the backend API is flexible enough to permit more exotic implementations
|
||||
(e.g. using JIT code generation), we establish a common set of helpers for use
|
||||
in "traditional" SIMD implementations.
|
||||
|
||||
The basic idea is to have one "kernel" (or implementation) per operation,
|
||||
and then just chain a list of these kernels together as separate function
|
||||
calls. For best performance, we want to keep data in vector registers in
|
||||
between function calls using a custom calling convention, thus avoiding any
|
||||
unnecessary memory accesses. Additionally, we want the per-kernel overhead to
|
||||
be as low as possible, with each kernel ideally just jumping directly into
|
||||
the next kernel.
|
||||
|
||||
As a result, we arrive at a design where we first divide the image into small
|
||||
chunks, or "blocks", and then dispatch the "chain" of kernels on each chunk in
|
||||
sequence. Each kernel processes a fixed number of pixels, with the overall
|
||||
entry point taking care of looping. Remaining pixels (the "tail") are handled
|
||||
generically by the backend-invariant dispatch code (located in ops.c), using a
|
||||
partial memcpy into a suitably sized temporary buffer.
|
||||
|
||||
To minimize the per-kernel function call overhead, we use a "continuation
|
||||
passing style" for chaining kernels. Each operation computes its result and
|
||||
then directly calls the next operation in the sequence, with the appropriate
|
||||
internal function signature.
|
||||
|
||||
The C reference backend reads data into the stack and then passes the array
|
||||
pointers to the next continuation as regular function arguments:
|
||||
|
||||
void process(GlobalContext *ctx, OpContext *op,
|
||||
block_t x, block_t y, block_t z, block_t w)
|
||||
{
|
||||
for (int i = 0; i < SWS_BLOCK_SIZE; i++)
|
||||
// do something with x[i], y[i], z[i], w[i]
|
||||
|
||||
op->next(ctx, &op[1], x, y, z, w);
|
||||
}
|
||||
|
||||
With type conversions pushing the new data onto the stack as well:
|
||||
|
||||
void convert8to16(GlobalContext *ctx, OpContext *op,
|
||||
block_t x, block_t y, block_t z, block_t w)
|
||||
{
|
||||
/* Pseudo-code */
|
||||
u16block_t x16 = (u16block_t) x;
|
||||
u16block_t y16 = (u16block_t) y;
|
||||
u16block_t z16 = (u16block_t) z;
|
||||
u16block_t w16 = (u16block_t) w;
|
||||
|
||||
op->next(ctx, &op[1], x16, y16, z16, w16);
|
||||
}
|
||||
|
||||
By contrast, the x86 backend always keeps the X/Y/Z/W values pinned in specific
|
||||
vector registers (ymm0-ymm3 for the lower half, and ymm4-ymm7 for the second
|
||||
half).
|
||||
|
||||
Each kernel additionally has access to a 32 byte per-op context storing the
|
||||
pointer to the next kernel plus 16 bytes of arbitrary private data. This is
|
||||
used during construction of the function chain to place things like small
|
||||
constants.
|
||||
|
||||
In assembly, the per-kernel overhead looks like this:
|
||||
|
||||
load $tmp, $arg1
|
||||
...
|
||||
add $arg1, 32
|
||||
jump $tmp
|
||||
|
||||
This design gives vastly better performance than the alternative of returning
|
||||
out to a central loop or "trampoline". This is partly because the order of
|
||||
kernels within a chain is always the same, so the branch predictor can easily
|
||||
remember the target address of each "jump" instruction.
|
||||
|
||||
The only way to realistically improve on this design would be to directly
|
||||
stitch the kernel body together using runtime code generation.
|
||||
|
||||
Future considerations and limitations
|
||||
-------------------------------------
|
||||
|
||||
My current prototype has a number of severe limitations and opportunities
|
||||
for improvements:
|
||||
|
||||
1. It does not handle scaling at all. I am not yet entirely sure on how I want
|
||||
to handle scaling; this includes handling of subsampled content. I have a
|
||||
number of vague ideas in my head, but nothing where I can say with certainty
|
||||
that it will work out well.
|
||||
|
||||
It's possible that we won't come up with a perfect solution here, and will
|
||||
need to decide on which set of compromises we are comfortable accepting:
|
||||
|
||||
1. Do we need the ability to scale YUV -> YUV by handling luma and chroma
|
||||
independently? When downscaling 100x100 4:2:0 to 50x50 4:4:4, should we
|
||||
support the option of reusing the chroma plane directly (even though
|
||||
this would introduce a subpixel shift for typical chroma siting)?
|
||||
|
||||
Looking towards zimg, I am also thinking that we probably also want to do
|
||||
scaling on floating point values, since this is best for both performance
|
||||
and accuracy, especially given that we need to go up to 32-bit intermediates
|
||||
during scaling anyway.
|
||||
|
||||
So far, the most promising approach seems to be to handle subsampled
|
||||
input/output as a dedicated read/write operation type; perhaps even with a
|
||||
fixed/static subsampling kernel. To avoid compromising on performance when
|
||||
chroma resampling is not necessary, the optimizer could then relax the
|
||||
pipeline to use non-interpolating read/writes when all intermediate
|
||||
operations are component-independent.
|
||||
|
||||
2. Since each operation is conceptually defined on 4-component pixels, we end
|
||||
up defining a lot of variants of each implementation for each possible
|
||||
*subset*. For example, we have four different implementations for
|
||||
SWS_OP_SCALE in my current templates:
|
||||
- op_scale_1000
|
||||
- op_scale_1001
|
||||
- op_scale_1110
|
||||
- op_scale_1111
|
||||
|
||||
This reflects the four different arrangements of pixel components that are
|
||||
typically present (or absent). While best for performance, it does turn into
|
||||
a bit of a chore when implementing these kernels.
|
||||
|
||||
The only real alternative would be to either branch inside the kernel (bad),
|
||||
or to use separate kernels for each individual component and chain them all
|
||||
together. I have not yet tested whether the latter approach would be faster
|
||||
after the latest round of refactors to the kernel glue code.
|
||||
|
||||
3. I do not yet have any support for LUTs. But when I add them, something we
|
||||
could do is have the optimized pass automatically "promote" a sequence of
|
||||
operations to LUTs. For example, any sequence that looks like:
|
||||
|
||||
1. [u8] SWS_OP_CONVERT -> X
|
||||
2. [X] ... // only per-component operations
|
||||
        3. [X] SWS_OP_CONVERT -> Y
|
||||
        4. [Y] SWS_OP_WRITE
|
||||
|
||||
could be replaced by a LUT with 256 entries. This is especially important
|
||||
for anything involving packed 8-bit input (e.g. rgb8, rgb4_byte).
|
||||
|
||||
We also definitely want to hook this up to the existing CMS code for
|
||||
transformations between different primaries.
|
||||
|
||||
4. Because we rely on AVRational math to generate the coefficients for
|
||||
operations, we need to be able to represent all pixel values as an
|
||||
AVRational. However, this presents a challenge for 32-bit formats (e.g.
|
||||
GRAY32, RGBA128), because their size exceeds INT_MAX, which is the maximum
|
||||
value representable by an AVRational.
|
||||
|
||||
It's possible we may want to introduce an AVRational64 for this, or
|
||||
perhaps more flexibly, extend AVRational to an AVFloating type which is
|
||||
represented as { AVRational n; int exp; }, representing n/d * 2^exp. This
|
||||
would preserve our ability to represent all pixel values exactly, while
|
||||
opening up the range arbitrarily.
|
||||
|
||||
5. Is there ever a situation where the use of floats introduces the risk of
|
||||
non bit-exact output? For this reason, and possible performance advantages,
|
||||
we may want to explore the use of a fixed-point 16 bit path as an alternative
|
||||
to the floating point math.
|
||||
|
||||
So far, I have managed to avoid any bit exactness issues inside the x86
|
||||
backend by ensuring that the order of linear operations is identical
|
||||
between the C backend and the x86 backend, but this may not be practical
|
||||
to guarantee on all backends. The x86 float code is also dramatically
|
||||
faster than the old fixed point code, so I'm tentatively optimistic about
|
||||
the lack of a need for a fixed point path.
|
||||
0
doc/texi2pod.pl
Executable file → Normal file
0
doc/texi2pod.pl
Executable file → Normal file
0
doc/texidep.pl
Executable file → Normal file
0
doc/texidep.pl
Executable file → Normal file
|
|
@ -44,3 +44,4 @@ a+b*c;
|
|||
here the reader knows that a,b,c are meant to be signed integers but for C
|
||||
standard compliance / to avoid undefined behavior they are stored in unsigned
|
||||
ints.
|
||||
|
||||
|
|
|
|||
|
|
@ -731,12 +731,8 @@ FL+FR+FC+LFE+BL+BR+SL+SR+TFL+TFR+TBL+TBR
|
|||
FL+FR+FC+LFE+BL+BR+SL+SR+TFL+TFR+TBC+LFE2
|
||||
@item 9.1.4
|
||||
FL+FR+FC+LFE+BL+BR+FLC+FRC+SL+SR+TFL+TFR+TBL+TBR
|
||||
@item 9.1.6
|
||||
FL+FR+FC+LFE+BL+BR+FLC+FRC+SL+SR+TFL+TFR+TBL+TBR+TSL+TSR
|
||||
@item hexadecagonal
|
||||
FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
|
||||
@item binaural
|
||||
BIL+BIR
|
||||
@item downmix
|
||||
DL+DR
|
||||
@item 22.2
|
||||
|
|
|
|||
|
|
@ -3,8 +3,6 @@ OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
|
|||
OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes)
|
||||
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
|
||||
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
|
||||
OBJS-$(HAVE_SVE) += $(SVE-OBJS) $(SVE-OBJS-yes)
|
||||
OBJS-$(HAVE_SVE2) += $(SVE2-OBJS) $(SVE2-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
|
||||
OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
|
||||
|
|
@ -21,7 +19,5 @@ OBJS-$(HAVE_RV) += $(RV-OBJS) $(RV-OBJS-yes)
|
|||
OBJS-$(HAVE_RVV) += $(RVV-OBJS) $(RVV-OBJS-yes)
|
||||
OBJS-$(HAVE_RV_ZVBB) += $(RVVB-OBJS) $(RVVB-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_SIMD128) += $(SIMD128-OBJS) $(SIMD128-OBJS-yes)
|
||||
|
||||
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
|
||||
OBJS-$(HAVE_X86ASM) += $(X86ASM-OBJS) $(X86ASM-OBJS-yes)
|
||||
|
|
|
|||
|
|
@ -38,10 +38,8 @@ int main(int argc, char **argv)
|
|||
return -1;
|
||||
|
||||
output = fopen(argv[2], "wb");
|
||||
if (!output) {
|
||||
fclose(input);
|
||||
if (!output)
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (argc == 4) {
|
||||
name = argv[3];
|
||||
|
|
@ -69,10 +67,8 @@ int main(int argc, char **argv)
|
|||
|
||||
fclose(output);
|
||||
|
||||
if (ferror(input) || !feof(input)) {
|
||||
fclose(input);
|
||||
if (ferror(input) || !feof(input))
|
||||
return -1;
|
||||
}
|
||||
|
||||
fclose(input);
|
||||
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ BIN2C = $(BIN2CEXE)
|
|||
ifndef V
|
||||
Q = @
|
||||
ECHO = printf "$(1)\t%s\n" $(2)
|
||||
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C METALCC METALLIB
|
||||
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C
|
||||
SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM
|
||||
|
||||
MSG = $@
|
||||
|
|
@ -115,12 +115,6 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
|
|||
$(BIN2CEXE): ffbuild/bin2c_host.o
|
||||
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS)
|
||||
|
||||
RUN_BIN2C = $(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
|
||||
RUN_GZIP = $(M)gzip -nc9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@
|
||||
RUN_MINIFY = $(M)sed 's!/\\*.*\\*/!!g' $< | tr '\n' ' ' | tr -s ' ' | sed 's/^ //; s/ $$//' > $@
|
||||
%.gz: TAG = GZIP
|
||||
%.min: TAG = MINIFY
|
||||
|
||||
%.metal.air: %.metal
|
||||
$(METALCC) $< -o $@
|
||||
|
||||
|
|
@ -128,46 +122,21 @@ RUN_MINIFY = $(M)sed 's!/\\*.*\\*/!!g' $< | tr '\n' ' ' | tr -s ' ' | sed 's/^ /
|
|||
$(METALLIB) --split-module-without-linking $< -o $@
|
||||
|
||||
%.metallib.c: %.metallib $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
$(BIN2C) $< $@ $(subst .,_,$(basename $(notdir $@)))
|
||||
|
||||
%.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h
|
||||
$(COMPILE_NVCC)
|
||||
|
||||
ifdef CONFIG_PTX_COMPRESSION
|
||||
%.ptx.gz: TAG = GZIP
|
||||
%.ptx.gz: %.ptx
|
||||
$(RUN_GZIP)
|
||||
$(M)gzip -nc9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@
|
||||
|
||||
%.ptx.c: %.ptx.gz $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
|
||||
else
|
||||
%.ptx.c: %.ptx $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
endif
|
||||
|
||||
%.css.min: %.css
|
||||
$(RUN_MINIFY)
|
||||
|
||||
ifdef CONFIG_RESOURCE_COMPRESSION
|
||||
|
||||
%.css.min.gz: %.css.min
|
||||
$(RUN_GZIP)
|
||||
|
||||
%.css.c: %.css.min.gz $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
|
||||
%.html.gz: %.html
|
||||
$(RUN_GZIP)
|
||||
|
||||
%.html.c: %.html.gz $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
|
||||
else # NO COMPRESSION
|
||||
|
||||
%.css.c: %.css.min $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
|
||||
%.html.c: %.html $(BIN2CEXE)
|
||||
$(RUN_BIN2C)
|
||||
$(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
|
||||
endif
|
||||
|
||||
clean::
|
||||
|
|
@ -190,6 +159,7 @@ endif
|
|||
include $(SRC_PATH)/ffbuild/arch.mak
|
||||
|
||||
OBJS += $(OBJS-yes)
|
||||
SLIBOBJS += $(SLIBOBJS-yes)
|
||||
SHLIBOBJS += $(SHLIBOBJS-yes)
|
||||
STLIBOBJS += $(STLIBOBJS-yes)
|
||||
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
|
||||
|
|
@ -199,6 +169,7 @@ LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
|
|||
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=EXTRALIBS-%),$($(lib))) $(EXTRALIBS)
|
||||
|
||||
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
|
||||
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
|
||||
SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%))
|
||||
STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%))
|
||||
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
|
||||
|
|
@ -223,6 +194,7 @@ PTXOBJS = $(filter %.ptx.o,$(OBJS))
|
|||
$(HOBJS): CCFLAGS += $(CFLAGS_HEADERS)
|
||||
checkheaders: $(HOBJS)
|
||||
.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=.gz) $(PTXOBJS:.o=)
|
||||
|
||||
alltools: $(TOOLS)
|
||||
|
||||
$(HOSTOBJS): %.o: %.c
|
||||
|
|
@ -234,14 +206,15 @@ $(HOSTPROGS): %$(HOSTEXESUF): %.o
|
|||
$(OBJS): | $(sort $(dir $(OBJS)))
|
||||
$(HOBJS): | $(sort $(dir $(HOBJS)))
|
||||
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
|
||||
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
|
||||
$(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS)))
|
||||
$(STLIBOBJS): | $(sort $(dir $(STLIBOBJS)))
|
||||
$(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
|
||||
$(TOOLOBJS): | tools
|
||||
|
||||
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
|
||||
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
|
||||
|
||||
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.objs *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *.html.gz *.html.c *.css.min.gz *.css.min *.css.c *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
|
||||
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
|
||||
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
|
||||
|
||||
define RULES
|
||||
|
|
@ -251,4 +224,4 @@ endef
|
|||
|
||||
$(eval $(RULES))
|
||||
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
|
||||
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ ifdef CONFIG_SHARED
|
|||
# for purely shared builds.
|
||||
# Test programs are always statically linked against their library
|
||||
# to be able to access their library's internals, even with shared builds.
|
||||
# Yet linking against dependent libraries still uses dynamic linking.
|
||||
# Yet linking against dependend libraries still uses dynamic linking.
|
||||
# This means that we are in the scenario described above.
|
||||
# In case only static libs are used, the linker will only use
|
||||
# one of these copies; this depends on the duplicated object files
|
||||
|
|
@ -35,14 +35,8 @@ OBJS += $(SHLIBOBJS)
|
|||
endif
|
||||
$(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS)
|
||||
$(RM) $@
|
||||
ifeq ($(RESPONSE_FILES),yes)
|
||||
$(Q)echo $^ > $@.objs
|
||||
$(AR) $(ARFLAGS) $(AR_O) @$@.objs
|
||||
else
|
||||
$(AR) $(ARFLAGS) $(AR_O) $^
|
||||
endif
|
||||
$(RANLIB) $@
|
||||
-$(RM) $@.objs
|
||||
|
||||
install-headers: install-lib$(NAME)-headers install-lib$(NAME)-pkgconfig
|
||||
|
||||
|
|
@ -70,16 +64,10 @@ $(SUBDIR)lib$(NAME).ver: $(SUBDIR)lib$(NAME).v $(OBJS)
|
|||
$(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR)
|
||||
$(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME)
|
||||
|
||||
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SUBDIR)lib$(NAME).ver
|
||||
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
|
||||
$(SLIB_CREATE_DEF_CMD)
|
||||
ifeq ($(RESPONSE_FILES),yes)
|
||||
$(Q)echo $$(filter %.o,$$^) > $$@.objs
|
||||
$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) @$$@.objs $(FFEXTRALIBS)
|
||||
else
|
||||
$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS)
|
||||
endif
|
||||
$(SLIB_EXTRA_CMD)
|
||||
-$(RM) $$@.objs
|
||||
|
||||
ifdef SUBDIR
|
||||
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(DEP_LIBS)
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ includedir=${source_path}
|
|||
prefix=
|
||||
exec_prefix=
|
||||
libdir=\${pcfiledir}/../../../$name
|
||||
includedir=${includedir}
|
||||
includedir=${source_path}
|
||||
|
||||
Name: $fullname
|
||||
Description: $comment
|
||||
|
|
|
|||
|
|
@ -9,8 +9,6 @@ AVBASENAMES = ffmpeg ffplay ffprobe
|
|||
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
|
||||
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
|
||||
|
||||
include $(SRC_PATH)/fftools/resources/Makefile
|
||||
|
||||
OBJS-ffmpeg += \
|
||||
fftools/ffmpeg_dec.o \
|
||||
fftools/ffmpeg_demux.o \
|
||||
|
|
@ -21,35 +19,9 @@ OBJS-ffmpeg += \
|
|||
fftools/ffmpeg_mux_init.o \
|
||||
fftools/ffmpeg_opt.o \
|
||||
fftools/ffmpeg_sched.o \
|
||||
fftools/graph/graphprint.o \
|
||||
fftools/objpool.o \
|
||||
fftools/sync_queue.o \
|
||||
fftools/thread_queue.o \
|
||||
fftools/textformat/avtextformat.o \
|
||||
fftools/textformat/tf_compact.o \
|
||||
fftools/textformat/tf_default.o \
|
||||
fftools/textformat/tf_flat.o \
|
||||
fftools/textformat/tf_ini.o \
|
||||
fftools/textformat/tf_json.o \
|
||||
fftools/textformat/tf_mermaid.o \
|
||||
fftools/textformat/tf_xml.o \
|
||||
fftools/textformat/tw_avio.o \
|
||||
fftools/textformat/tw_buffer.o \
|
||||
fftools/textformat/tw_stdout.o \
|
||||
$(OBJS-resman) \
|
||||
$(RESOBJS) \
|
||||
|
||||
OBJS-ffprobe += \
|
||||
fftools/textformat/avtextformat.o \
|
||||
fftools/textformat/tf_compact.o \
|
||||
fftools/textformat/tf_default.o \
|
||||
fftools/textformat/tf_flat.o \
|
||||
fftools/textformat/tf_ini.o \
|
||||
fftools/textformat/tf_json.o \
|
||||
fftools/textformat/tf_mermaid.o \
|
||||
fftools/textformat/tf_xml.o \
|
||||
fftools/textformat/tw_avio.o \
|
||||
fftools/textformat/tw_buffer.o \
|
||||
fftools/textformat/tw_stdout.o \
|
||||
|
||||
OBJS-ffplay += fftools/ffplay_renderer.o
|
||||
|
||||
|
|
@ -59,7 +31,7 @@ ifdef HAVE_GNU_WINDRES
|
|||
OBJS-$(1) += fftools/fftoolsres.o
|
||||
endif
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
|
||||
$$(OBJS-$(1)): | fftools fftools/textformat fftools/resources fftools/graph
|
||||
$$(OBJS-$(1)): | fftools
|
||||
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
|
||||
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(EXTRALIBS-$(1))
|
||||
|
|
@ -72,9 +44,6 @@ all: $(AVPROGS)
|
|||
|
||||
fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools
|
||||
OUTDIRS += fftools
|
||||
OUTDIRS += fftools/textformat
|
||||
OUTDIRS += fftools/resources
|
||||
OUTDIRS += fftools/graph
|
||||
|
||||
ifdef AVPROGS
|
||||
install: install-progs install-data
|
||||
|
|
@ -93,4 +62,4 @@ uninstall-progs:
|
|||
$(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))
|
||||
|
||||
clean::
|
||||
$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G) $(CLEANSUFFIXES:%=fftools/%) $(CLEANSUFFIXES:%=fftools/graph/%) $(CLEANSUFFIXES:%=fftools/textformat/%)
|
||||
$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G) $(CLEANSUFFIXES:%=fftools/%)
|
||||
|
|
|
|||
|
|
@ -255,10 +255,9 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
|
|||
if (*opt == '/') {
|
||||
opt++;
|
||||
|
||||
if (!opt_has_arg(po)) {
|
||||
if (po->type == OPT_TYPE_BOOL) {
|
||||
av_log(NULL, AV_LOG_FATAL,
|
||||
"Requested to load an argument from file for an option '%s'"
|
||||
" which does not take an argument\n",
|
||||
"Requested to load an argument from file for a bool option '%s'\n",
|
||||
po->name);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
|
@ -353,11 +352,9 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
|
|||
|
||||
ret = po->u.func_arg(optctx, opt, arg);
|
||||
if (ret < 0) {
|
||||
if ((strcmp(opt, "init_hw_device") != 0) || (strcmp(arg, "list") != 0)) {
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Failed to set value '%s' for option '%s': %s\n",
|
||||
arg, opt, av_err2str(ret));
|
||||
}
|
||||
av_log(NULL, AV_LOG_ERROR,
|
||||
"Failed to set value '%s' for option '%s': %s\n",
|
||||
arg, opt, av_err2str(ret));
|
||||
goto finish;
|
||||
}
|
||||
}
|
||||
|
|
@ -495,9 +492,8 @@ int locate_option(int argc, char **argv, const OptionDef *options,
|
|||
for (i = 1; i < argc; i++) {
|
||||
const char *cur_opt = argv[i];
|
||||
|
||||
if (!(cur_opt[0] == '-' && cur_opt[1]))
|
||||
if (*cur_opt++ != '-')
|
||||
continue;
|
||||
cur_opt++;
|
||||
|
||||
po = find_option(options, cur_opt);
|
||||
if (!po->name && cur_opt[0] == 'n' && cur_opt[1] == 'o')
|
||||
|
|
@ -555,12 +551,11 @@ static void check_options(const OptionDef *po)
|
|||
|
||||
void parse_loglevel(int argc, char **argv, const OptionDef *options)
|
||||
{
|
||||
int idx;
|
||||
int idx = locate_option(argc, argv, options, "loglevel");
|
||||
char *env;
|
||||
|
||||
check_options(options);
|
||||
|
||||
idx = locate_option(argc, argv, options, "loglevel");
|
||||
if (!idx)
|
||||
idx = locate_option(argc, argv, options, "v");
|
||||
if (idx && argv[idx + 1])
|
||||
|
|
@ -1471,12 +1466,9 @@ void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
|
|||
{
|
||||
void *new_elem;
|
||||
|
||||
new_elem = av_mallocz(elem_size);
|
||||
if (!new_elem)
|
||||
if (!(new_elem = av_mallocz(elem_size)) ||
|
||||
av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0)
|
||||
return NULL;
|
||||
if (av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0)
|
||||
av_freep(&new_elem);
|
||||
|
||||
return new_elem;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -319,7 +319,7 @@ typedef struct Option {
|
|||
} Option;
|
||||
|
||||
typedef struct OptionGroupDef {
|
||||
/** group name */
|
||||
/**< group name */
|
||||
const char *name;
|
||||
/**
|
||||
* Option to be used as group separator. Can be NULL for groups which
|
||||
|
|
|
|||
|
|
@ -81,7 +81,6 @@
|
|||
#include "ffmpeg.h"
|
||||
#include "ffmpeg_sched.h"
|
||||
#include "ffmpeg_utils.h"
|
||||
#include "graph/graphprint.h"
|
||||
|
||||
const char program_name[] = "ffmpeg";
|
||||
const int program_birth_year = 2000;
|
||||
|
|
@ -309,9 +308,6 @@ const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
|
|||
|
||||
static void ffmpeg_cleanup(int ret)
|
||||
{
|
||||
if ((print_graphs || print_graphs_file) && nb_output_files > 0)
|
||||
print_filtergraphs(filtergraphs, nb_filtergraphs, input_files, nb_input_files, output_files, nb_output_files);
|
||||
|
||||
if (do_benchmark) {
|
||||
int64_t maxrss = getmaxrss() / 1024;
|
||||
av_log(NULL, AV_LOG_INFO, "bench: maxrss=%"PRId64"KiB\n", maxrss);
|
||||
|
|
@ -344,9 +340,6 @@ static void ffmpeg_cleanup(int ret)
|
|||
|
||||
av_freep(&filter_nbthreads);
|
||||
|
||||
av_freep(&print_graphs_file);
|
||||
av_freep(&print_graphs_format);
|
||||
|
||||
av_freep(&input_files);
|
||||
av_freep(&output_files);
|
||||
|
||||
|
|
@ -562,7 +555,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
static int64_t last_time = -1;
|
||||
static int first_report = 1;
|
||||
uint64_t nb_frames_dup = 0, nb_frames_drop = 0;
|
||||
int mins, secs, ms, us;
|
||||
int mins, secs, us;
|
||||
int64_t hours;
|
||||
const char *hours_sign;
|
||||
int ret;
|
||||
|
|
@ -586,7 +579,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
vid = 0;
|
||||
av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
|
||||
|
||||
for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
|
||||
const float q = ost->enc ? atomic_load(&ost->quality) / (float) FF_QP2LAMBDA : -1;
|
||||
|
||||
|
|
@ -677,15 +669,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
|
|||
av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
|
||||
}
|
||||
|
||||
secs = (int)t;
|
||||
ms = (int)((t - secs) * 1000);
|
||||
mins = secs / 60;
|
||||
secs %= 60;
|
||||
hours = mins / 60;
|
||||
mins %= 60;
|
||||
|
||||
av_bprintf(&buf, " elapsed=%"PRId64":%02d:%02d.%02d", hours, mins, secs, ms / 10);
|
||||
|
||||
if (print_stats || is_last_report) {
|
||||
const char end = is_last_report ? '\n' : '\r';
|
||||
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
|
||||
|
|
@ -745,7 +728,7 @@ static void print_stream_maps(void)
|
|||
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
|
||||
|
||||
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file->index,
|
||||
ost->index, ost->enc->enc_ctx->codec->name);
|
||||
ost->index, ost->enc_ctx->codec->name);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
@ -754,9 +737,9 @@ static void print_stream_maps(void)
|
|||
ost->ist->index,
|
||||
ost->file->index,
|
||||
ost->index);
|
||||
if (ost->enc) {
|
||||
if (ost->enc_ctx) {
|
||||
const AVCodec *in_codec = ost->ist->dec;
|
||||
const AVCodec *out_codec = ost->enc->enc_ctx->codec;
|
||||
const AVCodec *out_codec = ost->enc_ctx->codec;
|
||||
const char *decoder_name = "?";
|
||||
const char *in_codec_name = "?";
|
||||
const char *encoder_name = "?";
|
||||
|
|
@ -1029,8 +1012,5 @@ finish:
|
|||
|
||||
sch_free(&sch);
|
||||
|
||||
av_log(NULL, AV_LOG_VERBOSE, "\n");
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Exiting with exit code %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -39,7 +39,6 @@
|
|||
#include "libavfilter/avfilter.h"
|
||||
|
||||
#include "libavutil/avutil.h"
|
||||
#include "libavutil/bprint.h"
|
||||
#include "libavutil/dict.h"
|
||||
#include "libavutil/eval.h"
|
||||
#include "libavutil/fifo.h"
|
||||
|
|
@ -164,7 +163,6 @@ typedef struct OptionsContext {
|
|||
int loop;
|
||||
int rate_emu;
|
||||
float readrate;
|
||||
float readrate_catchup;
|
||||
double readrate_initial_burst;
|
||||
int accurate_seek;
|
||||
int thread_queue_size;
|
||||
|
|
@ -233,7 +231,6 @@ typedef struct OptionsContext {
|
|||
SpecifierOptList filter_scripts;
|
||||
#endif
|
||||
SpecifierOptList reinit_filters;
|
||||
SpecifierOptList drop_changed;
|
||||
SpecifierOptList fix_sub_duration;
|
||||
SpecifierOptList fix_sub_duration_heartbeat;
|
||||
SpecifierOptList canvas_sizes;
|
||||
|
|
@ -264,7 +261,6 @@ enum IFilterFlags {
|
|||
IFILTER_FLAG_REINIT = (1 << 1),
|
||||
IFILTER_FLAG_CFR = (1 << 2),
|
||||
IFILTER_FLAG_CROP = (1 << 3),
|
||||
IFILTER_FLAG_DROPCHANGED = (1 << 4),
|
||||
};
|
||||
|
||||
typedef struct InputFilterOptions {
|
||||
|
|
@ -320,7 +316,7 @@ typedef struct OutputFilterOptions {
|
|||
AVDictionary *sws_opts;
|
||||
AVDictionary *swr_opts;
|
||||
|
||||
int64_t nb_threads;
|
||||
const char *nb_threads;
|
||||
|
||||
// A combination of OFilterFlags.
|
||||
unsigned flags;
|
||||
|
|
@ -332,8 +328,6 @@ typedef struct OutputFilterOptions {
|
|||
enum AVColorRange color_range;
|
||||
|
||||
enum VideoSyncMethod vsync_method;
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
|
||||
int sample_rate;
|
||||
AVChannelLayout ch_layout;
|
||||
|
|
@ -353,18 +347,6 @@ typedef struct OutputFilterOptions {
|
|||
typedef struct InputFilter {
|
||||
struct FilterGraph *graph;
|
||||
uint8_t *name;
|
||||
int index;
|
||||
|
||||
// filter data type
|
||||
enum AVMediaType type;
|
||||
|
||||
AVFilterContext *filter;
|
||||
|
||||
char *input_name;
|
||||
|
||||
/* for filters that are not yet bound to an input stream,
|
||||
* this stores the input linklabel, if any */
|
||||
uint8_t *linklabel;
|
||||
} InputFilter;
|
||||
|
||||
typedef struct OutputFilter {
|
||||
|
|
@ -372,11 +354,6 @@ typedef struct OutputFilter {
|
|||
|
||||
struct FilterGraph *graph;
|
||||
uint8_t *name;
|
||||
int index;
|
||||
|
||||
AVFilterContext *filter;
|
||||
|
||||
char *output_name;
|
||||
|
||||
/* for filters that are not yet bound to an output stream,
|
||||
* this stores the output linklabel, if any */
|
||||
|
|
@ -399,9 +376,6 @@ typedef struct FilterGraph {
|
|||
int nb_inputs;
|
||||
OutputFilter **outputs;
|
||||
int nb_outputs;
|
||||
|
||||
const char *graph_desc;
|
||||
struct AVBPrint graph_print_buf;
|
||||
} FilterGraph;
|
||||
|
||||
enum DecoderFlags {
|
||||
|
|
@ -487,6 +461,14 @@ typedef struct InputStream {
|
|||
* currently video and audio only */
|
||||
InputFilter **filters;
|
||||
int nb_filters;
|
||||
|
||||
/*
|
||||
* Output targets that do not go through lavfi, i.e. subtitles or
|
||||
* streamcopy. Those two cases are distinguished by the OutputStream
|
||||
* having an encoder or not.
|
||||
*/
|
||||
struct OutputStream **outputs;
|
||||
int nb_outputs;
|
||||
} InputStream;
|
||||
|
||||
typedef struct InputFile {
|
||||
|
|
@ -563,6 +545,13 @@ typedef struct EncStats {
|
|||
int lock_initialized;
|
||||
} EncStats;
|
||||
|
||||
extern const char *const forced_keyframes_const_names[];
|
||||
|
||||
typedef enum {
|
||||
ENCODER_FINISHED = 1,
|
||||
MUXER_FINISHED = 2,
|
||||
} OSTFinished ;
|
||||
|
||||
enum {
|
||||
KF_FORCE_SOURCE = 1,
|
||||
#if FFMPEG_OPT_FORCE_KF_SOURCE_NO_DROP
|
||||
|
|
@ -586,15 +575,7 @@ typedef struct KeyframeForceCtx {
|
|||
int dropped_keyframe;
|
||||
} KeyframeForceCtx;
|
||||
|
||||
typedef struct Encoder {
|
||||
const AVClass *class;
|
||||
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
} Encoder;
|
||||
typedef struct Encoder Encoder;
|
||||
|
||||
enum CroppingType {
|
||||
CROP_DISABLED = 0,
|
||||
|
|
@ -613,6 +594,12 @@ typedef struct OutputStream {
|
|||
|
||||
int index; /* stream index in the output file */
|
||||
|
||||
/**
|
||||
* Codec parameters for packets submitted to the muxer (i.e. before
|
||||
* bitstream filtering, if any).
|
||||
*/
|
||||
AVCodecParameters *par_in;
|
||||
|
||||
/* input stream that is the source for this output stream;
|
||||
* may be NULL for streams with no well-defined source, e.g.
|
||||
* attachments or outputs from complex filtergraphs */
|
||||
|
|
@ -621,8 +608,12 @@ typedef struct OutputStream {
|
|||
AVStream *st; /* stream in the output file */
|
||||
|
||||
Encoder *enc;
|
||||
AVCodecContext *enc_ctx;
|
||||
|
||||
/* video only */
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
int force_fps;
|
||||
#if FFMPEG_OPT_TOP
|
||||
int top_field_first;
|
||||
#endif
|
||||
|
|
@ -645,6 +636,9 @@ typedef struct OutputStream {
|
|||
/* stats */
|
||||
// number of packets send to the muxer
|
||||
atomic_uint_least64_t packets_written;
|
||||
// number of frames/samples sent to the encoder
|
||||
uint64_t frames_encoded;
|
||||
uint64_t samples_encoded;
|
||||
|
||||
/* packet quality factor */
|
||||
atomic_int quality;
|
||||
|
|
@ -737,11 +731,7 @@ extern float max_error_rate;
|
|||
|
||||
extern char *filter_nbthreads;
|
||||
extern int filter_complex_nbthreads;
|
||||
extern int filter_buffered_frames;
|
||||
extern int vstats_version;
|
||||
extern int print_graphs;
|
||||
extern char *print_graphs_file;
|
||||
extern char *print_graphs_format;
|
||||
extern int auto_conversion_filters;
|
||||
|
||||
extern const AVIOInterruptCB int_cb;
|
||||
|
|
@ -772,11 +762,10 @@ int find_codec(void *logctx, const char *name,
|
|||
int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global);
|
||||
|
||||
int filtergraph_is_simple(const FilterGraph *fg);
|
||||
int fg_create_simple(FilterGraph **pfg,
|
||||
InputStream *ist,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
|
||||
char *graph_desc,
|
||||
Scheduler *sch, unsigned sch_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
int fg_finalise_bindings(void);
|
||||
|
||||
/**
|
||||
|
|
@ -790,7 +779,7 @@ const FrameData *frame_data_c(AVFrame *frame);
|
|||
FrameData *packet_data (AVPacket *pkt);
|
||||
const FrameData *packet_data_c(AVPacket *pkt);
|
||||
|
||||
int ofilter_bind_enc(OutputFilter *ofilter,
|
||||
int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
|
||||
unsigned sched_idx_enc,
|
||||
const OutputFilterOptions *opts);
|
||||
|
||||
|
|
@ -869,7 +858,7 @@ int dec_request_view(Decoder *dec, const ViewSpecifier *vs,
|
|||
SchedulerNode *src);
|
||||
|
||||
int enc_alloc(Encoder **penc, const AVCodec *codec,
|
||||
Scheduler *sch, unsigned sch_idx, void *log_parent);
|
||||
Scheduler *sch, unsigned sch_idx);
|
||||
void enc_free(Encoder **penc);
|
||||
|
||||
int enc_open(void *opaque, const AVFrame *frame);
|
||||
|
|
@ -882,8 +871,7 @@ int enc_loopback(Encoder *enc);
|
|||
*
|
||||
* Open the muxer once all the streams have been initialized.
|
||||
*/
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost,
|
||||
const AVCodecContext *enc_ctx);
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost);
|
||||
int of_write_trailer(OutputFile *of);
|
||||
int of_open(const OptionsContext *o, const char *filename, Scheduler *sch);
|
||||
void of_free(OutputFile **pof);
|
||||
|
|
@ -895,8 +883,7 @@ int64_t of_filesize(OutputFile *of);
|
|||
int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch);
|
||||
void ifile_close(InputFile **f);
|
||||
|
||||
int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src);
|
||||
int ist_output_add(InputStream *ist, OutputStream *ost);
|
||||
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
|
||||
const ViewSpecifier *vs, InputFilterOptions *opts,
|
||||
SchedulerNode *src);
|
||||
|
|
|
|||
|
|
@ -733,12 +733,13 @@ static int packet_decode(DecoderPriv *dp, AVPacket *pkt, AVFrame *frame)
|
|||
av_log(dp, AV_LOG_ERROR, "Error submitting %s to decoder: %s\n",
|
||||
pkt ? "packet" : "EOF", av_err2str(ret));
|
||||
|
||||
if (ret == AVERROR_EOF)
|
||||
return ret;
|
||||
if (ret != AVERROR_EOF) {
|
||||
dp->dec.decode_errors++;
|
||||
if (!exit_on_error)
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
dp->dec.decode_errors++;
|
||||
if (exit_on_error)
|
||||
return ret;
|
||||
return ret;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
|
|
@ -1594,7 +1595,7 @@ static int dec_open(DecoderPriv *dp, AVDictionary **dec_opts,
|
|||
if (o->flags & DECODER_FLAG_BITEXACT)
|
||||
dp->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
|
||||
|
||||
// we apply cropping ourselves
|
||||
// we apply cropping outselves
|
||||
dp->apply_cropping = dp->dec_ctx->apply_cropping;
|
||||
dp->dec_ctx->apply_cropping = 0;
|
||||
|
||||
|
|
@ -1637,11 +1638,6 @@ static int dec_open(DecoderPriv *dp, AVDictionary **dec_opts,
|
|||
param_out->color_range = dp->dec_ctx->color_range;
|
||||
}
|
||||
|
||||
av_frame_side_data_free(¶m_out->side_data, ¶m_out->nb_side_data);
|
||||
ret = clone_side_data(¶m_out->side_data, ¶m_out->nb_side_data,
|
||||
dp->dec_ctx->decoded_side_data, dp->dec_ctx->nb_decoded_side_data, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
param_out->time_base = dp->dec_ctx->pkt_timebase;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -67,18 +67,17 @@ typedef struct DemuxStream {
|
|||
int reinit_filters;
|
||||
int autorotate;
|
||||
int apply_cropping;
|
||||
int drop_changed;
|
||||
|
||||
|
||||
int wrap_correction_done;
|
||||
int saw_first_ts;
|
||||
/// dts of the first packet read for this stream (in AV_TIME_BASE units)
|
||||
///< dts of the first packet read for this stream (in AV_TIME_BASE units)
|
||||
int64_t first_dts;
|
||||
|
||||
/* predicted dts of the next packet read for this stream or (when there are
|
||||
* several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
|
||||
int64_t next_dts;
|
||||
/// dts of the last packet read for this stream (in AV_TIME_BASE units)
|
||||
///< dts of the last packet read for this stream (in AV_TIME_BASE units)
|
||||
int64_t dts;
|
||||
|
||||
const AVCodecDescriptor *codec_desc;
|
||||
|
|
@ -95,12 +94,6 @@ typedef struct DemuxStream {
|
|||
uint64_t nb_packets;
|
||||
// combined size of all the packets read
|
||||
uint64_t data_size;
|
||||
// latest wallclock time at which packet reading resumed after a stall - used for readrate
|
||||
int64_t resume_wc;
|
||||
// timestamp of first packet sent after the latest stall - used for readrate
|
||||
int64_t resume_pts;
|
||||
// measure of how far behind packet reading is against spceified readrate
|
||||
int64_t lag;
|
||||
} DemuxStream;
|
||||
|
||||
typedef struct Demuxer {
|
||||
|
|
@ -134,7 +127,6 @@ typedef struct Demuxer {
|
|||
|
||||
float readrate;
|
||||
double readrate_initial_burst;
|
||||
float readrate_catchup;
|
||||
|
||||
Scheduler *sch;
|
||||
|
||||
|
|
@ -248,7 +240,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
|
|||
}
|
||||
} else {
|
||||
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
|
||||
av_log(ist, AV_LOG_WARNING,
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n",
|
||||
pkt->dts, ds->next_dts, pkt->stream_index);
|
||||
pkt->dts = AV_NOPTS_VALUE;
|
||||
|
|
@ -257,7 +249,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
|
|||
int64_t pkt_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q);
|
||||
delta = pkt_pts - ds->next_dts;
|
||||
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
|
||||
av_log(ist, AV_LOG_WARNING,
|
||||
av_log(NULL, AV_LOG_WARNING,
|
||||
"PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n",
|
||||
pkt->pts, ds->next_dts, pkt->stream_index);
|
||||
pkt->pts = AV_NOPTS_VALUE;
|
||||
|
|
@ -269,7 +261,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
|
|||
int64_t delta = pkt_dts - d->last_ts;
|
||||
if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
|
||||
d->ts_offset_discont -= delta;
|
||||
av_log(ist, AV_LOG_DEBUG,
|
||||
av_log(NULL, AV_LOG_DEBUG,
|
||||
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
|
||||
delta, d->ts_offset_discont);
|
||||
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
|
||||
|
|
@ -484,7 +476,7 @@ static int input_packet_process(Demuxer *d, AVPacket *pkt, unsigned *send_flags)
|
|||
fd->wallclock[LATENCY_PROBE_DEMUX] = av_gettime_relative();
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(ist, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
|
||||
av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
|
||||
f->index, pkt->stream_index,
|
||||
av_get_media_type_string(ist->par->codec_type),
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base),
|
||||
|
|
@ -503,42 +495,16 @@ static void readrate_sleep(Demuxer *d)
|
|||
(f->start_time_effective != AV_NOPTS_VALUE ? f->start_time_effective * !start_at_zero : 0) +
|
||||
(f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
|
||||
);
|
||||
int64_t initial_burst = AV_TIME_BASE * d->readrate_initial_burst;
|
||||
int resume_warn = 0;
|
||||
|
||||
int64_t burst_until = AV_TIME_BASE * d->readrate_initial_burst;
|
||||
for (int i = 0; i < f->nb_streams; i++) {
|
||||
InputStream *ist = f->streams[i];
|
||||
DemuxStream *ds = ds_from_ist(ist);
|
||||
int64_t stream_ts_offset, pts, now, wc_elapsed, elapsed, lag, max_pts, limit_pts;
|
||||
|
||||
if (ds->discard) continue;
|
||||
|
||||
int64_t stream_ts_offset, pts, now;
|
||||
stream_ts_offset = FFMAX(ds->first_dts != AV_NOPTS_VALUE ? ds->first_dts : 0, file_start);
|
||||
pts = av_rescale(ds->dts, 1000000, AV_TIME_BASE);
|
||||
now = av_gettime_relative();
|
||||
wc_elapsed = now - d->wallclock_start;
|
||||
max_pts = stream_ts_offset + initial_burst + wc_elapsed * d->readrate;
|
||||
lag = FFMAX(max_pts - pts, 0);
|
||||
if ( (!ds->lag && lag > 0.3 * AV_TIME_BASE) || ( lag > ds->lag + 0.3 * AV_TIME_BASE) ) {
|
||||
ds->lag = lag;
|
||||
ds->resume_wc = now;
|
||||
ds->resume_pts = pts;
|
||||
av_log_once(ds, AV_LOG_WARNING, AV_LOG_DEBUG, &resume_warn,
|
||||
"Resumed reading at pts %0.3f with rate %0.3f after a lag of %0.3fs\n",
|
||||
(float)pts/AV_TIME_BASE, d->readrate_catchup, (float)lag/AV_TIME_BASE);
|
||||
}
|
||||
if (ds->lag && !lag)
|
||||
ds->lag = ds->resume_wc = ds->resume_pts = 0;
|
||||
if (ds->resume_wc) {
|
||||
elapsed = now - ds->resume_wc;
|
||||
limit_pts = ds->resume_pts + elapsed * d->readrate_catchup;
|
||||
} else {
|
||||
elapsed = wc_elapsed;
|
||||
limit_pts = max_pts;
|
||||
}
|
||||
|
||||
if (pts > limit_pts)
|
||||
av_usleep(pts - limit_pts);
|
||||
now = (av_gettime_relative() - d->wallclock_start) * d->readrate + stream_ts_offset;
|
||||
if (pts - burst_until > now)
|
||||
av_usleep(pts - burst_until - now);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -874,6 +840,7 @@ static void ist_free(InputStream **pist)
|
|||
|
||||
av_dict_free(&ds->decoder_opts);
|
||||
av_freep(&ist->filters);
|
||||
av_freep(&ist->outputs);
|
||||
av_freep(&ds->dec_opts.hwaccel_device);
|
||||
|
||||
avcodec_parameters_free(&ist->par);
|
||||
|
|
@ -907,8 +874,8 @@ void ifile_close(InputFile **pf)
|
|||
av_freep(pf);
|
||||
}
|
||||
|
||||
int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src)
|
||||
static int ist_use(InputStream *ist, int decoding_needed,
|
||||
const ViewSpecifier *vs, SchedulerNode *src)
|
||||
{
|
||||
Demuxer *d = demuxer_from_ifile(ist->file);
|
||||
DemuxStream *ds = ds_from_ist(ist);
|
||||
|
|
@ -945,18 +912,9 @@ int ist_use(InputStream *ist, int decoding_needed,
|
|||
|
||||
if (decoding_needed && ds->sch_idx_dec < 0) {
|
||||
int is_audio = ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
|
||||
int is_unreliable = !!(d->f.ctx->iformat->flags & AVFMT_NOTIMESTAMPS);
|
||||
int64_t use_wallclock_as_timestamps;
|
||||
|
||||
ret = av_opt_get_int(d->f.ctx, "use_wallclock_as_timestamps", 0, &use_wallclock_as_timestamps);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (use_wallclock_as_timestamps)
|
||||
is_unreliable = 0;
|
||||
|
||||
ds->dec_opts.flags |= (!!ist->fix_sub_duration * DECODER_FLAG_FIX_SUB_DURATION) |
|
||||
(!!is_unreliable * DECODER_FLAG_TS_UNRELIABLE) |
|
||||
(!!(d->f.ctx->iformat->flags & AVFMT_NOTIMESTAMPS) * DECODER_FLAG_TS_UNRELIABLE) |
|
||||
(!!(d->loop && is_audio) * DECODER_FLAG_SEND_END_TS)
|
||||
#if FFMPEG_OPT_TOP
|
||||
| ((ist->top_field_first >= 0) * DECODER_FLAG_TOP_FIELD_FIRST)
|
||||
|
|
@ -1017,6 +975,25 @@ int ist_use(InputStream *ist, int decoding_needed,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int ist_output_add(InputStream *ist, OutputStream *ost)
|
||||
{
|
||||
DemuxStream *ds = ds_from_ist(ist);
|
||||
SchedulerNode src;
|
||||
int ret;
|
||||
|
||||
ret = ist_use(ist, ost->enc ? DECODING_FOR_OST : 0, NULL, &src);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = GROW_ARRAY(ist->outputs, ist->nb_outputs);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ist->outputs[ist->nb_outputs - 1] = ost;
|
||||
|
||||
return ost->enc ? ds->sch_idx_dec : ds->sch_idx_stream;
|
||||
}
|
||||
|
||||
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
|
||||
const ViewSpecifier *vs, InputFilterOptions *opts,
|
||||
SchedulerNode *src)
|
||||
|
|
@ -1109,8 +1086,7 @@ int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
|
|||
return AVERROR(ENOMEM);
|
||||
|
||||
opts->flags |= IFILTER_FLAG_AUTOROTATE * !!(ds->autorotate) |
|
||||
IFILTER_FLAG_REINIT * !!(ds->reinit_filters) |
|
||||
IFILTER_FLAG_DROPCHANGED* !!(ds->drop_changed);
|
||||
IFILTER_FLAG_REINIT * !!(ds->reinit_filters);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1148,7 +1124,7 @@ static int choose_decoder(const OptionsContext *o, void *logctx,
|
|||
|
||||
for (int j = 0; config = avcodec_get_hw_config(c, j); j++) {
|
||||
if (config->device_type == hwaccel_device_type) {
|
||||
av_log(logctx, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n",
|
||||
av_log(NULL, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n",
|
||||
c->name, av_hwdevice_get_type_name(hwaccel_device_type));
|
||||
*pcodec = c;
|
||||
return 0;
|
||||
|
|
@ -1421,17 +1397,6 @@ static int ist_add(const OptionsContext *o, Demuxer *d, AVStream *st, AVDictiona
|
|||
ds->reinit_filters = -1;
|
||||
opt_match_per_stream_int(ist, &o->reinit_filters, ic, st, &ds->reinit_filters);
|
||||
|
||||
ds->drop_changed = 0;
|
||||
opt_match_per_stream_int(ist, &o->drop_changed, ic, st, &ds->drop_changed);
|
||||
|
||||
if (ds->drop_changed && ds->reinit_filters) {
|
||||
if (ds->reinit_filters > 0) {
|
||||
av_log(ist, AV_LOG_ERROR, "drop_changed and reinit_filters both enabled. These are mutually exclusive.\n");
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
ds->reinit_filters = 0;
|
||||
}
|
||||
|
||||
ist->user_set_discard = AVDISCARD_NONE;
|
||||
|
||||
if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ||
|
||||
|
|
@ -1781,9 +1746,8 @@ int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch)
|
|||
/* open the input file with generic avformat function */
|
||||
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
|
||||
if (err < 0) {
|
||||
if (err != AVERROR_EXIT)
|
||||
av_log(d, AV_LOG_ERROR,
|
||||
"Error opening input: %s\n", av_err2str(err));
|
||||
av_log(d, AV_LOG_ERROR,
|
||||
"Error opening input: %s\n", av_err2str(err));
|
||||
if (err == AVERROR_PROTOCOL_NOT_FOUND)
|
||||
av_log(d, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
|
||||
return err;
|
||||
|
|
@ -1914,22 +1878,9 @@ int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch)
|
|||
d->readrate_initial_burst);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
d->readrate_catchup = o->readrate_catchup ? o->readrate_catchup : d->readrate * 1.05;
|
||||
if (d->readrate_catchup < d->readrate) {
|
||||
av_log(d, AV_LOG_ERROR,
|
||||
"Option -readrate_catchup is %0.3f; it must be at least equal to %0.3f.\n",
|
||||
d->readrate_catchup, d->readrate);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
} else {
|
||||
if (o->readrate_initial_burst) {
|
||||
av_log(d, AV_LOG_WARNING, "Option -readrate_initial_burst ignored "
|
||||
"since neither -readrate nor -re were given\n");
|
||||
}
|
||||
if (o->readrate_catchup) {
|
||||
av_log(d, AV_LOG_WARNING, "Option -readrate_catchup ignored "
|
||||
"since neither -readrate nor -re were given\n");
|
||||
}
|
||||
} else if (o->readrate_initial_burst) {
|
||||
av_log(d, AV_LOG_WARNING, "Option -readrate_initial_burst ignored "
|
||||
"since neither -readrate nor -re were given\n");
|
||||
}
|
||||
|
||||
/* Add all the streams from the given input file to the demuxer */
|
||||
|
|
|
|||
|
|
@ -38,12 +38,7 @@
|
|||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
typedef struct EncoderPriv {
|
||||
Encoder e;
|
||||
|
||||
void *log_parent;
|
||||
char log_name[32];
|
||||
|
||||
struct Encoder {
|
||||
// combined size of all the packets received from the encoder
|
||||
uint64_t data_size;
|
||||
|
||||
|
|
@ -55,12 +50,7 @@ typedef struct EncoderPriv {
|
|||
|
||||
Scheduler *sch;
|
||||
unsigned sch_idx;
|
||||
} EncoderPriv;
|
||||
|
||||
static EncoderPriv *ep_from_enc(Encoder *enc)
|
||||
{
|
||||
return (EncoderPriv*)enc;
|
||||
}
|
||||
};
|
||||
|
||||
// data that is local to the decoder thread and not visible outside of it
|
||||
typedef struct EncoderThread {
|
||||
|
|
@ -75,90 +65,56 @@ void enc_free(Encoder **penc)
|
|||
if (!enc)
|
||||
return;
|
||||
|
||||
if (enc->enc_ctx)
|
||||
av_freep(&enc->enc_ctx->stats_in);
|
||||
avcodec_free_context(&enc->enc_ctx);
|
||||
|
||||
av_freep(penc);
|
||||
}
|
||||
|
||||
static const char *enc_item_name(void *obj)
|
||||
{
|
||||
const EncoderPriv *ep = obj;
|
||||
|
||||
return ep->log_name;
|
||||
}
|
||||
|
||||
static const AVClass enc_class = {
|
||||
.class_name = "Encoder",
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
.parent_log_context_offset = offsetof(EncoderPriv, log_parent),
|
||||
.item_name = enc_item_name,
|
||||
};
|
||||
|
||||
int enc_alloc(Encoder **penc, const AVCodec *codec,
|
||||
Scheduler *sch, unsigned sch_idx, void *log_parent)
|
||||
Scheduler *sch, unsigned sch_idx)
|
||||
{
|
||||
EncoderPriv *ep;
|
||||
int ret = 0;
|
||||
Encoder *enc;
|
||||
|
||||
*penc = NULL;
|
||||
|
||||
ep = av_mallocz(sizeof(*ep));
|
||||
if (!ep)
|
||||
enc = av_mallocz(sizeof(*enc));
|
||||
if (!enc)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ep->e.class = &enc_class;
|
||||
ep->log_parent = log_parent;
|
||||
enc->sch = sch;
|
||||
enc->sch_idx = sch_idx;
|
||||
|
||||
ep->sch = sch;
|
||||
ep->sch_idx = sch_idx;
|
||||
|
||||
snprintf(ep->log_name, sizeof(ep->log_name), "enc:%s", codec->name);
|
||||
|
||||
ep->e.enc_ctx = avcodec_alloc_context3(codec);
|
||||
if (!ep->e.enc_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
*penc = &ep->e;
|
||||
*penc = enc;
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
enc_free((Encoder**)&ep);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
|
||||
AVBufferRef *frames_ref)
|
||||
static int hw_device_setup_for_encode(OutputStream *ost, AVBufferRef *frames_ref)
|
||||
{
|
||||
const AVCodecHWConfig *config;
|
||||
HWDevice *dev = NULL;
|
||||
|
||||
if (frames_ref &&
|
||||
((AVHWFramesContext*)frames_ref->data)->format ==
|
||||
enc_ctx->pix_fmt) {
|
||||
ost->enc_ctx->pix_fmt) {
|
||||
// Matching format, will try to use hw_frames_ctx.
|
||||
} else {
|
||||
frames_ref = NULL;
|
||||
}
|
||||
|
||||
for (int i = 0;; i++) {
|
||||
config = avcodec_get_hw_config(enc_ctx->codec, i);
|
||||
config = avcodec_get_hw_config(ost->enc_ctx->codec, i);
|
||||
if (!config)
|
||||
break;
|
||||
|
||||
if (frames_ref &&
|
||||
config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
|
||||
(config->pix_fmt == AV_PIX_FMT_NONE ||
|
||||
config->pix_fmt == enc_ctx->pix_fmt)) {
|
||||
av_log(e, AV_LOG_VERBOSE, "Using input "
|
||||
config->pix_fmt == ost->enc_ctx->pix_fmt)) {
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input "
|
||||
"frames context (format %s) with %s encoder.\n",
|
||||
av_get_pix_fmt_name(enc_ctx->pix_fmt),
|
||||
enc_ctx->codec->name);
|
||||
enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
|
||||
if (!enc_ctx->hw_frames_ctx)
|
||||
av_get_pix_fmt_name(ost->enc_ctx->pix_fmt),
|
||||
ost->enc_ctx->codec->name);
|
||||
ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
|
||||
if (!ost->enc_ctx->hw_frames_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -169,11 +125,11 @@ static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
|
|||
}
|
||||
|
||||
if (dev) {
|
||||
av_log(e, AV_LOG_VERBOSE, "Using device %s "
|
||||
av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s "
|
||||
"(type %s) with %s encoder.\n", dev->name,
|
||||
av_hwdevice_get_type_name(dev->type), enc_ctx->codec->name);
|
||||
enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!enc_ctx->hw_device_ctx)
|
||||
av_hwdevice_get_type_name(dev->type), ost->enc_ctx->codec->name);
|
||||
ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
|
||||
if (!ost->enc_ctx->hw_device_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
} else {
|
||||
// No device required, or no device available.
|
||||
|
|
@ -181,13 +137,37 @@ static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int set_encoder_id(OutputFile *of, OutputStream *ost)
|
||||
{
|
||||
const char *cname = ost->enc_ctx->codec->name;
|
||||
uint8_t *encoder_string;
|
||||
int encoder_string_len;
|
||||
|
||||
if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
|
||||
return 0;
|
||||
|
||||
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
|
||||
encoder_string = av_mallocz(encoder_string_len);
|
||||
if (!encoder_string)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!of->bitexact && !ost->bitexact)
|
||||
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
|
||||
else
|
||||
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
|
||||
av_strlcat(encoder_string, cname, encoder_string_len);
|
||||
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
|
||||
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int enc_open(void *opaque, const AVFrame *frame)
|
||||
{
|
||||
OutputStream *ost = opaque;
|
||||
InputStream *ist = ost->ist;
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVCodecContext *enc_ctx = e->enc_ctx;
|
||||
AVCodecContext *enc_ctx = ost->enc_ctx;
|
||||
Decoder *dec = NULL;
|
||||
const AVCodec *enc = enc_ctx->codec;
|
||||
OutputFile *of = ost->file;
|
||||
|
|
@ -195,7 +175,7 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
int frame_samples = 0;
|
||||
int ret;
|
||||
|
||||
if (ep->opened)
|
||||
if (e->opened)
|
||||
return 0;
|
||||
|
||||
// frame is always non-NULL for audio and video
|
||||
|
|
@ -220,6 +200,10 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
}
|
||||
}
|
||||
|
||||
ret = set_encoder_id(of, ost);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ist)
|
||||
dec = ist->decoder;
|
||||
|
||||
|
|
@ -227,6 +211,7 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
if (ost->type == AVMEDIA_TYPE_AUDIO || ost->type == AVMEDIA_TYPE_VIDEO) {
|
||||
enc_ctx->time_base = frame->time_base;
|
||||
enc_ctx->framerate = fd->frame_rate_filter;
|
||||
ost->st->avg_frame_rate = fd->frame_rate_filter;
|
||||
}
|
||||
|
||||
switch (enc_ctx->codec_type) {
|
||||
|
|
@ -253,7 +238,7 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
frame->height > 0);
|
||||
enc_ctx->width = frame->width;
|
||||
enc_ctx->height = frame->height;
|
||||
enc_ctx->sample_aspect_ratio =
|
||||
enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
|
||||
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
|
||||
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
|
||||
frame->sample_aspect_ratio;
|
||||
|
|
@ -266,26 +251,11 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
enc_ctx->bits_per_raw_sample = FFMIN(fd->bits_per_raw_sample,
|
||||
av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
|
||||
|
||||
/**
|
||||
* The video color properties should always be in sync with the user-
|
||||
* requested values, since we forward them to the filter graph.
|
||||
*/
|
||||
enc_ctx->color_range = frame->color_range;
|
||||
enc_ctx->color_primaries = frame->color_primaries;
|
||||
enc_ctx->color_trc = frame->color_trc;
|
||||
enc_ctx->colorspace = frame->colorspace;
|
||||
|
||||
/* Video properties which are not part of filter graph negotiation */
|
||||
if (enc_ctx->chroma_sample_location == AVCHROMA_LOC_UNSPECIFIED) {
|
||||
enc_ctx->chroma_sample_location = frame->chroma_location;
|
||||
} else if (enc_ctx->chroma_sample_location != frame->chroma_location &&
|
||||
frame->chroma_location != AVCHROMA_LOC_UNSPECIFIED) {
|
||||
av_log(e, AV_LOG_WARNING,
|
||||
"Requested chroma sample location '%s' does not match the "
|
||||
"frame tagged sample location '%s'; result may be incorrect.\n",
|
||||
av_chroma_location_name(enc_ctx->chroma_sample_location),
|
||||
av_chroma_location_name(frame->chroma_location));
|
||||
}
|
||||
enc_ctx->chroma_sample_location = frame->chroma_location;
|
||||
|
||||
if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) ||
|
||||
(frame->flags & AV_FRAME_FLAG_INTERLACED)
|
||||
|
|
@ -342,31 +312,42 @@ int enc_open(void *opaque, const AVFrame *frame)
|
|||
|
||||
enc_ctx->flags |= AV_CODEC_FLAG_FRAME_DURATION;
|
||||
|
||||
ret = hw_device_setup_for_encode(e, enc_ctx, frame ? frame->hw_frames_ctx : NULL);
|
||||
ret = hw_device_setup_for_encode(ost, frame ? frame->hw_frames_ctx : NULL);
|
||||
if (ret < 0) {
|
||||
av_log(e, AV_LOG_ERROR,
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"Encoding hardware device setup failed: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(enc_ctx, enc, NULL)) < 0) {
|
||||
if ((ret = avcodec_open2(ost->enc_ctx, enc, NULL)) < 0) {
|
||||
if (ret != AVERROR_EXPERIMENTAL)
|
||||
av_log(e, AV_LOG_ERROR, "Error while opening encoder - maybe "
|
||||
av_log(ost, AV_LOG_ERROR, "Error while opening encoder - maybe "
|
||||
"incorrect parameters such as bit_rate, rate, width or height.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ep->opened = 1;
|
||||
e->opened = 1;
|
||||
|
||||
if (enc_ctx->frame_size)
|
||||
frame_samples = enc_ctx->frame_size;
|
||||
if (ost->enc_ctx->frame_size)
|
||||
frame_samples = ost->enc_ctx->frame_size;
|
||||
|
||||
if (enc_ctx->bit_rate && enc_ctx->bit_rate < 1000 &&
|
||||
enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
|
||||
av_log(e, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
|
||||
ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
|
||||
av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
|
||||
" It takes bits/s as argument, not kbits/s\n");
|
||||
|
||||
ret = of_stream_init(of, ost, enc_ctx);
|
||||
ret = avcodec_parameters_from_context(ost->par_in, ost->enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec context.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
// copy timebase while removing common factors
|
||||
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
|
||||
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
|
||||
|
||||
ret = of_stream_init(of, ost);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
|
@ -388,20 +369,19 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
|||
AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
int subtitle_out_max_size = 1024 * 1024;
|
||||
int subtitle_out_size, nb, i, ret;
|
||||
AVCodecContext *enc;
|
||||
int64_t pts;
|
||||
|
||||
if (sub->pts == AV_NOPTS_VALUE) {
|
||||
av_log(e, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
|
||||
av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
|
||||
return exit_on_error ? AVERROR(EINVAL) : 0;
|
||||
}
|
||||
if ((of->start_time != AV_NOPTS_VALUE && sub->pts < of->start_time))
|
||||
return 0;
|
||||
|
||||
enc = e->enc_ctx;
|
||||
enc = ost->enc_ctx;
|
||||
|
||||
/* Note: DVB subtitle need one packet to draw them and one other
|
||||
packet to clear them */
|
||||
|
|
@ -440,11 +420,11 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
|||
local_sub.rects += i;
|
||||
}
|
||||
|
||||
e->frames_encoded++;
|
||||
ost->frames_encoded++;
|
||||
|
||||
subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, &local_sub);
|
||||
if (subtitle_out_size < 0) {
|
||||
av_log(e, AV_LOG_FATAL, "Subtitle encoding failed\n");
|
||||
av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
|
||||
return subtitle_out_size;
|
||||
}
|
||||
|
||||
|
|
@ -462,7 +442,7 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
|
|||
}
|
||||
pkt->dts = pkt->pts;
|
||||
|
||||
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt);
|
||||
ret = sch_enc_send(e->sch, e->sch_idx, pkt);
|
||||
if (ret < 0) {
|
||||
av_packet_unref(pkt);
|
||||
return ret;
|
||||
|
|
@ -477,7 +457,6 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
|||
uint64_t frame_num)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVIOContext *io = es->io;
|
||||
AVRational tb = frame ? frame->time_base : pkt->time_base;
|
||||
int64_t pts = frame ? frame->pts : pkt->pts;
|
||||
|
|
@ -515,7 +494,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
|||
|
||||
if (frame) {
|
||||
switch (c->type) {
|
||||
case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, e->samples_encoded); continue;
|
||||
case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue;
|
||||
case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue;
|
||||
default: av_assert0(0);
|
||||
}
|
||||
|
|
@ -533,7 +512,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
|
|||
}
|
||||
case ENC_STATS_AVG_BITRATE: {
|
||||
double duration = pkt->dts * av_q2d(tb);
|
||||
avio_printf(io, "%g", duration > 0 ? 8.0 * ep->data_size / duration : -1.);
|
||||
avio_printf(io, "%g", duration > 0 ? 8.0 * e->data_size / duration : -1.);
|
||||
continue;
|
||||
}
|
||||
default: av_assert0(0);
|
||||
|
|
@ -554,10 +533,9 @@ static inline double psnr(double d)
|
|||
static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
|
||||
NULL);
|
||||
AVCodecContext *enc = e->enc_ctx;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
enum AVPictureType pict_type;
|
||||
int64_t frame_number;
|
||||
double ti1, bitrate, avg_bitrate;
|
||||
|
|
@ -588,7 +566,7 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
|
|||
}
|
||||
}
|
||||
|
||||
frame_number = ep->packets_encoded;
|
||||
frame_number = e->packets_encoded;
|
||||
if (vstats_version <= 1) {
|
||||
fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
|
||||
quality / (float)FF_QP2LAMBDA);
|
||||
|
|
@ -608,9 +586,9 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
|
|||
ti1 = 0.01;
|
||||
|
||||
bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
|
||||
avg_bitrate = (double)(ep->data_size * 8) / ti1 / 1000.0;
|
||||
avg_bitrate = (double)(e->data_size * 8) / ti1 / 1000.0;
|
||||
fprintf(vstats_file, "s_size= %8.0fKiB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
|
||||
(double)ep->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
(double)e->data_size / 1024, ti1, bitrate, avg_bitrate);
|
||||
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(pict_type));
|
||||
|
||||
return 0;
|
||||
|
|
@ -620,8 +598,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
AVCodecContext *enc = e->enc_ctx;
|
||||
AVCodecContext *enc = ost->enc_ctx;
|
||||
const char *type_desc = av_get_media_type_string(enc->codec_type);
|
||||
const char *action = frame ? "encode" : "flush";
|
||||
int ret;
|
||||
|
|
@ -636,13 +613,13 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
|
||||
if (ost->enc_stats_pre.io)
|
||||
enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
|
||||
e->frames_encoded);
|
||||
ost->frames_encoded);
|
||||
|
||||
e->frames_encoded++;
|
||||
e->samples_encoded += frame->nb_samples;
|
||||
ost->frames_encoded++;
|
||||
ost->samples_encoded += frame->nb_samples;
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(e, AV_LOG_INFO, "encoder <- type:%s "
|
||||
av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
|
||||
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
|
||||
type_desc,
|
||||
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
|
||||
|
|
@ -657,7 +634,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
|
||||
ret = avcodec_send_frame(enc, frame);
|
||||
if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
|
||||
av_log(e, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
|
||||
av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
|
||||
type_desc);
|
||||
return ret;
|
||||
}
|
||||
|
|
@ -682,7 +659,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
return 0;
|
||||
} else if (ret < 0) {
|
||||
if (ret != AVERROR_EOF)
|
||||
av_log(e, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
|
||||
av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -693,7 +670,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
|
||||
// attach stream parameters to first packet if requested
|
||||
avcodec_parameters_free(&fd->par_enc);
|
||||
if (ep->attach_par && !ep->packets_encoded) {
|
||||
if (e->attach_par && !e->packets_encoded) {
|
||||
fd->par_enc = avcodec_parameters_alloc();
|
||||
if (!fd->par_enc)
|
||||
return AVERROR(ENOMEM);
|
||||
|
|
@ -713,10 +690,10 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
|
||||
if (ost->enc_stats_post.io)
|
||||
enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
|
||||
ep->packets_encoded);
|
||||
e->packets_encoded);
|
||||
|
||||
if (debug_ts) {
|
||||
av_log(e, AV_LOG_INFO, "encoder -> type:%s "
|
||||
av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
|
||||
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
|
||||
"duration:%s duration_time:%s\n",
|
||||
type_desc,
|
||||
|
|
@ -725,11 +702,11 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
|
|||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
|
||||
}
|
||||
|
||||
ep->data_size += pkt->size;
|
||||
e->data_size += pkt->size;
|
||||
|
||||
ep->packets_encoded++;
|
||||
e->packets_encoded++;
|
||||
|
||||
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt);
|
||||
ret = sch_enc_send(e->sch, e->sch_idx, pkt);
|
||||
if (ret < 0) {
|
||||
av_packet_unref(pkt);
|
||||
return ret;
|
||||
|
|
@ -787,7 +764,6 @@ force_keyframe:
|
|||
|
||||
static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
||||
{
|
||||
Encoder *e = ost->enc;
|
||||
OutputFile *of = ost->file;
|
||||
enum AVMediaType type = ost->type;
|
||||
|
||||
|
|
@ -805,8 +781,8 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
|||
return AVERROR_EOF;
|
||||
|
||||
if (type == AVMEDIA_TYPE_VIDEO) {
|
||||
frame->quality = e->enc_ctx->global_quality;
|
||||
frame->pict_type = forced_kf_apply(e, &ost->kf, frame);
|
||||
frame->quality = ost->enc_ctx->global_quality;
|
||||
frame->pict_type = forced_kf_apply(ost, &ost->kf, frame);
|
||||
|
||||
#if FFMPEG_OPT_TOP
|
||||
if (ost->top_field_first >= 0) {
|
||||
|
|
@ -815,9 +791,9 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
|
|||
}
|
||||
#endif
|
||||
} else {
|
||||
if (!(e->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
|
||||
e->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) {
|
||||
av_log(e, AV_LOG_ERROR,
|
||||
if (!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
|
||||
ost->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"Audio channel count changed and encoder does not support parameter changes\n");
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -831,7 +807,7 @@ static void enc_thread_set_name(const OutputStream *ost)
|
|||
{
|
||||
char name[16];
|
||||
snprintf(name, sizeof(name), "enc%d:%d:%s", ost->file->index, ost->index,
|
||||
ost->enc->enc_ctx->codec->name);
|
||||
ost->enc_ctx->codec->name);
|
||||
ff_thread_setname(name);
|
||||
}
|
||||
|
||||
|
|
@ -866,7 +842,6 @@ int encoder_thread(void *arg)
|
|||
{
|
||||
OutputStream *ost = arg;
|
||||
Encoder *e = ost->enc;
|
||||
EncoderPriv *ep = ep_from_enc(e);
|
||||
EncoderThread et;
|
||||
int ret = 0, input_status = 0;
|
||||
int name_set = 0;
|
||||
|
|
@ -889,17 +864,17 @@ int encoder_thread(void *arg)
|
|||
}
|
||||
|
||||
while (!input_status) {
|
||||
input_status = sch_enc_receive(ep->sch, ep->sch_idx, et.frame);
|
||||
input_status = sch_enc_receive(e->sch, e->sch_idx, et.frame);
|
||||
if (input_status < 0) {
|
||||
if (input_status == AVERROR_EOF) {
|
||||
av_log(e, AV_LOG_VERBOSE, "Encoder thread received EOF\n");
|
||||
if (ep->opened)
|
||||
av_log(ost, AV_LOG_VERBOSE, "Encoder thread received EOF\n");
|
||||
if (e->opened)
|
||||
break;
|
||||
|
||||
av_log(e, AV_LOG_ERROR, "Could not open encoder before EOF\n");
|
||||
av_log(ost, AV_LOG_ERROR, "Could not open encoder before EOF\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
av_log(e, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n",
|
||||
av_log(ost, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n",
|
||||
av_err2str(ret));
|
||||
ret = input_status;
|
||||
}
|
||||
|
|
@ -918,9 +893,9 @@ int encoder_thread(void *arg)
|
|||
|
||||
if (ret < 0) {
|
||||
if (ret == AVERROR_EOF)
|
||||
av_log(e, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n");
|
||||
av_log(ost, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n");
|
||||
else
|
||||
av_log(e, AV_LOG_ERROR, "Error encoding a frame: %s\n",
|
||||
av_log(ost, AV_LOG_ERROR, "Error encoding a frame: %s\n",
|
||||
av_err2str(ret));
|
||||
break;
|
||||
}
|
||||
|
|
@ -930,7 +905,7 @@ int encoder_thread(void *arg)
|
|||
if (ret == 0 || ret == AVERROR_EOF) {
|
||||
ret = frame_encode(ost, NULL, et.pkt);
|
||||
if (ret < 0 && ret != AVERROR_EOF)
|
||||
av_log(e, AV_LOG_ERROR, "Error flushing encoder: %s\n",
|
||||
av_log(ost, AV_LOG_ERROR, "Error flushing encoder: %s\n",
|
||||
av_err2str(ret));
|
||||
}
|
||||
|
||||
|
|
@ -946,7 +921,6 @@ finish:
|
|||
|
||||
int enc_loopback(Encoder *enc)
|
||||
{
|
||||
EncoderPriv *ep = ep_from_enc(enc);
|
||||
ep->attach_par = 1;
|
||||
return ep->sch_idx;
|
||||
enc->attach_par = 1;
|
||||
return enc->sch_idx;
|
||||
}
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load diff
|
|
@ -581,9 +581,9 @@ static int bsf_init(MuxStream *ms)
|
|||
int ret;
|
||||
|
||||
if (!ctx)
|
||||
return avcodec_parameters_copy(ost->st->codecpar, ms->par_in);
|
||||
return avcodec_parameters_copy(ost->st->codecpar, ost->par_in);
|
||||
|
||||
ret = avcodec_parameters_copy(ctx->par_in, ms->par_in);
|
||||
ret = avcodec_parameters_copy(ctx->par_in, ost->par_in);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
|
@ -608,29 +608,12 @@ static int bsf_init(MuxStream *ms)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost,
|
||||
const AVCodecContext *enc_ctx)
|
||||
int of_stream_init(OutputFile *of, OutputStream *ost)
|
||||
{
|
||||
Muxer *mux = mux_from_of(of);
|
||||
MuxStream *ms = ms_from_ost(ost);
|
||||
int ret;
|
||||
|
||||
if (enc_ctx) {
|
||||
// use upstream time base unless it has been overridden previously
|
||||
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
|
||||
ost->st->time_base = av_add_q(enc_ctx->time_base, (AVRational){0, 1});
|
||||
|
||||
ost->st->avg_frame_rate = enc_ctx->framerate;
|
||||
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio;
|
||||
|
||||
ret = avcodec_parameters_from_context(ms->par_in, enc_ctx);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL,
|
||||
"Error initializing the output stream codec parameters.\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize bitstream filters for the output stream
|
||||
* needs to be done here, because the codec id for streamcopy is not
|
||||
* known until now */
|
||||
|
|
@ -661,8 +644,8 @@ static int check_written(OutputFile *of)
|
|||
|
||||
total_packets_written += packets_written;
|
||||
|
||||
if (ost->enc &&
|
||||
(ost->enc->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
|
||||
if (ost->enc_ctx &&
|
||||
(ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
|
||||
!= AV_CODEC_FLAG_PASS1)
|
||||
pass1_used = 0;
|
||||
|
||||
|
|
@ -723,9 +706,9 @@ static void mux_final_stats(Muxer *mux)
|
|||
of->index, j, av_get_media_type_string(type));
|
||||
if (ost->enc) {
|
||||
av_log(of, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
|
||||
ost->enc->frames_encoded);
|
||||
ost->frames_encoded);
|
||||
if (type == AVMEDIA_TYPE_AUDIO)
|
||||
av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->enc->samples_encoded);
|
||||
av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
|
||||
av_log(of, AV_LOG_VERBOSE, "; ");
|
||||
}
|
||||
|
||||
|
|
@ -823,7 +806,7 @@ static void ost_free(OutputStream **post)
|
|||
ost->logfile = NULL;
|
||||
}
|
||||
|
||||
avcodec_parameters_free(&ms->par_in);
|
||||
avcodec_parameters_free(&ost->par_in);
|
||||
|
||||
av_bsf_free(&ms->bsf_ctx);
|
||||
av_packet_free(&ms->bsf_pkt);
|
||||
|
|
@ -837,6 +820,10 @@ static void ost_free(OutputStream **post)
|
|||
|
||||
av_freep(&ost->attachment_filename);
|
||||
|
||||
if (ost->enc_ctx)
|
||||
av_freep(&ost->enc_ctx->stats_in);
|
||||
avcodec_free_context(&ost->enc_ctx);
|
||||
|
||||
enc_stats_uninit(&ost->enc_stats_pre);
|
||||
enc_stats_uninit(&ost->enc_stats_post);
|
||||
enc_stats_uninit(&ms->stats);
|
||||
|
|
|
|||
|
|
@ -36,12 +36,6 @@
|
|||
typedef struct MuxStream {
|
||||
OutputStream ost;
|
||||
|
||||
/**
|
||||
* Codec parameters for packets submitted to the muxer (i.e. before
|
||||
* bitstream filtering, if any).
|
||||
*/
|
||||
AVCodecParameters *par_in;
|
||||
|
||||
// name used for logging
|
||||
char log_name[32];
|
||||
|
||||
|
|
@ -85,10 +79,6 @@ typedef struct MuxStream {
|
|||
int ts_drop;
|
||||
#endif
|
||||
|
||||
AVRational frame_rate;
|
||||
AVRational max_frame_rate;
|
||||
int force_fps;
|
||||
|
||||
const char *apad;
|
||||
} MuxStream;
|
||||
|
||||
|
|
@ -123,7 +113,7 @@ typedef struct Muxer {
|
|||
|
||||
int mux_check_init(void *arg);
|
||||
|
||||
static inline MuxStream *ms_from_ost(OutputStream *ost)
|
||||
static MuxStream *ms_from_ost(OutputStream *ost)
|
||||
{
|
||||
return (MuxStream*)ost;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -67,9 +67,8 @@ static int check_opt_bitexact(void *ctx, const AVDictionary *opts,
|
|||
}
|
||||
|
||||
static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
|
||||
MuxStream *ms, const AVCodec **enc)
|
||||
OutputStream *ost, const AVCodec **enc)
|
||||
{
|
||||
OutputStream *ost = &ms->ost;
|
||||
enum AVMediaType type = ost->type;
|
||||
const char *codec_name = NULL;
|
||||
|
||||
|
|
@ -91,20 +90,20 @@ static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
|
|||
}
|
||||
|
||||
if (!codec_name) {
|
||||
ms->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type);
|
||||
*enc = avcodec_find_encoder(ms->par_in->codec_id);
|
||||
ost->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type);
|
||||
*enc = avcodec_find_encoder(ost->par_in->codec_id);
|
||||
if (!*enc) {
|
||||
av_log(ost, AV_LOG_FATAL, "Automatic encoder selection failed "
|
||||
"Default encoder for format %s (codec %s) is "
|
||||
"probably disabled. Please choose an encoder manually.\n",
|
||||
s->oformat->name, avcodec_get_name(ms->par_in->codec_id));
|
||||
s->oformat->name, avcodec_get_name(ost->par_in->codec_id));
|
||||
return AVERROR_ENCODER_NOT_FOUND;
|
||||
}
|
||||
} else if (strcmp(codec_name, "copy")) {
|
||||
int ret = find_codec(ost, codec_name, ost->type, 1, enc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ms->par_in->codec_id = (*enc)->id;
|
||||
ost->par_in->codec_id = (*enc)->id;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
@ -424,6 +423,27 @@ static int ost_get_filters(const OptionsContext *o, AVFormatContext *oc,
|
|||
#endif
|
||||
opt_match_per_stream_str(ost, &o->filters, oc, ost->st, &filters);
|
||||
|
||||
if (!ost->enc) {
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters_script ||
|
||||
#endif
|
||||
filters) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"%s '%s' was specified, but codec copy was selected. "
|
||||
"Filtering and streamcopy cannot be used together.\n",
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters ? "Filtergraph" : "Filtergraph script",
|
||||
filters ? filters : filters_script
|
||||
#else
|
||||
"Filtergraph", filters
|
||||
#endif
|
||||
);
|
||||
return AVERROR(ENOSYS);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!ost->ist) {
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
|
|
@ -534,7 +554,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
|
|||
return AV_PIX_FMT_NONE;
|
||||
}
|
||||
|
||||
ret = avcodec_get_supported_config(ost->enc->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
|
||||
ret = avcodec_get_supported_config(ost->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
|
||||
0, (const void **) &fmts, NULL);
|
||||
if (ret < 0)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
|
@ -566,7 +586,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
|
|||
}
|
||||
|
||||
if (fmts && !fmt_in_list(fmts, fmt))
|
||||
fmt = choose_pixel_fmt(ost->enc->enc_ctx, fmt);
|
||||
fmt = choose_pixel_fmt(ost->enc_ctx, fmt);
|
||||
|
||||
return fmt;
|
||||
}
|
||||
|
|
@ -584,13 +604,13 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
st = ost->st;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->frame_rates, oc, st, &frame_rate);
|
||||
if (frame_rate && av_parse_video_rate(&ms->frame_rate, frame_rate) < 0) {
|
||||
if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
opt_match_per_stream_str(ost, &o->max_frame_rates, oc, st, &max_frame_rate);
|
||||
if (max_frame_rate && av_parse_video_rate(&ms->max_frame_rate, max_frame_rate) < 0) {
|
||||
if (max_frame_rate && av_parse_video_rate(&ost->max_frame_rate, max_frame_rate) < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Invalid maximum framerate value: %s\n", max_frame_rate);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
|
@ -611,8 +631,8 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
ost->frame_aspect_ratio = q;
|
||||
}
|
||||
|
||||
if (ost->enc) {
|
||||
AVCodecContext *video_enc = ost->enc->enc_ctx;
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *video_enc = ost->enc_ctx;
|
||||
const char *p = NULL, *fps_mode = NULL;
|
||||
const char *frame_size = NULL;
|
||||
const char *frame_pix_fmt = NULL;
|
||||
|
|
@ -725,10 +745,10 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
ost->logfile_prefix ? ost->logfile_prefix :
|
||||
DEFAULT_PASS_LOGFILENAME_PREFIX,
|
||||
ost_idx);
|
||||
if (!strcmp(video_enc->codec->name, "libx264") || !strcmp(video_enc->codec->name, "libvvenc")) {
|
||||
if (av_opt_is_set_to_default_by_name(video_enc, "stats",
|
||||
if (!strcmp(ost->enc_ctx->codec->name, "libx264") || !strcmp(ost->enc_ctx->codec->name, "libvvenc")) {
|
||||
if (av_opt_is_set_to_default_by_name(ost->enc_ctx, "stats",
|
||||
AV_OPT_SEARCH_CHILDREN) > 0)
|
||||
av_opt_set(video_enc, "stats", logfilename,
|
||||
av_opt_set(ost->enc_ctx, "stats", logfilename,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
} else {
|
||||
if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
|
||||
|
|
@ -754,7 +774,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
}
|
||||
}
|
||||
|
||||
opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ms->force_fps);
|
||||
opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ost->force_fps);
|
||||
|
||||
#if FFMPEG_OPT_TOP
|
||||
ost->top_field_first = -1;
|
||||
|
|
@ -775,7 +795,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
return ret;
|
||||
}
|
||||
|
||||
if ((ms->frame_rate.num || ms->max_frame_rate.num) &&
|
||||
if ((ost->frame_rate.num || ost->max_frame_rate.num) &&
|
||||
!(*vsync_method == VSYNC_AUTO ||
|
||||
*vsync_method == VSYNC_CFR || *vsync_method == VSYNC_VSCFR)) {
|
||||
av_log(ost, AV_LOG_FATAL, "One of -r/-fpsmax was specified "
|
||||
|
|
@ -784,7 +804,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
|
|||
}
|
||||
|
||||
if (*vsync_method == VSYNC_AUTO) {
|
||||
if (ms->frame_rate.num || ms->max_frame_rate.num) {
|
||||
if (ost->frame_rate.num || ost->max_frame_rate.num) {
|
||||
*vsync_method = VSYNC_CFR;
|
||||
} else if (!strcmp(oc->oformat->name, "avi")) {
|
||||
*vsync_method = VSYNC_VFR;
|
||||
|
|
@ -821,8 +841,8 @@ static int new_stream_audio(Muxer *mux, const OptionsContext *o,
|
|||
AVFormatContext *oc = mux->fc;
|
||||
AVStream *st = ost->st;
|
||||
|
||||
if (ost->enc) {
|
||||
AVCodecContext *audio_enc = ost->enc->enc_ctx;
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *audio_enc = ost->enc_ctx;
|
||||
int channels = 0;
|
||||
const char *layout = NULL;
|
||||
const char *sample_fmt = NULL;
|
||||
|
|
@ -860,8 +880,8 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
|
|||
|
||||
st = ost->st;
|
||||
|
||||
if (ost->enc) {
|
||||
AVCodecContext *subtitle_enc = ost->enc->enc_ctx;
|
||||
if (ost->enc_ctx) {
|
||||
AVCodecContext *subtitle_enc = ost->enc_ctx;
|
||||
|
||||
AVCodecDescriptor const *input_descriptor =
|
||||
avcodec_descriptor_get(ost->ist->par->codec_id);
|
||||
|
|
@ -896,16 +916,14 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
|
|||
|
||||
static int
|
||||
ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
||||
const OptionsContext *o,
|
||||
const OptionsContext *o, char *filters,
|
||||
AVRational enc_tb, enum VideoSyncMethod vsync_method,
|
||||
int keep_pix_fmt, int autoscale, int threads_manual,
|
||||
const ViewSpecifier *vs,
|
||||
SchedulerNode *src)
|
||||
const ViewSpecifier *vs)
|
||||
{
|
||||
OutputStream *ost = &ms->ost;
|
||||
AVCodecContext *enc_ctx = ost->enc->enc_ctx;
|
||||
AVCodecContext *enc_ctx = ost->enc_ctx;
|
||||
char name[16];
|
||||
char *filters = NULL;
|
||||
int ret;
|
||||
|
||||
OutputFilterOptions opts = {
|
||||
|
|
@ -918,8 +936,6 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
|||
.color_space = enc_ctx->colorspace,
|
||||
.color_range = enc_ctx->color_range,
|
||||
.vsync_method = vsync_method,
|
||||
.frame_rate = ms->frame_rate,
|
||||
.max_frame_rate = ms->max_frame_rate,
|
||||
.sample_rate = enc_ctx->sample_rate,
|
||||
.ch_layout = enc_ctx->ch_layout,
|
||||
.sws_opts = o->g->sws_dict,
|
||||
|
|
@ -930,7 +946,6 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
|||
.ts_offset = mux->of.start_time == AV_NOPTS_VALUE ?
|
||||
0 : mux->of.start_time,
|
||||
.vs = vs,
|
||||
.nb_threads = -1,
|
||||
|
||||
.flags = OFILTER_FLAG_DISABLE_CONVERT * !!keep_pix_fmt |
|
||||
OFILTER_FLAG_AUTOSCALE * !!autoscale |
|
||||
|
|
@ -947,7 +962,7 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
if (!ms->force_fps) {
|
||||
if (!ost->force_fps) {
|
||||
ret = avcodec_get_supported_config(enc_ctx, NULL,
|
||||
AV_CODEC_CONFIG_FRAME_RATE, 0,
|
||||
(const void **) &opts.frame_rates, NULL);
|
||||
|
|
@ -983,77 +998,46 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
|
|||
}
|
||||
|
||||
if (threads_manual) {
|
||||
ret = av_opt_get_int(enc_ctx, "threads", 0, &opts.nb_threads);
|
||||
ret = av_opt_get(enc_ctx, "threads", 0, (uint8_t**)&opts.nb_threads);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ost_get_filters(o, mux->fc, ost, &filters);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ofilter) {
|
||||
av_assert0(!filters);
|
||||
ost->filter = ofilter;
|
||||
ret = ofilter_bind_enc(ofilter, ms->sch_idx_enc, &opts);
|
||||
ret = ofilter_bind_ost(ofilter, ost, ms->sch_idx_enc, &opts);
|
||||
} else {
|
||||
ret = fg_create_simple(&ost->fg_simple, ost->ist, filters,
|
||||
mux->sch, ms->sch_idx_enc, &opts);
|
||||
if (ret >= 0)
|
||||
ost->filter = ost->fg_simple->outputs[0];
|
||||
|
||||
ret = init_simple_filtergraph(ost->ist, ost, filters,
|
||||
mux->sch, ms->sch_idx_enc, &opts);
|
||||
}
|
||||
av_freep(&opts.nb_threads);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
*src = SCH_ENC(ms->sch_idx_enc);
|
||||
ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
|
||||
SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int streamcopy_init(const OptionsContext *o, const Muxer *mux,
|
||||
OutputStream *ost, AVDictionary **encoder_opts)
|
||||
static int streamcopy_init(const Muxer *mux, OutputStream *ost, AVDictionary **encoder_opts)
|
||||
{
|
||||
MuxStream *ms = ms_from_ost(ost);
|
||||
|
||||
const InputStream *ist = ost->ist;
|
||||
const InputFile *ifile = ist->file;
|
||||
|
||||
AVCodecParameters *par = ms->par_in;
|
||||
AVCodecParameters *par = ost->par_in;
|
||||
uint32_t codec_tag = par->codec_tag;
|
||||
|
||||
AVCodecContext *codec_ctx = NULL;
|
||||
|
||||
AVRational fr = ms->frame_rate;
|
||||
AVRational fr = ost->frame_rate;
|
||||
|
||||
int ret = 0;
|
||||
|
||||
const char *filters = NULL;
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
const char *filters_script = NULL;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->filter_scripts, mux->fc, ost->st, &filters_script);
|
||||
#endif
|
||||
opt_match_per_stream_str(ost, &o->filters, mux->fc, ost->st, &filters);
|
||||
|
||||
if (
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters_script ||
|
||||
#endif
|
||||
filters) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"%s '%s' was specified, but codec copy was selected. "
|
||||
"Filtering and streamcopy cannot be used together.\n",
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
filters ? "Filtergraph" : "Filtergraph script",
|
||||
filters ? filters : filters_script
|
||||
#else
|
||||
"Filtergraph", filters
|
||||
#endif
|
||||
);
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
codec_ctx = avcodec_alloc_context3(NULL);
|
||||
if (!codec_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
|
@ -1155,28 +1139,6 @@ fail:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int set_encoder_id(OutputStream *ost, const AVCodec *codec)
|
||||
{
|
||||
const char *cname = codec->name;
|
||||
uint8_t *encoder_string;
|
||||
int encoder_string_len;
|
||||
|
||||
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
|
||||
encoder_string = av_mallocz(encoder_string_len);
|
||||
if (!encoder_string)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (!ost->file->bitexact && !ost->bitexact)
|
||||
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
|
||||
else
|
||||
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
|
||||
av_strlcat(encoder_string, cname, encoder_string_len);
|
||||
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
|
||||
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
||||
InputStream *ist, OutputFilter *ofilter, const ViewSpecifier *vs,
|
||||
OutputStream **post)
|
||||
|
|
@ -1186,14 +1148,13 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
OutputStream *ost;
|
||||
const AVCodec *enc;
|
||||
AVStream *st;
|
||||
SchedulerNode src = { .type = SCH_NODE_TYPE_NONE };
|
||||
AVDictionary *encoder_opts = NULL;
|
||||
int ret = 0, keep_pix_fmt = 0, autoscale = 1;
|
||||
int threads_manual = 0;
|
||||
AVRational enc_tb = { 0, 0 };
|
||||
enum VideoSyncMethod vsync_method = VSYNC_AUTO;
|
||||
const char *bsfs = NULL, *time_base = NULL, *codec_tag = NULL;
|
||||
char *next;
|
||||
char *filters = NULL, *next;
|
||||
double qscale = -1;
|
||||
|
||||
st = avformat_new_stream(oc, NULL);
|
||||
|
|
@ -1237,8 +1198,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
}
|
||||
}
|
||||
|
||||
ms->par_in = avcodec_parameters_alloc();
|
||||
if (!ms->par_in)
|
||||
ost->par_in = avcodec_parameters_alloc();
|
||||
if (!ost->par_in)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ms->last_mux_dts = AV_NOPTS_VALUE;
|
||||
|
|
@ -1246,23 +1207,27 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
ost->st = st;
|
||||
ost->ist = ist;
|
||||
ost->kf.ref_pts = AV_NOPTS_VALUE;
|
||||
ms->par_in->codec_type = type;
|
||||
ost->par_in->codec_type = type;
|
||||
st->codecpar->codec_type = type;
|
||||
|
||||
ret = choose_encoder(o, oc, ms, &enc);
|
||||
ret = choose_encoder(o, oc, ost, &enc);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_FATAL, "Error selecting an encoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (enc) {
|
||||
ost->enc_ctx = avcodec_alloc_context3(enc);
|
||||
if (!ost->enc_ctx)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = sch_add_enc(mux->sch, encoder_thread, ost,
|
||||
ost->type == AVMEDIA_TYPE_SUBTITLE ? NULL : enc_open);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
ms->sch_idx_enc = ret;
|
||||
|
||||
ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc, ost);
|
||||
ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
|
|
@ -1297,21 +1262,21 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
if (!ms->pkt)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (ost->enc) {
|
||||
if (ost->enc_ctx) {
|
||||
AVIOContext *s = NULL;
|
||||
char *buf = NULL, *arg = NULL;
|
||||
const char *enc_stats_pre = NULL, *enc_stats_post = NULL, *mux_stats = NULL;
|
||||
const char *enc_time_base = NULL, *preset = NULL;
|
||||
|
||||
ret = filter_codec_opts(o->g->codec_opts, enc->id,
|
||||
oc, st, enc, &encoder_opts,
|
||||
ret = filter_codec_opts(o->g->codec_opts, ost->enc_ctx->codec_id,
|
||||
oc, st, ost->enc_ctx->codec, &encoder_opts,
|
||||
&mux->enc_opts_used);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
opt_match_per_stream_str(ost, &o->presets, oc, st, &preset);
|
||||
opt_match_per_stream_int(ost, &o->autoscale, oc, st, &autoscale);
|
||||
if (preset && (!(ret = get_preset_file_2(preset, enc->name, &s)))) {
|
||||
if (preset && (!(ret = get_preset_file_2(preset, ost->enc_ctx->codec->name, &s)))) {
|
||||
AVBPrint bprint;
|
||||
av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
|
||||
do {
|
||||
|
|
@ -1411,7 +1376,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
|
||||
threads_manual = !!av_dict_get(encoder_opts, "threads", NULL, 0);
|
||||
|
||||
ret = av_opt_set_dict2(ost->enc->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
ret = av_opt_set_dict2(ost->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(ost, AV_LOG_ERROR, "Error applying encoder options: %s\n",
|
||||
av_err2str(ret));
|
||||
|
|
@ -1424,7 +1389,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
|
||||
// default to automatic thread count
|
||||
if (!threads_manual)
|
||||
ost->enc->enc_ctx->thread_count = 0;
|
||||
ost->enc_ctx->thread_count = 0;
|
||||
} else {
|
||||
ret = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st,
|
||||
NULL, &encoder_opts,
|
||||
|
|
@ -1436,14 +1401,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
|
||||
if (o->bitexact) {
|
||||
ost->bitexact = 1;
|
||||
} else if (ost->enc) {
|
||||
ost->bitexact = !!(ost->enc->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT);
|
||||
}
|
||||
|
||||
if (enc) {
|
||||
ret = set_encoder_id(ost, enc);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
} else if (ost->enc_ctx) {
|
||||
ost->bitexact = !!(ost->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT);
|
||||
}
|
||||
|
||||
opt_match_per_stream_str(ost, &o->time_bases, oc, st, &time_base);
|
||||
|
|
@ -1488,15 +1447,15 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
tag = AV_RL32(buf);
|
||||
}
|
||||
ost->st->codecpar->codec_tag = tag;
|
||||
ms->par_in->codec_tag = tag;
|
||||
if (ost->enc)
|
||||
ost->enc->enc_ctx->codec_tag = tag;
|
||||
ost->par_in->codec_tag = tag;
|
||||
if (ost->enc_ctx)
|
||||
ost->enc_ctx->codec_tag = tag;
|
||||
}
|
||||
|
||||
opt_match_per_stream_dbl(ost, &o->qscale, oc, st, &qscale);
|
||||
if (ost->enc && qscale >= 0) {
|
||||
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
ost->enc->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
if (ost->enc_ctx && qscale >= 0) {
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
|
||||
ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
|
||||
}
|
||||
|
||||
if (ms->sch_idx >= 0) {
|
||||
|
|
@ -1518,8 +1477,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
opt_match_per_stream_int(ost, &o->fix_sub_duration_heartbeat,
|
||||
oc, st, &ost->fix_sub_duration_heartbeat);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc)
|
||||
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc_ctx)
|
||||
ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
opt_match_per_stream_int(ost, &o->copy_initial_nonkeyframes,
|
||||
oc, st, &ms->copy_initial_nonkeyframes);
|
||||
|
|
@ -1531,43 +1490,48 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
|
|||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO) {
|
||||
ret = ost_get_filters(o, oc, ost, &filters);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (ost->enc &&
|
||||
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
|
||||
ret = ost_bind_filter(mux, ms, ofilter, o, enc_tb, vsync_method,
|
||||
keep_pix_fmt, autoscale, threads_manual, vs, &src);
|
||||
ret = ost_bind_filter(mux, ms, ofilter, o, filters, enc_tb, vsync_method,
|
||||
keep_pix_fmt, autoscale, threads_manual, vs);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else if (ost->ist) {
|
||||
ret = ist_use(ost->ist, !!ost->enc, NULL, &src);
|
||||
if (ret < 0) {
|
||||
int sched_idx = ist_output_add(ost->ist, ost);
|
||||
if (sched_idx < 0) {
|
||||
av_log(ost, AV_LOG_ERROR,
|
||||
"Error binding an input stream\n");
|
||||
ret = sched_idx;
|
||||
goto fail;
|
||||
}
|
||||
ms->sch_idx_src = src.idx;
|
||||
ms->sch_idx_src = sched_idx;
|
||||
|
||||
// src refers to a decoder for transcoding, demux stream otherwise
|
||||
if (ost->enc) {
|
||||
ret = sch_connect(mux->sch,
|
||||
src, SCH_ENC(ms->sch_idx_enc));
|
||||
ret = sch_connect(mux->sch, SCH_DEC_OUT(sched_idx, 0),
|
||||
SCH_ENC(ms->sch_idx_enc));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
src = SCH_ENC(ms->sch_idx_enc);
|
||||
}
|
||||
}
|
||||
|
||||
if (src.type != SCH_NODE_TYPE_NONE) {
|
||||
ret = sch_connect(mux->sch,
|
||||
src, SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else {
|
||||
// only attachment streams don't have a source
|
||||
av_assert0(type == AVMEDIA_TYPE_ATTACHMENT && ms->sch_idx < 0);
|
||||
ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
|
||||
SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
} else {
|
||||
ret = sch_connect(mux->sch, SCH_DSTREAM(ost->ist->file->index, sched_idx),
|
||||
SCH_MSTREAM(ost->file->index, ms->sch_idx));
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
if (ost->ist && !ost->enc) {
|
||||
ret = streamcopy_init(o, mux, ost, &encoder_opts);
|
||||
ret = streamcopy_init(mux, ost, &encoder_opts);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
|
|
@ -1593,7 +1557,7 @@ static int map_auto_video(Muxer *mux, const OptionsContext *o)
|
|||
{
|
||||
AVFormatContext *oc = mux->fc;
|
||||
InputStream *best_ist = NULL;
|
||||
int64_t best_score = 0;
|
||||
int best_score = 0;
|
||||
int qcr;
|
||||
|
||||
/* video: highest resolution */
|
||||
|
|
@ -1604,16 +1568,16 @@ static int map_auto_video(Muxer *mux, const OptionsContext *o)
|
|||
for (int j = 0; j < nb_input_files; j++) {
|
||||
InputFile *ifile = input_files[j];
|
||||
InputStream *file_best_ist = NULL;
|
||||
int64_t file_best_score = 0;
|
||||
int file_best_score = 0;
|
||||
for (int i = 0; i < ifile->nb_streams; i++) {
|
||||
InputStream *ist = ifile->streams[i];
|
||||
int64_t score;
|
||||
int score;
|
||||
|
||||
if (ist->user_set_discard == AVDISCARD_ALL ||
|
||||
ist->st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
|
||||
continue;
|
||||
|
||||
score = ist->st->codecpar->width * (int64_t)ist->st->codecpar->height
|
||||
score = ist->st->codecpar->width * ist->st->codecpar->height
|
||||
+ 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
|
||||
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
|
||||
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
|
||||
|
|
@ -1835,7 +1799,6 @@ loop_end:
|
|||
|
||||
static int of_add_attachments(Muxer *mux, const OptionsContext *o)
|
||||
{
|
||||
MuxStream *ms;
|
||||
OutputStream *ost;
|
||||
int err;
|
||||
|
||||
|
|
@ -1903,11 +1866,9 @@ read_fail:
|
|||
return err;
|
||||
}
|
||||
|
||||
ms = ms_from_ost(ost);
|
||||
|
||||
ost->attachment_filename = attachment_filename;
|
||||
ms->par_in->extradata = attachment;
|
||||
ms->par_in->extradata_size = len;
|
||||
ost->par_in->extradata = attachment;
|
||||
ost->par_in->extradata_size = len;
|
||||
|
||||
p = strrchr(o->attachments[i], '/');
|
||||
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
|
||||
|
|
@ -2048,7 +2009,7 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
|
|||
int limit_frames = 0, limit_frames_av_enc = 0;
|
||||
|
||||
#define IS_AV_ENC(ost, type) \
|
||||
(ost->enc && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
|
||||
(ost->enc_ctx && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
|
||||
#define IS_INTERLEAVED(type) (type != AVMEDIA_TYPE_ATTACHMENT)
|
||||
|
||||
for (int i = 0; i < oc->nb_streams; i++) {
|
||||
|
|
@ -2060,8 +2021,8 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
|
|||
|
||||
nb_interleaved += IS_INTERLEAVED(type);
|
||||
nb_av_enc += IS_AV_ENC(ost, type);
|
||||
nb_audio_fs += (ost->enc && type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE));
|
||||
nb_audio_fs += (ost->enc_ctx && type == AVMEDIA_TYPE_AUDIO &&
|
||||
!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE));
|
||||
|
||||
limit_frames |= ms->max_frames < INT64_MAX;
|
||||
limit_frames_av_enc |= (ms->max_frames < INT64_MAX) && IS_AV_ENC(ost, type);
|
||||
|
|
@ -3016,6 +2977,9 @@ static int copy_meta(Muxer *mux, const OptionsContext *o)
|
|||
if (!ost->ist) /* this is true e.g. for attached files */
|
||||
continue;
|
||||
av_dict_copy(&ost->st->metadata, ost->ist->st->metadata, AV_DICT_DONT_OVERWRITE);
|
||||
if (ost->enc_ctx) {
|
||||
av_dict_set(&ost->st->metadata, "encoder", NULL, 0);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
@ -3092,7 +3056,7 @@ finish:
|
|||
return ret;
|
||||
}
|
||||
|
||||
static const char *const forced_keyframes_const_names[] = {
|
||||
const char *const forced_keyframes_const_names[] = {
|
||||
"n",
|
||||
"n_forced",
|
||||
"prev_forced_n",
|
||||
|
|
@ -3194,7 +3158,7 @@ static int process_forced_keyframes(Muxer *mux, const OptionsContext *o)
|
|||
mux->fc, ost->st, &forced_keyframes);
|
||||
|
||||
if (!(ost->type == AVMEDIA_TYPE_VIDEO &&
|
||||
ost->enc && forced_keyframes))
|
||||
ost->enc_ctx && forced_keyframes))
|
||||
continue;
|
||||
|
||||
if (!strncmp(forced_keyframes, "expr:", 5)) {
|
||||
|
|
@ -3421,7 +3385,7 @@ int of_open(const OptionsContext *o, const char *filename, Scheduler *sch)
|
|||
OutputStream *ost = of->streams[i];
|
||||
|
||||
if (!ost->enc) {
|
||||
err = of_stream_init(of, ost, NULL);
|
||||
err = of_stream_init(of, ost);
|
||||
if (err < 0)
|
||||
return err;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -47,12 +47,12 @@
|
|||
#include "libavutil/opt.h"
|
||||
#include "libavutil/parseutils.h"
|
||||
#include "libavutil/stereo3d.h"
|
||||
#include "graph/graphprint.h"
|
||||
|
||||
HWDevice *filter_hw_device;
|
||||
|
||||
char *vstats_filename;
|
||||
|
||||
float audio_drift_threshold = 0.1;
|
||||
float dts_delta_threshold = 10;
|
||||
float dts_error_threshold = 3600*30;
|
||||
|
||||
|
|
@ -75,11 +75,7 @@ int stdin_interaction = 1;
|
|||
float max_error_rate = 2.0/3;
|
||||
char *filter_nbthreads;
|
||||
int filter_complex_nbthreads = 0;
|
||||
int filter_buffered_frames = 0;
|
||||
int vstats_version = 2;
|
||||
int print_graphs = 0;
|
||||
char *print_graphs_file = NULL;
|
||||
char *print_graphs_format = NULL;
|
||||
int auto_conversion_filters = 1;
|
||||
int64_t stats_period = 500000;
|
||||
|
||||
|
|
@ -90,15 +86,6 @@ int ignore_unknown_streams = 0;
|
|||
int copy_unknown_streams = 0;
|
||||
int recast_media = 0;
|
||||
|
||||
// this struct is passed as the optctx argument
|
||||
// to func_arg() for global options
|
||||
typedef struct GlobalOptionsContext {
|
||||
Scheduler *sch;
|
||||
|
||||
char **filtergraphs;
|
||||
int nb_filtergraphs;
|
||||
} GlobalOptionsContext;
|
||||
|
||||
static void uninit_options(OptionsContext *o)
|
||||
{
|
||||
/* all OPT_SPEC and OPT_TYPE_STRING can be freed in generic way */
|
||||
|
|
@ -358,7 +345,7 @@ static void correct_input_start_times(void)
|
|||
if (copy_ts && start_at_zero)
|
||||
ifile->ts_offset = -new_start_time;
|
||||
else if (!copy_ts) {
|
||||
abs_start_seek = is->start_time + ((ifile->start_time != AV_NOPTS_VALUE) ? ifile->start_time : 0);
|
||||
abs_start_seek = is->start_time + (ifile->start_time != AV_NOPTS_VALUE) ? ifile->start_time : 0;
|
||||
ifile->ts_offset = abs_start_seek > new_start_time ? -abs_start_seek : -new_start_time;
|
||||
} else if (copy_ts)
|
||||
ifile->ts_offset = 0;
|
||||
|
|
@ -624,8 +611,8 @@ static int opt_attach(void *optctx, const char *opt, const char *arg)
|
|||
|
||||
static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
GlobalOptionsContext *go = optctx;
|
||||
return sch_sdp_filename(go->sch, arg);
|
||||
Scheduler *sch = optctx;
|
||||
return sch_sdp_filename(sch, arg);
|
||||
}
|
||||
|
||||
#if CONFIG_VAAPI
|
||||
|
|
@ -1163,46 +1150,26 @@ static int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
|
|||
|
||||
static int opt_filter_complex(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
GlobalOptionsContext *go = optctx;
|
||||
char *graph_desc;
|
||||
int ret;
|
||||
|
||||
graph_desc = av_strdup(arg);
|
||||
Scheduler *sch = optctx;
|
||||
char *graph_desc = av_strdup(arg);
|
||||
if (!graph_desc)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
ret = GROW_ARRAY(go->filtergraphs, go->nb_filtergraphs);
|
||||
if (ret < 0) {
|
||||
av_freep(&graph_desc);
|
||||
return ret;
|
||||
}
|
||||
go->filtergraphs[go->nb_filtergraphs - 1] = graph_desc;
|
||||
|
||||
return 0;
|
||||
return fg_create(NULL, graph_desc, sch);
|
||||
}
|
||||
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
static int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
|
||||
{
|
||||
GlobalOptionsContext *go = optctx;
|
||||
char *graph_desc;
|
||||
int ret;
|
||||
|
||||
graph_desc = file_read(arg);
|
||||
Scheduler *sch = optctx;
|
||||
char *graph_desc = file_read(arg);
|
||||
if (!graph_desc)
|
||||
return AVERROR(EINVAL);
|
||||
|
||||
av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -/filter_complex %s instead\n",
|
||||
opt, arg);
|
||||
|
||||
ret = GROW_ARRAY(go->filtergraphs, go->nb_filtergraphs);
|
||||
if (ret < 0) {
|
||||
av_freep(&graph_desc);
|
||||
return ret;
|
||||
}
|
||||
go->filtergraphs[go->nb_filtergraphs - 1] = graph_desc;
|
||||
|
||||
return 0;
|
||||
return fg_create(NULL, graph_desc, sch);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
@ -1379,7 +1346,6 @@ static int open_files(OptionGroupList *l, const char *inout, Scheduler *sch,
|
|||
|
||||
int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
|
||||
{
|
||||
GlobalOptionsContext go = { .sch = sch };
|
||||
OptionParseContext octx;
|
||||
const char *errmsg = NULL;
|
||||
int ret;
|
||||
|
|
@ -1395,7 +1361,7 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
|
|||
}
|
||||
|
||||
/* apply global options */
|
||||
ret = parse_optgroup(&go, &octx.global_opts, options);
|
||||
ret = parse_optgroup(sch, &octx.global_opts, options);
|
||||
if (ret < 0) {
|
||||
errmsg = "parsing global options";
|
||||
goto fail;
|
||||
|
|
@ -1404,14 +1370,6 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
|
|||
/* configure terminal and setup signal handlers */
|
||||
term_init();
|
||||
|
||||
/* create complex filtergraphs */
|
||||
for (int i = 0; i < go.nb_filtergraphs; i++) {
|
||||
ret = fg_create(NULL, go.filtergraphs[i], sch);
|
||||
go.filtergraphs[i] = NULL;
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* open input files */
|
||||
ret = open_files(&octx.groups[GROUP_INFILE], "input", sch, ifile_open);
|
||||
if (ret < 0) {
|
||||
|
|
@ -1447,10 +1405,6 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
|
|||
goto fail;
|
||||
|
||||
fail:
|
||||
for (int i = 0; i < go.nb_filtergraphs; i++)
|
||||
av_freep(&go.filtergraphs[i]);
|
||||
av_freep(&go.filtergraphs);
|
||||
|
||||
uninit_parse_context(&octx);
|
||||
if (ret < 0 && ret != AVERROR_EXIT) {
|
||||
av_log(NULL, AV_LOG_FATAL, "Error %s: %s\n",
|
||||
|
|
@ -1512,6 +1466,7 @@ static int opt_adrift_threshold(void *optctx, const char *opt, const char *arg)
|
|||
}
|
||||
#endif
|
||||
|
||||
static const char *const alt_bsf[] = { "absf", "vbsf", NULL };
|
||||
static const char *const alt_channel_layout[] = { "ch_layout", NULL};
|
||||
static const char *const alt_codec[] = { "c", "acodec", "vcodec", "scodec", "dcodec", NULL };
|
||||
static const char *const alt_filter[] = { "af", "vf", NULL };
|
||||
|
|
@ -1643,9 +1598,6 @@ const OptionDef options[] = {
|
|||
{ "readrate_initial_burst", OPT_TYPE_DOUBLE, OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
|
||||
{ .off = OFFSET(readrate_initial_burst) },
|
||||
"The initial amount of input to burst read before imposing any readrate", "seconds" },
|
||||
{ "readrate_catchup", OPT_TYPE_FLOAT, OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
|
||||
{ .off = OFFSET(readrate_catchup) },
|
||||
"Temporary readrate used to catch up if an input lags behind the specified readrate", "speed" },
|
||||
{ "target", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_PERFILE | OPT_EXPERT | OPT_OUTPUT,
|
||||
{ .func_arg = opt_target },
|
||||
"specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
|
||||
|
|
@ -1715,9 +1667,6 @@ const OptionDef options[] = {
|
|||
{ "filter_threads", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT,
|
||||
{ .func_arg = opt_filter_threads },
|
||||
"number of non-complex filter threads" },
|
||||
{ "filter_buffered_frames", OPT_TYPE_INT, OPT_EXPERT,
|
||||
{ &filter_buffered_frames },
|
||||
"maximum number of buffered frames in a filter graph" },
|
||||
#if FFMPEG_OPT_FILTER_SCRIPT
|
||||
{ "filter_script", OPT_TYPE_STRING, OPT_PERSTREAM | OPT_EXPERT | OPT_OUTPUT,
|
||||
{ .off = OFFSET(filter_scripts) },
|
||||
|
|
@ -1726,9 +1675,6 @@ const OptionDef options[] = {
|
|||
{ "reinit_filter", OPT_TYPE_INT, OPT_PERSTREAM | OPT_INPUT | OPT_EXPERT,
|
||||
{ .off = OFFSET(reinit_filters) },
|
||||
"reinit filtergraph on input parameter changes", "" },
|
||||
{ "drop_changed", OPT_TYPE_INT, OPT_PERSTREAM | OPT_INPUT | OPT_EXPERT,
|
||||
{ .off = OFFSET(drop_changed) },
|
||||
"drop frame instead of reiniting filtergraph on input parameter changes", "" },
|
||||
{ "filter_complex", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT,
|
||||
{ .func_arg = opt_filter_complex },
|
||||
"create a complex filtergraph", "graph_description" },
|
||||
|
|
@ -1743,15 +1689,6 @@ const OptionDef options[] = {
|
|||
{ .func_arg = opt_filter_complex_script },
|
||||
"deprecated, use -/filter_complex instead", "filename" },
|
||||
#endif
|
||||
{ "print_graphs", OPT_TYPE_BOOL, 0,
|
||||
{ &print_graphs },
|
||||
"print execution graph data to stderr" },
|
||||
{ "print_graphs_file", OPT_TYPE_STRING, 0,
|
||||
{ &print_graphs_file },
|
||||
"write execution graph data to the specified file", "filename" },
|
||||
{ "print_graphs_format", OPT_TYPE_STRING, 0,
|
||||
{ &print_graphs_format },
|
||||
"set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml, mermaid, mermaidhtml)", "format" },
|
||||
{ "auto_conversion_filters", OPT_TYPE_BOOL, OPT_EXPERT,
|
||||
{ &auto_conversion_filters },
|
||||
"enable automatic conversion filters globally" },
|
||||
|
|
|
|||
|
|
@ -285,9 +285,8 @@ struct Scheduler {
|
|||
pthread_mutex_t mux_ready_lock;
|
||||
|
||||
unsigned nb_mux_done;
|
||||
unsigned task_failed;
|
||||
pthread_mutex_t finish_lock;
|
||||
pthread_cond_t finish_cond;
|
||||
pthread_mutex_t mux_done_lock;
|
||||
pthread_cond_t mux_done_cond;
|
||||
|
||||
|
||||
SchDec *dec;
|
||||
|
|
@ -307,6 +306,7 @@ struct Scheduler {
|
|||
|
||||
enum SchedulerState state;
|
||||
atomic_int terminate;
|
||||
atomic_int task_failed;
|
||||
|
||||
pthread_mutex_t schedule_lock;
|
||||
|
||||
|
|
@ -375,6 +375,7 @@ static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_si
|
|||
enum QueueType type)
|
||||
{
|
||||
ThreadQueue *tq;
|
||||
ObjPool *op;
|
||||
|
||||
if (queue_size <= 0) {
|
||||
if (type == QUEUE_FRAMES)
|
||||
|
|
@ -392,11 +393,18 @@ static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_si
|
|||
av_assert0(queue_size == DEFAULT_FRAME_THREAD_QUEUE_SIZE);
|
||||
}
|
||||
|
||||
tq = tq_alloc(nb_streams, queue_size,
|
||||
(type == QUEUE_PACKETS) ? THREAD_QUEUE_PACKETS : THREAD_QUEUE_FRAMES);
|
||||
if (!tq)
|
||||
op = (type == QUEUE_PACKETS) ? objpool_alloc_packets() :
|
||||
objpool_alloc_frames();
|
||||
if (!op)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
tq = tq_alloc(nb_streams, queue_size, op,
|
||||
(type == QUEUE_PACKETS) ? pkt_move : frame_move);
|
||||
if (!tq) {
|
||||
objpool_free(&op);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
*ptq = tq;
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -563,8 +571,8 @@ void sch_free(Scheduler **psch)
|
|||
|
||||
pthread_mutex_destroy(&sch->mux_ready_lock);
|
||||
|
||||
pthread_mutex_destroy(&sch->finish_lock);
|
||||
pthread_cond_destroy(&sch->finish_cond);
|
||||
pthread_mutex_destroy(&sch->mux_done_lock);
|
||||
pthread_cond_destroy(&sch->mux_done_cond);
|
||||
|
||||
av_freep(psch);
|
||||
}
|
||||
|
|
@ -594,11 +602,11 @@ Scheduler *sch_alloc(void)
|
|||
if (ret)
|
||||
goto fail;
|
||||
|
||||
ret = pthread_mutex_init(&sch->finish_lock, NULL);
|
||||
ret = pthread_mutex_init(&sch->mux_done_lock, NULL);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
ret = pthread_cond_init(&sch->finish_cond, NULL);
|
||||
ret = pthread_cond_init(&sch->mux_done_cond, NULL);
|
||||
if (ret)
|
||||
goto fail;
|
||||
|
||||
|
|
@ -1103,52 +1111,22 @@ static int mux_task_start(SchMux *mux)
|
|||
return ret;
|
||||
|
||||
/* flush the pre-muxing queues */
|
||||
while (1) {
|
||||
int min_stream = -1;
|
||||
Timestamp min_ts = { .ts = AV_NOPTS_VALUE };
|
||||
|
||||
for (unsigned i = 0; i < mux->nb_streams; i++) {
|
||||
SchMuxStream *ms = &mux->streams[i];
|
||||
AVPacket *pkt;
|
||||
|
||||
// find the stream with the earliest dts or EOF in pre-muxing queue
|
||||
for (unsigned i = 0; i < mux->nb_streams; i++) {
|
||||
SchMuxStream *ms = &mux->streams[i];
|
||||
|
||||
if (av_fifo_peek(ms->pre_mux_queue.fifo, &pkt, 1, 0) < 0)
|
||||
continue;
|
||||
|
||||
if (!pkt || pkt->dts == AV_NOPTS_VALUE) {
|
||||
min_stream = i;
|
||||
break;
|
||||
}
|
||||
|
||||
if (min_ts.ts == AV_NOPTS_VALUE ||
|
||||
av_compare_ts(min_ts.ts, min_ts.tb, pkt->dts, pkt->time_base) > 0) {
|
||||
min_stream = i;
|
||||
min_ts = (Timestamp){ .ts = pkt->dts, .tb = pkt->time_base };
|
||||
}
|
||||
}
|
||||
|
||||
if (min_stream >= 0) {
|
||||
SchMuxStream *ms = &mux->streams[min_stream];
|
||||
|
||||
ret = av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1);
|
||||
av_assert0(ret >= 0);
|
||||
|
||||
while (av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1) >= 0) {
|
||||
if (pkt) {
|
||||
if (!ms->init_eof)
|
||||
ret = tq_send(mux->queue, min_stream, pkt);
|
||||
ret = tq_send(mux->queue, i, pkt);
|
||||
av_packet_free(&pkt);
|
||||
if (ret == AVERROR_EOF)
|
||||
ms->init_eof = 1;
|
||||
else if (ret < 0)
|
||||
return ret;
|
||||
} else
|
||||
tq_send_finish(mux->queue, min_stream);
|
||||
|
||||
continue;
|
||||
tq_send_finish(mux->queue, i);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
atomic_store(&mux->mux_started, 1);
|
||||
|
|
@ -1656,27 +1634,29 @@ fail:
|
|||
|
||||
int sch_wait(Scheduler *sch, uint64_t timeout_us, int64_t *transcode_ts)
|
||||
{
|
||||
int ret;
|
||||
int ret, err;
|
||||
|
||||
// convert delay to absolute timestamp
|
||||
timeout_us += av_gettime();
|
||||
|
||||
pthread_mutex_lock(&sch->finish_lock);
|
||||
pthread_mutex_lock(&sch->mux_done_lock);
|
||||
|
||||
if (sch->nb_mux_done < sch->nb_mux) {
|
||||
struct timespec tv = { .tv_sec = timeout_us / 1000000,
|
||||
.tv_nsec = (timeout_us % 1000000) * 1000 };
|
||||
pthread_cond_timedwait(&sch->finish_cond, &sch->finish_lock, &tv);
|
||||
pthread_cond_timedwait(&sch->mux_done_cond, &sch->mux_done_lock, &tv);
|
||||
}
|
||||
|
||||
// abort transcoding if any task failed
|
||||
ret = sch->nb_mux_done == sch->nb_mux || sch->task_failed;
|
||||
ret = sch->nb_mux_done == sch->nb_mux;
|
||||
|
||||
pthread_mutex_unlock(&sch->finish_lock);
|
||||
pthread_mutex_unlock(&sch->mux_done_lock);
|
||||
|
||||
*transcode_ts = atomic_load(&sch->last_dts);
|
||||
|
||||
return ret;
|
||||
// abort transcoding if any task failed
|
||||
err = atomic_load(&sch->task_failed);
|
||||
|
||||
return ret || err;
|
||||
}
|
||||
|
||||
static int enc_open(Scheduler *sch, SchEnc *enc, const AVFrame *frame)
|
||||
|
|
@ -1836,7 +1816,7 @@ static int mux_queue_packet(SchMux *mux, SchMuxStream *ms, AVPacket *pkt)
|
|||
if (new_size <= packets) {
|
||||
av_log(mux, AV_LOG_ERROR,
|
||||
"Too many packets buffered for output stream.\n");
|
||||
return AVERROR_BUFFER_TOO_SMALL;
|
||||
return AVERROR(ENOSPC);
|
||||
}
|
||||
ret = av_fifo_grow2(q->fifo, new_size - packets);
|
||||
if (ret < 0)
|
||||
|
|
@ -1899,7 +1879,7 @@ static int send_to_mux(Scheduler *sch, SchMux *mux, unsigned stream_idx,
|
|||
|
||||
update_schedule:
|
||||
// TODO: use atomics to check whether this changes trailing dts
|
||||
// to avoid locking unnecessarily
|
||||
// to avoid locking unnecesarily
|
||||
if (dts != AV_NOPTS_VALUE || !pkt) {
|
||||
pthread_mutex_lock(&sch->schedule_lock);
|
||||
|
||||
|
|
@ -2142,14 +2122,14 @@ static int mux_done(Scheduler *sch, unsigned mux_idx)
|
|||
|
||||
pthread_mutex_unlock(&sch->schedule_lock);
|
||||
|
||||
pthread_mutex_lock(&sch->finish_lock);
|
||||
pthread_mutex_lock(&sch->mux_done_lock);
|
||||
|
||||
av_assert0(sch->nb_mux_done < sch->nb_mux);
|
||||
sch->nb_mux_done++;
|
||||
|
||||
pthread_cond_signal(&sch->finish_cond);
|
||||
pthread_cond_signal(&sch->mux_done_cond);
|
||||
|
||||
pthread_mutex_unlock(&sch->finish_lock);
|
||||
pthread_mutex_unlock(&sch->mux_done_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -2542,12 +2522,8 @@ static void *task_wrapper(void *arg)
|
|||
// EOF is considered normal termination
|
||||
if (ret == AVERROR_EOF)
|
||||
ret = 0;
|
||||
if (ret < 0) {
|
||||
pthread_mutex_lock(&sch->finish_lock);
|
||||
sch->task_failed = 1;
|
||||
pthread_cond_signal(&sch->finish_cond);
|
||||
pthread_mutex_unlock(&sch->finish_lock);
|
||||
}
|
||||
if (ret < 0)
|
||||
atomic_store(&sch->task_failed, 1);
|
||||
|
||||
av_log(task->func_arg, ret < 0 ? AV_LOG_ERROR : AV_LOG_VERBOSE,
|
||||
"Terminating thread with return code %d (%s)\n", ret,
|
||||
|
|
|
|||
|
|
@ -355,7 +355,7 @@ enum DemuxSendFlags {
|
|||
* @retval "non-negative value" success
|
||||
* @retval AVERROR_EOF all consumers for the stream are done
|
||||
* @retval AVERROR_EXIT all consumers are done, should terminate demuxing
|
||||
* @retval "another negative error code" other failure
|
||||
* @retval "anoter negative error code" other failure
|
||||
*/
|
||||
int sch_demux_send(Scheduler *sch, unsigned demux_idx, struct AVPacket *pkt,
|
||||
unsigned flags);
|
||||
|
|
@ -436,7 +436,7 @@ void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
|
|||
*
|
||||
* @retval "non-negative value" success
|
||||
* @retval AVERROR_EOF all consumers are done
|
||||
* @retval "another negative error code" other failure
|
||||
* @retval "anoter negative error code" other failure
|
||||
*/
|
||||
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx,
|
||||
struct AVFrame *frame);
|
||||
|
|
|
|||
|
|
@ -44,20 +44,14 @@ static inline int err_merge(int err0, int err1)
|
|||
return (err0 < 0) ? err0 : FFMIN(err1, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
|
||||
* It does not clear dst beforehand. */
|
||||
static inline int clone_side_data(AVFrameSideData ***dst, int *nb_dst,
|
||||
AVFrameSideData * const *src, int nb_src,
|
||||
unsigned int flags)
|
||||
static inline void pkt_move(void *dst, void *src)
|
||||
{
|
||||
for (int i = 0; i < nb_src; i++) {
|
||||
int ret = av_frame_side_data_clone(dst, nb_dst, src[i], flags);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
av_packet_move_ref(dst, src);
|
||||
}
|
||||
|
||||
return 0;
|
||||
static inline void frame_move(void *dst, void *src)
|
||||
{
|
||||
av_frame_move_ref(dst, src);
|
||||
}
|
||||
|
||||
#endif // FFTOOLS_FFMPEG_UTILS_H
|
||||
|
|
|
|||
|
|
@ -388,6 +388,7 @@ static const struct TextureFormatEntry {
|
|||
{ AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
|
||||
{ AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
|
||||
{ AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
|
||||
{ AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
|
||||
};
|
||||
|
||||
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
|
||||
|
|
@ -894,7 +895,7 @@ static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_B
|
|||
format == AV_PIX_FMT_BGR32 ||
|
||||
format == AV_PIX_FMT_BGR32_1)
|
||||
*sdl_blendmode = SDL_BLENDMODE_BLEND;
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) {
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
|
||||
if (format == sdl_texture_format_map[i].format) {
|
||||
*sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
|
||||
return;
|
||||
|
|
@ -940,6 +941,7 @@ static enum AVColorSpace sdl_supported_color_spaces[] = {
|
|||
AVCOL_SPC_BT709,
|
||||
AVCOL_SPC_BT470BG,
|
||||
AVCOL_SPC_SMPTE170M,
|
||||
AVCOL_SPC_UNSPECIFIED,
|
||||
};
|
||||
|
||||
static void set_sdl_yuv_conversion_mode(AVFrame *frame)
|
||||
|
|
@ -1859,6 +1861,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
|||
{
|
||||
enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
|
||||
char sws_flags_str[512] = "";
|
||||
char buffersrc_args[256];
|
||||
int ret;
|
||||
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
|
||||
AVCodecParameters *codecpar = is->video_st->codecpar;
|
||||
|
|
@ -1872,13 +1875,14 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
|||
return AVERROR(ENOMEM);
|
||||
|
||||
for (i = 0; i < renderer_info.num_texture_formats; i++) {
|
||||
for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) {
|
||||
for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
|
||||
if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
|
||||
pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
|
||||
|
||||
while ((e = av_dict_iterate(sws_dict, e))) {
|
||||
if (!strcmp(e->key, "sws_flags")) {
|
||||
|
|
@ -1891,49 +1895,36 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
|
|||
|
||||
graph->scale_sws_opts = av_strdup(sws_flags_str);
|
||||
|
||||
snprintf(buffersrc_args, sizeof(buffersrc_args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
|
||||
"colorspace=%d:range=%d",
|
||||
frame->width, frame->height, frame->format,
|
||||
is->video_st->time_base.num, is->video_st->time_base.den,
|
||||
codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
|
||||
frame->colorspace, frame->color_range);
|
||||
if (fr.num && fr.den)
|
||||
av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
|
||||
|
||||
filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"),
|
||||
"ffplay_buffer");
|
||||
if (!filt_src) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
if ((ret = avfilter_graph_create_filter(&filt_src,
|
||||
avfilter_get_by_name("buffer"),
|
||||
"ffplay_buffer", buffersrc_args, NULL,
|
||||
graph)) < 0)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
par->format = frame->format;
|
||||
par->time_base = is->video_st->time_base;
|
||||
par->width = frame->width;
|
||||
par->height = frame->height;
|
||||
par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
|
||||
par->color_space = frame->colorspace;
|
||||
par->color_range = frame->color_range;
|
||||
par->frame_rate = fr;
|
||||
par->hw_frames_ctx = frame->hw_frames_ctx;
|
||||
ret = av_buffersrc_parameters_set(filt_src, par);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
ret = avfilter_init_dict(filt_src, NULL);
|
||||
ret = avfilter_graph_create_filter(&filt_out,
|
||||
avfilter_get_by_name("buffersink"),
|
||||
"ffplay_buffersink", NULL, NULL, graph);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
|
||||
filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"),
|
||||
"ffplay_buffersink");
|
||||
if (!filt_out) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
|
||||
0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
|
||||
if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto fail;
|
||||
if (!vk_renderer &&
|
||||
(ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN,
|
||||
0, FF_ARRAY_ELEMS(sdl_supported_color_spaces),
|
||||
AV_OPT_TYPE_INT, sdl_supported_color_spaces)) < 0)
|
||||
goto fail;
|
||||
|
||||
ret = avfilter_init_dict(filt_out, NULL);
|
||||
if (ret < 0)
|
||||
(ret = av_opt_set_int_list(filt_out, "color_spaces", sdl_supported_color_spaces, AVCOL_SPC_UNSPECIFIED, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto fail;
|
||||
|
||||
last_filter = filt_out;
|
||||
|
|
@ -2003,6 +1994,8 @@ fail:
|
|||
|
||||
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
|
||||
{
|
||||
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
|
||||
int sample_rates[2] = { 0, -1 };
|
||||
AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
|
||||
char aresample_swr_opts[512] = "";
|
||||
const AVDictionaryEntry *e = NULL;
|
||||
|
|
@ -2036,28 +2029,30 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
|
|||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
|
||||
"ffplay_abuffersink");
|
||||
if (!filt_asink) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
ret = avfilter_graph_create_filter(&filt_asink,
|
||||
avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
|
||||
NULL, NULL, is->agraph);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto end;
|
||||
if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto end;
|
||||
|
||||
if (force_output_format) {
|
||||
if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN,
|
||||
0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0)
|
||||
av_bprint_clear(&bp);
|
||||
av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
|
||||
sample_rates [0] = is->audio_tgt.freq;
|
||||
if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto end;
|
||||
if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN,
|
||||
0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0)
|
||||
if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto end;
|
||||
if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avfilter_init_dict(filt_asink, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
|
||||
goto end;
|
||||
|
|
|
|||
|
|
@ -259,8 +259,8 @@ static int create_vk_by_hwcontext(VkRenderer *renderer,
|
|||
|
||||
ctx->get_proc_addr = hwctx->get_proc_addr;
|
||||
ctx->inst = hwctx->inst;
|
||||
|
||||
struct pl_vulkan_import_params import_params = {
|
||||
ctx->placebo_vulkan = pl_vulkan_import(ctx->vk_log,
|
||||
pl_vulkan_import_params(
|
||||
.instance = hwctx->inst,
|
||||
.get_proc_addr = hwctx->get_proc_addr,
|
||||
.phys_device = hwctx->phys_dev,
|
||||
|
|
@ -272,36 +272,18 @@ static int create_vk_by_hwcontext(VkRenderer *renderer,
|
|||
.unlock_queue = hwctx_unlock_queue,
|
||||
.queue_ctx = dev,
|
||||
.queue_graphics = {
|
||||
.index = VK_QUEUE_FAMILY_IGNORED,
|
||||
.count = 0,
|
||||
.index = hwctx->queue_family_index,
|
||||
.count = hwctx->nb_graphics_queues,
|
||||
},
|
||||
.queue_compute = {
|
||||
.index = VK_QUEUE_FAMILY_IGNORED,
|
||||
.count = 0,
|
||||
.index = hwctx->queue_family_comp_index,
|
||||
.count = hwctx->nb_comp_queues,
|
||||
},
|
||||
.queue_transfer = {
|
||||
.index = VK_QUEUE_FAMILY_IGNORED,
|
||||
.count = 0,
|
||||
.index = hwctx->queue_family_tx_index,
|
||||
.count = hwctx->nb_tx_queues,
|
||||
},
|
||||
};
|
||||
for (int i = 0; i < hwctx->nb_qf; i++) {
|
||||
const AVVulkanDeviceQueueFamily *qf = &hwctx->qf[i];
|
||||
|
||||
if (qf->flags & VK_QUEUE_GRAPHICS_BIT) {
|
||||
import_params.queue_graphics.index = qf->idx;
|
||||
import_params.queue_graphics.count = qf->num;
|
||||
}
|
||||
if (qf->flags & VK_QUEUE_COMPUTE_BIT) {
|
||||
import_params.queue_compute.index = qf->idx;
|
||||
import_params.queue_compute.count = qf->num;
|
||||
}
|
||||
if (qf->flags & VK_QUEUE_TRANSFER_BIT) {
|
||||
import_params.queue_transfer.index = qf->idx;
|
||||
import_params.queue_transfer.count = qf->num;
|
||||
}
|
||||
}
|
||||
|
||||
ctx->placebo_vulkan = pl_vulkan_import(ctx->vk_log, &import_params);
|
||||
));
|
||||
if (!ctx->placebo_vulkan)
|
||||
return AVERROR_EXTERNAL;
|
||||
|
||||
|
|
@ -409,8 +391,8 @@ static int create_vk_by_placebo(VkRenderer *renderer,
|
|||
device_ctx->user_opaque = ctx;
|
||||
|
||||
vk_dev_ctx = device_ctx->hwctx;
|
||||
vk_dev_ctx->lock_queue = placebo_lock_queue;
|
||||
vk_dev_ctx->unlock_queue = placebo_unlock_queue;
|
||||
vk_dev_ctx->lock_queue = placebo_lock_queue,
|
||||
vk_dev_ctx->unlock_queue = placebo_unlock_queue;
|
||||
|
||||
vk_dev_ctx->get_proc_addr = ctx->placebo_instance->get_proc_addr;
|
||||
|
||||
|
|
@ -426,38 +408,21 @@ static int create_vk_by_placebo(VkRenderer *renderer,
|
|||
vk_dev_ctx->enabled_dev_extensions = ctx->placebo_vulkan->extensions;
|
||||
vk_dev_ctx->nb_enabled_dev_extensions = ctx->placebo_vulkan->num_extensions;
|
||||
|
||||
int nb_qf = 0;
|
||||
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) {
|
||||
.idx = ctx->placebo_vulkan->queue_graphics.index,
|
||||
.num = ctx->placebo_vulkan->queue_graphics.count,
|
||||
.flags = VK_QUEUE_GRAPHICS_BIT,
|
||||
};
|
||||
nb_qf++;
|
||||
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) {
|
||||
.idx = ctx->placebo_vulkan->queue_transfer.index,
|
||||
.num = ctx->placebo_vulkan->queue_transfer.count,
|
||||
.flags = VK_QUEUE_TRANSFER_BIT,
|
||||
};
|
||||
nb_qf++;
|
||||
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) {
|
||||
.idx = ctx->placebo_vulkan->queue_compute.index,
|
||||
.num = ctx->placebo_vulkan->queue_compute.count,
|
||||
.flags = VK_QUEUE_COMPUTE_BIT,
|
||||
};
|
||||
nb_qf++;
|
||||
vk_dev_ctx->queue_family_index = ctx->placebo_vulkan->queue_graphics.index;
|
||||
vk_dev_ctx->nb_graphics_queues = ctx->placebo_vulkan->queue_graphics.count;
|
||||
|
||||
vk_dev_ctx->queue_family_tx_index = ctx->placebo_vulkan->queue_transfer.index;
|
||||
vk_dev_ctx->nb_tx_queues = ctx->placebo_vulkan->queue_transfer.count;
|
||||
|
||||
vk_dev_ctx->queue_family_comp_index = ctx->placebo_vulkan->queue_compute.index;
|
||||
vk_dev_ctx->nb_comp_queues = ctx->placebo_vulkan->queue_compute.count;
|
||||
|
||||
ret = get_decode_queue(renderer, &decode_index, &decode_count);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) {
|
||||
.idx = decode_index,
|
||||
.num = decode_count,
|
||||
.flags = VK_QUEUE_VIDEO_DECODE_BIT_KHR,
|
||||
};
|
||||
nb_qf++;
|
||||
|
||||
vk_dev_ctx->nb_qf = nb_qf;
|
||||
vk_dev_ctx->queue_family_decode_index = decode_index;
|
||||
vk_dev_ctx->nb_decode_queues = decode_count;
|
||||
|
||||
ret = av_hwdevice_ctx_init(ctx->hw_device_ref);
|
||||
if (ret < 0)
|
||||
|
|
@ -732,7 +697,6 @@ static int display(VkRenderer *renderer, AVFrame *frame)
|
|||
struct pl_frame target = {0};
|
||||
RendererContext *ctx = (RendererContext *) renderer;
|
||||
int ret = 0;
|
||||
struct pl_color_space hint = {0};
|
||||
|
||||
ret = convert_frame(renderer, frame);
|
||||
if (ret < 0)
|
||||
|
|
@ -745,8 +709,6 @@ static int display(VkRenderer *renderer, AVFrame *frame)
|
|||
return AVERROR_EXTERNAL;
|
||||
}
|
||||
|
||||
pl_color_space_from_avframe(&hint, frame);
|
||||
pl_swapchain_colorspace_hint(ctx->swapchain, &hint);
|
||||
if (!pl_swapchain_start_frame(ctx->swapchain, &swap_frame)) {
|
||||
av_log(NULL, AV_LOG_ERROR, "start frame failed\n");
|
||||
ret = AVERROR_EXTERNAL;
|
||||
|
|
|
|||
2783
fftools/ffprobe.c
2783
fftools/ffprobe.c
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
|
|
@ -1,30 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2018-2025 - softworkz
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef FFTOOLS_GRAPH_GRAPHPRINT_H
|
||||
#define FFTOOLS_GRAPH_GRAPHPRINT_H
|
||||
|
||||
#include "fftools/ffmpeg.h"
|
||||
|
||||
int print_filtergraphs(FilterGraph **graphs, int nb_graphs, InputFile **ifiles, int nb_ifiles, OutputFile **ofiles, int nb_ofiles);
|
||||
|
||||
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph);
|
||||
|
||||
#endif /* FFTOOLS_GRAPH_GRAPHPRINT_H */
|
||||
131
fftools/objpool.c
Normal file
131
fftools/objpool.c
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "libavcodec/packet.h"
|
||||
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/error.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
#include "objpool.h"
|
||||
|
||||
struct ObjPool {
|
||||
void *pool[32];
|
||||
unsigned int pool_count;
|
||||
|
||||
ObjPoolCBAlloc alloc;
|
||||
ObjPoolCBReset reset;
|
||||
ObjPoolCBFree free;
|
||||
};
|
||||
|
||||
ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
|
||||
ObjPoolCBFree cb_free)
|
||||
{
|
||||
ObjPool *op = av_mallocz(sizeof(*op));
|
||||
|
||||
if (!op)
|
||||
return NULL;
|
||||
|
||||
op->alloc = cb_alloc;
|
||||
op->reset = cb_reset;
|
||||
op->free = cb_free;
|
||||
|
||||
return op;
|
||||
}
|
||||
|
||||
void objpool_free(ObjPool **pop)
|
||||
{
|
||||
ObjPool *op = *pop;
|
||||
|
||||
if (!op)
|
||||
return;
|
||||
|
||||
for (unsigned int i = 0; i < op->pool_count; i++)
|
||||
op->free(&op->pool[i]);
|
||||
|
||||
av_freep(pop);
|
||||
}
|
||||
|
||||
int objpool_get(ObjPool *op, void **obj)
|
||||
{
|
||||
if (op->pool_count) {
|
||||
*obj = op->pool[--op->pool_count];
|
||||
op->pool[op->pool_count] = NULL;
|
||||
} else
|
||||
*obj = op->alloc();
|
||||
|
||||
return *obj ? 0 : AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
void objpool_release(ObjPool *op, void **obj)
|
||||
{
|
||||
if (!*obj)
|
||||
return;
|
||||
|
||||
op->reset(*obj);
|
||||
|
||||
if (op->pool_count < FF_ARRAY_ELEMS(op->pool))
|
||||
op->pool[op->pool_count++] = *obj;
|
||||
else
|
||||
op->free(obj);
|
||||
|
||||
*obj = NULL;
|
||||
}
|
||||
|
||||
static void *alloc_packet(void)
|
||||
{
|
||||
return av_packet_alloc();
|
||||
}
|
||||
static void *alloc_frame(void)
|
||||
{
|
||||
return av_frame_alloc();
|
||||
}
|
||||
|
||||
static void reset_packet(void *obj)
|
||||
{
|
||||
av_packet_unref(obj);
|
||||
}
|
||||
static void reset_frame(void *obj)
|
||||
{
|
||||
av_frame_unref(obj);
|
||||
}
|
||||
|
||||
static void free_packet(void **obj)
|
||||
{
|
||||
AVPacket *pkt = *obj;
|
||||
av_packet_free(&pkt);
|
||||
*obj = NULL;
|
||||
}
|
||||
static void free_frame(void **obj)
|
||||
{
|
||||
AVFrame *frame = *obj;
|
||||
av_frame_free(&frame);
|
||||
*obj = NULL;
|
||||
}
|
||||
|
||||
ObjPool *objpool_alloc_packets(void)
|
||||
{
|
||||
return objpool_alloc(alloc_packet, reset_packet, free_packet);
|
||||
}
|
||||
ObjPool *objpool_alloc_frames(void)
|
||||
{
|
||||
return objpool_alloc(alloc_frame, reset_frame, free_frame);
|
||||
}
|
||||
|
|
@ -16,24 +16,22 @@
|
|||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#ifndef AVCODEC_APV_DSP_H
|
||||
#define AVCODEC_APV_DSP_H
|
||||
#ifndef FFTOOLS_OBJPOOL_H
|
||||
#define FFTOOLS_OBJPOOL_H
|
||||
|
||||
#include <stddef.h>
|
||||
#include <stdint.h>
|
||||
typedef struct ObjPool ObjPool;
|
||||
|
||||
typedef void* (*ObjPoolCBAlloc)(void);
|
||||
typedef void (*ObjPoolCBReset)(void *);
|
||||
typedef void (*ObjPoolCBFree)(void **);
|
||||
|
||||
typedef struct APVDSPContext {
|
||||
void (*decode_transquant)(void *output,
|
||||
ptrdiff_t pitch,
|
||||
const int16_t *input,
|
||||
const int16_t *qmatrix,
|
||||
int bit_depth,
|
||||
int qp_shift);
|
||||
} APVDSPContext;
|
||||
void objpool_free(ObjPool **op);
|
||||
ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
|
||||
ObjPoolCBFree cb_free);
|
||||
ObjPool *objpool_alloc_packets(void);
|
||||
ObjPool *objpool_alloc_frames(void);
|
||||
|
||||
void ff_apv_dsp_init(APVDSPContext *dsp);
|
||||
int objpool_get(ObjPool *op, void **obj);
|
||||
void objpool_release(ObjPool *op, void **obj);
|
||||
|
||||
void ff_apv_dsp_init_x86_64(APVDSPContext *dsp);
|
||||
|
||||
#endif /* AVCODEC_APV_DSP_H */
|
||||
#endif // FFTOOLS_OBJPOOL_H
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue