Compare commits

..

No commits in common. "master" and "n7.2-dev" have entirely different histories.

3837 changed files with 66898 additions and 137107 deletions

View file

@ -1,96 +0,0 @@
# This file describes the expected reviewers for a PR based on the changed
# files. Unlike what the name of the file suggests they don't own the code, but
# merely have a good understanding of that area of the codebase and therefore
# are usually suited as a reviewer.
# Lines in this file match changed paths via Go-Style regular expressions:
# https://pkg.go.dev/regexp/syntax
# Mind the alphabetical order
# avcodec
# =======
libavcodec/.*aac.* @lynne
libavcodec/.*ac3.* @lynne
libavcodec/.*atrac9.* @lynne
libavcodec/.*bitpacked.* @lynne
libavcodec/.*d3d12va.* @jianhuaw
libavcodec/.*dirac.* @lynne
libavcodec/.*ffv1.* @lynne @michaelni
libavcodec/golomb.* @michaelni
libavcodec/.*h266.* @frankplow @NuoMi @jianhuaw
libavcodec/h26x/.* @frankplow @NuoMi @jianhuaw
libavcodec/.*jpegxl.* @lynne @Traneptora
libavcodec/.*jxl.* @lynne @Traneptora
libavcodec/.*opus.* @lynne
libavcodec/.*png.* @Traneptora
libavcodec/.*prores.* @lynne
libavcodec/rangecoder.* @michaelni
libavcodec/ratecontrol.* @michaelni
libavcodec/.*siren.* @lynne
libavcodec/.*vc2.* @lynne
libavcodec/.*vvc.* @frankplow @NuoMi @jianhuaw
libavcodec/aarch64/.* @lynne @mstorsjo
libavcodec/arm/.* @mstorsjo
libavcodec/ppc/.* @sean_mcg
libavcodec/x86/.* @lynne
# avfilter
# =======
libavfilter/aarch64/.* @mstorsjo
libavfilter/af_whisper.* @vpalmisano
libavfilter/vf_yadif.* @michaelni
libavfilter/vsrc_mandelbrot.* @michaelni
# avformat
# =======
libavformat/iamf.* @jamrial
libavformat/.*jpegxl.* @Traneptora
libavformat/.*jxl.* @Traneptora
# avutil
# ======
libavutil/.*crc.* @lynne @michaelni
libavutil/.*d3d12va.* @jianhuaw
libavutil/eval.* @michaelni
libavutil/iamf.* @jamrial
libavutil/integer.* @michaelni
libavutil/lfg.* @michaelni
libavutil/lls.* @michaelni
libavutil/md5.* @michaelni
libavutil/mathematics.* @michaelni
libavutil/mem.* @michaelni
libavutil/qsort.* @michaelni
libavutil/random_seed.* @michaelni
libavutil/rational.* @michaelni
libavutil/sfc.* @michaelni
libavutil/softfloat.* @michaelni
libavutil/tree.* @michaelni
libavutil/tx.* @lynne
libavutil/aarch64/.* @lynne @mstorsjo
libavutil/arm/.* @mstorsjo
libavutil/ppc/.* @sean_mcg
libavutil/x86/.* @lynne
# swresample
# =======
libswresample/aarch64/.* @mstorsjo
libswresample/arm/.* @mstorsjo
libswresample/.* @michaelni
# swscale
# =======
libswscale/aarch64/.* @mstorsjo
libswscale/arm/.* @mstorsjo
libswscale/ppc/.* @sean_mcg
# doc
# ===
doc/.* @GyanD
# Frameworks
# ==========
.*d3d12va.* @jianhuaw
.*vulkan.* @lynne

View file

@ -1,9 +0,0 @@
# Summary of the bug
Briefly describe the issue you're experiencing. Include any error messages, unexpected behavior, or relevant observations.
# Steps to reproduce
List the steps required to trigger the bug.
Include the exact CLI command used, if any.
Provide sample input files, logs, or scripts if available.

View file

@ -1,72 +0,0 @@
// Forgejo Actions labeler script (run via actions/github-script).
// Applies labels to issues/PRs based on keywords in the title, adds a "new"
// label on open, and removes it once the item is closed or an org member
// interacts with it.
module.exports = async ({github, context}) => {
// Title of the PR or the issue, whichever the event payload carries; lowercased for keyword matching.
const title = (context.payload.pull_request?.title || context.payload.issue?.title || '').toLowerCase();
const labels = [];
const issueNumber = context.payload.pull_request?.number || context.payload.issue?.number;
// Maps a keyword found in the title to the label to apply.
const kwmap = {
'avcodec': 'avcodec',
'avdevice': 'avdevice',
'avfilter': 'avfilter',
'avformat': 'avformat',
'avutil': 'avutil',
'swresample': 'swresample',
'swscale': 'swscale',
'fftools': 'CLI'
};
// Returns true iff `username` is a member of the repo owner's organization.
// Any error from the membership check (e.g. 404 for non-members) is treated
// as "not a member".
async function isOrgMember(username) {
try {
const response = await github.rest.orgs.checkMembershipForUser({
org: context.repo.owner,
username: username
});
// The API signals membership with 204 No Content.
return response.status === 204;
} catch (error) {
return false;
}
}
// Remove the "new" label when the item is closed, or when an org member
// interacts with it after opening (assignment, label change, or a comment).
// NOTE(review): the `action !== 'opened'` guard appears redundant for the
// assigned/label_updated cases but also excludes comments created in the
// same 'opened' event — confirm against the Forgejo event payloads.
if (context.payload.action === 'closed' ||
(context.payload.action !== 'opened' && (
context.payload.action === 'assigned' ||
context.payload.action === 'label_updated' ||
context.payload.comment) &&
await isOrgMember(context.payload.sender.login))
) {
try {
await github.rest.issues.removeLabel({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
// this should say 'new', but forgejo deviates from GitHub API here and expects the ID
name: '41'
});
console.log('Removed "new" label');
} catch (error) {
// 404/410 mean the label was not present or the issue is gone — not an error worth logging.
if (error.status !== 404 && error.status !== 410) {
console.log('Could not remove "new" label');
}
}
} else if (context.payload.action === 'opened') {
labels.push('new');
console.log('Detected label: new');
}
// Keyword-based labels are applied only on open/edit of the PR/issue itself,
// never in response to comments.
if ((context.payload.action === 'opened' || context.payload.action === 'edited') && context.eventName !== 'issue_comment') {
for (const [kw, label] of Object.entries(kwmap)) {
if (title.includes(kw)) {
labels.push(label);
console.log('Detected label: ' + label);
}
}
}
// Apply all collected labels in one API call.
if (labels.length > 0) {
await github.rest.issues.addLabels({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: issueNumber,
labels: labels,
});
}
}

View file

@ -1,31 +0,0 @@
avcodec:
- changed-files:
- any-glob-to-any-file: libavcodec/**
avdevice:
- changed-files:
- any-glob-to-any-file: libavdevice/**
avfilter:
- changed-files:
- any-glob-to-any-file: libavfilter/**
avformat:
- changed-files:
- any-glob-to-any-file: libavformat/**
avutil:
- changed-files:
- any-glob-to-any-file: libavutil/**
swresample:
- changed-files:
- any-glob-to-any-file: libswresample/**
swscale:
- changed-files:
- any-glob-to-any-file: libswscale/**
CLI:
- changed-files:
- any-glob-to-any-file: fftools/**

View file

@ -1,28 +0,0 @@
exclude: ^tests/ref/
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v5.0.0
hooks:
- id: check-case-conflict
- id: check-executables-have-shebangs
- id: check-illegal-windows-names
- id: check-shebang-scripts-are-executable
- id: check-yaml
- id: end-of-file-fixer
- id: file-contents-sorter
files:
.forgejo/pre-commit/ignored-words.txt
args:
- --ignore-case
- id: fix-byte-order-marker
- id: mixed-line-ending
- id: trailing-whitespace
- repo: https://github.com/codespell-project/codespell
rev: v2.4.1
hooks:
- id: codespell
args:
- --ignore-words=.forgejo/pre-commit/ignored-words.txt
- --ignore-multiline-regex=codespell:off.*?(codespell:on|\Z)
exclude: ^tools/(patcheck|clean-diff)$

View file

@ -1,119 +0,0 @@
abl
ACN
acount
addin
alis
alls
ALOG
ALS
als
ANC
anc
ANS
ans
anull
basf
bloc
brane
BREIF
BU
bu
bufer
CAF
caf
clen
clens
Collet
compre
dum
endin
erro
FIEL
fiel
filp
fils
FILTERD
filterd
fle
fo
FPR
fro
Hald
indx
ine
inh
inout
inouts
inport
ist
LAF
laf
lastr
LinS
mapp
mis
mot
nd
nIn
offsetp
orderd
ot
outout
padd
PAETH
paeth
PARM
parm
parms
pEvents
PixelX
Psot
quater
readd
recuse
redY
Reencode
reencode
remaind
renderD
rin
SAV
SEH
SER
ser
setts
shft
SIZ
siz
skipd
sme
som
sover
STAP
startd
statics
struc
suble
TE
tE
te
tha
tne
tolen
tpye
tre
TRUN
trun
truns
Tung
TYE
ue
UES
ues
vai
vas
vie
VILL
vor
wel
wih

View file

@ -1,28 +0,0 @@
on:
pull_request_target:
types: [opened, edited, synchronize, closed, assigned, labeled, unlabeled]
issues:
types: [opened, edited, closed, assigned, labeled, unlabeled]
issue_comment:
types: [created]
jobs:
pr_labeler:
runs-on: utilities
if: ${{ github.event.sender.login != 'ffmpeg-devel' }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Label by file-changes
uses: https://github.com/actions/labeler@v5
if: ${{ forge.event_name == 'pull_request_target' }}
with:
configuration-path: .forgejo/labeler/labeler.yml
repo-token: ${{ secrets.AUTOLABELER_TOKEN }}
- name: Label by title-match
uses: https://github.com/actions/github-script@v7
with:
script: |
const script = require('.forgejo/labeler/labeler.js')
await script({github, context})
github-token: ${{ secrets.AUTOLABELER_TOKEN }}

View file

@ -1,26 +0,0 @@
on:
push:
branches:
- master
pull_request:
jobs:
lint:
runs-on: utilities
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Install pre-commit CI
id: install
run: |
python3 -m venv ~/pre-commit
~/pre-commit/bin/pip install --upgrade pip setuptools
~/pre-commit/bin/pip install pre-commit
echo "envhash=$({ python3 --version && cat .forgejo/pre-commit/config.yaml; } | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
- name: Cache
uses: actions/cache@v4
with:
path: ~/.cache/pre-commit
key: pre-commit-${{ steps.install.outputs.envhash }}
- name: Run pre-commit CI
run: ~/pre-commit/bin/pre-commit run -c .forgejo/pre-commit/config.yaml --show-diff-on-failure --color=always --all-files

View file

@ -1,59 +0,0 @@
on:
push:
branches:
- master
pull_request:
jobs:
run_fate:
strategy:
fail-fast: false
matrix:
runner: [linux-amd64,linux-aarch64]
runs-on: ${{ matrix.runner }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Configure
run: ./configure --enable-gpl --enable-nonfree --enable-memory-poisoning --assert-level=2
- name: Build
run: make -j$(nproc)
- name: Restore Cached Fate-Suite
id: cache
uses: actions/cache/restore@v4
with:
path: fate-suite
key: fate-suite
restore-keys: |
fate-suite-
- name: Sync Fate-Suite
id: fate
run: |
make fate-rsync SAMPLES=$PWD/fate-suite
echo "hash=$(find fate-suite -type f | sort | sha256sum | cut -d' ' -f1)" >> $FORGEJO_OUTPUT
- name: Cache Fate-Suite
uses: actions/cache/save@v4
if: ${{ format('fate-suite-{0}', steps.fate.outputs.hash) != steps.cache.outputs.cache-matched-key }}
with:
path: fate-suite
key: fate-suite-${{ steps.fate.outputs.hash }}
- name: Run Fate
run: make fate SAMPLES=$PWD/fate-suite -j$(nproc)
compile_only:
strategy:
fail-fast: false
matrix:
image: ["ghcr.io/btbn/ffmpeg-builds/win64-gpl:latest"]
runs-on: linux-amd64
container: ${{ matrix.image }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Configure
run: |
./configure --pkg-config-flags="--static" $FFBUILD_TARGET_FLAGS $FF_CONFIGURE \
--cc="$CC" --cxx="$CXX" --ar="$AR" --ranlib="$RANLIB" --nm="$NM" \
--extra-cflags="$FF_CFLAGS" --extra-cxxflags="$FF_CXXFLAGS" \
--extra-libs="$FF_LIBS" --extra-ldflags="$FF_LDFLAGS" --extra-ldexeflags="$FF_LDEXEFLAGS"
- name: Build
run: make -j$(nproc)

1
.gitattributes vendored
View file

@ -1,2 +1 @@
*.pnm -diff -text *.pnm -diff -text
Changelog merge=union

5
.gitignore vendored
View file

@ -1,6 +1,5 @@
*.a *.a
*.o *.o
*.objs
*.o.* *.o.*
*.d *.d
*.def *.def
@ -42,7 +41,3 @@
/src /src
/mapfile /mapfile
/tools/python/__pycache__/ /tools/python/__pycache__/
/libavcodec/vulkan/*.c
/libavfilter/vulkan/*.c
/.*/
!/.forgejo/

View file

@ -26,5 +26,3 @@ rcombs <rcombs@rcombs.me> <rodger.combs@gmail.com>
Cosmin Stejerean <cosmin@cosmin.at> Cosmin Stejerean via ffmpeg-devel <ffmpeg-devel@ffmpeg.org> Cosmin Stejerean <cosmin@cosmin.at> Cosmin Stejerean via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
<wutong1208@outlook.com> <tong1.wu-at-intel.com@ffmpeg.org> <wutong1208@outlook.com> <tong1.wu-at-intel.com@ffmpeg.org>
<wutong1208@outlook.com> <tong1.wu@intel.com> <wutong1208@outlook.com> <tong1.wu@intel.com>
<toqsxw@outlook.com> <jianhua.wu-at-intel.com@ffmpeg.org>
<toqsxw@outlook.com> <jianhua.wu@intel.com>

View file

@ -55,7 +55,7 @@ modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be author's reputation will not be affected by problems that might be
introduced by others. introduced by others.
Finally, software patents pose a constant threat to the existence of Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a effectively restrict the users of a free program by obtaining a
@ -111,7 +111,7 @@ modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The "work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must former contains code derived from the library, whereas the latter must
be combined with the library in order to run. be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
@ -158,7 +158,7 @@ Library.
You may charge a fee for the physical act of transferring a copy, You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a and you may at your option offer warranty protection in exchange for a
fee. fee.
2. You may modify your copy or copies of the Library or any portion 2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1 distribute such modifications or work under the terms of Section 1
@ -216,7 +216,7 @@ instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in that version instead if you wish.) Do not make any other change in
these notices. these notices.
Once this change is made in a given copy, it is irreversible for Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy. subsequent copies and derivative works made from that copy.
@ -267,7 +267,7 @@ Library will still fall under Section 6.)
distribute the object code for the work under the terms of Section 6. distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6, Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself. whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or 6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work work containing portions of the Library, and distribute that work
@ -329,7 +329,7 @@ restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you use both them and the Library together in an executable that you
distribute. distribute.
7. You may place library facilities that are a work based on the 7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined facilities not covered by this License, and distribute such a combined
@ -370,7 +370,7 @@ subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein. restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with You are not responsible for enforcing compliance by third parties with
this License. this License.
11. If, as a consequence of a court judgment or allegation of patent 11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues), infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or conditions are imposed on you (whether by court order, agreement or
@ -422,7 +422,7 @@ conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by license version number, you may choose any version ever published by
the Free Software Foundation. the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free 14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these, programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is write to the author to ask for permission. For software which is
@ -456,7 +456,7 @@ SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES. DAMAGES.
END OF TERMS AND CONDITIONS END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest If you develop a new library, and you want it to be of the greatest

View file

@ -1,45 +1,6 @@
Entries are sorted chronologically from oldest to youngest within each release, Entries are sorted chronologically from oldest to youngest within each release,
releases are sorted from youngest to oldest. releases are sorted from youngest to oldest.
version <next>:
version 8.0:
- Whisper filter
- Drop support for OpenSSL < 1.1.0
- Enable TLS peer certificate verification by default (on next major version bump)
- Drop support for OpenSSL < 1.1.1
- yasm support dropped, users need to use nasm
- VVC VAAPI decoder
- RealVideo 6.0 decoder
- OpenMAX encoders deprecated
- libx265 alpha layer encoding
- ADPCM IMA Xbox decoder
- Enhanced FLV v2: Multitrack audio/video, modern codec support
- Animated JPEG XL encoding (via libjxl)
- VVC in Matroska
- CENC AV1 support in MP4 muxer
- pngenc: set default prediction method to PAETH
- APV decoder and APV raw bitstream muxing and demuxing
- APV parser
- APV encoding support through a libopenapv wrapper
- VVC decoder supports all content of SCC (Screen Content Coding):
IBC (Inter Block Copy), Palette Mode and ACT (Adaptive Color Transform)
- G.728 decoder
- pad_cuda filter
- Sanyo LD-ADPCM decoder
- APV in MP4/ISOBMFF muxing and demuxing
- OpenHarmony hardware decoder/encoder
- Colordetect filter
- Add vf_scale_d3d11 filter
- No longer disabling GCC autovectorization, on X86, ARM and AArch64
- VP9 Vulkan hwaccel
- AV1 Vulkan encoder
- ProRes RAW decoder
- ProRes RAW Vulkan hwaccel
- ffprobe -codec option
version 7.1: version 7.1:
- Raw Captions with Time (RCWT) closed caption demuxer - Raw Captions with Time (RCWT) closed caption demuxer
- LC3/LC3plus decoding/encoding using external library liblc3 - LC3/LC3plus decoding/encoding using external library liblc3
@ -66,20 +27,6 @@ version 7.1:
- LCEVC enhancement data exporting in H.26x and MP4/ISOBMFF - LCEVC enhancement data exporting in H.26x and MP4/ISOBMFF
- LCEVC filter - LCEVC filter
- MV-HEVC decoding - MV-HEVC decoding
- minor stream specifier syntax changes:
- when matching by metadata (:m:<key>:<val>), the colon character
in keys or values now has to be backslash-escaped
- in optional maps (-map ....?) with a metadata-matching stream specifier,
the value has to be separated from the question mark by a colon, i.e.
-map ....:m:<key>:<val>:? (otherwise it would be ambiguous whether the
question mark is a part of <val> or not)
- multiple stream types in a single specifier (e.g. :s:s:0) now cause an
error, as such a specifier makes no sense
- Mastering Display and Content Light Level metadata support in hevc_nvenc
and av1_nvenc encoders
- libswresample now accepts custom order channel layouts as input, with some
constraints
- FFV1 parser
version 7.0: version 7.0:
@ -168,7 +115,6 @@ version 6.1:
variable-fields elements within the same parent element variable-fields elements within the same parent element
- ffprobe -output_format option added as an alias of -of - ffprobe -output_format option added as an alias of -of
# codespell:off
version 6.0: version 6.0:
- Radiance HDR image support - Radiance HDR image support

View file

@ -1,7 +0,0 @@
{
"drips": {
"ethereum": {
"ownedBy": "0x2f3900e7064eE63D30d749971265858612AA7139"
}
}
}

View file

@ -1,8 +1,5 @@
## Installing FFmpeg ## Installing FFmpeg
0. If you like to include source plugins, merge them before configure
for example run tools/merge-all-source-plugins
1. Type `./configure` to create the configuration. A list of configure 1. Type `./configure` to create the configuration. A list of configure
options is printed by running `configure --help`. options is printed by running `configure --help`.
@ -18,11 +15,3 @@ NOTICE
------ ------
- Non system dependencies (e.g. libx264, libvpx) are disabled by default. - Non system dependencies (e.g. libx264, libvpx) are disabled by default.
NOTICE for Package Maintainers
------------------------------
- It is recommended to build FFmpeg twice, first with minimal external dependencies so
that 3rd party packages, which depend on FFmpegs libavutil/libavfilter/libavcodec/libavformat
can then be built. And last build FFmpeg with full dependencies (which may in turn depend on
some of these 3rd party packages). This avoids circular dependencies during build.

View file

@ -12,6 +12,7 @@ configure to activate them. In this case, FFmpeg's license changes to GPL v2+.
Specifically, the GPL parts of FFmpeg are: Specifically, the GPL parts of FFmpeg are:
- libpostproc
- optional x86 optimization in the files - optional x86 optimization in the files
- `libavcodec/x86/flac_dsp_gpl.asm` - `libavcodec/x86/flac_dsp_gpl.asm`
- `libavcodec/x86/idct_mmx.c` - `libavcodec/x86/idct_mmx.c`
@ -44,6 +45,7 @@ Specifically, the GPL parts of FFmpeg are:
- `vf_owdenoise.c` - `vf_owdenoise.c`
- `vf_perspective.c` - `vf_perspective.c`
- `vf_phase.c` - `vf_phase.c`
- `vf_pp.c`
- `vf_pp7.c` - `vf_pp7.c`
- `vf_pullup.c` - `vf_pullup.c`
- `vf_repeatfields.c` - `vf_repeatfields.c`

View file

@ -45,20 +45,17 @@ Commandline utility code:
QuickTime faststart: QuickTime faststart:
tools/qt-faststart.c Baptiste Coudurier tools/qt-faststart.c Baptiste Coudurier
Execution Graph Printing
fftools/graph, fftools/resources [2] softworkz
Miscellaneous Areas Miscellaneous Areas
=================== ===================
documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi documentation Stefano Sabatini, Mike Melanson, Timothy Gu, Gyan Doshi
project server day to day operations (L: root@ffmpeg.org) Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler project server day to day operations (L: root@ffmpeg.org) Árpád Gereöffy, Michael Niedermayer, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
project server emergencies (L: root@ffmpeg.org) Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler project server emergencies (L: root@ffmpeg.org) Árpád Gereöffy, Reimar Doeffinger, Alexander Strasser, Nikolay Aleksandrov, Timo Rothenpieler
presets [0] presets [0]
metadata subsystem Aurelien Jacobs metadata subsystem Aurelien Jacobs
release management Michael Niedermayer release management Michael Niedermayer
API tests [0] API tests [0]
samples-request [2] Thilo Borgmann, James Almer, Ben Littler
Communication Communication
@ -85,14 +82,12 @@ Other:
aes_ctr.c, aes_ctr.h Eran Kornblau aes_ctr.c, aes_ctr.h Eran Kornblau
bprint Nicolas George bprint Nicolas George
bswap.h bswap.h
csp.c, csp.h Leo Izen, Ronald S. Bultje
des Reimar Doeffinger des Reimar Doeffinger
dynarray.h Nicolas George dynarray.h Nicolas George
eval.c, eval.h [2] Michael Niedermayer eval.c, eval.h [2] Michael Niedermayer
float_dsp Loren Merritt float_dsp Loren Merritt
hash Reimar Doeffinger hash Reimar Doeffinger
hwcontext_cuda* Timo Rothenpieler hwcontext_cuda* Timo Rothenpieler
hwcontext_d3d12va* Wu Jianhua
hwcontext_vulkan* [2] Lynne hwcontext_vulkan* [2] Lynne
intfloat* Michael Niedermayer intfloat* Michael Niedermayer
integer.c, integer.h Michael Niedermayer integer.c, integer.h Michael Niedermayer
@ -138,6 +133,8 @@ Generic Parts:
ratecontrol.c [2] Michael Niedermayer ratecontrol.c [2] Michael Niedermayer
simple IDCT: simple IDCT:
simple_idct.c, simple_idct.h [2] Michael Niedermayer simple_idct.c, simple_idct.h [2] Michael Niedermayer
postprocessing:
libpostproc/* [2] Michael Niedermayer
table generation: table generation:
tableprint.c, tableprint.h Reimar Doeffinger tableprint.c, tableprint.h Reimar Doeffinger
fixed point FFT: fixed point FFT:
@ -179,7 +176,6 @@ Codecs:
dss_sp.c Oleksij Rempel dss_sp.c Oleksij Rempel
dv.c Roman Shaposhnik dv.c Roman Shaposhnik
dvbsubdec.c Anshul Maheshwari dvbsubdec.c Anshul Maheshwari
dxv.*, dxvenc.* Emma Worley
eacmv*, eaidct*, eat* Peter Ross eacmv*, eaidct*, eat* Peter Ross
exif.c, exif.h Thilo Borgmann exif.c, exif.h Thilo Borgmann
ffv1* [2] Michael Niedermayer ffv1* [2] Michael Niedermayer
@ -210,6 +206,7 @@ Codecs:
libgsm.c Michel Bardiaux libgsm.c Michel Bardiaux
libkvazaar.c Arttu Ylä-Outinen libkvazaar.c Arttu Ylä-Outinen
libopenh264enc.c Martin Storsjo, Linjie Fu libopenh264enc.c Martin Storsjo, Linjie Fu
libopenjpeg.c Jaikrishnan Menon
libopenjpegenc.c Michael Bradshaw libopenjpegenc.c Michael Bradshaw
libtheoraenc.c David Conrad libtheoraenc.c David Conrad
libvorbis.c David Conrad libvorbis.c David Conrad
@ -247,7 +244,6 @@ Codecs:
rpza.c Roberto Togni rpza.c Roberto Togni
rtjpeg.c, rtjpeg.h Reimar Doeffinger rtjpeg.c, rtjpeg.h Reimar Doeffinger
rv10.c Michael Niedermayer rv10.c Michael Niedermayer
sanm.c Manuel Lauss
smc.c Mike Melanson smc.c Mike Melanson
snow* Michael Niedermayer, Loren Merritt snow* Michael Niedermayer, Loren Merritt
sonic.c Alex Beregszaszi sonic.c Alex Beregszaszi
@ -270,7 +266,7 @@ Codecs:
vp8 David Conrad, Ronald Bultje vp8 David Conrad, Ronald Bultje
vp9 Ronald Bultje vp9 Ronald Bultje
vqavideo.c Mike Melanson vqavideo.c Mike Melanson
vvc [2] Nuo Mi, Wu Jianhua, Frank Plowman vvc [2] Nuo Mi
wmaprodec.c Sascha Sommer wmaprodec.c Sascha Sommer
wmavoice.c Ronald S. Bultje wmavoice.c Ronald S. Bultje
wmv2.c Michael Niedermayer wmv2.c Michael Niedermayer
@ -280,7 +276,6 @@ Codecs:
Hardware acceleration: Hardware acceleration:
dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme dxva2* Hendrik Leppkes, Laurent Aimar, Steve Lhomme
d3d11va* Steve Lhomme d3d11va* Steve Lhomme
d3d12va* Wu Jianhua
d3d12va_encode* Tong Wu d3d12va_encode* Tong Wu
mediacodec* Matthieu Bouron, Aman Gupta, Zhao Zhili mediacodec* Matthieu Bouron, Aman Gupta, Zhao Zhili
vaapi* Haihao Xiang vaapi* Haihao Xiang
@ -396,7 +391,7 @@ Muxers/Demuxers:
dss.c Oleksij Rempel dss.c Oleksij Rempel
dtsdec.c foo86 dtsdec.c foo86
dv.c Roman Shaposhnik dv.c Roman Shaposhnik
dvdvideodec.c [2] Marth64 dvdvideodec.c Marth64
electronicarts.c Peter Ross electronicarts.c Peter Ross
evc* Samsung (Dawid Kozinski) evc* Samsung (Dawid Kozinski)
ffm* Baptiste Coudurier ffm* Baptiste Coudurier
@ -450,8 +445,7 @@ Muxers/Demuxers:
pva.c Ivo van Poorten pva.c Ivo van Poorten
r3d.c Baptiste Coudurier r3d.c Baptiste Coudurier
raw.c Michael Niedermayer raw.c Michael Niedermayer
rcwtdec.c [2] Marth64 rcwtenc.c Marth64
rcwtenc.c [2] Marth64
rdt.c Ronald S. Bultje rdt.c Ronald S. Bultje
rl2.c Sascha Sommer rl2.c Sascha Sommer
rmdec.c, rmenc.c Ronald S. Bultje rmdec.c, rmenc.c Ronald S. Bultje
@ -470,7 +464,6 @@ Muxers/Demuxers:
sdp.c Martin Storsjo sdp.c Martin Storsjo
segafilm.c Mike Melanson segafilm.c Mike Melanson
segment.c Stefano Sabatini segment.c Stefano Sabatini
smush.c Manuel Lauss
spdif* Anssi Hannula spdif* Anssi Hannula
srtdec.c Aurelien Jacobs srtdec.c Aurelien Jacobs
swf.c Baptiste Coudurier swf.c Baptiste Coudurier
@ -518,10 +511,9 @@ Operating systems / CPU architectures
Alpha [0] Alpha [0]
MIPS Manojkumar Bhosale, Shiyou Yin MIPS Manojkumar Bhosale, Shiyou Yin
LoongArch [2] Shiyou Yin LoongArch [2] Shiyou Yin
Darwin (macOS, iOS) [2] Marvin Scholz Mac OS X / PowerPC Romain Dolbeau, Guillaume Poirier
Mac OS X / PowerPC [0]
Amiga / PowerPC Colin Ward Amiga / PowerPC Colin Ward
Linux / PowerPC [1] Lauri Kasanen Linux / PowerPC Lauri Kasanen
RISC-V [2] Rémi Denis-Courmont RISC-V [2] Rémi Denis-Courmont
Windows MinGW Alex Beregszaszi, Ramiro Polla Windows MinGW Alex Beregszaszi, Ramiro Polla
Windows Cygwin Victor Paesa Windows Cygwin Victor Paesa
@ -549,7 +541,6 @@ James Darnley
Jan Ekström Jan Ekström
Joakim Plate Joakim Plate
Jun Zhao Jun Zhao
Kacper Michajłow
Kieran Kunhya Kieran Kunhya
Kirill Gavrilov Kirill Gavrilov
Limin Wang Limin Wang
@ -591,7 +582,6 @@ Benoit Fouet B22A 4F4F 43EF 636B BB66 FCDC 0023 AE1E 2985 49C8
Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9 Clément Bœsch 52D0 3A82 D445 F194 DB8B 2B16 87EE 2CB8 F4B8 FCF9
Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7 Daniel Verkamp 78A6 07ED 782C 653E C628 B8B9 F0EB 8DD8 2F0E 21C7
FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8 FFmpeg release signing key FCF9 86EA 15E6 E293 A564 4F10 B432 2F04 D676 58D8
Frank Plowman 34E2 48D6 B7DF 4769 70C7 3304 03A8 4C6A 098F 2C6B
Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B Ganesh Ajjanagadde C96A 848E 97C3 CEA2 AB72 5CE4 45F9 6A2D 3C36 FB1B
Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4 Gwenole Beauchesne 2E63 B3A6 3E44 37E2 017D 2704 53C7 6266 B153 99C4
Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F Haihao Xiang (haihao) 1F0C 31E8 B4FE F7A4 4DC1 DC99 E0F5 76D4 76FC 437F

View file

@ -19,20 +19,14 @@ vpath %/fate_config.sh.template $(SRC_PATH)
TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch TESTTOOLS = audiogen videogen rotozoom tiny_psnr tiny_ssim base64 audiomatch
HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options HOSTPROGS := $(TESTTOOLS:%=tests/%) doc/print_options
ALLFFLIBS = \ ALLFFLIBS = avcodec avdevice avfilter avformat avutil postproc swscale swresample
avcodec \
avdevice \
avfilter \
avformat \
avutil \
swscale \
swresample \
# $(FFLIBS-yes) needs to be in linking order # $(FFLIBS-yes) needs to be in linking order
FFLIBS-$(CONFIG_AVDEVICE) += avdevice FFLIBS-$(CONFIG_AVDEVICE) += avdevice
FFLIBS-$(CONFIG_AVFILTER) += avfilter FFLIBS-$(CONFIG_AVFILTER) += avfilter
FFLIBS-$(CONFIG_AVFORMAT) += avformat FFLIBS-$(CONFIG_AVFORMAT) += avformat
FFLIBS-$(CONFIG_AVCODEC) += avcodec FFLIBS-$(CONFIG_AVCODEC) += avcodec
FFLIBS-$(CONFIG_POSTPROC) += postproc
FFLIBS-$(CONFIG_SWRESAMPLE) += swresample FFLIBS-$(CONFIG_SWRESAMPLE) += swresample
FFLIBS-$(CONFIG_SWSCALE) += swscale FFLIBS-$(CONFIG_SWSCALE) += swscale
@ -110,7 +104,7 @@ SUBDIR_VARS := CLEANFILES FFLIBS HOSTPROGS TESTPROGS TOOLS \
ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \ ALTIVEC-OBJS VSX-OBJS MMX-OBJS X86ASM-OBJS \
MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \ MIPSFPU-OBJS MIPSDSPR2-OBJS MIPSDSP-OBJS MSA-OBJS \
MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS RVVB-OBJS \ MMI-OBJS LSX-OBJS LASX-OBJS RV-OBJS RVV-OBJS RVVB-OBJS \
OBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS SIMD128-OBJS OBJS SLIBOBJS SHLIBOBJS STLIBOBJS HOSTOBJS TESTOBJS
define RESET define RESET
$(1) := $(1) :=

View file

@ -1 +1 @@
8.0.git 7.0.git

View file

@ -189,7 +189,4 @@ static inline __device__ float __cosf(float a) { return __nvvm_cos_approx_f(a);
static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); } static inline __device__ float __expf(float a) { return __nvvm_ex2_approx_f(a * (float)__builtin_log2(__builtin_exp(1))); }
static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); } static inline __device__ float __powf(float a, float b) { return __nvvm_ex2_approx_f(__nvvm_lg2_approx_f(a) * b); }
// Misc helper functions
extern "C" __device__ int printf(const char*, ...);
#endif /* COMPAT_CUDA_CUDA_RUNTIME_H */ #endif /* COMPAT_CUDA_CUDA_RUNTIME_H */

View file

@ -38,7 +38,7 @@ static int optind = 1;
static int optopt; static int optopt;
static char *optarg; static char *optarg;
static int getopt(int argc, char *argv[], const char *opts) static int getopt(int argc, char *argv[], char *opts)
{ {
static int sp = 1; static int sp = 1;
int c; int c;

View file

@ -218,7 +218,7 @@ while (<F>) {
# Lines of the form '} SOME_VERSION_NAME_1.0;' # Lines of the form '} SOME_VERSION_NAME_1.0;'
if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) { if (/^[ \t]*\}[ \tA-Z0-9_.a-z]+;[ \t]*$/) {
$glob = 'glob'; $glob = 'glob';
# We tried to match symbols against this version, but none matched. # We tried to match symbols agains this version, but none matched.
# Emit dummy hidden symbol to avoid marking this version WEAK. # Emit dummy hidden symbol to avoid marking this version WEAK.
if ($matches_attempted && $matched_symbols == 0) { if ($matches_attempted && $matched_symbols == 0) {
print " hidden:\n"; print " hidden:\n";

View file

@ -44,14 +44,13 @@
#include "libavutil/internal.h" #include "libavutil/internal.h"
#include "libavutil/mem.h" #include "libavutil/mem.h"
#include "libavutil/time.h" #include "libavutil/time.h"
#include "libavutil/wchar_filename.h"
typedef struct pthread_t { typedef struct pthread_t {
void *handle; void *handle;
void *(*func)(void* arg); void *(*func)(void* arg);
void *arg; void *arg;
void *ret; void *ret;
} *pthread_t; } pthread_t;
/* use light weight mutex/condition variable API for Windows Vista and later */ /* use light weight mutex/condition variable API for Windows Vista and later */
typedef SRWLOCK pthread_mutex_t; typedef SRWLOCK pthread_mutex_t;
@ -75,7 +74,7 @@ typedef CONDITION_VARIABLE pthread_cond_t;
static av_unused THREADFUNC_RETTYPE static av_unused THREADFUNC_RETTYPE
__stdcall attribute_align_arg win32thread_worker(void *arg) __stdcall attribute_align_arg win32thread_worker(void *arg)
{ {
pthread_t h = (pthread_t)arg; pthread_t *h = (pthread_t*)arg;
h->ret = h->func(h->arg); h->ret = h->func(h->arg);
return 0; return 0;
} }
@ -83,35 +82,21 @@ __stdcall attribute_align_arg win32thread_worker(void *arg)
static av_unused int pthread_create(pthread_t *thread, const void *unused_attr, static av_unused int pthread_create(pthread_t *thread, const void *unused_attr,
void *(*start_routine)(void*), void *arg) void *(*start_routine)(void*), void *arg)
{ {
pthread_t ret; thread->func = start_routine;
thread->arg = arg;
ret = av_mallocz(sizeof(*ret));
if (!ret)
return EAGAIN;
ret->func = start_routine;
ret->arg = arg;
#if HAVE_WINRT #if HAVE_WINRT
ret->handle = (void*)CreateThread(NULL, 0, win32thread_worker, ret, thread->handle = (void*)CreateThread(NULL, 0, win32thread_worker, thread,
0, NULL); 0, NULL);
#else #else
ret->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, ret, thread->handle = (void*)_beginthreadex(NULL, 0, win32thread_worker, thread,
0, NULL); 0, NULL);
#endif #endif
return !thread->handle;
if (!ret->handle) {
av_free(ret);
return EAGAIN;
}
*thread = ret;
return 0;
} }
static av_unused int pthread_join(pthread_t thread, void **value_ptr) static av_unused int pthread_join(pthread_t thread, void **value_ptr)
{ {
DWORD ret = WaitForSingleObject(thread->handle, INFINITE); DWORD ret = WaitForSingleObject(thread.handle, INFINITE);
if (ret != WAIT_OBJECT_0) { if (ret != WAIT_OBJECT_0) {
if (ret == WAIT_ABANDONED) if (ret == WAIT_ABANDONED)
return EINVAL; return EINVAL;
@ -119,9 +104,8 @@ static av_unused int pthread_join(pthread_t thread, void **value_ptr)
return EDEADLK; return EDEADLK;
} }
if (value_ptr) if (value_ptr)
*value_ptr = thread->ret; *value_ptr = thread.ret;
CloseHandle(thread->handle); CloseHandle(thread.handle);
av_free(thread);
return 0; return 0;
} }
@ -210,38 +194,4 @@ static inline int pthread_setcancelstate(int state, int *oldstate)
return 0; return 0;
} }
static inline int win32_thread_setname(const char *name)
{
#if !HAVE_UWP
typedef HRESULT (WINAPI *SetThreadDescriptionFn)(HANDLE, PCWSTR);
// Although SetThreadDescription lives in kernel32.dll, on Windows Server 2016,
// Windows 10 LTSB 2016 and Windows 10 version 1607, it was only available in
// kernelbase.dll. So, load it from there for maximum coverage.
HMODULE kernelbase = GetModuleHandleW(L"kernelbase.dll");
if (!kernelbase)
return AVERROR(ENOSYS);
SetThreadDescriptionFn pSetThreadDescription =
(SetThreadDescriptionFn)GetProcAddress(kernelbase, "SetThreadDescription");
if (!pSetThreadDescription)
return AVERROR(ENOSYS);
wchar_t *wname;
if (utf8towchar(name, &wname) < 0)
return AVERROR(ENOMEM);
HRESULT hr = pSetThreadDescription(GetCurrentThread(), wname);
av_free(wname);
return SUCCEEDED(hr) ? 0 : AVERROR(EINVAL);
#else
// UWP is not supported because we cannot use LoadLibrary/GetProcAddress to
// detect the availability of the SetThreadDescription API. There is a small
// gap in Windows builds 1507-1607 where it was not available. UWP allows
// querying the availability of APIs with QueryOptionalDelayLoadedAPI, but it
// requires /DELAYLOAD:kernel32.dll during linking, and we cannot enforce that.
return AVERROR(ENOSYS);
#endif
}
#endif /* COMPAT_W32PTHREADS_H */ #endif /* COMPAT_W32PTHREADS_H */

482
configure vendored

File diff suppressed because it is too large Load diff

View file

@ -1,222 +1,7 @@
The last version increases of all libraries were on 2025-03-28 The last version increases of all libraries were on 2024-03-07
API changes, most recent first: API changes, most recent first:
2025-07-29 - 1c85a3832af - lavc 62.10.100 - smpte_436m.h
Add a new public header smpte_436m.h with API for
manipulating AV_CODEC_ID_SMPTE_436M_ANC data.
2025-07-10 - a566fcb9dc0 - lavf 62.2.100
mxf [de]muxer now uses AV_CODEC_ID_SMPTE_436M_ANC for
the vbi_vanc_smpte_436M streams instead of AV_CODEC_ID_NONE.
2025-07-10 - f4ff379baea - lavc 62.10.100 - codec_id.h
Add AV_CODEC_ID_SMPTE_436M_ANC.
2025-08-08 - 83b36f54108 - lavc 62.9.100 - codec_id.h
Add AV_CODEC_ID_PRORES_RAW.
2025-07-31 - 119d127d05c - lavu 60.7.100 - spherical.h
Add AV_SPHERICAL_PARAMETRIC_IMMERSIVE.
2025-07-20 - 157d3b007e9 - lavu 60.6.100 - attributes.h, avstring.h
Add av_scanf_format() and use it on av_sscanf().
2025-07-18 - fbda5ffb953 - lavu 60.5.100 - pixfmt.h
Add AV_PIX_FMT_OHCODEC.
2025-07-18 - fbda5ffb953 - lavu 60.5.100 - hwcontext.h
Add AV_HWDEVICE_TYPE_OHCODEC and AVOHCodecDeviceContext.
2025-07-14 - b24155cae11 - lavfi 11.2.100 - avfilter.h
Add AVFilterGraph->max_buffered_frames.
2025-07-07 - eca477da52 - lavc 62.6.100 - packet.h
Add AV_PKT_DATA_RTCP_SR.
2025-07-01 - 39d5a998bd - lavc 62.4.101 - packet.h
Add AV_PKT_DATA_3D_REFERENCE_DISPLAYS.
2025-07-01 - b2e4b0e282 - lavu 60.4.101 - frame.h
Add AV_FRAME_DATA_3D_REFERENCE_DISPLAYS.
2025-07-01 - 80a05bea4f - lavu 60.4.100 - tdrdi.h
Add AV3DReferenceDisplaysInfo and AV3DReferenceDisplay structs.
Add av_tdrdi_alloc() and av_tdrdi_get_display().
2025-05-21 - 004cc60f0e3 - lavu 60.3.100 - avassert.h
Add av_unreachable() and av_assume() macros.
2025-02-15 - e2f39671ae2 - lavfi 10.10.100 - avfilter.h
Add avfilter_link_get_hw_frames_ctx().
2025-04-21 - bf1579c904a - lavu 60.2.100 - log.h
Add AV_CLASS_CATEGORY_HWDEVICE.
2025-04-16 - c818c67991 - libpostproc 59.1.100 - postprocess.h
Deprecate PP_CPU_CAPS_3DNOW.
2025-04-07 - 19e9a203b7 - lavu 60.01.100 - dict.h
Add AV_DICT_DEDUP.
2025-03-17 - 49af9746e8f - lavu 59.60.100 - pixfmt.h
Add AV_PIX_FMT_GBRAP32BE and AV_PIX_FMT_GBRAP32LE.
2025-03-10 - 61fc9b6fee1 - lavu 59.59.100 - pixfmt.h
Add AV_PIX_FMT_YAF16BE, AV_PIX_FMT_YAF16LE, AV_PIX_FMT_YAF32BE,
and AV_PIX_FMT_YAF32LE.
2025-03-01 - 0245e9382c7 - lavu 59.58.100 - pixfmt.h
Add AV_PIX_FMT_GRAY32BE and AV_PIX_FMT_GRAY32LE.
2025-02-04 - 0ef678f5c50 - lavu 59.56.000 - pixfmt.h
Add AV_PIX_FMT_AMF_SURFACE.
2025-01-09 - a73760da537 - lavu 59.55.100 - pixfmt.h
Add AV_PIX_FMT_GBRPF16BE, AV_PIX_FMT_GBRPF16LE, AV_PIX_FMT_GBRAPF16BE,
AV_PIX_FMT_GBRAPF16LE, AV_PIX_FMT_GRAYF16BE, and AV_PIX_FMT_GRAYF16LE.
2025-02-16 - c79cdae3777 - lavu 59.57.100 - log.h
Add flags AV_LOG_PRINT_TIME and AV_LOG_PRINT_DATETIME.
2025-02-09 - 9fb806fa577 - lavc 61.32.100 - codec_id.h
Add AV_CODEC_ID_IVTV_VBI.
2025-01-25 - ea3c3b42dff - lavu 59.56.100 - frame.h
Add AV_SIDE_DATA_PROP_CHANNEL_DEPENDENT.
2025-01-25 - 6707d970c04 - lavfi 10.9.100 - buffersink.h
Add av_buffersink_get_side_data().
2025-01-25 - 7a025e1cb5f - lavfi 10.8.100 - buffersrc.h
Add AVBufferSrcParameters.side_data and AVBufferSrcParameters.nb_side_data
2025-01-25 - ef1cb1c9c81 - lavfi 10.7.100 - avfilter.h
Add AVFilterLink.side_data and AVFilterLink.nb_side_data
2025-01-05 - 42e72d5c8b5 - lavu 59.55.100 - frame.h
Add AV_FRAME_SIDE_DATA_FLAG_NEW_REF.
2025-01-05 - 19c95ecbff8 - lavc 61.31.100 - avcodec.h
Deprecate AVCodecContext->properties.
2025-01-05 - 2d91f89445d - lavc 61.30.100 - frame.h
Add AV_FRAME_FLAG_LOSSLESS.
2025-01-03 - f3c40826455 - lavc 61.29.100 - codec_id.h
Add AV_CODEC_ID_JPEGXL_ANIM.
2025-01-03 - da9dcaba69d - lavu 59.54.100 - frame.h
Add AV_CH_LAYOUT_5POINT1POINT2 and AV_CHANNEL_LAYOUT_5POINT1POINT2.
2024-12-23 - b88944a8aa5 - lavu 59.53.100 - frame.h
Add av_frame_side_data_remove_by_props().
2024-12-23 - 3428a8d8303 - lavu 59.52.100 - frame.h
Add AV_SIDE_DATA_PROP_SIZE_DEPENDENT and AV_FRAME_DATA_PROP_COLOR_DEPENDENT.
2024-12-23 - 45f0a7ad338 - lsws 8.13.100 - swscale.h
Add enum SwsIntent and SwsContext.intent.
2024-12-15 - 2ac34d08542 - lavc 61.27.100 packet.h
Add av_container_fifo_alloc_avpacket().
2024-12-15 - 56ba57b6725 - lavu 59.51.100 - refstruct.h container_fifo.h
Add a new public header refstruct.h with new API for
reference-counted objects.
Add a new public header container_fifo.h with new API for
a FIFO of container objects (e.g. AVFrame or AVPacket).
2024-12-13 - 6eb4bf04e92 - lavu 59.50.100 - channel_layout.h
Add AV_CH_LAYOUT_9POINT1POINT6 and AV_CHANNEL_LAYOUT_9POINT1POINT6.
2024-12-05 - 06f084468e0 - lavu 59.49.100 - csp.h
Add av_csp_itu_eotf() and av_csp_itu_eotf_inv().
2024-12-05 - bf0a6c41111 - lavu 59.48.100 - csp.h
Add av_csp_trc_func_inv_from_id().
2024-11-25 - 2a091d4f2ee - lsws 8.12.100 - swscale.h
Allow using sws_frame_scale() dynamically, without first initializing the
SwsContext. Deprecate sws_init_context(). Add sws_frame_setup() instead.
2024-11-25 - fb169640092 - lsws 8.11.100 - swscale.h
Replace #define-based SWS_* flags by enum SwsFlags.
2024-11-25 - ed5dd675624 - lsws 8.10.100 - swscale.h
Publicly expose struct SwsContext, enum SwsDither, and enum SwsAlphaBlend.
2024-11-16 - 46cb7b8d9dc - lavu 59.47.101 - frame.h
av_frame_get_buffer() now also aligns the data pointers according to
the requested alignment.
2024-11-13 - 20af68b63a4 - lavu 59.47.100 - channel_layout.h
Add AV_CHAN_BINAURAL_LEFT, AV_CHAN_BINAURAL_RIGHT
Add AV_CH_BINAURAL_LEFT, AV_CH_BINAURAL_RIGHT
Add AV_CH_LAYOUT_BINAURAL, AV_CHANNEL_LAYOUT_BINAURAL
2024-10-26 - e02a3b40a5e - lavu 59.46.100 - pixfmt.h
Add AV_PIX_FMT_XV48.
2024-10-23 - b03c758600f - lsws 8.9.100 - swscale.h
Add sws_is_noop().
2024-10-23 - 5e50a56b9c4 - lsws 8.8.100 - swscale.h
Add frame property testing API:
- sws_test_format()
- sws_test_colorspace()
- sws_test_primaries()
- sws_test_transfer()
- sws_test_frame()
2024-10-23 - 87baf9ab2c2 - lsws 8.7.100 - swscale.h
Add sws_free_context().
2024-10-23 - f462ba05f54 - lavu 59.45.100 - pixfmt.h
Add AV_PIX_FMT_Y216.
2024-10-15 - 2336e685657 - lavu 59.44.100 - pixfmt.h
Add AV_PIX_FMT_RGB96 and AV_PIX_FMT_RGBA128.
2024-10-14 - c993a91bea - lavu 59.43.100 - pixfmt.h
Add AV_PIX_FMT_RGBF16.
2024-10-08 - 29ea34728f1 - lavu 59.42.100 - pixfmt.h
Add AV_PIX_FMT_AYUV, AV_PIX_FMT_UYVA, AV_PIX_FMT_VYU444,
and AV_PIX_FMT_V30X.
2024-10-01 - 0548ab2e425 - lavu 59.41.100 - log.h
Add AVClass.state_flags_offset and AV_CLASS_STATE_INITIALIZED.
2024-09-30 - 50d1b89fa0d - lavf 61.9.100 - avformat.h
Add {nb_}coded_side_data to AVStreamGroupTileGrid.
2024-09-30 - df9b80d21a2 - lavu 59
Deprecate av_int_list_length_for_size(), av_int_list_length(), and
av_opt_set_int_list() without replacement. All AVOptions using these
should be replaced with AV_OPT_TYPE_FLAG_ARRAY.
2024-09-30 - 1efcdbc54d9 - lavfi 10.6.100
Buffersink now has array-type options
- pixel_formats
- colorspaces
- colorranges
replacing the int-list options
- pix_fmts
- color_spaces
- color_ranges
abuffersink now has array-type options
- sample_formats
- samplerates
- channel_layouts
replacing the int-list/string options
- sample_fmts
- sample_rates
- ch_layouts
-------- 8< --------- FFmpeg 7.1 was cut here -------- 8< ---------
2024-09-23 - 6940a6de2f0 - lavu 59.38.100 - frame.h 2024-09-23 - 6940a6de2f0 - lavu 59.38.100 - frame.h
Add AV_FRAME_DATA_VIEW_ID. Add AV_FRAME_DATA_VIEW_ID.
@ -627,7 +412,7 @@ API changes, most recent first:
Deprecate AVFrame.palette_has_changed without replacement. Deprecate AVFrame.palette_has_changed without replacement.
2023-05-15 - 7d1d61cc5f5 - lavc 60 - avcodec.h 2023-05-15 - 7d1d61cc5f5 - lavc 60 - avcodec.h
Deprecate AVCodecContext.ticks_per_frame in favor of Depreate AVCodecContext.ticks_per_frame in favor of
AVCodecContext.framerate (encoding) and AVCodecContext.framerate (encoding) and
AV_CODEC_PROP_FIELDS (decoding). AV_CODEC_PROP_FIELDS (decoding).
@ -635,7 +420,7 @@ API changes, most recent first:
Add AV_CODEC_PROP_FIELDS. Add AV_CODEC_PROP_FIELDS.
2023-05-15 - 8b20d0dcb5c - lavc 60 - codec.h 2023-05-15 - 8b20d0dcb5c - lavc 60 - codec.h
Deprecate AV_CODEC_CAP_SUBFRAMES without replacement. Depreate AV_CODEC_CAP_SUBFRAMES without replacement.
2023-05-07 - c2ae8e30b7f - lavc 60.11.100 - codec_par.h 2023-05-07 - c2ae8e30b7f - lavc 60.11.100 - codec_par.h
Add AVCodecParameters.framerate. Add AVCodecParameters.framerate.

View file

@ -1093,7 +1093,7 @@ HTML_STYLESHEET =
# cascading style sheets that are included after the standard style sheets # cascading style sheets that are included after the standard style sheets
# created by doxygen. Using this option one can overrule certain style aspects. # created by doxygen. Using this option one can overrule certain style aspects.
# This is preferred over using HTML_STYLESHEET since it does not replace the # This is preferred over using HTML_STYLESHEET since it does not replace the
# standard style sheet and is therefore more robust against future updates. # standard style sheet and is therefor more robust against future updates.
# Doxygen will copy the style sheet files to the output directory. # Doxygen will copy the style sheet files to the output directory.
# Note: The order of the extra stylesheet files is of importance (e.g. the last # Note: The order of the extra stylesheet files is of importance (e.g. the last
# stylesheet in the list overrules the setting of the previous ones in the # stylesheet in the list overrules the setting of the previous ones in the
@ -1636,7 +1636,7 @@ EXTRA_PACKAGES =
# Note: Only use a user-defined header if you know what you are doing! The # Note: Only use a user-defined header if you know what you are doing! The
# following commands have a special meaning inside the header: $title, # following commands have a special meaning inside the header: $title,
# $datetime, $date, $doxygenversion, $projectname, $projectnumber, # $datetime, $date, $doxygenversion, $projectname, $projectnumber,
# $projectbrief, $projectlogo. Doxygen will replace $title with the empty string, # $projectbrief, $projectlogo. Doxygen will replace $title with the empy string,
# for the replacement values of the other commands the user is referred to # for the replacement values of the other commands the user is referred to
# HTML_HEADER. # HTML_HEADER.
# This tag requires that the tag GENERATE_LATEX is set to YES. # This tag requires that the tag GENERATE_LATEX is set to YES.

View file

@ -189,52 +189,6 @@ see page 44-46 or section 5.5 of
Extract the core from a E-AC-3 stream, dropping extra channels. Extract the core from a E-AC-3 stream, dropping extra channels.
@section eia608_to_smpte436m
Convert from a @code{EIA_608} stream to a @code{SMPTE_436M_ANC} data stream, wrapping the closed captions in CTA-708 CDP VANC packets.
@table @option
@item line_number
Choose which line number the generated VANC packets should go on. You generally want either line 9 (the default) or 11.
@item wrapping_type
Choose the SMPTE 436M wrapping type, defaults to @samp{vanc_frame}.
It accepts the values:
@table @samp
@item vanc_frame
VANC frame (interlaced or segmented progressive frame)
@item vanc_field_1
@item vanc_field_2
@item vanc_progressive_frame
@end table
@item sample_coding
Choose the SMPTE 436M sample coding, defaults to @samp{8bit_luma}.
It accepts the values:
@table @samp
@item 8bit_luma
8-bit component luma samples
@item 8bit_color_diff
8-bit component color difference samples
@item 8bit_luma_and_color_diff
8-bit component luma and color difference samples
@item 10bit_luma
10-bit component luma samples
@item 10bit_color_diff
10-bit component color difference samples
@item 10bit_luma_and_color_diff
10-bit component luma and color difference samples
@item 8bit_luma_parity_error
8-bit component luma samples with parity error
@item 8bit_color_diff_parity_error
8-bit component color difference samples with parity error
@item 8bit_luma_and_color_diff_parity_error
8-bit component luma and color difference samples with parity error
@end table
@item initial_cdp_sequence_cntr
The initial value of the CDP's 16-bit unsigned integer @code{cdp_hdr_sequence_cntr} and @code{cdp_ftr_sequence_cntr} fields. Defaults to 0.
@item cdp_frame_rate
Set the CDP's @code{cdp_frame_rate} field. This doesn't actually change the timing of the data stream, it just changes the values inserted in that field in the generated CDP packets. Defaults to @samp{30000/1001}.
@end table
@section extract_extradata @section extract_extradata
Extract the in-band extradata. Extract the in-band extradata.
@ -469,21 +423,9 @@ Please note that this filter is auto-inserted for MPEG-TS (muxer
@section h264_redundant_pps @section h264_redundant_pps
This applies a specific fixup to some Blu-ray BDMV H264 streams This applies a specific fixup to some Blu-ray streams which contain
which contain redundant PPSs. The PPSs modify irrelevant parameters redundant PPSs modifying irrelevant parameters of the stream which
of the stream, confusing other transformations which require confuse other transformations which require correct extradata.
the correct extradata.
The encoder used on these impacted streams adds extra PPSs throughout
the stream, varying the initial QP and whether weighted prediction
was enabled. This causes issues after copying the stream into
a global header container, as the starting PPS is not suitable
for the rest of the stream. One side effect, for example,
is seeking will return garbled output until a new PPS appears.
This BSF removes the extra PPSs and rewrites the slice headers
such that the stream uses a single leading PPS in the global header,
which resolves the issue.
@section hevc_metadata @section hevc_metadata
@ -754,12 +696,12 @@ ffmpeg -i INPUT -c copy -bsf noise=1 output.mkv
Drop every video packet not marked as a keyframe after timestamp 30s but do not Drop every video packet not marked as a keyframe after timestamp 30s but do not
modify any of the remaining packets. modify any of the remaining packets.
@example @example
ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(pts*tb\,30)*not(key)' output.mkv ffmpeg -i INPUT -c copy -bsf:v noise=drop='gt(t\,30)*not(key)' output.mkv
@end example @end example
Drop one second of audio every 10 seconds and add some random noise to the rest. Drop one second of audio every 10 seconds and add some random noise to the rest.
@example @example
ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(pts*tb\,10)\,9\,10)' output.mkv ffmpeg -i INPUT -c copy -bsf:a noise=amount=-1:drop='between(mod(t\,10)\,9\,10)' output.mkv
@end example @end example
@section null @section null
@ -994,11 +936,6 @@ ffmpeg -i INPUT -c:a copy -bsf:a setts=pts=DTS out.mkv
Log basic packet information. Mainly useful for testing, debugging, Log basic packet information. Mainly useful for testing, debugging,
and development. and development.
@section smpte436m_to_eia608
Convert from a @code{SMPTE_436M_ANC} data stream to a @code{EIA_608} stream,
extracting the closed captions from CTA-708 CDP VANC packets, and ignoring all other data.
@anchor{text2movsub} @anchor{text2movsub}
@section text2movsub @section text2movsub

View file

@ -30,13 +30,6 @@ fate
fate-list fate-list
List all fate/regression test targets. List all fate/regression test targets.
fate-list-failing
List the fate tests that failed the last time they were executed.
fate-clear-reports
Remove the test reports from previous test executions (getting rid of
potentially stale results from fate-list-failing).
install install
Install headers, libraries and programs. Install headers, libraries and programs.
@ -70,3 +63,4 @@ make -j<num>
make -k make -k
Continue build in case of errors, this is useful for the regression tests Continue build in case of errors, this is useful for the regression tests
sometimes but note that it will still not run all reg tests. sometimes but note that it will still not run all reg tests.

View file

@ -664,8 +664,6 @@ for codecs that support it. At present, those are H.264 and VP9.
@item film_grain @item film_grain
Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}). Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
Supported at present by AV1 decoders. Supported at present by AV1 decoders.
@item enhancements
Export picture enhancement metadata through frame side data, e.g. LCEVC (see @code{AV_FRAME_DATA_LCEVC}).
@end table @end table
@item threads @var{integer} (@emph{decoding/encoding,video}) @item threads @var{integer} (@emph{decoding/encoding,video})

View file

@ -395,7 +395,7 @@ without this library.
@c man end AUDIO DECODERS @c man end AUDIO DECODERS
@chapter Subtitles Decoders @chapter Subtitles Decoders
@c man begin SUBTITLES DECODERS @c man begin SUBTILES DECODERS
@section libaribb24 @section libaribb24
@ -427,7 +427,7 @@ Enabled by default.
Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption} Yet another ARIB STD-B24 caption decoder using external @dfn{libaribcaption}
library. library.
Implements profiles A and C of the Japanese ARIB STD-B24 standard, Implements profiles A and C of the Japanse ARIB STD-B24 standard,
Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T. Brazilian ABNT NBR 15606-1, and Philippines version of ISDB-T.
Requires the presence of the libaribcaption headers and library Requires the presence of the libaribcaption headers and library
@ -477,7 +477,7 @@ Specify comma-separated list of font family names to be used for @dfn{bitmap}
or @dfn{ass} type subtitle rendering. or @dfn{ass} type subtitle rendering.
Only first font name is used for @dfn{ass} type subtitle. Only first font name is used for @dfn{ass} type subtitle.
If not specified, use internally defined default font family. If not specified, use internaly defined default font family.
@item -ass_single_rect @var{boolean} @item -ass_single_rect @var{boolean}
ARIB STD-B24 specifies that some captions may be displayed at different ARIB STD-B24 specifies that some captions may be displayed at different
@ -495,7 +495,7 @@ default behavior at compilation.
@item -force_outline_text @var{boolean} @item -force_outline_text @var{boolean}
Specify whether always render outline text for all characters regardless of Specify whether always render outline text for all characters regardless of
the indication by character style. the indication by charactor style.
The default is @var{false}. The default is @var{false}.
@ -696,4 +696,4 @@ box and an end box, typically subtitles. Default value is 0 if
@end table @end table
@c man end SUBTITLES DECODERS @c man end SUBTILES DECODERS

View file

@ -292,6 +292,7 @@ DVD-Video demuxer, powered by libdvdnav and libdvdread.
Can directly ingest DVD titles, specifically sequential PGCs, into Can directly ingest DVD titles, specifically sequential PGCs, into
a conversion pipeline. Menu assets, such as background video or audio, a conversion pipeline. Menu assets, such as background video or audio,
can also be demuxed given the menu's coordinates (at best effort). can also be demuxed given the menu's coordinates (at best effort).
Seeking is not supported at this time.
Block devices (DVD drives), ISO files, and directory structures are accepted. Block devices (DVD drives), ISO files, and directory structures are accepted.
Activate with @code{-f dvdvideo} in front of one of these inputs. Activate with @code{-f dvdvideo} in front of one of these inputs.
@ -379,11 +380,11 @@ Default is false.
@item menu_lu @var{int} @item menu_lu @var{int}
The menu language to demux. In DVD, menus are grouped by language. The menu language to demux. In DVD, menus are grouped by language.
Default is 1, the first language unit. Default is 0, the first language unit.
@item menu_vts @var{int} @item menu_vts @var{int}
The VTS where the menu lives, or 0 if it is a VMG menu (root-level). The VTS where the menu lives, or 0 if it is a VMG menu (root-level).
Default is 1, menu of the first VTS. Default is 0, VMG menu.
@item pgc @var{int} @item pgc @var{int}
The entry PGC to start playback, in conjunction with @option{pg}. The entry PGC to start playback, in conjunction with @option{pg}.
@ -396,7 +397,8 @@ Default is 0, automatically resolve from value of @option{title}.
The entry PG to start playback, in conjunction with @option{pgc}. The entry PG to start playback, in conjunction with @option{pgc}.
Alternative to setting @option{title}. Alternative to setting @option{title}.
Chapter markers are not supported at this time. Chapter markers are not supported at this time.
Default is 1, the first PG of the PGC. Default is 0, automatically resolve from value of @option{title}, or
start from the beginning (PG 1) of the menu.
@item preindex @var{bool} @item preindex @var{bool}
Enable this to have accurate chapter (PTT) markers and duration measurement, Enable this to have accurate chapter (PTT) markers and duration measurement,
@ -404,6 +406,7 @@ which requires a slow second pass read in order to index the chapter marker
timestamps from NAV packets. This is non-ideal extra work for real optical drives. timestamps from NAV packets. This is non-ideal extra work for real optical drives.
It is recommended and faster to use this option with a backup of the DVD structure It is recommended and faster to use this option with a backup of the DVD structure
stored on a hard drive. Not compatible with @option{pgc} and @option{pg}. stored on a hard drive. Not compatible with @option{pgc} and @option{pg}.
Not applicable to menus.
Default is 0, false. Default is 0, false.
@item trim @var{bool} @item trim @var{bool}
@ -564,13 +567,6 @@ prefer to use #EXT-X-START if it's in playlist instead of live_start_index.
@item allowed_extensions @item allowed_extensions
',' separated list of file extensions that hls is allowed to access. ',' separated list of file extensions that hls is allowed to access.
@item extension_picky
This blocks disallowed extensions from probing
It also requires all available segments to have matching extensions to the format
except mpegts, which is always allowed.
It is recommended to set the whitelists correctly instead of depending on extensions
Enabled by default.
@item max_reload @item max_reload
Maximum number of times a insufficient list is attempted to be reloaded. Maximum number of times a insufficient list is attempted to be reloaded.
Default value is 1000. Default value is 1000.
@ -855,32 +851,6 @@ Set the sample rate for libopenmpt to output.
Range is from 1000 to INT_MAX. The value default is 48000. Range is from 1000 to INT_MAX. The value default is 48000.
@end table @end table
@anchor{mccdec}
@section mcc
Demuxer for MacCaption MCC files, it supports MCC versions 1.0 and 2.0.
MCC files store VANC data, which can include closed captions (EIA-608 and CEA-708), ancillary time code, pan-scan data, etc.
By default, for backward compatibility, the MCC demuxer extracts just the EIA-608 and CEA-708 closed captions and returns a @code{EIA_608} stream, ignoring all other VANC data.
You can change it to return all VANC data in a @code{SMPTE_436M_ANC} data stream by setting @option{-eia608_extract 0}
@subsection Examples
@itemize
@item
Convert a MCC file to Scenarist (SCC) format:
@example
ffmpeg -i CC.mcc -c:s copy CC.scc
@end example
Note that the SCC format only supports EIA-608, so this will discard all other data such as CEA-708 extensions.
@item
Merge a MCC file into a MXF file:
@example
ffmpeg -i video_and_audio.mxf -eia608_extract 0 -i CC.mcc -c copy -map 0 -map 1 out.mxf
@end example
This retains all VANC data and inserts it into the output MXF file as a @code{SMPTE_436M_ANC} data stream.
@end itemize
@section mov/mp4/3gp @section mov/mp4/3gp
Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12). Demuxer for Quicktime File Format & ISO/IEC Base Media File Format (ISO/IEC 14496-12 or MPEG-4 Part 12, ISO/IEC 15444-12 or JPEG 2000 Part 12).
@ -1016,7 +986,7 @@ to 1 (-1 means automatic setting, 1 means enabled, 0 means
disabled). Default value is -1. disabled). Default value is -1.
@item merge_pmt_versions @item merge_pmt_versions
Reuse existing streams when a PMT's version is updated and elementary Re-use existing streams when a PMT's version is updated and elementary
streams move to different PIDs. Default value is 0. streams move to different PIDs. Default value is 0.
@item max_packet_size @item max_packet_size

View file

@ -70,6 +70,9 @@ variable-length arrays;
@item @item
complex numbers; complex numbers;
@item
mixed statements and declarations.
@end itemize @end itemize
@subsection SIMD/DSP @subsection SIMD/DSP
@ -112,7 +115,7 @@ Objective-C where required for interacting with macOS-specific interfaces.
@section Code formatting conventions @section Code formatting conventions
There are the following guidelines regarding the code style in files: There are the following guidelines regarding the indentation in files:
@itemize @bullet @itemize @bullet
@item @item
@ -132,104 +135,6 @@ K&R coding style is used.
@end itemize @end itemize
The presentation is one inspired by 'indent -i4 -kr -nut'. The presentation is one inspired by 'indent -i4 -kr -nut'.
@subsection Examples
Some notable examples to illustrate common code style in FFmpeg:
@itemize @bullet
@item
Space around assignments and after
@code{if}/@code{do}/@code{while}/@code{for} keywords:
@example c, good
// Good
if (condition)
av_foo();
@end example
@example c, good
// Good
for (size_t i = 0; i < len; i++)
av_bar(i);
@end example
@example c, good
// Good
size_t size = 0;
@end example
However no spaces between the parentheses and condition, unless it helps
readability of complex conditions, so the following should not be done:
@example c, bad
// Bad style
if ( condition )
av_foo();
@end example
@item
No unnecessary parentheses, unless it helps readability:
@example c, good
// Good
int fields = ilace ? 2 : 1;
@end example
@item
Don't wrap single-line blocks in braces. Use braces only if there is an accompanying else statement. This keeps future code changes easier to keep track of.
@example c, good
// Good
if (bits_pixel == 24) @{
avctx->pix_fmt = AV_PIX_FMT_BGR24;
@} else if (bits_pixel == 8) @{
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
@} else
return AVERROR_INVALIDDATA;
@end example
@item
Avoid assignments in conditions where it makes sense:
@example c, good
// Good
video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64)
if (!video_enc->chroma_intra_matrix)
return AVERROR(ENOMEM);
@end example
@example c, bad
// Bad style
if (!(video_enc->chroma_intra_matrix = av_mallocz(sizeof(*video_enc->chroma_intra_matrix) * 64)))
return AVERROR(ENOMEM);
@end example
@example c, good
// Ok
while ((entry = av_dict_iterate(options, entry)))
av_log(ctx, AV_LOG_INFO, "Item '%s': '%s'\n", entry->key, entry->value);
@end example
@item
When declaring a pointer variable, the @code{*} goes with the variable not the type:
@example c, good
// Good
AVStream *stream;
@end example
@example c, bad
// Bad style
AVStream* stream;
@end example
@end itemize
If you work on a file that does not follow these guidelines consistently,
change the parts that you are editing to follow these guidelines but do
not make unrelated changes in the file to make it conform to these.
@subsection Vim configuration @subsection Vim configuration
In order to configure Vim to follow FFmpeg formatting conventions, paste In order to configure Vim to follow FFmpeg formatting conventions, paste
the following snippet into your @file{.vimrc}: the following snippet into your @file{.vimrc}:
@ -546,7 +451,7 @@ FFmpeg also has a defined scope - your new API must fit within it.
@subsubheading Replacing existing APIs @subsubheading Replacing existing APIs
If your new API is replacing an existing one, it should be strictly superior to If your new API is replacing an existing one, it should be strictly superior to
it, so that the advantages of using the new API outweigh the cost to the it, so that the advantages of using the new API outweight the cost to the
callers of changing their code. After adding the new API you should then callers of changing their code. After adding the new API you should then
deprecate the old one and schedule it for removal, as described in deprecate the old one and schedule it for removal, as described in
@ref{Removing interfaces}. @ref{Removing interfaces}.
@ -596,7 +501,7 @@ change in @file{doc/APIchanges}.
Backward-incompatible API or ABI changes require incrementing (bumping) the Backward-incompatible API or ABI changes require incrementing (bumping) the
major version number, as described in @ref{Major version bumps}. Major major version number, as described in @ref{Major version bumps}. Major
bumps are significant events that happen on a schedule - so if your change bumps are significant events that happen on a schedule - so if your change
strictly requires one you should add it under @code{#if} preprocessor guards that strictly requires one you should add it under @code{#if} preprocesor guards that
disable it until the next major bump happens. disable it until the next major bump happens.
New APIs that can be added without breaking API or ABI compatibility require New APIs that can be added without breaking API or ABI compatibility require
@ -917,10 +822,10 @@ improves readability.
Consider adding a regression test for your code. All new modules Consider adding a regression test for your code. All new modules
should be covered by tests. That includes demuxers, muxers, decoders, encoders should be covered by tests. That includes demuxers, muxers, decoders, encoders
filters, bitstream filters, parsers. If its not possible to do that, add filters, bitstream filters, parsers. If its not possible to do that, add
an explanation why to your patchset, its ok to not test if there's a reason. an explanation why to your patchset, its ok to not test if theres a reason.
@item @item
If you added NASM code please check that things still work with --disable-x86asm. If you added YASM code please check that things still work with --disable-yasm.
@item @item
Test your code with valgrind and or Address Sanitizer to ensure it's free Test your code with valgrind and or Address Sanitizer to ensure it's free
@ -1022,25 +927,6 @@ In case you need finer control over how valgrind is invoked, use the
@code{--target-exec='valgrind <your_custom_valgrind_options>} option in @code{--target-exec='valgrind <your_custom_valgrind_options>} option in
your configure line instead. your configure line instead.
@anchor{Maintenance}
@chapter Maintenance process
@anchor{MAINTAINERS}
@section MAINTAINERS
The developers maintaining each part of the codebase are listed in @file{MAINTAINERS}.
Being listed in @file{MAINTAINERS}, gives one the right to have git write access to
the specific repository.
@anchor{Becoming a maintainer}
@section Becoming a maintainer
People add themselves to @file{MAINTAINERS} by sending a patch like any other code
change. These get reviewed by the community like any other patch. It is expected
that, if someone has an objection to a new maintainer, she is willing to object
in public with her full name and is willing to take over maintainership for the area.
@anchor{Release process} @anchor{Release process}
@chapter Release process @chapter Release process

View file

@ -106,8 +106,15 @@ debugging by setting the option to "disable".
Enables the use of the long term prediction extension which increases coding Enables the use of the long term prediction extension which increases coding
efficiency in very low bandwidth situations such as encoding of voice or efficiency in very low bandwidth situations such as encoding of voice or
solo piano music by extending constant harmonic peaks in bands throughout solo piano music by extending constant harmonic peaks in bands throughout
frames. This option is implied by profile:a aac_low. frames. This option is implied by profile:a aac_low and is incompatible with
Use in conjunction with @option{-ar} to decrease the samplerate. aac_pred. Use in conjunction with @option{-ar} to decrease the samplerate.
@item aac_pred
Enables the use of a more traditional style of prediction where the spectral
coefficients transmitted are replaced by the difference of the current
coefficients minus the previous "predicted" coefficients. In theory and sometimes
in practice this can improve quality for low to mid bitrate audio.
This option implies the aac_main profile and is incompatible with aac_ltp.
@item profile @item profile
Sets the encoding profile, possible values: Sets the encoding profile, possible values:
@ -125,6 +132,10 @@ MPEG4 specifications.
Long term prediction profile, is enabled by and will enable the @option{aac_ltp} Long term prediction profile, is enabled by and will enable the @option{aac_ltp}
option. Introduced in MPEG4. option. Introduced in MPEG4.
@item aac_main
Main-type prediction profile, is enabled by and will enable the @option{aac_pred}
option. Introduced in MPEG2.
@end table @end table
If this option is unspecified it is set to @samp{aac_low}. If this option is unspecified it is set to @samp{aac_low}.
@end table @end table
@ -1038,7 +1049,7 @@ forces a wideband cutoff for bitrates < 15 kbps, unless CELT-only
Set channel mapping family to be used by the encoder. The default value of -1 Set channel mapping family to be used by the encoder. The default value of -1
uses mapping family 0 for mono and stereo inputs, and mapping family 1 uses mapping family 0 for mono and stereo inputs, and mapping family 1
otherwise. The default also disables the surround masking and LFE bandwidth otherwise. The default also disables the surround masking and LFE bandwidth
optimizations in libopus, and requires that the input contains 8 channels or optimzations in libopus, and requires that the input contains 8 channels or
fewer. fewer.
Other values include 0 for mono and stereo, 1 for surround sound with masking Other values include 0 for mono and stereo, 1 for surround sound with masking
@ -1380,48 +1391,6 @@ Higher is better but slower.
@end table @end table
@anchor{ffv1}
@section ffv1
FFv1 Encoder
@subsection Options
The following options are supported by FFmpeg's FFv1 encoder.
@table @option
@item context
Sets the context size, 0 (default) is small, 1 is big.
@item coder
Set the coder,
@table @samp
@item rice
Golomb rice coder
@item range_def
Range coder with default table
@item range_tab
Range coder with custom table
@end table
@item slicecrc
-1 (default, automatic), 1 use crc with zero initial and final state, 2 use crc with non zero initial and final state
@item qtable
@table @samp
@item default
default, automatic
@item 8bit
use 8bit default
@item greater8bit
use >8bit default
@end table
@item remap_optimizer
0 - 5, default 3, how much effort the encoder puts into optimizing the remap table.
@end table
@section GIF @section GIF
GIF image/animation encoder. GIF image/animation encoder.
@ -1889,42 +1858,6 @@ ffmpeg -i input -c:v libaom-av1 -b:v 500K -aom-params tune=psnr:enable-tpl-model
@end table @end table
@section liboapv
Advanced Professional Video codec encoder wrapper.
This encoder requires the presence of the liboapv headers and library
during configuration. You need to explicitly configure the build with
@option{--enable-liboapv}.
@float NOTE
Many liboapv encoder options are mapped to FFmpeg global codec options,
while unique encoder options are provided through private options.
@end float
The apv project website is at @url{https://github.com/AcademySoftwareFoundation/openapv}.
@subsection Options
The following options are supported by the liboapv wrapper.
@float NOTE
To get a more extensive documentation of the liboapv options, consult the
liboapv documentation.
@end float
@table @option
@item preset
Set the quality-speed tradeoff [fastest, fast, medium, slow, placebo, default]
@item qp
Set the quantization parameter value for CQP rate control mode.
@item oapv-params (@emph{parse_apv_params})
Set liboapvenc options using a list of @var{key}=@var{value} pairs separated
by ":". See the liboapv encoder user guide for a list of accepted parameters.
@end table
@section libsvtav1 @section libsvtav1
SVT-AV1 encoder wrapper. SVT-AV1 encoder wrapper.
@ -3342,75 +3275,6 @@ fastest.
@end table @end table
@section MediaCodec
MediaCodec encoder wrapper enables hardware-accelerated video encoding on
Android device. It supports H.264, H.265 (HEVC), VP8, VP9, MPEG-4, and AV1
encoding (whether works or not is device dependent).
Android provides two sets of APIs: Java MediaCodec and NDK MediaCodec. The
MediaCodec encoder wrapper supports both. Note that the NDK MediaCodec API
operates without requiring JVM, but may fail to function outside the JVM
environment due to dependencies on system framework services, particularly
after Android 15.
@table @option
@item ndk_codec @var{boolean}
Use the NDK-based MediaCodec API instead of the Java API. Enabled by default
if @code{av_jni_get_java_vm()} return NULL.
@item ndk_async @var{boolean}
Use NDK MediaCodec in async mode. Async mode has less overhead than poll in a
loop in sync mode. The drawback of async mode is AV_CODEC_FLAG_GLOBAL_HEADER
doesn't work (use extract_extradata bsf when necessary). It doesn't work and
will be disabled automatically on devices below Android 8.0.
@item codec_name @var{string}
A codec type can have multiple implementations on a single device, this option
specify which backend to use (via MediaCodec createCodecByName API). It's NULL
by default, and encoder is created by createEncoderByType.
@item bitrate_mode @var{integer}
Possible values:
@table @samp
@item cq
Constant quality mode
@item vbr
Variable bitrate mode
@item cbr
Constant bitrate mode
@item cbr_fd
Constant bitrate mode with frame drops
@end table
@item pts_as_dts @var{boolean}
Use PTS as DTS. This is a workaround since MediaCodec API doesn't provide
decoding timestamp. It is enabled automatically if B frame is 0.
@item operating_rate @var{integer}
The desired operating rate that the codec will need to operate at, zero for
unspecified. This is used for cases like high-speed/slow-motion video capture,
where the video encoder format contains the target playback rate (e.g. 30fps),
but the component must be able to handle the high operating capture rate (e.g.
240fps). This rate will be used by codec for resource planning and setting the
operating points.
@item qp_i_min @var{integer}
Minimum quantization parameter for I frame.
@item qp_p_min @var{integer}
Minimum quantization parameter for P frame.
@item qp_b_min @var{integer}
Minimum quantization parameter for B frame.
@item qp_i_max @var{integer}
Maximum quantization parameter for I frame.
@item qp_p_max @var{integer}
Maximum quantization parameter for P frame.
@item qp_b_max @var{integer}
Maximum quantization parameter for B frame.
@end table
@section MediaFoundation @section MediaFoundation
This provides wrappers to encoders (both audio and video) in the This provides wrappers to encoders (both audio and video) in the
@ -3493,13 +3357,6 @@ Default is 1 (on).
PNG image encoder. PNG image encoder.
@subsection Options
@table @option
@item compression_level
Sets the compression level, from 0 to 9(default)
@end table
@subsection Private options @subsection Private options
@table @option @table @option
@ -3507,8 +3364,6 @@ Sets the compression level, from 0 to 9(default)
Set physical density of pixels, in dots per inch, unset by default Set physical density of pixels, in dots per inch, unset by default
@item dpm @var{integer} @item dpm @var{integer}
Set physical density of pixels, in dots per meter, unset by default Set physical density of pixels, in dots per meter, unset by default
@item pred @var{method}
Set prediction method (none, sub, up, avg, paeth, mixed), default is paeth
@end table @end table
@section ProRes @section ProRes
@ -3703,7 +3558,7 @@ For encoders set this flag to ON to reduce power consumption and GPU usage.
@end table @end table
@subsection Runtime Options @subsection Runtime Options
Following options can be used during qsv encoding. Following options can be used durning qsv encoding.
@table @option @table @option
@item @var{global_quality} @item @var{global_quality}
@ -3813,7 +3668,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
on performance and objective visual quality metric. on performance and objective visual quality metric.
@item @var{low_delay_brc} @item @var{low_delay_brc}
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
more accurate bitrate control to minimize the variance of bitstream size frame more accurate bitrate control to minimize the variance of bitstream size frame
by frame. Value: -1-default 0-off 1-on by frame. Value: -1-default 0-off 1-on
@ -4012,7 +3867,7 @@ improves subjective visual quality. Enabling this flag may have negative impact
on performance and objective visual quality metric. on performance and objective visual quality metric.
@item @var{low_delay_brc} @item @var{low_delay_brc}
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
more accurate bitrate control to minimize the variance of bitstream size frame more accurate bitrate control to minimize the variance of bitstream size frame
by frame. Value: -1-default 0-off 1-on by frame. Value: -1-default 0-off 1-on
@ -4246,7 +4101,7 @@ Extended bitrate control.
Depth of look ahead in number frames, available when extbrc option is enabled. Depth of look ahead in number frames, available when extbrc option is enabled.
@item @var{low_delay_brc} @item @var{low_delay_brc}
Setting this flag turns on or off LowDelayBRC feature in qsv plugin, which provides Setting this flag turns on or off LowDelayBRC feautre in qsv plugin, which provides
more accurate bitrate control to minimize the variance of bitstream size frame more accurate bitrate control to minimize the variance of bitstream size frame
by frame. Value: -1-default 0-off 1-on by frame. Value: -1-default 0-off 1-on
@ -4602,25 +4457,6 @@ Reduces detail but attempts to preserve color at extremely low bitrates.
@chapter Subtitles Encoders @chapter Subtitles Encoders
@c man begin SUBTITLES ENCODERS @c man begin SUBTITLES ENCODERS
@section dvbsub
This codec encodes the bitmap subtitle format that is used in DVB
broadcasts and recordings. The bitmaps are typically embedded in a
container such as MPEG-TS as a separate stream.
@subsection Options
@table @option
@item min_bpp @var{integer (2, 4, or 8)}
Set a minimum bits-per-pixel value for the subtitle color lookup tables.
DVB supports 2, 4, and 8 bits-per-pixel color lookup tables. This
option enables forcing a particular bits-per-pixel value regardless of
the number of colors. Since not all players support or properly
support 2 bits-per-pixel, this value defaults to 4.
@end table
@section dvdsub @section dvdsub
This codec encodes the bitmap subtitle format that is used in DVDs. This codec encodes the bitmap subtitle format that is used in DVDs.
@ -4648,18 +4484,4 @@ one byte per subtitle on average.
By default, this work-around is disabled. By default, this work-around is disabled.
@end table @end table
@section lrc
This codec encodes the LRC lyrics format.
@subsection Options
@table @option
@item precision
Specify the precision of the fractional part of the timestamp. Time base is
determined based on this value.
Defaults to 2 for centiseconds.
@end table
@c man end SUBTITLES ENCODERS @c man end SUBTITLES ENCODERS

View file

@ -96,7 +96,6 @@ int main(int argc, char *argv[])
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
0, &bd, &read_packet, NULL, NULL); 0, &bd, &read_packet, NULL, NULL);
if (!avio_ctx) { if (!avio_ctx) {
av_freep(&avio_ctx_buffer);
ret = AVERROR(ENOMEM); ret = AVERROR(ENOMEM);
goto end; goto end;
} }

View file

@ -128,10 +128,6 @@ int main(int argc, char **argv)
outfilename = argv[2]; outfilename = argv[2];
pkt = av_packet_alloc(); pkt = av_packet_alloc();
if (!pkt) {
fprintf(stderr, "Could not allocate AVPacket\n");
exit(1); /* or proper cleanup and returning */
}
/* find the MPEG audio decoder */ /* find the MPEG audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2); codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
@ -165,7 +161,7 @@ int main(int argc, char **argv)
} }
outfile = fopen(outfilename, "wb"); outfile = fopen(outfilename, "wb");
if (!outfile) { if (!outfile) {
fprintf(stderr, "Could not open %s\n", outfilename); av_free(c);
exit(1); exit(1);
} }

View file

@ -30,6 +30,8 @@
* file to be played with ffplay. * file to be played with ffplay.
*/ */
#include <unistd.h>
#include <libavcodec/avcodec.h> #include <libavcodec/avcodec.h>
#include <libavformat/avformat.h> #include <libavformat/avformat.h>
#include <libavfilter/buffersink.h> #include <libavfilter/buffersink.h>
@ -94,7 +96,8 @@ static int init_filters(const char *filters_descr)
const AVFilter *abuffersink = avfilter_get_by_name("abuffersink"); const AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc(); AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc(); AVFilterInOut *inputs = avfilter_inout_alloc();
static const int out_sample_rate = 8000; static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
static const int out_sample_rates[] = { 8000, -1 };
const AVFilterLink *outlink; const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base; AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
@ -120,40 +123,34 @@ static int init_filters(const char *filters_descr)
} }
/* buffer audio sink: to terminate the filter chain. */ /* buffer audio sink: to terminate the filter chain. */
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "out"); ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
if (!buffersink_ctx) { NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
ret = AVERROR(ENOMEM);
goto end; goto end;
} }
ret = av_opt_set(buffersink_ctx, "sample_formats", "s16", ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN); AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end; goto end;
} }
ret = av_opt_set(buffersink_ctx, "channel_layouts", "mono", ret = av_opt_set(buffersink_ctx, "ch_layouts", "mono",
AV_OPT_SEARCH_CHILDREN); AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end; goto end;
} }
ret = av_opt_set_array(buffersink_ctx, "samplerates", AV_OPT_SEARCH_CHILDREN, ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
0, 1, AV_OPT_TYPE_INT, &out_sample_rate); AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end; goto end;
} }
ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
goto end;
}
/* /*
* Set the endpoints for the filter graph. The filter_graph will * Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr. * be linked to the graph described by filters_descr.

View file

@ -27,6 +27,8 @@
* @example decode_filter_video.c * @example decode_filter_video.c
*/ */
#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
@ -36,7 +38,6 @@
#include <libavfilter/buffersrc.h> #include <libavfilter/buffersrc.h>
#include <libavutil/mem.h> #include <libavutil/mem.h>
#include <libavutil/opt.h> #include <libavutil/opt.h>
#include <libavutil/time.h>
const char *filter_descr = "scale=78:24,transpose=cclock"; const char *filter_descr = "scale=78:24,transpose=cclock";
/* other way: /* other way:
@ -98,6 +99,7 @@ static int init_filters(const char *filters_descr)
AVFilterInOut *outputs = avfilter_inout_alloc(); AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc(); AVFilterInOut *inputs = avfilter_inout_alloc();
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base; AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
filter_graph = avfilter_graph_alloc(); filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) { if (!outputs || !inputs || !filter_graph) {
@ -120,26 +122,20 @@ static int init_filters(const char *filters_descr)
} }
/* buffer video sink: to terminate the filter chain. */ /* buffer video sink: to terminate the filter chain. */
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out"); ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
if (!buffersink_ctx) { NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
ret = AVERROR(ENOMEM);
goto end; goto end;
} }
ret = av_opt_set(buffersink_ctx, "pixel_formats", "gray8", ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_OPT_SEARCH_CHILDREN); AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end; goto end;
} }
ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
goto end;
}
/* /*
* Set the endpoints for the filter graph. The filter_graph will * Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr. * be linked to the graph described by filters_descr.
@ -194,7 +190,7 @@ static void display_frame(const AVFrame *frame, AVRational time_base)
delay = av_rescale_q(frame->pts - last_pts, delay = av_rescale_q(frame->pts - last_pts,
time_base, AV_TIME_BASE_Q); time_base, AV_TIME_BASE_Q);
if (delay > 0 && delay < 1000000) if (delay > 0 && delay < 1000000)
av_usleep(delay); usleep(delay);
} }
last_pts = frame->pts; last_pts = frame->pts;
} }

View file

@ -270,6 +270,7 @@ int main(int argc, char *argv[])
AVFilterGraph *graph; AVFilterGraph *graph;
AVFilterContext *src, *sink; AVFilterContext *src, *sink;
AVFrame *frame; AVFrame *frame;
uint8_t errstr[1024];
float duration; float duration;
int err, nb_frames, i; int err, nb_frames, i;
@ -294,7 +295,6 @@ int main(int argc, char *argv[])
md5 = av_md5_alloc(); md5 = av_md5_alloc();
if (!md5) { if (!md5) {
av_frame_free(&frame);
fprintf(stderr, "Error allocating the MD5 context\n"); fprintf(stderr, "Error allocating the MD5 context\n");
return 1; return 1;
} }
@ -302,10 +302,8 @@ int main(int argc, char *argv[])
/* Set up the filtergraph. */ /* Set up the filtergraph. */
err = init_filter_graph(&graph, &src, &sink); err = init_filter_graph(&graph, &src, &sink);
if (err < 0) { if (err < 0) {
av_frame_free(&frame);
av_freep(&md5);
fprintf(stderr, "Unable to init filter graph:"); fprintf(stderr, "Unable to init filter graph:");
return 1; goto fail;
} }
/* the main filtering loop */ /* the main filtering loop */
@ -356,10 +354,7 @@ int main(int argc, char *argv[])
return 0; return 0;
fail: fail:
avfilter_graph_free(&graph); av_strerror(err, errstr, sizeof(errstr));
av_frame_free(&frame); fprintf(stderr, "%s\n", errstr);
av_freep(&md5);
fprintf(stderr, "%s\n", av_err2str(err));
return 1; return 1;
} }

View file

@ -418,7 +418,7 @@ static void open_video(AVFormatContext *oc, const AVCodec *codec,
exit(1); exit(1);
} }
/* allocate and init a reusable frame */ /* allocate and init a re-usable frame */
ost->frame = alloc_frame(c->pix_fmt, c->width, c->height); ost->frame = alloc_frame(c->pix_fmt, c->width, c->height);
if (!ost->frame) { if (!ost->frame) {
fprintf(stderr, "Could not allocate video frame\n"); fprintf(stderr, "Could not allocate video frame\n");

View file

@ -219,8 +219,11 @@ int main(int argc, char **argv)
ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx); ret = decode_packet(decoder_ctx, frame, sw_frame, NULL, output_ctx);
finish: finish:
if (ret < 0) if (ret < 0) {
fprintf(stderr, "%s\n", av_err2str(ret)); char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "%s\n", buf);
}
avformat_close_input(&input_ctx); avformat_close_input(&input_ctx);

View file

@ -101,7 +101,7 @@ static int dynamic_set_parameter(AVCodecContext *avctx)
/* Set codec specific option */ /* Set codec specific option */
if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0) if ((ret = av_opt_set_dict(avctx->priv_data, &opts)) < 0)
goto fail; goto fail;
/* There is no "framerate" option in common option list. Use "-r" to set /* There is no "framerate" option in commom option list. Use "-r" to set
* framerate, which is compatible with ffmpeg commandline. The video is * framerate, which is compatible with ffmpeg commandline. The video is
* assumed to be average frame rate, so set time_base to 1/framerate. */ * assumed to be average frame rate, so set time_base to 1/framerate. */
e = av_dict_get(opts, "r", NULL, 0); e = av_dict_get(opts, "r", NULL, 0);
@ -180,7 +180,7 @@ static int open_input_file(char *filename)
decoder = avcodec_find_decoder_by_name("mjpeg_qsv"); decoder = avcodec_find_decoder_by_name("mjpeg_qsv");
break; break;
default: default:
fprintf(stderr, "Codec is not supported by qsv\n"); fprintf(stderr, "Codec is not supportted by qsv\n");
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
@ -289,7 +289,7 @@ static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, char *optstr)
fprintf(stderr, "Failed to set encoding parameter.\n"); fprintf(stderr, "Failed to set encoding parameter.\n");
goto fail; goto fail;
} }
/* There is no "framerate" option in common option list. Use "-r" to /* There is no "framerate" option in commom option list. Use "-r" to
* set framerate, which is compatible with ffmpeg commandline. The * set framerate, which is compatible with ffmpeg commandline. The
* video is assumed to be average frame rate, so set time_base to * video is assumed to be average frame rate, so set time_base to
* 1/framerate. */ * 1/framerate. */

View file

@ -171,38 +171,23 @@ static int open_output_file(const char *filename)
* sample rate etc.). These properties can be changed for output * sample rate etc.). These properties can be changed for output
* streams easily using filters */ * streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) { if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
const enum AVPixelFormat *pix_fmts = NULL;
enc_ctx->height = dec_ctx->height; enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width; enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio; enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
ret = avcodec_get_supported_config(dec_ctx, NULL,
AV_CODEC_CONFIG_PIX_FORMAT, 0,
(const void**)&pix_fmts, NULL);
/* take first format from list of supported formats */ /* take first format from list of supported formats */
enc_ctx->pix_fmt = (ret >= 0 && pix_fmts) ? if (encoder->pix_fmts)
pix_fmts[0] : dec_ctx->pix_fmt; enc_ctx->pix_fmt = encoder->pix_fmts[0];
else
enc_ctx->pix_fmt = dec_ctx->pix_fmt;
/* video time_base can be set to whatever is handy and supported by encoder */ /* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = av_inv_q(dec_ctx->framerate); enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
} else { } else {
const enum AVSampleFormat *sample_fmts = NULL;
enc_ctx->sample_rate = dec_ctx->sample_rate; enc_ctx->sample_rate = dec_ctx->sample_rate;
ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout); ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
if (ret < 0) if (ret < 0)
return ret; return ret;
ret = avcodec_get_supported_config(dec_ctx, NULL,
AV_CODEC_CONFIG_SAMPLE_FORMAT, 0,
(const void**)&sample_fmts, NULL);
/* take first format from list of supported formats */ /* take first format from list of supported formats */
enc_ctx->sample_fmt = (ret >= 0 && sample_fmts) ? enc_ctx->sample_fmt = encoder->sample_fmts[0];
sample_fmts[0] : dec_ctx->sample_fmt;
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate}; enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
} }
@ -298,10 +283,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end; goto end;
} }
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out"); ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
if (!buffersink_ctx) { NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n"); av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
ret = AVERROR(ENOMEM);
goto end; goto end;
} }
@ -312,12 +297,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end; goto end;
} }
ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize buffer sink\n");
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) { } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
char buf[64]; char buf[64];
buffersrc = avfilter_get_by_name("abuffer"); buffersrc = avfilter_get_by_name("abuffer");
@ -343,10 +322,10 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
goto end; goto end;
} }
buffersink_ctx = avfilter_graph_alloc_filter(filter_graph, buffersink, "out"); ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
if (!buffersink_ctx) { NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n"); av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
ret = AVERROR(ENOMEM);
goto end; goto end;
} }
@ -373,15 +352,6 @@ static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n"); av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end; goto end;
} }
if (enc_ctx->frame_size > 0)
av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);
ret = avfilter_init_dict(buffersink_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot initialize audio buffer sink\n");
goto end;
}
} else { } else {
ret = AVERROR_UNKNOWN; ret = AVERROR_UNKNOWN;
goto end; goto end;

View file

@ -208,13 +208,6 @@ Download/synchronize sample files to the configured samples directory.
@item fate-list @item fate-list
Will list all fate/regression test targets. Will list all fate/regression test targets.
@item fate-list-failing
List the fate tests that failed the last time they were executed.
@item fate-clear-reports
Remove the test reports from previous test executions (getting rid of
potentially stale results from fate-list-failing).
@item fate @item fate
Run the FATE test suite (requires the fate-suite dataset). Run the FATE test suite (requires the fate-suite dataset).
@end table @end table

View file

@ -1,5 +1,5 @@
slot= # some unique identifier slot= # some unique identifier
repo=https://git.ffmpeg.org/ffmpeg.git # the source repository repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
#branch=release/2.6 # the branch to test #branch=release/2.6 # the branch to test
samples= # path to samples directory samples= # path to samples directory
workdir= # directory in which to do all the work workdir= # directory in which to do all the work

View file

@ -21,24 +21,22 @@ ffmpeg [@var{global_options}] @{[@var{input_file_options}] -i @file{input_url}@}
inputs - including live grabbing/recording devices - filter, and transcode them inputs - including live grabbing/recording devices - filter, and transcode them
into a plethora of output formats. into a plethora of output formats.
@command{ffmpeg} reads from an arbitrary number of inputs (which can be regular @command{ffmpeg} reads from an arbitrary number of input "files" (which can be regular
files, pipes, network streams, grabbing devices, etc.), specified by the files, pipes, network streams, grabbing devices, etc.), specified by the
@code{-i} option, and writes to an arbitrary number of outputs, which are @code{-i} option, and writes to an arbitrary number of output "files", which are
specified by a plain output url. Anything found on the command line which cannot specified by a plain output url. Anything found on the command line which
be interpreted as an option is considered to be an output url. cannot be interpreted as an option is considered to be an output url.
Each input or output can, in principle, contain any number of elementary streams Each input or output url can, in principle, contain any number of streams of
of different types (video/audio/subtitle/attachment/data), though the allowed different types (video/audio/subtitle/attachment/data). The allowed number and/or
stream counts and/or types may be limited by the container format. Selecting types of streams may be limited by the container format. Selecting which
which streams from which inputs will go into which output is either done streams from which inputs will go into which output is either done automatically
automatically or with the @code{-map} option (see the @ref{Stream selection} or with the @code{-map} option (see the Stream selection chapter).
chapter).
To refer to inputs/outputs in options, you must use their indices (0-based). To refer to input files in options, you must use their indices (0-based). E.g.
E.g. the first input is @code{0}, the second is @code{1}, etc. Similarly, the first input file is @code{0}, the second is @code{1}, etc. Similarly, streams
streams within an input/output are referred to by their indices. E.g. @code{2:3} within a file are referred to by their indices. E.g. @code{2:3} refers to the
refers to the fourth stream in the third input or output. Also see the fourth stream in the third input file. Also see the Stream specifiers chapter.
@ref{Stream specifiers} chapter.
As a general rule, options are applied to the next specified As a general rule, options are applied to the next specified
file. Therefore, order is important, and you can have the same file. Therefore, order is important, and you can have the same
@ -87,405 +85,140 @@ The format option may be needed for raw input files.
@chapter Detailed description @chapter Detailed description
@c man begin DETAILED DESCRIPTION @c man begin DETAILED DESCRIPTION
@command{ffmpeg} builds a transcoding pipeline out of the components listed The transcoding process in @command{ffmpeg} for each output can be described by
below. The program's operation then consists of input data chunks flowing from the following diagram:
the sources down the pipes towards the sinks, while being transformed by the
components they encounter along the way.
The following kinds of components are available:
@itemize
@item
@emph{Demuxers} (short for "demultiplexers") read an input source in order to
extract
@itemize
@item
global properties such as metadata or chapters;
@item
list of input elementary streams and their properties
@end itemize
One demuxer instance is created for each @option{-i} option, and sends encoded
@emph{packets} to @emph{decoders} or @emph{muxers}.
In other literature, demuxers are sometimes called @emph{splitters}, because
their main function is splitting a file into elementary streams (though some
files only contain one elementary stream).
A schematic representation of a demuxer looks like this:
@verbatim @verbatim
┌──────────┬───────────────────────┐ _______ ______________
│ demuxer │ │ packets for stream 0 | | | |
╞══════════╡ elementary stream 0 ├──────────────────────► | input | demuxer | encoded data | decoder
│ │ │ | file | ---------> | packets | -----+
│ global ├───────────────────────┤ |_______| |______________| |
│properties│ │ packets for stream 1 v
│ and │ elementary stream 1 ├──────────────────────► _________
│ metadata │ │ | |
│ ├───────────────────────┤ | decoded |
│ │ │ | frames |
│ │ ........... │ |_________|
│ │ │ ________ ______________ |
│ ├───────────────────────┤ | | | | |
│ │ │ packets for stream N | output | <-------- | encoded data | <----+
│ │ elementary stream N ├──────────────────────► | file | muxer | packets | encoder
│ │ │ |________| |______________|
└──────────┴───────────────────────┘
│ read from file, network stream,
│ grabbing device, etc.
@end verbatim @end verbatim
@item @command{ffmpeg} calls the libavformat library (containing demuxers) to read
@emph{Decoders} receive encoded (compressed) @emph{packets} for an audio, video, input files and get packets containing encoded data from them. When there are
or subtitle elementary stream, and decode them into raw @emph{frames} (arrays of multiple input files, @command{ffmpeg} tries to keep them synchronized by
pixels for video, PCM for audio). A decoder is typically associated with (and tracking lowest timestamp on any active input stream.
receives its input from) an elementary stream in a @emph{demuxer}, but sometimes
may also exist on its own (see @ref{Loopback decoders}).
A schematic representation of a decoder looks like this:
@verbatim
┌─────────┐
packets │ │ raw frames
─────────►│ decoder ├────────────►
│ │
└─────────┘
@end verbatim
@item
@emph{Filtergraphs} process and transform raw audio or video @emph{frames}. A
filtergraph consists of one or more individual @emph{filters} linked into a
graph. Filtergraphs come in two flavors - @emph{simple} and @emph{complex},
configured with the @option{-filter} and @option{-filter_complex} options,
respectively.
A simple filtergraph is associated with an @emph{output elementary stream}; it
receives the input to be filtered from a @emph{decoder} and sends filtered
output to that output stream's @emph{encoder}.
A simple video filtergraph that performs deinterlacing (using the @code{yadif}
deinterlacer) followed by resizing (using the @code{scale} filter) can look like
this:
@verbatim
┌────────────────────────┐
│ simple filtergraph │
frames from ╞════════════════════════╡ frames for
a decoder │ ┌───────┐ ┌───────┐ │ an encoder
────────────►├─►│ yadif ├─►│ scale ├─►│────────────►
│ └───────┘ └───────┘ │
└────────────────────────┘
@end verbatim
A complex filtergraph is standalone and not associated with any specific stream.
It may have multiple (or zero) inputs, potentially of different types (audio or
video), each of which receiving data either from a decoder or another complex
filtergraph's output. It also has one or more outputs that feed either an
encoder or another complex filtergraph's input.
The following example diagram represents a complex filtergraph with 3 inputs and
2 outputs (all video):
@verbatim
┌─────────────────────────────────────────────────┐
│ complex filtergraph │
╞═════════════════════════════════════════════════╡
frames ├───────┐ ┌─────────┐ ┌─────────┐ ┌────────┤ frames
─────────►│input 0├─►│ overlay ├─────►│ overlay ├─►│output 0├────────►
├───────┘ │ │ │ │ └────────┤
frames ├───────┐╭►│ │ ╭►│ │ │
─────────►│input 1├╯ └─────────┘ │ └─────────┘ │
├───────┘ │ │
frames ├───────┐ ┌─────┐ ┌─────┬─╯ ┌────────┤ frames
─────────►│input 2├►│scale├►│split├───────────────►│output 1├────────►
├───────┘ └─────┘ └─────┘ └────────┤
└─────────────────────────────────────────────────┘
@end verbatim
Frames from second input are overlaid over those from the first. Frames from the
third input are rescaled, then the duplicated into two identical streams. One of
them is overlaid over the combined first two inputs, with the result exposed as
the filtergraph's first output. The other duplicate ends up being the
filtergraph's second output.
@item
@emph{Encoders} receive raw audio, video, or subtitle @emph{frames} and encode
them into encoded @emph{packets}. The encoding (compression) process is
typically @emph{lossy} - it degrades stream quality to make the output smaller;
some encoders are @emph{lossless}, but at the cost of much higher output size. A
video or audio encoder receives its input from some filtergraph's output,
subtitle encoders receive input from a decoder (since subtitle filtering is not
supported yet). Every encoder is associated with some muxer's @emph{output
elementary stream} and sends its output to that muxer.
A schematic representation of an encoder looks like this:
@verbatim
┌─────────┐
raw frames │ │ packets
────────────►│ encoder ├─────────►
│ │
└─────────┘
@end verbatim
@item
@emph{Muxers} (short for "multiplexers") receive encoded @emph{packets} for
their elementary streams from encoders (the @emph{transcoding} path) or directly
from demuxers (the @emph{streamcopy} path), interleave them (when there is more
than one elementary stream), and write the resulting bytes into the output file
(or pipe, network stream, etc.).
A schematic representation of a muxer looks like this:
@verbatim
┌──────────────────────┬───────────┐
packets for stream 0 │ │ muxer │
──────────────────────►│ elementary stream 0 ╞═══════════╡
│ │ │
├──────────────────────┤ global │
packets for stream 1 │ │properties │
──────────────────────►│ elementary stream 1 │ and │
│ │ metadata │
├──────────────────────┤ │
│ │ │
│ ........... │ │
│ │ │
├──────────────────────┤ │
packets for stream N │ │ │
──────────────────────►│ elementary stream N │ │
│ │ │
└──────────────────────┴─────┬─────┘
write to file, network stream, │
grabbing device, etc. │
@end verbatim
@end itemize
@section Streamcopy
The simplest pipeline in @command{ffmpeg} is single-stream
@emph{streamcopy}, that is copying one @emph{input elementary stream}'s packets
without decoding, filtering, or encoding them. As an example, consider an input
file called @file{INPUT.mkv} with 3 elementary streams, from which we take the
second and write it to file @file{OUTPUT.mp4}. A schematic representation of
such a pipeline looks like this:
@verbatim
┌──────────┬─────────────────────┐
│ demuxer │ │ unused
╞══════════╡ elementary stream 0 ├────────╳
│ │ │
│INPUT.mkv ├─────────────────────┤ ┌──────────────────────┬───────────┐
│ │ │ packets │ │ muxer │
│ │ elementary stream 1 ├─────────►│ elementary stream 0 ╞═══════════╡
│ │ │ │ │OUTPUT.mp4 │
│ ├─────────────────────┤ └──────────────────────┴───────────┘
│ │ │ unused
│ │ elementary stream 2 ├────────╳
│ │ │
└──────────┴─────────────────────┘
@end verbatim
The above pipeline can be constructed with the following commandline:
@example
ffmpeg -i INPUT.mkv -map 0:1 -c copy OUTPUT.mp4
@end example
In this commandline
@itemize
@item
there is a single input @file{INPUT.mkv};
@item
there are no input options for this input;
@item
there is a single output @file{OUTPUT.mp4};
@item
there are two output options for this output:
@itemize
@item
@code{-map 0:1} selects the input stream to be used - from input with index 0
(i.e. the first one) the stream with index 1 (i.e. the second one);
@item
@code{-c copy} selects the @code{copy} encoder, i.e. streamcopy with no decoding
or encoding.
@end itemize
@end itemize
Streamcopy is useful for changing the elementary stream count, container format,
or modifying container-level metadata. Since there is no decoding or encoding,
it is very fast and there is no quality loss. However, it might not work in some
cases because of a variety of factors (e.g. certain information required by the
target container is not available in the source). Applying filters is obviously
also impossible, since filters work on decoded frames.
More complex streamcopy scenarios can be constructed - e.g. combining streams
from two input files into a single output:
@verbatim
┌──────────┬────────────────────┐ ┌────────────────────┬───────────┐
│ demuxer 0│ │ packets │ │ muxer │
╞══════════╡elementary stream 0 ├────────►│elementary stream 0 ╞═══════════╡
│INPUT0.mkv│ │ │ │OUTPUT.mp4 │
└──────────┴────────────────────┘ ├────────────────────┤ │
┌──────────┬────────────────────┐ │ │ │
│ demuxer 1│ │ packets │elementary stream 1 │ │
╞══════════╡elementary stream 0 ├────────►│ │ │
│INPUT1.aac│ │ └────────────────────┴───────────┘
└──────────┴────────────────────┘
@end verbatim
that can be built by the commandline
@example
ffmpeg -i INPUT0.mkv -i INPUT1.aac -map 0:0 -map 1:0 -c copy OUTPUT.mp4
@end example
The output @option{-map} option is used twice here, creating two streams in the
output file - one fed by the first input and one by the second. The single
instance of the @option{-c} option selects streamcopy for both of those streams.
You could also use multiple instances of this option together with
@ref{Stream specifiers} to apply different values to each stream, as will be
demonstrated in following sections.
A converse scenario is splitting multiple streams from a single input into
multiple outputs:
@verbatim
┌──────────┬─────────────────────┐ ┌───────────────────┬───────────┐
│ demuxer │ │ packets │ │ muxer 0 │
╞══════════╡ elementary stream 0 ├─────────►│elementary stream 0╞═══════════╡
│ │ │ │ │OUTPUT0.mp4│
│INPUT.mkv ├─────────────────────┤ └───────────────────┴───────────┘
│ │ │ packets ┌───────────────────┬───────────┐
│ │ elementary stream 1 ├─────────►│ │ muxer 1 │
│ │ │ │elementary stream 0╞═══════════╡
└──────────┴─────────────────────┘ │ │OUTPUT1.mp4│
└───────────────────┴───────────┘
@end verbatim
built with
@example
ffmpeg -i INPUT.mkv -map 0:0 -c copy OUTPUT0.mp4 -map 0:1 -c copy OUTPUT1.mp4
@end example
Note how a separate instance of the @option{-c} option is needed for every
output file even though their values are the same. This is because non-global
options (which is most of them) only apply in the context of the file before
which they are placed.
These examples can of course be further generalized into arbitrary remappings
of any number of inputs into any number of outputs.
@section Transcoding
@emph{Transcoding} is the process of decoding a stream and then encoding it
again. Since encoding tends to be computationally expensive and in most cases
degrades the stream quality (i.e. it is @emph{lossy}), you should only transcode
when you need to and perform streamcopy otherwise. Typical reasons to transcode
are:
@itemize
@item
applying filters - e.g. resizing, deinterlacing, or overlaying video; resampling
or mixing audio;
@item
you want to feed the stream to something that cannot decode the original codec.
@end itemize
Note that @command{ffmpeg} will transcode all audio, video, and subtitle streams
unless you specify @option{-c copy} for them.
Consider an example pipeline that reads an input file with one audio and one
video stream, transcodes the video and copies the audio into a single output
file. This can be schematically represented as follows
@verbatim
┌──────────┬─────────────────────┐
│ demuxer │ │ audio packets
╞══════════╡ stream 0 (audio) ├─────────────────────────────────────╮
│ │ │ │
│INPUT.mkv ├─────────────────────┤ video ┌─────────┐ raw │
│ │ │ packets │ video │ video frames │
│ │ stream 1 (video) ├─────────►│ decoder ├──────────────╮ │
│ │ │ │ │ │ │
└──────────┴─────────────────────┘ └─────────┘ │ │
▼ ▼
│ │
┌──────────┬─────────────────────┐ video ┌─────────┐ │ │
│ muxer │ │ packets │ video │ │ │
╞══════════╡ stream 0 (video) │◄─────────┤ encoder ├──────────────╯ │
│ │ │ │(libx264)│ │
│OUTPUT.mp4├─────────────────────┤ └─────────┘ │
│ │ │ │
│ │ stream 1 (audio) │◄────────────────────────────────────╯
│ │ │
└──────────┴─────────────────────┘
@end verbatim
and implemented with the following commandline:
@example
ffmpeg -i INPUT.mkv -map 0:v -map 0:a -c:v libx264 -c:a copy OUTPUT.mp4
@end example
Note how it uses stream specifiers @code{:v} and @code{:a} to select input
streams and apply different values of the @option{-c} option to them; see the
@ref{Stream specifiers} section for more details.
Encoded packets are then passed to the decoder (unless streamcopy is selected
for the stream, see further for a description). The decoder produces
uncompressed frames (raw video/PCM audio/...) which can be processed further by
filtering (see next section). After filtering, the frames are passed to the
encoder, which encodes them and outputs encoded packets. Finally, those are
passed to the muxer, which writes the encoded packets to the output file.
@section Filtering @section Filtering
Before encoding, @command{ffmpeg} can process raw audio and video frames using
When transcoding, audio and video streams can be filtered before encoding, with filters from the libavfilter library. Several chained filters form a filter
either a @emph{simple} or @emph{complex} filtergraph. graph. @command{ffmpeg} distinguishes between two types of filtergraphs:
simple and complex.
@subsection Simple filtergraphs @subsection Simple filtergraphs
Simple filtergraphs are those that have exactly one input and output, both of Simple filtergraphs are those that have exactly one input and output, both of
the same type (audio or video). They are configured with the per-stream the same type. In the above diagram they can be represented by simply inserting
@option{-filter} option (with @option{-vf} and @option{-af} aliases for an additional step between decoding and encoding:
@option{-filter:v} (video) and @option{-filter:a} (audio) respectively). Note
that simple filtergraphs are tied to their output stream, so e.g. if you have
multiple audio streams, @option{-af} will create a separate filtergraph for each
one.
Taking the transcoding example from above, adding filtering (and omitting audio,
for clarity) makes it look like this:
@verbatim @verbatim
┌──────────┬───────────────┐ _________ ______________
│ demuxer │ │ ┌─────────┐ | | | |
╞══════════╡ video stream │ packets │ video │ frames | decoded | | encoded data |
│INPUT.mkv │ ├─────────►│ decoder ├─────►───╮ | frames |\ _ | packets |
│ │ │ └─────────┘ │ |_________| \ /||______________|
└──────────┴───────────────┘ │ \ __________ /
╭───────────◄───────────╯ simple _\|| | / encoder
│ ┌────────────────────────┐ filtergraph | filtered |/
│ │ simple filtergraph │ | frames |
│ ╞════════════════════════╡ |__________|
│ │ ┌───────┐ ┌───────┐ │
╰──►├─►│ yadif ├─►│ scale ├─►├╮
│ └───────┘ └───────┘ ││
└────────────────────────┘│
┌──────────┬───────────────┐ video ┌─────────┐ │
│ muxer │ │ packets │ video │ │
╞══════════╡ video stream │◄─────────┤ encoder ├───────◄───────╯
│OUTPUT.mp4│ │ │ │
│ │ │ └─────────┘
└──────────┴───────────────┘
@end verbatim @end verbatim
@subsection Complex filtergraphs Simple filtergraphs are configured with the per-stream @option{-filter} option
(with @option{-vf} and @option{-af} aliases for video and audio respectively).
A simple filtergraph for video can look for example like this:
@verbatim
_______ _____________ _______ ________
| | | | | | | |
| input | ---> | deinterlace | ---> | scale | ---> | output |
|_______| |_____________| |_______| |________|
@end verbatim
Note that some filters change frame properties but not frame contents. E.g. the
@code{fps} filter in the example above changes number of frames, but does not
touch the frame contents. Another example is the @code{setpts} filter, which
only sets timestamps and otherwise passes the frames unchanged.
@subsection Complex filtergraphs
Complex filtergraphs are those which cannot be described as simply a linear Complex filtergraphs are those which cannot be described as simply a linear
processing chain applied to one stream. This is the case, for example, when the processing chain applied to one stream. This is the case, for example, when the graph has
graph has more than one input and/or output, or when output stream type is more than one input and/or output, or when output stream type is different from
different from input. Complex filtergraphs are configured with the input. They can be represented with the following diagram:
@option{-filter_complex} option. Note that this option is global, since a
complex filtergraph, by its nature, cannot be unambiguously associated with a @verbatim
single stream or file. Each instance of @option{-filter_complex} creates a new _________
complex filtergraph, and there can be any number of them. | |
| input 0 |\ __________
|_________| \ | |
\ _________ /| output 0 |
\ | | / |__________|
_________ \| complex | /
| | | |/
| input 1 |---->| filter |\
|_________| | | \ __________
/| graph | \ | |
/ | | \| output 1 |
_________ / |_________| |__________|
| | /
| input 2 |/
|_________|
@end verbatim
Complex filtergraphs are configured with the @option{-filter_complex} option.
Note that this option is global, since a complex filtergraph, by its nature,
cannot be unambiguously associated with a single stream or file.
The @option{-lavfi} option is equivalent to @option{-filter_complex}.
A trivial example of a complex filtergraph is the @code{overlay} filter, which A trivial example of a complex filtergraph is the @code{overlay} filter, which
has two video inputs and one video output, containing one video overlaid on top has two video inputs and one video output, containing one video overlaid on top
of the other. Its audio counterpart is the @code{amix} filter. of the other. Its audio counterpart is the @code{amix} filter.
@anchor{Loopback decoders} @section Stream copy
Stream copy is a mode selected by supplying the @code{copy} parameter to the
@option{-codec} option. It makes @command{ffmpeg} omit the decoding and encoding
step for the specified stream, so it does only demuxing and muxing. It is useful
for changing the container format or modifying container-level metadata. The
diagram above will, in this case, simplify to this:
@verbatim
_______ ______________ ________
| | | | | |
| input | demuxer | encoded data | muxer | output |
| file | ---------> | packets | -------> | file |
|_______| |______________| |________|
@end verbatim
Since there is no decoding or encoding, it is very fast and there is no quality
loss. However, it might not work in some cases because of many factors. Applying
filters is obviously also impossible, since filters work on uncompressed data.
@section Loopback decoders @section Loopback decoders
While decoders are normally associated with demuxer streams, it is also possible While decoders are normally associated with demuxer streams, it is also possible
to create "loopback" decoders that decode the output from some encoder and allow to create "loopback" decoders that decode the output from some encoder and allow
@ -526,41 +259,8 @@ reads an input video and
@end itemize @end itemize
Such a transcoding pipeline can be represented with the following diagram:
@verbatim
┌──────────┬───────────────┐
│ demuxer │ │ ┌─────────┐ ┌─────────┐ ┌────────────────────┐
╞══════════╡ video stream │ │ video │ │ video │ │ null muxer │
│ INPUT │ ├──►│ decoder ├──┬────────►│ encoder ├─┬─►│(discards its input)│
│ │ │ └─────────┘ │ │(libx264)│ │ └────────────────────┘
└──────────┴───────────────┘ │ └─────────┘ │
╭───────◄──╯ ┌─────────┐ │
│ │loopback │ │
│ ╭─────◄──────┤ decoder ├────◄──╯
│ │ └─────────┘
│ │
│ │
│ │ ┌───────────────────┐
│ │ │complex filtergraph│
│ │ ╞═══════════════════╡
│ │ │ ┌─────────────┐ │
╰─╫─►├─►│ hstack ├─►├╮
╰─►├─►│ │ ││
│ └─────────────┘ ││
└───────────────────┘│
┌──────────┬───────────────┐ ┌─────────┐ │
│ muxer │ │ │ video │ │
╞══════════╡ video stream │◄─┤ encoder ├───────◄──────────╯
│ OUTPUT │ │ │ (ffv1) │
│ │ │ └─────────┘
└──────────┴───────────────┘
@end verbatim
@c man end DETAILED DESCRIPTION @c man end DETAILED DESCRIPTION
@anchor{Stream selection}
@chapter Stream selection @chapter Stream selection
@c man begin STREAM SELECTION @c man begin STREAM SELECTION
@ -921,25 +621,24 @@ ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
@end example @end example
@item -disposition[:stream_specifier] @var{value} (@emph{output,per-stream}) @item -disposition[:stream_specifier] @var{value} (@emph{output,per-stream})
Sets the disposition flags for a stream. Sets the disposition for a stream.
Default value: by default, all disposition flags are copied from the input stream, By default, the disposition is copied from the input stream, unless the output
unless the output stream this option applies to is fed by a complex filtergraph stream this option applies to is fed by a complex filtergraph - in that case the
- in that case no disposition flags are set by default. disposition is unset by default.
@var{value} is a sequence of disposition flags separated by '+' or '-'. A '+' @var{value} is a sequence of items separated by '+' or '-'. The first item may
prefix adds the given disposition, '-' removes it. If the first flag is also also be prefixed with '+' or '-', in which case this option modifies the default
prefixed with '+' or '-', the resulting disposition is the default value value. Otherwise (the first item is not prefixed) this options overrides the
updated by @var{value}. If the first flag is not prefixed, the resulting default value. A '+' prefix adds the given disposition, '-' removes it. It is
disposition is @var{value}. It is also possible to clear the disposition by also possible to clear the disposition by setting it to 0.
setting it to 0.
If no @code{-disposition} options were specified for an output file, ffmpeg will If no @code{-disposition} options were specified for an output file, ffmpeg will
automatically set the 'default' disposition flag on the first stream of each type, automatically set the 'default' disposition on the first stream of each type,
when there are multiple streams of this type in the output file and no stream of when there are multiple streams of this type in the output file and no stream of
that type is already marked as default. that type is already marked as default.
The @code{-dispositions} option lists the known disposition flags. The @code{-dispositions} option lists the known dispositions.
For example, to make the second audio stream the default stream: For example, to make the second audio stream the default stream:
@example @example
@ -957,29 +656,6 @@ To add an embedded cover/thumbnail:
ffmpeg -i in.mp4 -i IMAGE -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic out.mp4 ffmpeg -i in.mp4 -i IMAGE -map 0 -map 1 -c copy -c:v:1 png -disposition:v:1 attached_pic out.mp4
@end example @end example
To add the 'original' and remove the 'comment' disposition flag from the first
audio stream without removing its other disposition flags:
@example
ffmpeg -i in.mkv -c copy -disposition:a:0 +original-comment out.mkv
@end example
To remove the 'original' and add the 'comment' disposition flag to the first
audio stream without removing its other disposition flags:
@example
ffmpeg -i in.mkv -c copy -disposition:a:0 -original+comment out.mkv
@end example
To set only the 'original' and 'comment' disposition flags on the first audio
stream (and remove its other disposition flags):
@example
ffmpeg -i in.mkv -c copy -disposition:a:0 original+comment out.mkv
@end example
To remove all disposition flags from the first audio stream:
@example
ffmpeg -i in.mkv -c copy -disposition:a:0 0 out.mkv
@end example
Not all muxers support embedded thumbnails, and those who do, only support a few formats, like JPEG or PNG. Not all muxers support embedded thumbnails, and those who do, only support a few formats, like JPEG or PNG.
@item -program [title=@var{title}:][program_num=@var{program_num}:]st=@var{stream}[:st=@var{stream}...] (@emph{output}) @item -program [title=@var{title}:][program_num=@var{program_num}:]st=@var{stream}[:st=@var{stream}...] (@emph{output})
@ -1048,7 +724,7 @@ The following flags are available:
@table @option @table @option
@item recon_gain @item recon_gain
Whether to signal if recon_gain is present as metadata in parameter blocks within frames Wether to signal if recon_gain is present as metadata in parameter blocks within frames
@end table @end table
@item output_gain @item output_gain
@ -1196,9 +872,9 @@ ffmpeg -i front.wav -i back.wav -i center.wav -i lfe.wav
demixing=parameter_id=998, demixing=parameter_id=998,
recon_gain=parameter_id=101, recon_gain=parameter_id=101,
layer=ch_layout=stereo, layer=ch_layout=stereo,
layer=ch_layout=5.1(side), layer=ch_layout=5.1,
-stream_group type=iamf_mix_presentation:id=2:stg=0:annotations=en-us=Mix_Presentation, -stream_group type=iamf_mix_presentation:id=2:stg=0:annotations=en-us=Mix_Presentation,
submix=parameter_id=100:parameter_rate=48000|element=stg=0:parameter_id=100:annotations=en-us=Scalable_Submix|layout=sound_system=stereo|layout=sound_system=5.1(side) submix=parameter_id=100:parameter_rate=48000|element=stg=0:parameter_id=100:annotations=en-us=Scalable_Submix|layout=sound_system=stereo|layout=sound_system=5.1
-streamid 0:0 -streamid 1:1 -streamid 2:2 -streamid 3:3 output.iamf -streamid 0:0 -streamid 1:1 -streamid 2:2 -streamid 3:3 output.iamf
@end example @end example
@ -1373,62 +1049,31 @@ The properties where a change triggers reinitialization are,
for video, frame resolution or pixel format; for video, frame resolution or pixel format;
for audio, sample format, sample rate, channel count or channel layout. for audio, sample format, sample rate, channel count or channel layout.
@item -drop_changed[:@var{stream_specifier}] @var{integer} (@emph{input,per-stream})
This boolean option determines whether a frame with differing frame parameters mid-stream
gets dropped instead of leading to filtergraph reinitialization, as that would lead to loss
of filter state. Generally useful to avoid corrupted yet decodable packets in live streaming
inputs. Default is false.
@item -filter_threads @var{nb_threads} (@emph{global}) @item -filter_threads @var{nb_threads} (@emph{global})
Defines how many threads are used to process a filter pipeline. Each pipeline Defines how many threads are used to process a filter pipeline. Each pipeline
will produce a thread pool with this many threads available for parallel processing. will produce a thread pool with this many threads available for parallel processing.
The default is the number of available CPUs. The default is the number of available CPUs.
@item -filter_buffered_frames @var{nb_frames} (@emph{global})
Defines the maximum number of buffered frames allowed in a filtergraph. Under
normal circumstances, a filtergraph should not buffer more than a few frames,
especially if frames are being fed to it and read from it in a balanced way
(which is the intended behavior in ffmpeg). That said, this option allows you
to limit the total number of frames buffered across all links in a filtergraph.
If more frames are generated, filtering is aborted and an error is returned.
The default value is 0, which means no limit.
@item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream}) @item -pre[:@var{stream_specifier}] @var{preset_name} (@emph{output,per-stream})
Specify the preset for matching stream(s). Specify the preset for matching stream(s).
@item -stats (@emph{global}) @item -stats (@emph{global})
Log encoding progress/statistics as "info"-level log (see @code{-loglevel}). Print encoding progress/statistics. It is on by default, to explicitly
It is on by default, to explicitly disable it you need to specify @code{-nostats}. disable it you need to specify @code{-nostats}.
@item -stats_period @var{time} (@emph{global}) @item -stats_period @var{time} (@emph{global})
Set period at which encoding progress/statistics are updated. Default is 0.5 seconds. Set period at which encoding progress/statistics are updated. Default is 0.5 seconds.
@item -print_graphs (@emph{global})
Prints execution graph details to stderr in the format set via -print_graphs_format.
@item -print_graphs_file @var{filename} (@emph{global})
Writes execution graph details to the specified file in the format set via -print_graphs_format.
@item -print_graphs_format @var{format} (@emph{global})
Sets the output format (available formats are: default, compact, csv, flat, ini, json, xml, mermaid, mermaidhtml)
The default format is json.
@item -progress @var{url} (@emph{global}) @item -progress @var{url} (@emph{global})
Send program-friendly progress information to @var{url}. Send program-friendly progress information to @var{url}.
Progress information is written periodically and at the end of Progress information is written periodically and at the end of
the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key} the encoding process. It is made of "@var{key}=@var{value}" lines. @var{key}
consists of only alphanumeric characters. The last key of a sequence of consists of only alphanumeric characters. The last key of a sequence of
progress information is always "progress" with the value "continue" or "end". progress information is always "progress".
The update period is set using @code{-stats_period}. The update period is set using @code{-stats_period}.
For example, log progress information to stdout:
@example
ffmpeg -progress pipe:1 -i in.mkv out.mkv
@end example
@anchor{stdin option} @anchor{stdin option}
@item -stdin @item -stdin
Enable interaction on standard input. On by default unless standard input is Enable interaction on standard input. On by default unless standard input is
@ -2010,9 +1655,6 @@ transcoding, without copying the frames into the system memory.
For it to work, both the decoder and the encoder must support QSV acceleration For it to work, both the decoder and the encoder must support QSV acceleration
and no filters must be used. and no filters must be used.
@item videotoolbox
Use Video Toolbox hardware acceleration.
@end table @end table
This option has no effect if the selected hwaccel is not available or not This option has no effect if the selected hwaccel is not available or not
@ -2157,7 +1799,7 @@ Set the size of the canvas used to render subtitles.
@section Advanced options @section Advanced options
@table @option @table @option
@item -map [-]@var{input_file_id}[:@var{stream_specifier}][:@var{view_specifier}][:?] | @var{[linklabel]} (@emph{output}) @item -map [-]@var{input_file_id}[:@var{stream_specifier}][:@var{view_specifier}][?] | @var{[linklabel]} (@emph{output})
Create one or more streams in the output file. This option has two forms for Create one or more streams in the output file. This option has two forms for
specifying the data source(s): the first selects one or more streams from some specifying the data source(s): the first selects one or more streams from some
@ -2353,11 +1995,6 @@ Read input at native frame rate. This is equivalent to setting @code{-readrate 1
@item -readrate_initial_burst @var{seconds} @item -readrate_initial_burst @var{seconds}
Set an initial read burst time, in seconds, after which @option{-re/-readrate} Set an initial read burst time, in seconds, after which @option{-re/-readrate}
will be enforced. will be enforced.
@item -readrate_catchup @var{speed} (@emph{input})
If either the input or output is blocked leading to actual read speed falling behind the
specified readrate, then this rate takes effect till the input catches up with the
specified readrate. Must not be lower than the primary readrate.
@item -vsync @var{parameter} (@emph{global}) @item -vsync @var{parameter} (@emph{global})
@itemx -fps_mode[:@var{stream_specifier}] @var{parameter} (@emph{output,per-stream}) @itemx -fps_mode[:@var{stream_specifier}] @var{parameter} (@emph{output,per-stream})
Set video sync method / framerate mode. vsync is applied to all output video streams Set video sync method / framerate mode. vsync is applied to all output video streams

View file

@ -139,6 +139,13 @@ stream.
All the container format information is printed within a section with All the container format information is printed within a section with
name "FORMAT". name "FORMAT".
@item -show_format_entry @var{name}
Like @option{-show_format}, but only prints the specified entry of the
container format information, rather than all. This option may be given more
than once, then all specified entries will be shown.
This option is deprecated, use @code{show_entries} instead.
@item -show_entries @var{section_entries} @item -show_entries @var{section_entries}
Set list of entries to show. Set list of entries to show.
@ -344,19 +351,6 @@ while other writers always print them. This option enables one to control this b
Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}. Valid values are @code{always}/@code{1}, @code{never}/@code{0} and @code{auto}/@code{-1}.
Default is @var{auto}. Default is @var{auto}.
@item -analyze_frames
Analyze frames and/or their side data up to the provided read interval,
providing additional information that may be useful at a stream level.
Must be paired with the @option{-show_streams} option or it will have no effect.
Currently, the additional fields provided by this option when enabled are the
@code{closed_captions} and @code{film_grain} fields.
For example, to analyze the first 20 seconds and populate these fields:
@example
ffprobe -show_streams -analyze_frames -read_intervals "%+20" INPUT
@end example
@item -bitexact @item -bitexact
Force bitexact output, useful to produce output which is not dependent Force bitexact output, useful to produce output which is not dependent
on the specific build. on the specific build.
@ -368,12 +362,6 @@ Read @var{input_url}.
Write output to @var{output_url}. If not specified, the output is sent Write output to @var{output_url}. If not specified, the output is sent
to stdout. to stdout.
@item -c:@var{media_specifier} @var{codec_name}
@itemx -codec:@var{media_specifier} @var{codec_name}
Force a specific decoder implementation for the stream identified by
@var{media_specifier}, which can assume the values @code{a} (audio),
@code{v} (video), @code{s} (subtitle), and @code{d} (data).
@end table @end table
@c man end @c man end

View file

@ -129,7 +129,6 @@
<xsd:attribute name="pict_type" type="xsd:string"/> <xsd:attribute name="pict_type" type="xsd:string"/>
<xsd:attribute name="interlaced_frame" type="xsd:int" /> <xsd:attribute name="interlaced_frame" type="xsd:int" />
<xsd:attribute name="top_field_first" type="xsd:int" /> <xsd:attribute name="top_field_first" type="xsd:int" />
<xsd:attribute name="lossless" type="xsd:int" />
<xsd:attribute name="repeat_pict" type="xsd:int" /> <xsd:attribute name="repeat_pict" type="xsd:int" />
<xsd:attribute name="color_range" type="xsd:string"/> <xsd:attribute name="color_range" type="xsd:string"/>
<xsd:attribute name="color_space" type="xsd:string"/> <xsd:attribute name="color_space" type="xsd:string"/>

View file

@ -78,8 +78,7 @@ Match the stream by stream id (e.g. PID in MPEG-TS container).
@item m:@var{key}[:@var{value}] @item m:@var{key}[:@var{value}]
Matches streams with the metadata tag @var{key} having the specified value. If Matches streams with the metadata tag @var{key} having the specified value. If
@var{value} is not given, matches streams that contain the given tag with any @var{value} is not given, matches streams that contain the given tag with any
value. The colon character ':' in @var{key} or @var{value} needs to be value.
backslash-escaped.
@item disp:@var{dispositions}[:@var{additional_stream_specifier}] @item disp:@var{dispositions}[:@var{additional_stream_specifier}]
Matches streams with the given disposition(s). @var{dispositions} is a list of Matches streams with the given disposition(s). @var{dispositions} is a list of
one or more dispositions (as printed by the @option{-dispositions} option) one or more dispositions (as printed by the @option{-dispositions} option)
@ -226,10 +225,6 @@ and the "Last message repeated n times" line will be omitted.
Indicates that log output should add a @code{[level]} prefix to each message Indicates that log output should add a @code{[level]} prefix to each message
line. This can be used as an alternative to log coloring, e.g. when dumping the line. This can be used as an alternative to log coloring, e.g. when dumping the
log to file. log to file.
@item time
Indicates that log lines should be prefixed with time information.
@item datetime
Indicates that log lines should be prefixed with date and time information.
@end table @end table
Flags can also be used alone by adding a '+'/'-' prefix to set/reset a single Flags can also be used alone by adding a '+'/'-' prefix to set/reset a single
flag without affecting other @var{flags} or changing @var{loglevel}. When flag without affecting other @var{flags} or changing @var{loglevel}. When

View file

@ -214,7 +214,6 @@ Frame scheduling
FF_FILTER_FORWARD_STATUS(inlink, outlink); FF_FILTER_FORWARD_STATUS(inlink, outlink);
FF_FILTER_FORWARD_STATUS_ALL(inlink, filter); FF_FILTER_FORWARD_STATUS_ALL(inlink, filter);
FF_FILTER_FORWARD_WANTED(outlink, inlink); FF_FILTER_FORWARD_WANTED(outlink, inlink);
FF_FILTER_FORWARD_WANTED_ANY(filter, inlink);
filter_frame filter_frame
------------ ------------

File diff suppressed because it is too large Load diff

View file

@ -162,7 +162,7 @@ Then pass @code{--enable-libmp3lame} to configure to enable it.
@section LCEVCdec @section LCEVCdec
FFmpeg can make use of the liblcevc_dec library for LCEVC enhancement layer FFmpeg can make use of the liblcevc_dec library for LCEVC enhancement layer
decoding on supported bitstreams. decoding on supported bitstreams.
Go to @url{https://github.com/v-novaltd/LCEVCdec} and follow the instructions Go to @url{https://github.com/v-novaltd/LCEVCdec} and follow the instructions
@ -625,7 +625,6 @@ library:
@item raw AMR-NB @tab @tab X @item raw AMR-NB @tab @tab X
@item raw AMR-WB @tab @tab X @item raw AMR-WB @tab @tab X
@item raw APAC @tab @tab X @item raw APAC @tab @tab X
@item raw APV @tab X @tab X
@item raw aptX @tab X @tab X @item raw aptX @tab X @tab X
@item raw aptX HD @tab X @tab X @item raw aptX HD @tab X @tab X
@item raw Bonk @tab @tab X @item raw Bonk @tab @tab X
@ -638,7 +637,6 @@ library:
@item raw E-AC-3 @tab X @tab X @item raw E-AC-3 @tab X @tab X
@item raw EVC @tab X @tab X @item raw EVC @tab X @tab X
@item raw FLAC @tab X @tab X @item raw FLAC @tab X @tab X
@item raw G.728 @tab @tab X
@item raw GSM @tab @tab X @item raw GSM @tab @tab X
@item raw H.261 @tab X @tab X @item raw H.261 @tab X @tab X
@item raw H.263 @tab X @tab X @item raw H.263 @tab X @tab X
@ -897,7 +895,6 @@ following image formats are supported:
@tab fourcc: apch,apcn,apcs,apco,ap4h,ap4x @tab fourcc: apch,apcn,apcs,apco,ap4h,ap4x
@item Apple QuickDraw @tab @tab X @item Apple QuickDraw @tab @tab X
@tab fourcc: qdrw @tab fourcc: qdrw
@item APV @tab @tab X
@item Argonaut Video @tab @tab X @item Argonaut Video @tab @tab X
@tab Used in some Argonaut games. @tab Used in some Argonaut games.
@item Asus v1 @tab X @tab X @item Asus v1 @tab X @tab X
@ -1112,7 +1109,6 @@ following image formats are supported:
@item RealVideo 3.0 @tab @tab X @item RealVideo 3.0 @tab @tab X
@tab still far from ideal @tab still far from ideal
@item RealVideo 4.0 @tab @tab X @item RealVideo 4.0 @tab @tab X
@item RealVideo 6.0 @tab @tab X
@item Renderware TXD (TeXture Dictionary) @tab @tab X @item Renderware TXD (TeXture Dictionary) @tab @tab X
@tab Texture dictionaries used by the Renderware Engine. @tab Texture dictionaries used by the Renderware Engine.
@item RivaTuner Video @tab @tab X @item RivaTuner Video @tab @tab X
@ -1236,7 +1232,6 @@ following image formats are supported:
@item ADPCM IMA Duck DK4 @tab @tab X @item ADPCM IMA Duck DK4 @tab @tab X
@tab Used in some Sega Saturn console games. @tab Used in some Sega Saturn console games.
@item ADPCM IMA Radical @tab @tab X @item ADPCM IMA Radical @tab @tab X
@item ADPCM IMA Xbox @tab @tab X
@item ADPCM Microsoft @tab X @tab X @item ADPCM Microsoft @tab X @tab X
@item ADPCM MS IMA @tab X @tab X @item ADPCM MS IMA @tab X @tab X
@item ADPCM Nintendo Gamecube AFC @tab @tab X @item ADPCM Nintendo Gamecube AFC @tab @tab X
@ -1244,7 +1239,6 @@ following image formats are supported:
@item ADPCM Nintendo THP @tab @tab X @item ADPCM Nintendo THP @tab @tab X
@item ADPCM Playstation @tab @tab X @item ADPCM Playstation @tab @tab X
@item ADPCM QT IMA @tab X @tab X @item ADPCM QT IMA @tab X @tab X
@item ADPCM Sanyo @tab @tab X
@item ADPCM SEGA CRI ADX @tab X @tab X @item ADPCM SEGA CRI ADX @tab X @tab X
@tab Used in Sega Dreamcast games. @tab Used in Sega Dreamcast games.
@item ADPCM Shockwave Flash @tab X @tab X @item ADPCM Shockwave Flash @tab X @tab X
@ -1319,7 +1313,6 @@ following image formats are supported:
@item FLAC (Free Lossless Audio Codec) @tab X @tab IX @item FLAC (Free Lossless Audio Codec) @tab X @tab IX
@item FTR Voice @tab @tab X @item FTR Voice @tab @tab X
@item G.723.1 @tab X @tab X @item G.723.1 @tab X @tab X
@item G.728 @tab @tab X
@item G.729 @tab @tab X @item G.729 @tab @tab X
@item GSM @tab E @tab X @item GSM @tab E @tab X
@tab encoding supported through external library libgsm @tab encoding supported through external library libgsm

View file

@ -71,6 +71,7 @@ git clone git@@ffmpeg.org:ffmpeg-web <target>
This will put the source of the FFmpeg website into the directory This will put the source of the FFmpeg website into the directory
@var{<target>} and let you push back your changes to the remote repository. @var{<target>} and let you push back your changes to the remote repository.
(Note that @var{gil} stands for GItoLite and is not a typo of @var{git}.)
If you don't have write-access to the ffmpeg-web repository, you can If you don't have write-access to the ffmpeg-web repository, you can
create patches after making a read-only ffmpeg-web clone: create patches after making a read-only ffmpeg-web clone:
@ -142,7 +143,7 @@ git log <filename(s)>
@end example @end example
You may also use the graphical tools like @command{gitview} or @command{gitk} You may also use the graphical tools like @command{gitview} or @command{gitk}
or the web interface available at @url{https://git.ffmpeg.org/ffmpeg.git}. or the web interface available at @url{http://source.ffmpeg.org/}.
@section Checking source tree status @section Checking source tree status

View file

@ -1,6 +0,0 @@
ffmpeg mono ./ffmpeg.html
ffmpeg-filters mono ./ffmpeg-filters.html
ffmpeg-formats mono ./ffmpeg-formats.html
ffmpeg-resampler mono ./ffmpeg-resampler.html
ffmpeg-scaler mono ./ffmpeg-scaler.html
ffmpeg-utils mono ./ffmpeg-utils.html

View file

@ -220,6 +220,41 @@ $ ffmpeg -f avfoundation -capture_raw_data true -i "zr100:none" out.dv
@end itemize @end itemize
@section bktr
BSD video input device. Deprecated and will be removed - please contact
the developers if you are interested in maintaining it.
@subsection Options
@table @option
@item framerate
Set the frame rate.
@item video_size
Set the video frame size. Default is @code{vga}.
@item standard
Available values are:
@table @samp
@item pal
@item ntsc
@item secam
@item paln
@item palm
@item ntscj
@end table
@end table
@section decklink @section decklink
The decklink input device provides capture capabilities for Blackmagic The decklink input device provides capture capabilities for Blackmagic
@ -704,7 +739,7 @@ Win32 GDI-based screen capture device.
This device allows you to capture a region of the display on Windows. This device allows you to capture a region of the display on Windows.
Amongst options for the input filenames are such elements as: Amongst options for the input filenames are such elements as:
@example @example
desktop desktop
@end example @end example

View file

@ -1,18 +1,8 @@
FFmpeg Infrastructure: FFmpeg Infrastructure:
====================== ======================
Trademark:
~~~~~~~~~~
ffmpeg trademark registered in france by ffmpeg creator.
Domain + NS:
~~~~~~~~~~~~
ffmpeg.org domain name
ns1.avcodec.org Primary Name server (provided by Telepoint, hosted at Telepoint in bulgaria)
ns2.avcodec.org Replica Name server (provided by an ffmpeg developer, hosted at Hetzner in germany)
ns3.avcodec.org Replica Name server (provided by an ffmpeg developer, hosted at Prometeus Cdlan in italy)
Servers: Servers:
~~~~~~~~ ~~~~~~~~
@ -33,8 +23,6 @@ Web, mail, and public facing git, also website git
fftrac VM: fftrac VM:
---------- ----------
trac.ffmpeg.org Issue tracking trac.ffmpeg.org Issue tracking
gpg encrypted backups of the trac repositories are created once a day
and can be downloaded by any of the admins.
ffaux VM: ffaux VM:
@ -77,9 +65,6 @@ Github mirrors are redundantly synced by multiple people
You need a new git repository related to FFmpeg ? contact root at ffmpeg.org You need a new git repository related to FFmpeg ? contact root at ffmpeg.org
git repositories are managed by gitolite, every change to permissions is
logged, including when, what and by whom
Fate: Fate:
~~~~~ ~~~~~
@ -102,47 +87,8 @@ You need a VM, docker container for FFmpeg? contact root at ffmpeg.org
Multimedia Wiki:
~~~~~~~~~~~~~~~~
The Multimedia Wiki http://wiki.multimedia.cx is run by Mike Melanson.
While not directly part of FFmpeg infrastructure, technical codec and format
information written by FFmpeg developers can be found within.
It is our unofficial official tech wiki. For access contact Mike.
IRC: IRC:
~~~~ ~~~~
irc channels are at https://libera.chat/ irc channels are at https://libera.chat/
irc channel archives are at https://libera.irclog.whitequark.org irc channel archives are at https://libera.irclog.whitequark.org
#ffmpeg and #ffmpeg-devel founder/admins: BtbN, Michael, Compn
#ffmpeg-meeting founder/admins: BtbN, Michael
Twitter aka X:
~~~~~~~~~~~~~~
https://twitter.com/FFmpeg or https://x.com/FFmpeg
If you would like to post to twitter please contact twitter MAINTAINERS
for access. We want more developers posting to twitter!
Reddit:
~~~~~~~
https://www.reddit.com/r/ffmpeg/
moderated by Gyan
Facebook:
~~~~~~~~~
https://www.facebook.com/ffmpeg
???
Wikipedia entry:
~~~~~~~~~~~~~~~~
https://en.wikipedia.org/wiki/FFmpeg

115
doc/libav-merge.txt Normal file
View file

@ -0,0 +1,115 @@
CONTEXT
=======
The FFmpeg project merges all the changes from the Libav project
(https://libav.org) since the origin of the fork (around 2011).
With the exceptions of some commits due to technical/political disagreements or
issues, the changes are merged on a more or less regular schedule (daily for
years thanks to Michael, but more sparse nowadays).
WHY
===
The majority of the active developers believe the project needs to keep this
policy for various reasons.
The most important one is that we don't want our users to have to choose
between two distributors of libraries of the exact same name in order to have a
different set of features and bugfixes. By taking the responsibility of
unifying the two codebases, we allow users to benefit from the changes from the
two teams.
Today, FFmpeg has a much larger user database (we are distributed by every
major distribution), so we consider this mission a priority.
A different approach to the merge could have been to pick the changes we are
interested in and drop most of the cosmetics and other less important changes.
Unfortunately, this makes the following picks much harder, especially since the
Libav project is involved in various deep API changes. As a result, we decide
to virtually take everything done there.
Any Libav developer is of course welcome anytime to contribute directly to the
FFmpeg tree. Of course, we fully understand and are forced to accept that very
few Libav developers are interested in doing so, but we still want to recognize
their work. This leads us to create merge commits for every single one from
Libav. The original commit appears totally unchanged with full authorship in
our history (and the conflict are solved in the merge one). That way, not a
single thing from Libav will be lost in the future in case some reunification
happens, or that project disappears one way or another.
DOWNSIDES
=========
Of course, there are many downsides to this approach.
- It causes a non negligible merge commits pollution. We make sure there are
not several level of merges entangled (we do a 1:1 merge/commit), but it's
still a non-linear history.
- Many duplicated work. For instance, we added libavresample in our tree to
keep compatibility with Libav when our libswresample was already covering the
exact same purpose. The same thing happened for various elements such as the
ProRes support (but differences in features, bugs, licenses, ...). There are
many work to do to unify them, and any help is very much welcome.
- So much manpower from both FFmpeg and Libav is lost because of this mess. We
know it, and we don't know how to fix it. It takes incredible time to do
these merges, so we have even less time to work on things we personally care
about. The bad vibes also do not help with keeping our developers motivated.
- There is a growing technical risk factor with the merges due to the codebase
differing more and more.
MERGE GUIDELINES
================
The following gives developer guidelines on how to proceed when merging Libav commits.
Before starting, you can reduce the risk of errors on merge conflicts by using
a different merge conflict style:
$ git config --global merge.conflictstyle diff3
tools/libav-merge-next-commit is a script to help merging the next commit in
the queue. It assumes a remote named libav. It has two modes: merge, and noop.
The noop mode creates a merge with no change to the HEAD. You can pass a hash
as extra argument to reference a justification (it is common that we already
have the change done in FFmpeg).
Also see tools/murge, you can copy and paste a 3 way conflict into its stdin
and it will display colored diffs. Any arguments to murge (like ones to suppress
whitespace differences) are passed into colordiff.
TODO/FIXME/UNMERGED
===================
Stuff that didn't reach the codebase:
-------------------------------------
- HEVC DSP and x86 MC SIMD improvements from Libav (see https://ffmpeg.org/pipermail/ffmpeg-devel/2015-December/184777.html)
- 1f821750f hevcdsp: split the qpel functions by width instead of by the subpixel fraction
- 818bfe7f0 hevcdsp: split the epel functions by width
- 688417399 hevcdsp: split the pred functions by width
- a853388d2 hevc: change the stride of the MC buffer to be in bytes instead of elements
- 0cef06df0 checkasm: add HEVC MC tests
- e7078e842 hevcdsp: add x86 SIMD for MC
- 7993ec19a hevc: Add hevc_get_pixel_4/8/12/16/24/32/48/64
- use av_cpu_max_align() instead of hardcoding alignment requirements (see https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/215834.html)
- f44ec22e0 lavc: use av_cpu_max_align() instead of hardcoding alignment requirements
- 4de220d2e frame: allow align=0 (meaning automatic) for av_frame_get_buffer()
- Support recovery from an already present HLS playlist (see 16cb06bb30)
- Remove all output devices (see 8e7e042d41, 8d3db95f20, 6ce13070bd, d46cd24986 and https://ffmpeg.org/pipermail/ffmpeg-devel/2017-September/216904.html)
- avcodec/libaomenc: export the Sequence Header OBU as extradata (See a024c3ce9a)
Collateral damage that needs work locally:
------------------------------------------
- Merge proresenc_anatoliy.c and proresenc_kostya.c
- Fix MIPS AC3 downmix
Extra changes needed to be aligned with Libav:
----------------------------------------------
- Switching our examples to the new encode/decode API (see 67d28f4a0f)
- HEVC IDCT bit depth 12-bit support (Libav added 8 and 10 but doesn't have 12)

View file

@ -60,5 +60,6 @@ Files that have MIPS copyright notice in them:
compute_antialias_float.h compute_antialias_float.h
lsp_mips.h lsp_mips.h
fmtconvert_mips.c fmtconvert_mips.c
iirfilter_mips.c
mpegaudiodsp_mips_fixed.c mpegaudiodsp_mips_fixed.c
mpegaudiodsp_mips_float.c mpegaudiodsp_mips_float.c

View file

@ -319,7 +319,7 @@ This is the same as the @samp{vob} muxer with a few differences.
@table @option @table @option
@item muxrate @var{rate} @item muxrate @var{rate}
Set user-defined mux rate expressed as a number of bits/s. If not Set user-defined mux rate expressed as a number of bits/s. If not
specified the automatically computed mux rate is employed. Default value specified the automatically computed mux rate is employed. Default value
is @code{0}. is @code{0}.
@item preload @var{delay} @item preload @var{delay}
@ -772,7 +772,7 @@ Force a delay expressed in seconds after the last frame of each
repetition. Default value is @code{0.0}. repetition. Default value is @code{0.0}.
@item plays @var{repetitions} @item plays @var{repetitions}
specify how many times to play the content, @code{0} causes an infinite specify how many times to play the content, @code{0} causes an infinite
loop, with @code{1} there is no loop loop, with @code{1} there is no loop
@end table @end table
@ -1770,7 +1770,7 @@ for looping indefinitely (default).
@item final_delay @var{delay} @item final_delay @var{delay}
Force the delay (expressed in centiseconds) after the last frame. Each frame Force the delay (expressed in centiseconds) after the last frame. Each frame
ends with a delay until the next frame. The default is @code{-1}, which is a ends with a delay until the next frame. The default is @code{-1}, which is a
special value to tell the muxer to reuse the previous delay. In case of a special value to tell the muxer to re-use the previous delay. In case of a
loop, you might want to customize this value to mark a pause for instance. loop, you might want to customize this value to mark a pause for instance.
@end table @end table
@ -1856,7 +1856,7 @@ This muxer creates an .f4m (Adobe Flash Media Manifest File) manifest, an .abst
(Adobe Bootstrap File) for each stream, and segment files in a directory (Adobe Bootstrap File) for each stream, and segment files in a directory
specified as the output. specified as the output.
These need to be accessed by an HDS player through HTTPS for it to be able to These need to be accessed by an HDS player through HTTPS for it to be able to
perform playback on the generated stream. perform playback on the generated stream.
@subsection Options @subsection Options
@ -2436,14 +2436,13 @@ ffmpeg -re -i in.ts -b:a:0 32k -b:a:1 64k -b:v:0 1000k \
@item @item
Create a single variant stream. Add the @code{#EXT-X-MEDIA} tag with Create a single variant stream. Add the @code{#EXT-X-MEDIA} tag with
@code{TYPE=SUBTITLES} in the master playlist with webvtt subtitle group name @code{TYPE=SUBTITLES} in the master playlist with webvtt subtitle group name
'subtitle' and optional subtitle name, e.g. 'English'. Make sure the input 'subtitle'. Make sure the input file has one text subtitle stream at least.
file has one text subtitle stream at least.
@example @example
ffmpeg -y -i input_with_subtitle.mkv \ ffmpeg -y -i input_with_subtitle.mkv \
-b:v:0 5250k -c:v h264 -pix_fmt yuv420p -profile:v main -level 4.1 \ -b:v:0 5250k -c:v h264 -pix_fmt yuv420p -profile:v main -level 4.1 \
-b:a:0 256k \ -b:a:0 256k \
-c:s webvtt -c:a mp2 -ar 48000 -ac 2 -map 0:v -map 0:a:0 -map 0:s:0 \ -c:s webvtt -c:a mp2 -ar 48000 -ac 2 -map 0:v -map 0:a:0 -map 0:s:0 \
-f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle,sname:English" \ -f hls -var_stream_map "v:0,a:0,s:0,sgroup:subtitle" \
-master_pl_name master.m3u8 -t 300 -hls_time 10 -hls_init_time 4 -hls_list_size \ -master_pl_name master.m3u8 -t 300 -hls_time 10 -hls_init_time 4 -hls_list_size \
10 -master_pl_publish_rate 10 -hls_flags \ 10 -master_pl_publish_rate 10 -hls_flags \
delete_segments+discont_start+split_by_time ./tmp/video.m3u8 delete_segments+discont_start+split_by_time ./tmp/video.m3u8
@ -2538,7 +2537,7 @@ these applications, audio may be played back on a wide range of devices, e.g.,
headphones, mobile phones, tablets, TVs, sound bars, home theater systems, and headphones, mobile phones, tablets, TVs, sound bars, home theater systems, and
big screens. big screens.
This format was promoted and designed by Alliance for Open Media. This format was promoted and designed by Alliance for Open Media.
For more information about this format, see @url{https://aomedia.org/iamf/}. For more information about this format, see @url{https://aomedia.org/iamf/}.
@ -2940,44 +2939,6 @@ ffmpeg -i INPUT -f md5 -
@end example @end example
@end itemize @end itemize
@anchor{mccenc}
@section mcc
Muxer for MacCaption MCC files, it supports MCC versions 1.0 and 2.0.
MCC files store VANC data, which can include closed captions (EIA-608 and CEA-708), ancillary time code, pan-scan data, etc.
@subsection Options
The muxer options are:
@table @option
@item override_time_code_rate
Override the @code{Time Code Rate} value in the output. Defaults to trying to deduce from the stream's @code{time_base}, which often doesn't work.
@item use_u_alias
Use the @code{U} alias for the byte sequence @code{E1h 00h 00h 00h}.
Disabled by default because some @file{.mcc} files disagree on whether it has 2 or 3 zero bytes.
@item mcc_version
The MCC file format version. Must be either 1 or 2, defaults to 2.
@item creation_program
The creation program. Defaults to this version of FFmpeg.
@item creation_time
The creation time. Defaults to the current time.
@end table
@subsection Examples
@itemize
@item
Extract a MXF @code{SMPTE_436M_ANC} stream from a MXF file and write it to a MCC file at 30 fps.
@example
ffmpeg -i input.mxf -c copy -map 0:d -override_time_code_rate 30 out.mcc
@end example
@item
Extract EIA-608/CTA-708 closed captions from a @file{.mp4} file and write them to a MCC file at 29.97 fps.
@example
ffmpeg -f lavfi -i "movie=input.mp4[out+subcc]" -c:s copy -map 0:s -override_time_code_rate 30000/1001 out.mcc
@end example
@end itemize
@section microdvd @section microdvd
MicroDVD subtitle format muxer. MicroDVD subtitle format muxer.
@ -3917,53 +3878,4 @@ ffmpeg -f webm_dash_manifest -i video1.webm \
manifest.xml manifest.xml
@end example @end example
@anchor{whip}
@section whip
WebRTC (Real-Time Communication) muxer that supports sub-second latency streaming according to
the WHIP (WebRTC-HTTP ingestion protocol) specification.
This is an experimental feature.
It uses HTTP as a signaling protocol to exchange SDP capabilities and ICE lite candidates. Then,
it uses STUN binding requests and responses to establish a session over UDP. Subsequently, it
initiates a DTLS handshake to exchange the SRTP encryption keys. Lastly, it splits video and
audio frames into RTP packets and encrypts them using SRTP.
Ensure that you use H.264 without B frames and Opus for the audio codec. For example, to convert
an input file with @command{ffmpeg} to WebRTC:
@example
ffmpeg -re -i input.mp4 -acodec libopus -ar 48000 -ac 2 \
-vcodec libx264 -profile:v baseline -tune zerolatency -threads 1 -bf 0 \
-f whip "http://localhost:1985/rtc/v1/whip/?app=live&stream=livestream"
@end example
For this example, we have employed low latency options, resulting in an end-to-end latency of
approximately 150ms.
@subsection Options
This muxer supports the following options:
@table @option
@item handshake_timeout @var{integer}
Set the timeout in milliseconds for ICE and DTLS handshake.
Default value is 5000.
@item pkt_size @var{integer}
Set the maximum size, in bytes, of RTP packets that are sent out.
Default value is 1500.
@item authorization @var{string}
The optional Bearer token for WHIP Authorization.
@item cert_file @var{string}
The optional certificate file path for DTLS.
@item key_file @var{string}
The optional private key file path for DTLS.
@end table
@c man end MUXERS @c man end MUXERS

View file

@ -157,3 +157,4 @@ PFD[32] would for example be signed 32 bit little-endian IEEE float
@item XVID @tab non-compliant MPEG-4 generated by old Xvid @item XVID @tab non-compliant MPEG-4 generated by old Xvid
@item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug @item XVIX @tab non-compliant MPEG-4 generated by old Xvid with interlacing bug
@end multitable @end multitable

View file

@ -188,7 +188,7 @@ Code that depends on data in registries being untouched, should be written as
a single __asm__() statement. Ideally, a single function contains only one a single __asm__() statement. Ideally, a single function contains only one
__asm__() block. __asm__() block.
Use external asm (nasm) or inline asm (__asm__()), do not use intrinsics. Use external asm (nasm/yasm) or inline asm (__asm__()), do not use intrinsics.
The latter requires a good optimizing compiler which gcc is not. The latter requires a good optimizing compiler which gcc is not.
When debugging a x86 external asm compilation issue, if lost in the macro When debugging a x86 external asm compilation issue, if lost in the macro
@ -199,7 +199,7 @@ actual lines causing issues.
Inline asm vs. external asm Inline asm vs. external asm
--------------------------- ---------------------------
Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc) Both inline asm (__asm__("..") in a .c file, handled by a compiler such as gcc)
and external asm (.s or .asm files, handled by an assembler such as nasm) and external asm (.s or .asm files, handled by an assembler such as nasm/yasm)
are accepted in FFmpeg. Which one to use differs per specific case. are accepted in FFmpeg. Which one to use differs per specific case.
- if your code is intended to be inlined in a C function, inline asm is always - if your code is intended to be inlined in a C function, inline asm is always

View file

@ -301,6 +301,45 @@ ffmpeg -re -i INPUT -c:v rawvideo -pix_fmt bgra -f fbdev /dev/fb0
See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1). See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).
@section opengl
OpenGL output device. Deprecated and will be removed.
To enable this output device you need to configure FFmpeg with @code{--enable-opengl}.
This output device allows one to render to an OpenGL context.
Context may be provided by application or default SDL window is created.
When device renders to external context, application must implement handlers for following messages:
@code{AV_DEV_TO_APP_CREATE_WINDOW_BUFFER} - create OpenGL context on current thread.
@code{AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER} - make OpenGL context current.
@code{AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER} - swap buffers.
@code{AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER} - destroy OpenGL context.
Application is also required to inform a device about current resolution by sending @code{AV_APP_TO_DEV_WINDOW_SIZE} message.
@subsection Options
@table @option
@item background
Set background color. Black is a default.
@item no_window
Disables default SDL window when set to non-zero value.
Application must provide OpenGL context and both @code{window_size_cb} and @code{window_swap_buffers_cb} callbacks when set.
@item window_title
Set the SDL window title; if not specified, it defaults to the filename specified for the output device.
Ignored when @option{no_window} is set.
@item window_size
Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
Mostly usable when @option{no_window} is not set.
@end table
@subsection Examples
Play a file on SDL window using OpenGL rendering:
@example
ffmpeg -i INPUT -f opengl "window title"
@end example
@section oss @section oss
OSS (Open Sound System) output device. OSS (Open Sound System) output device.
@ -367,6 +406,78 @@ Play a file on default device on default server:
ffmpeg -i INPUT -f pulse "stream name" ffmpeg -i INPUT -f pulse "stream name"
@end example @end example
@section sdl
SDL (Simple DirectMedia Layer) output device. Deprecated and will be removed.
For monitoring purposes in FFmpeg, pipes and a video player such as ffplay can be used:
@example
ffmpeg -i INPUT -f nut -c:v rawvideo - | ffplay -
@end example
"sdl2" can be used as alias for "sdl".
This output device allows one to show a video stream in an SDL
window. Only one SDL window is allowed per application, so you can
have only one instance of this output device in an application.
To enable this output device you need libsdl installed on your system
when configuring your build.
For more information about SDL, check:
@url{http://www.libsdl.org/}
@subsection Options
@table @option
@item window_borderless
Set SDL window border off.
Default value is 0 (enable window border).
@item window_enable_quit
Enable quit action (using window button or keyboard key)
when non-zero value is provided.
Default value is 1 (enable quit action).
@item window_fullscreen
Set fullscreen mode when non-zero value is provided.
Default value is zero.
@item window_size
Set the SDL window size, can be a string of the form
@var{width}x@var{height} or a video size abbreviation.
If not specified it defaults to the size of the input video,
downscaled according to the aspect ratio.
@item window_title
Set the SDL window title; if not specified, it defaults to the filename
specified for the output device.
@item window_x
@item window_y
Set the position of the window on the screen.
@end table
@subsection Interactive commands
The window created by the device can be controlled through the
following interactive commands.
@table @key
@item q, ESC
Quit the device immediately.
@end table
@subsection Examples
The following command shows the @command{ffmpeg} output in an
SDL window, forcing its size to the qcif format:
@example
ffmpeg -i INPUT -c:v rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
@end example
@section sndio @section sndio
sndio audio output device. sndio audio output device.

View file

@ -158,7 +158,7 @@ You will need the following prerequisites:
To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from To set up a proper environment in MSYS2, you need to run @code{msys_shell.bat} from
the Visual Studio or Intel Compiler command prompt. the Visual Studio or Intel Compiler command prompt.
Place @code{nasm.exe} somewhere in your @code{PATH}. Place @code{yasm.exe} somewhere in your @code{PATH}.
Next, make sure any other headers and libs you want to use, such as zlib, are Next, make sure any other headers and libs you want to use, such as zlib, are
located in a spot that the compiler can see. Do so by modifying the @code{LIB} located in a spot that the compiler can see. Do so by modifying the @code{LIB}
@ -301,7 +301,7 @@ These library packages are only available from
@uref{http://sourceware.org/cygwinports/, Cygwin Ports}: @uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
@example @example
libSDL-devel, libgsm-devel, libmp3lame-devel, yasm, libSDL-devel, libgsm-devel, libmp3lame-devel,
speex-devel, libtheora-devel, libxvidcore-devel speex-devel, libtheora-devel, libxvidcore-devel
@end example @end example

View file

@ -71,7 +71,7 @@ client may also set a user/password for authentication. The default for both
fields is "guest". Name of virtual host on broker can be set with vhost. The fields is "guest". Name of virtual host on broker can be set with vhost. The
default value is "/". default value is "/".
Multiple subscribers may stream from the broker using the command:
@example @example
ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost] ffplay amqp://[[user]:[password]@@]hostname[:port][/vhost]
@end example @end example
@ -607,7 +607,7 @@ The resource requested by a client, when the experimental HTTP server is in use.
The HTTP code returned to the client, when the experimental HTTP server is in use. The HTTP code returned to the client, when the experimental HTTP server is in use.
@item short_seek_size @item short_seek_size
Set the threshold, in bytes, for when a readahead should be preferred over a seek and
new HTTP request. This is useful, for example, to make sure the same connection new HTTP request. This is useful, for example, to make sure the same connection
is used for reading large video packets with small audio packets in between. is used for reading large video packets with small audio packets in between.
@ -1150,15 +1150,10 @@ ffplay "rtmp://myserver/live/mystream live=1"
Real-time Transport Protocol. Real-time Transport Protocol.
The required syntax for an RTP URL is: The required syntax for an RTP URL is:
@example rtp://@var{hostname}[:@var{port}][?@var{option}=@var{val}...]
rtp://@var{hostname}[:@var{port}][?@var{options}]
@end example
@var{port} specifies the RTP port to use. @var{port} specifies the RTP port to use.
@var{options} contains a list of &-separated options of the form
@var{key}=@var{val}.
The following URL options are supported: The following URL options are supported:
@table @option @table @option
@ -1198,15 +1193,16 @@ set to 1) or to a default remote address (if set to 0).
@item localport=@var{n} @item localport=@var{n}
Set the local RTP port to @var{n}. Set the local RTP port to @var{n}.
This is a deprecated option. Instead, @option{localrtpport} should be
used.
@item localaddr=@var{addr} @item localaddr=@var{addr}
Local IP address of a network interface used for sending packets or joining Local IP address of a network interface used for sending packets or joining
multicast groups. multicast groups.
@item timeout=@var{n} @item timeout=@var{n}
Set timeout (in microseconds) of socket I/O operations to @var{n}. Set timeout (in microseconds) of socket I/O operations to @var{n}.
This is a deprecated option. Instead, @option{localrtpport} should be
used.
@end table @end table
Important notes: Important notes:
@ -2028,87 +2024,6 @@ To play back a stream from the TLS/SSL server using @command{ffplay}:
ffplay tls://@var{hostname}:@var{port} ffplay tls://@var{hostname}:@var{port}
@end example @end example
@section dtls
Datagram Transport Layer Security (DTLS)
The required syntax for a DTLS URL is:
@example
dtls://@var{hostname}:@var{port}
@end example
DTLS shares most options with TLS, but operates over UDP instead of TCP.
The following parameters can be set via command line options
(or in code via @code{AVOption}s):
@table @option
@item ca_file, cafile=@var{filename}
A file containing certificate authority (CA) root certificates to treat
as trusted. If the linked TLS library contains a default this might not
need to be specified for verification to work, but not all libraries and
setups have defaults built in.
The file must be in OpenSSL PEM format.
@item tls_verify=@var{1|0}
If enabled, try to verify the peer that we are communicating with.
Note, if using OpenSSL, this currently only makes sure that the
peer certificate is signed by one of the root certificates in the CA
database, but it does not validate that the certificate actually
matches the host name we are trying to connect to.
This is disabled by default since it requires a CA database to be
provided by the caller in many cases.
@item cert_file, cert=@var{filename}
A file containing a certificate to use in the handshake with the peer.
(When operating as server, in listen mode, this is more often required
by the peer, while client certificates only are mandated in certain
setups.)
@item key_file, key=@var{filename}
A file containing the private key for the certificate.
@item cert_pem=@var{string}
Certificate PEM string
@item key_pem=@var{string}
Private key PEM string
@item listen=@var{1|0}
If enabled, listen for connections on the provided port, and assume
the server role in the handshake instead of the client role.
@item mtu=@var{size}
Set the Maximum Transmission Unit (MTU) for DTLS packets.
@item use_srtp=@var{1|0}
Enable the use_srtp DTLS extension.
This is used in WebRTC applications to establish SRTP encryption keys
through the DTLS handshake. Default is disabled.
@item external_sock=@var{1|0}
Use an external socket instead of creating a new one.
This option only makes sense to pass when interacting with the code via
API, enabling this from CLI will cause immediate failure.
Default is disabled.
@end table
Example command lines:
To create a DTLS server:
@example
ffmpeg -listen 1 -i dtls://@var{hostname}:@var{port} @var{output}
@end example
To create a DTLS client and send data to server:
@example
ffmpeg -i @var{input} -f @var{format} dtls://@var{hostname}:@var{port}
@end example
@section udp @section udp
User Datagram Protocol. User Datagram Protocol.

View file

@ -96,9 +96,6 @@ If value is set to @code{1}, indicates source is full range. Default value is
If value is set to @code{1}, enable full range for destination. Default value If value is set to @code{1}, enable full range for destination. Default value
is @code{0}, which enables limited range. is @code{0}, which enables limited range.
@item gamma @var{(boolean)}
If value is set to @code{1}, enable gamma correct scaling. Default value is @code{0}.
@anchor{sws_params} @anchor{sws_params}
@item param0, param1 @item param0, param1
Set scaling algorithm parameters. The specified values are specific of Set scaling algorithm parameters. The specified values are specific of

2
doc/style.min.css vendored

File diff suppressed because one or more lines are too long

View file

@ -1,344 +0,0 @@
New swscale design to change everything (tm)
============================================
SwsGraph
--------
The entry point to the new architecture, SwsGraph is what coordinates
multiple "passes". These can include cascaded scaling passes, error diffusion
dithering, and so on. Or we could have separate passes for the vertical and
horizontal scaling. In between each SwsPass lies a fully allocated image buffer.
Graph passes may have different levels of threading, e.g. we can have a single
threaded error diffusion pass following a multi-threaded scaling pass.
SwsGraph is internally recreated whenever the image format, dimensions or
settings change in any way. sws_scale_frame() is itself just a light-weight
wrapper that runs ff_sws_graph_create() whenever the format changes, splits
interlaced images into separate fields, and calls ff_sws_graph_run() on each.
From the point of view of SwsGraph itself, all inputs are progressive.
SwsOp / SwsOpList
-----------------
This is the newly introduced abstraction layer between the high-level format
handling logic and the low-level backing implementation. Each SwsOp is designed
to be as small and atomic as possible, with the possible exception of the
read / write operations due to their numerous variants.
The basic idea is to split logic between three major components:
1. The high-level format "business logic", which generates in a very
naive way a sequence of operations guaranteed to get you from point A
to point B. This logic is written with correctness in mind only, and
ignoring any performance concerns or low-level implementation decisions.
Semantically, everything is always decoded from the input format to
normalized (real valued) RGB, and then encoded back to output format.
This code lives in libswscale/format.c
2. The optimizer. This is where the "magic" happens, so to speak. The
optimizer's job is to take the abstract sequence of operations
produced by the high-level format analysis code and incrementally
optimize it. Each optimization step is designed to be minute and provably
lossless, or otherwise guarded behind the BITEXACT flag. This ensures that
the resulting output is always identical, no matter how many layers of
optimization we add.
This code lives in libswscale/ops.c
3. The compiler. Once we have a sequence of operations as output by the
optimizer, we "compile" this down to a callable function. This is then
applied by the dispatch wrapper by striping it over the input image.
See libswscale/ops_backend.c for the reference backend, or
libswscale/x86/ops.c for a more complex SIMD example.
This overall approach has a considerable number of benefits:
1. It allows us to verify correctness of logic and spot semantic errors at a
very high level, by simply looking at the sequence of operations (available
by default at debug / verbose log level), without having to dig through the
multiple levels of complicated, interwoven format handling code that is
legacy swscale.
2. Because most of the brains lives inside the powerful optimizer, we get
fast paths "for free" for any suitable format conversion, rather than having
to enumerate them one by one. SIMD code itself can be written in a very
general way and does not need to be tied to specific pixel formats - subsequent
low-level implementations can be strung together without much overhead.
3. We can in the future, with relative ease, compile these operations
down to SPIR-V (or even LLVM IR) and generate efficient GPU or
target-machine specific implementations. This also opens the window for
adding hardware frame support to libswscale, and even transparently using
GPU acceleration for CPU frames.
4. Platform-specific SIMD can be reduced down to a comparatively small set of
optimized routines, while still providing 100% coverage for all possible
pixel formats and operations. (As of writing, the x86 example backend has
about 60 unique implementations, of which 20 are trivial swizzles, 10 are
read/write ops, 10 are pixel type conversions and the remaining 20 are the
various logic/arithmetic ops).
5. Backends hide behind a layer of abstraction offering them a considerable
deal of flexibility in how they want to implement their operations. For
example, the x86 backend has a dedicated function for compiling compatible
operations down to a single in-place pshufb instruction.
Platform specific low level data is self-contained within its own setup()
function and private data structure, eliminating all reads into SwsContext
or the possibility of conflicts between platforms.
6. We can compute an exact reference result for each operation with fixed
precision (ff_sws_op_apply_q), and use that to e.g. measure the amount of
error introduced by dithering, or even catch bugs in the reference C
implementation. (In theory - currently checkasm just compares against C)
Examples of SwsOp in action
---------------------------
For illustration, here is the sequence of operations currently generated by
my prototype, for a conversion from RGB24 to YUV444P:
Unoptimized operation list:
[ u8 .... -> ....] SWS_OP_READ : 3 elem(s) packed >> 0
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
[ u8 .... -> ....] SWS_OP_RSHIFT : >> 0
[ u8 .... -> ....] SWS_OP_CLEAR : {_ _ _ 0}
[ u8 .... -> ....] SWS_OP_CONVERT : u8 -> f32
[f32 .... -> ....] SWS_OP_LINEAR : diag3+alpha [[1/255 0 0 0 0] [0 1/255 0 0 0] [0 0 1/255 0 0] [0 0 0 1 1]]
[f32 .... -> ....] SWS_OP_LINEAR : matrix3 [[0.299000 0.587000 0.114000 0 0] [-0.168736 -0.331264 1/2 0 0] [1/2 -0.418688 -57/701 0 0] [0 0 0 1 0]]
[f32 .... -> ....] SWS_OP_LINEAR : diag3+off3 [[219 0 0 0 16] [0 224 0 0 128] [0 0 224 0 128] [0 0 0 1 0]]
[f32 .... -> ....] SWS_OP_DITHER : 16x16 matrix
[f32 .... -> ....] SWS_OP_MAX : {0 0 0 0} <= x
[f32 .... -> ....] SWS_OP_MIN : x <= {255 255 255 _}
[f32 .... -> ....] SWS_OP_CONVERT : f32 -> u8
[ u8 .... -> ....] SWS_OP_LSHIFT : << 0
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
[ u8 .... -> ....] SWS_OP_WRITE : 3 elem(s) planar >> 0
This is optimized into the following sequence:
Optimized operation list:
[ u8 XXXX -> +++X] SWS_OP_READ : 3 elem(s) packed >> 0
[ u8 ...X -> +++X] SWS_OP_CONVERT : u8 -> f32
[f32 ...X -> ...X] SWS_OP_LINEAR : matrix3+off3 [[0.256788 0.504129 0.097906 0 16] [-0.148223 -0.290993 112/255 0 128] [112/255 -0.367788 -0.071427 0 128] [0 0 0 1 0]]
[f32 ...X -> ...X] SWS_OP_DITHER : 16x16 matrix
[f32 ...X -> +++X] SWS_OP_CONVERT : f32 -> u8
[ u8 ...X -> +++X] SWS_OP_WRITE : 3 elem(s) planar >> 0
(X = unused, + = exact, 0 = zero)
The extra metadata on the left of the operation list is just a dump of the
internal state used by the optimizer during optimization. It keeps track of
knowledge about the pixel values, such as their value range, whether or not
they're exact integers, and so on.
In this example, you can see that the input values are exact (except for
the alpha channel, which is undefined), until the first SWS_OP_LINEAR
multiplies them by a noninteger constant. They regain their exact integer
status only after the (truncating) conversion to U8 in the output step.
Example of more aggressive optimization
---------------------------------------
Conversion pass for gray -> rgb48:
Unoptimized operation list:
[ u8 .... -> ....] SWS_OP_READ : 1 elem(s) planar >> 0
[ u8 .... -> ....] SWS_OP_SWIZZLE : 0123
[ u8 .... -> ....] SWS_OP_RSHIFT : >> 0
[ u8 .... -> ....] SWS_OP_CLEAR : {_ 0 0 0}
[ u8 .... -> ....] SWS_OP_CONVERT : u8 -> f32
[f32 .... -> ....] SWS_OP_LINEAR : luma+alpha [[1/255 0 0 0 0] [0 1 0 0 0] [0 0 1 0 0] [0 0 0 1 1]]
[f32 .... -> ....] SWS_OP_LINEAR : matrix3 [[1 0 701/500 0 0] [1 -0.344136 -0.714136 0 0] [1 443/250 0 0 0] [0 0 0 1 0]]
[f32 .... -> ....] SWS_OP_LINEAR : diag3 [[65535 0 0 0 0] [0 65535 0 0 0] [0 0 65535 0 0] [0 0 0 1 0]]
[f32 .... -> ....] SWS_OP_MAX : {0 0 0 0} <= x
[f32 .... -> ....] SWS_OP_MIN : x <= {65535 65535 65535 _}
[f32 .... -> ....] SWS_OP_CONVERT : f32 -> u16
[u16 .... -> ....] SWS_OP_LSHIFT : << 0
[u16 .... -> ....] SWS_OP_SWIZZLE : 0123
[u16 .... -> ....] SWS_OP_WRITE : 3 elem(s) packed >> 0
Optimized operation list:
[ u8 XXXX -> +XXX] SWS_OP_READ : 1 elem(s) planar >> 0
[ u8 .XXX -> +XXX] SWS_OP_CONVERT : u8 -> u16 (expand)
[u16 .XXX -> +++X] SWS_OP_SWIZZLE : 0003
[u16 ...X -> +++X] SWS_OP_WRITE : 3 elem(s) packed >> 0
(X = unused, + = exact, 0 = zero)
Here, the optimizer has managed to eliminate all of the unnecessary linear
operations on previously zero'd values, turn the resulting column matrix into
a swizzle operation, avoid the unnecessary dither (and round trip via float)
because the pixel values are guaranteed to be bit exact, and finally, turns
the multiplication by 65535 / 255 = 257 into a simple integer expand operation.
As a final bonus, the x86 backend further optimizes this into a 12-byte shuffle:
pshufb = {0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1}
time=208 us, ref=4212 us, speedup=20.236x faster (single thread)
time=57 us, ref=472 us, speedup=8.160x faster (multi thread)
Compiler and underlying implementation layer (SwsOpChain)
---------------------------------------------------------
While the backend API is flexible enough to permit more exotic implementations
(e.g. using JIT code generation), we establish a common set of helpers for use
in "traditional" SIMD implementations.
The basic idea is to have one "kernel" (or implementation) per operation,
and then just chain a list of these kernels together as separate function
calls. For best performance, we want to keep data in vector registers in
between function calls using a custom calling convention, thus avoiding any
unnecessary memory accesses. Additionally, we want the per-kernel overhead to
be as low as possible, with each kernel ideally just jumping directly into
the next kernel.
As a result, we arrive at a design where we first divide the image into small
chunks, or "blocks", and then dispatch the "chain" of kernels on each chunk in
sequence. Each kernel processes a fixed number of pixels, with the overall
entry point taking care of looping. Remaining pixels (the "tail") are handled
generically by the backend-invariant dispatch code (located in ops.c), using a
partial memcpy into a suitably sized temporary buffer.
To minimize the per-kernel function call overhead, we use a "continuation
passing style" for chaining kernels. Each operation computes its result and
then directly calls the next operation in the sequence, with the appropriate
internal function signature.
The C reference backend reads data into the stack and then passes the array
pointers to the next continuation as regular function arguments:
void process(GlobalContext *ctx, OpContext *op,
block_t x, block_t y, block_t z, block_t w)
{
for (int i = 0; i < SWS_BLOCK_SIZE; i++)
// do something with x[i], y[i], z[i], w[i]
op->next(ctx, &op[1], x, y, z, w);
}
With type conversions pushing the new data onto the stack as well:
void convert8to16(GlobalContext *ctx, OpContext *op,
block_t x, block_t y, block_t z, block_t w)
{
/* Pseudo-code */
u16block_t x16 = (u16block_t) x;
u16block_t y16 = (u16block_t) y;
u16block_t z16 = (u16block_t) z;
u16block_t w16 = (u16block_t) w;
op->next(ctx, &op[1], x16, y16, z16, w16);
}
By contrast, the x86 backend always keeps the X/Y/Z/W values pinned in specific
vector registers (ymm0-ymm3 for the lower half, and ymm4-ymm7 for the second
half).
Each kernel additionally has access to a 32 byte per-op context storing the
pointer to the next kernel plus 16 bytes of arbitrary private data. This is
used during construction of the function chain to place things like small
constants.
In assembly, the per-kernel overhead looks like this:
load $tmp, $arg1
...
add $arg1, 32
jump $tmp
This design gives vastly better performance than the alternative of returning
out to a central loop or "trampoline". This is partly because the order of
kernels within a chain is always the same, so the branch predictor can easily
remember the target address of each "jump" instruction.
The only way to realistically improve on this design would be to directly
stitch the kernel body together using runtime code generation.
Future considerations and limitations
-------------------------------------
My current prototype has a number of severe limitations and opportunities
for improvements:
1. It does not handle scaling at all. I am not yet entirely sure on how I want
to handle scaling; this includes handling of subsampled content. I have a
number of vague ideas in my head, but nothing where I can say with certainty
that it will work out well.
It's possible that we won't come up with a perfect solution here, and will
need to decide on which set of compromises we are comfortable accepting:
1. Do we need the ability to scale YUV -> YUV by handling luma and chroma
independently? When downscaling 100x100 4:2:0 to 50x50 4:4:4, should we
support the option of reusing the chroma plane directly (even though
this would introduce a subpixel shift for typical chroma siting)?
Looking towards zimg, I am also thinking that we probably also want to do
scaling on floating point values, since this is best for both performance
and accuracy, especially given that we need to go up to 32-bit intermediates
during scaling anyway.
So far, the most promising approach seems to be to handle subsampled
input/output as a dedicated read/write operation type; perhaps even with a
fixed/static subsampling kernel. To avoid compromising on performance when
chroma resampling is not necessary, the optimizer could then relax the
pipeline to use non-interpolating read/writes when all intermediate
operations are component-independent.
2. Since each operation is conceptually defined on 4-component pixels, we end
up defining a lot of variants of each implementation for each possible
*subset*. For example, we have four different implementations for
SWS_OP_SCALE in my current templates:
- op_scale_1000
- op_scale_1001
- op_scale_1110
- op_scale_1111
This reflects the four different arrangements of pixel components that are
typically present (or absent). While best for performance, it does turn into
a bit of a chore when implementing these kernels.
The only real alternative would be to either branch inside the kernel (bad),
or to use separate kernels for each individual component and chain them all
together. I have not yet tested whether the latter approach would be faster
after the latest round of refactors to the kernel glue code.
3. I do not yet have any support for LUTs. But when I add them, something we
could do is have the optimized pass automatically "promote" a sequence of
operations to LUTs. For example, any sequence that looks like:
1. [u8] SWS_OP_CONVERT -> X
2. [X] ... // only per-component operations
3. [X] SWS_OP_CONVERT -> Y
4. [Y] SWS_OP_WRITE
could be replaced by a LUT with 256 entries. This is especially important
for anything involving packed 8-bit input (e.g. rgb8, rgb4_byte).
We also definitely want to hook this up to the existing CMS code for
transformations between different primaries.
4. Because we rely on AVRational math to generate the coefficients for
operations, we need to be able to represent all pixel values as an
AVRational. However, this presents a challenge for 32-bit formats (e.g.
GRAY32, RGBA128), because their size exceeds INT_MAX, which is the maximum
value representable by an AVRational.
It's possible we may want to introduce an AVRational64 for this, or
perhaps more flexibly, extend AVRational to an AVFloating type which is
represented as { AVRational n; int exp; }, representing n/d * 2^exp. This
would preserve our ability to represent all pixel values exactly, while
opening up the range arbitrarily.
5. Is there ever a situation where the use of floats introduces the risk of
non bit-exact output? For this reason, and possible performance advantages,
we may want to explore the use of a fixed-point 16 bit path as an alternative
to the floating point math.
So far, I have managed to avoid any bit exactness issues inside the x86
backend by ensuring that the order of linear operations is identical
between the C backend and the x86 backend, but this may not be practical
to guarantee on all backends. The x86 float code is also dramatically
faster than the old fixed point code, so I'm tentatively optimistic about
the lack of a need for a fixed point path.

View file

@ -54,24 +54,12 @@ sub get_formatting_function($$) {
} }
# determine texinfo version # determine texinfo version
my $package_version = ff_get_conf('PACKAGE_VERSION'); my $program_version_num = version->declare(ff_get_conf('PACKAGE_VERSION'))->numify;
$package_version =~ s/\+dev$//;
my $program_version_num = version->declare($package_version)->numify;
my $program_version_6_8 = $program_version_num >= 6.008000; my $program_version_6_8 = $program_version_num >= 6.008000;
# no navigation elements # no navigation elements
ff_set_from_init_file('HEADERS', 0); ff_set_from_init_file('HEADERS', 0);
my %sectioning_commands = %Texinfo::Common::sectioning_commands;
if (scalar(keys(%sectioning_commands)) == 0) {
%sectioning_commands = %Texinfo::Commands::sectioning_heading_commands;
}
my %root_commands = %Texinfo::Common::root_commands;
if (scalar(keys(%root_commands)) == 0) {
%root_commands = %Texinfo::Commands::root_commands;
}
sub ffmpeg_heading_command($$$$$) sub ffmpeg_heading_command($$$$$)
{ {
my $self = shift; my $self = shift;
@ -89,9 +77,6 @@ sub ffmpeg_heading_command($$$$$)
return $result; return $result;
} }
# no need to set it as the $element_id is output unconditionally
my $heading_id;
my $element_id = $self->command_id($command); my $element_id = $self->command_id($command);
$result .= "<a name=\"$element_id\"></a>\n" $result .= "<a name=\"$element_id\"></a>\n"
if (defined($element_id) and $element_id ne ''); if (defined($element_id) and $element_id ne '');
@ -99,40 +84,24 @@ sub ffmpeg_heading_command($$$$$)
print STDERR "Process $command " print STDERR "Process $command "
.Texinfo::Structuring::_print_root_command_texi($command)."\n" .Texinfo::Structuring::_print_root_command_texi($command)."\n"
if ($self->get_conf('DEBUG')); if ($self->get_conf('DEBUG'));
my $output_unit; my $element;
if ($root_commands{$command->{'cmdname'}}) { if ($Texinfo::Common::root_commands{$command->{'cmdname'}}
if ($command->{'associated_unit'}) { and $command->{'parent'}
$output_unit = $command->{'associated_unit'}; and $command->{'parent'}->{'type'}
} elsif ($command->{'structure'} and $command->{'parent'}->{'type'} eq 'element') {
and $command->{'structure'}->{'associated_unit'}) { $element = $command->{'parent'};
$output_unit = $command->{'structure'}->{'associated_unit'};
} elsif ($command->{'parent'}
and $command->{'parent'}->{'type'}
and $command->{'parent'}->{'type'} eq 'element') {
$output_unit = $command->{'parent'};
}
} }
if ($element) {
if ($output_unit) {
$result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname, $result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname,
$command, $output_unit); $command, $element);
} }
my $heading_level; my $heading_level;
# node is used as heading if there is nothing else. # node is used as heading if there is nothing else.
if ($cmdname eq 'node') { if ($cmdname eq 'node') {
if (!$output_unit or if (!$element or (!$element->{'extra'}->{'section'}
(((!$output_unit->{'extra'}->{'section'} and $element->{'extra'}->{'node'}
and $output_unit->{'extra'}->{'node'} and $element->{'extra'}->{'node'} eq $command
and $output_unit->{'extra'}->{'node'} eq $command)
or
((($output_unit->{'extra'}->{'unit_command'}
and $output_unit->{'extra'}->{'unit_command'} eq $command)
or
($output_unit->{'unit_command'}
and $output_unit->{'unit_command'} eq $command))
and $command->{'extra'}
and not $command->{'extra'}->{'associated_section'}))
# bogus node may not have been normalized # bogus node may not have been normalized
and defined($command->{'extra'}->{'normalized'}))) { and defined($command->{'extra'}->{'normalized'}))) {
if ($command->{'extra'}->{'normalized'} eq 'Top') { if ($command->{'extra'}->{'normalized'} eq 'Top') {
@ -142,15 +111,7 @@ sub ffmpeg_heading_command($$$$$)
} }
} }
} else { } else {
if (defined($command->{'extra'}) $heading_level = $command->{'level'};
and defined($command->{'extra'}->{'section_level'})) {
$heading_level = $command->{'extra'}->{'section_level'};
} elsif ($command->{'structure'}
and defined($command->{'structure'}->{'section_level'})) {
$heading_level = $command->{'structure'}->{'section_level'};
} else {
$heading_level = $command->{'level'};
}
} }
my $heading = $self->command_text($command); my $heading = $self->command_text($command);
@ -158,8 +119,8 @@ sub ffmpeg_heading_command($$$$$)
# if there is an error in the node. # if there is an error in the node.
if (defined($heading) and $heading ne '' and defined($heading_level)) { if (defined($heading) and $heading ne '' and defined($heading_level)) {
if ($root_commands{$cmdname} if ($Texinfo::Common::root_commands{$cmdname}
and $sectioning_commands{$cmdname}) { and $Texinfo::Common::sectioning_commands{$cmdname}) {
my $content_href = $self->command_contents_href($command, 'contents', my $content_href = $self->command_contents_href($command, 'contents',
$self->{'current_filename'}); $self->{'current_filename'});
if ($content_href) { if ($content_href) {
@ -179,13 +140,7 @@ sub ffmpeg_heading_command($$$$$)
} }
} }
my $in_preformatted; if ($self->in_preformatted()) {
if ($program_version_num >= 7.001090) {
$in_preformatted = $self->in_preformatted_context();
} else {
$in_preformatted = $self->in_preformatted();
}
if ($in_preformatted) {
$result .= $heading."\n"; $result .= $heading."\n";
} else { } else {
# if the level was changed, set the command name right # if the level was changed, set the command name right
@ -194,25 +149,21 @@ sub ffmpeg_heading_command($$$$$)
$cmdname $cmdname
= $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level]; = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level];
} }
# format_heading_text expects an array of headings for texinfo >= 7.0
if ($program_version_num >= 7.000000) { if ($program_version_num >= 7.000000) {
$result .= &{get_formatting_function($self,'format_heading_text')}($self, $heading = [$heading];
$cmdname, [$cmdname], $heading, }
$heading_level +$self->get_conf('CHAPTER_HEADER_LEVEL') -1, $result .= &{get_formatting_function($self,'format_heading_text')}(
$heading_id, $command);
} else {
$result .= &{get_formatting_function($self,'format_heading_text')}(
$self, $cmdname, $heading, $self, $cmdname, $heading,
$heading_level + $heading_level +
$self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command); $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command);
}
} }
} }
$result .= $content if (defined($content)); $result .= $content if (defined($content));
return $result; return $result;
} }
foreach my $command (keys(%sectioning_commands), 'node') { foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') {
texinfo_register_command_formatting($command, \&ffmpeg_heading_command); texinfo_register_command_formatting($command, \&ffmpeg_heading_command);
} }
@ -237,56 +188,28 @@ sub ffmpeg_begin_file($$$)
my $filename = shift; my $filename = shift;
my $element = shift; my $element = shift;
my ($element_command, $node_command, $command_for_title); my $command;
if ($element) { if ($element and $self->get_conf('SPLIT')) {
if ($element->{'unit_command'}) { $command = $self->element_command($element);
$element_command = $element->{'unit_command'};
} elsif ($self->can('tree_unit_element_command')) {
$element_command = $self->tree_unit_element_command($element);
} elsif ($self->can('tree_unit_element_command')) {
$element_command = $self->element_command($element);
}
$node_command = $element_command;
if ($element_command and $element_command->{'cmdname'}
and $element_command->{'cmdname'} ne 'node'
and $element_command->{'extra'}
and $element_command->{'extra'}->{'associated_node'}) {
$node_command = $element_command->{'extra'}->{'associated_node'};
}
$command_for_title = $element_command if ($self->get_conf('SPLIT'));
} }
my ($title, $description, $keywords, $encoding, $date, $css_lines, $doctype, my ($title, $description, $encoding, $date, $css_lines,
$root_html_element_attributes, $body_attributes, $copying_comment, $doctype, $bodytext, $copying_comment, $after_body_open,
$after_body_open, $extra_head, $program_and_version, $program_homepage, $extra_head, $program_and_version, $program_homepage,
$program, $generator); $program, $generator);
if ($program_version_num >= 7.001090) { if ($program_version_num >= 7.000000) {
($title, $description, $keywords, $encoding, $date, $css_lines, $doctype, ($title, $description, $encoding, $date, $css_lines,
$root_html_element_attributes, $body_attributes, $copying_comment, $doctype, $bodytext, $copying_comment, $after_body_open,
$after_body_open, $extra_head, $program_and_version, $program_homepage,
$program, $generator) = $self->_file_header_information($command_for_title,
$filename);
} elsif ($program_version_num >= 7.000000) {
($title, $description, $encoding, $date, $css_lines, $doctype,
$root_html_element_attributes, $copying_comment, $after_body_open,
$extra_head, $program_and_version, $program_homepage, $extra_head, $program_and_version, $program_homepage,
$program, $generator) = $self->_file_header_information($command_for_title, $program, $generator) = $self->_file_header_information($command);
$filename);
} else { } else {
($title, $description, $encoding, $date, $css_lines, ($title, $description, $encoding, $date, $css_lines,
$doctype, $root_html_element_attributes, $copying_comment, $doctype, $bodytext, $copying_comment, $after_body_open,
$after_body_open, $extra_head, $program_and_version, $program_homepage, $extra_head, $program_and_version, $program_homepage,
$program, $generator) = $self->_file_header_informations($command_for_title); $program, $generator) = $self->_file_header_informations($command);
} }
my $links; my $links = $self->_get_links ($filename, $element);
if ($program_version_num >= 7.000000) {
$links = $self->_get_links($filename, $element, $node_command);
} else {
$links = $self->_get_links ($filename, $element);
}
my $head1 = $ENV{"FFMPEG_HEADER1"} || <<EOT; my $head1 = $ENV{"FFMPEG_HEADER1"} || <<EOT;
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
@ -329,25 +252,13 @@ sub ffmpeg_program_string($)
if (defined($self->get_conf('PROGRAM')) if (defined($self->get_conf('PROGRAM'))
and $self->get_conf('PROGRAM') ne '' and $self->get_conf('PROGRAM') ne ''
and defined($self->get_conf('PACKAGE_URL'))) { and defined($self->get_conf('PACKAGE_URL'))) {
if ($program_version_num >= 7.001090) { return $self->convert_tree(
return $self->convert_tree(
$self->cdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
{ 'program_homepage' => {'text' => $self->get_conf('PACKAGE_URL')},
'program' => {'text' => $self->get_conf('PROGRAM') }}));
} else {
return $self->convert_tree(
$self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.', $self->gdt('This document was generated using @uref{{program_homepage}, @emph{{program}}}.',
{ 'program_homepage' => {'text' => $self->get_conf('PACKAGE_URL')}, { 'program_homepage' => $self->get_conf('PACKAGE_URL'),
'program' => {'text' => $self->get_conf('PROGRAM') }})); 'program' => $self->get_conf('PROGRAM') }));
}
} else { } else {
if ($program_version_num >= 7.001090) { return $self->convert_tree(
return $self->convert_tree( $self->gdt('This document was generated automatically.'));
$self->cdt('This document was generated automatically.'));
} else {
return $self->convert_tree(
$self->gdt('This document was generated automatically.'));
}
} }
} }
if ($program_version_6_8) { if ($program_version_6_8) {

0
doc/texi2pod.pl Executable file → Normal file
View file

0
doc/texidep.pl Executable file → Normal file
View file

View file

@ -44,3 +44,4 @@ a+b*c;
here the reader knows that a,b,c are meant to be signed integers but for C here the reader knows that a,b,c are meant to be signed integers but for C
standard compliance / to avoid undefined behavior they are stored in unsigned standard compliance / to avoid undefined behavior they are stored in unsigned
ints. ints.

View file

@ -731,12 +731,8 @@ FL+FR+FC+LFE+BL+BR+SL+SR+TFL+TFR+TBL+TBR
FL+FR+FC+LFE+BL+BR+SL+SR+TFL+TFR+TBC+LFE2 FL+FR+FC+LFE+BL+BR+SL+SR+TFL+TFR+TBC+LFE2
@item 9.1.4 @item 9.1.4
FL+FR+FC+LFE+BL+BR+FLC+FRC+SL+SR+TFL+TFR+TBL+TBR FL+FR+FC+LFE+BL+BR+FLC+FRC+SL+SR+TFL+TFR+TBL+TBR
@item 9.1.6
FL+FR+FC+LFE+BL+BR+FLC+FRC+SL+SR+TFL+TFR+TBL+TBR+TSL+TSR
@item hexadecagonal @item hexadecagonal
FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR FL+FR+FC+BL+BR+BC+SL+SR+WL+WR+TBL+TBR+TBC+TFC+TFL+TFR
@item binaural
BIL+BIR
@item downmix @item downmix
DL+DR DL+DR
@item 22.2 @item 22.2

View file

@ -3,8 +3,6 @@ OBJS-$(HAVE_ARMV6) += $(ARMV6-OBJS) $(ARMV6-OBJS-yes)
OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes) OBJS-$(HAVE_ARMV8) += $(ARMV8-OBJS) $(ARMV8-OBJS-yes)
OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes) OBJS-$(HAVE_VFP) += $(VFP-OBJS) $(VFP-OBJS-yes)
OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes) OBJS-$(HAVE_NEON) += $(NEON-OBJS) $(NEON-OBJS-yes)
OBJS-$(HAVE_SVE) += $(SVE-OBJS) $(SVE-OBJS-yes)
OBJS-$(HAVE_SVE2) += $(SVE2-OBJS) $(SVE2-OBJS-yes)
OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes) OBJS-$(HAVE_MIPSFPU) += $(MIPSFPU-OBJS) $(MIPSFPU-OBJS-yes)
OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes) OBJS-$(HAVE_MIPSDSP) += $(MIPSDSP-OBJS) $(MIPSDSP-OBJS-yes)
@ -21,7 +19,5 @@ OBJS-$(HAVE_RV) += $(RV-OBJS) $(RV-OBJS-yes)
OBJS-$(HAVE_RVV) += $(RVV-OBJS) $(RVV-OBJS-yes) OBJS-$(HAVE_RVV) += $(RVV-OBJS) $(RVV-OBJS-yes)
OBJS-$(HAVE_RV_ZVBB) += $(RVVB-OBJS) $(RVVB-OBJS-yes) OBJS-$(HAVE_RV_ZVBB) += $(RVVB-OBJS) $(RVVB-OBJS-yes)
OBJS-$(HAVE_SIMD128) += $(SIMD128-OBJS) $(SIMD128-OBJS-yes)
OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes) OBJS-$(HAVE_MMX) += $(MMX-OBJS) $(MMX-OBJS-yes)
OBJS-$(HAVE_X86ASM) += $(X86ASM-OBJS) $(X86ASM-OBJS-yes) OBJS-$(HAVE_X86ASM) += $(X86ASM-OBJS) $(X86ASM-OBJS-yes)

View file

@ -38,10 +38,8 @@ int main(int argc, char **argv)
return -1; return -1;
output = fopen(argv[2], "wb"); output = fopen(argv[2], "wb");
if (!output) { if (!output)
fclose(input);
return -1; return -1;
}
if (argc == 4) { if (argc == 4) {
name = argv[3]; name = argv[3];
@ -69,10 +67,8 @@ int main(int argc, char **argv)
fclose(output); fclose(output);
if (ferror(input) || !feof(input)) { if (ferror(input) || !feof(input))
fclose(input);
return -1; return -1;
}
fclose(input); fclose(input);

View file

@ -18,7 +18,7 @@ BIN2C = $(BIN2CEXE)
ifndef V ifndef V
Q = @ Q = @
ECHO = printf "$(1)\t%s\n" $(2) ECHO = printf "$(1)\t%s\n" $(2)
BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C METALCC METALLIB BRIEF = CC CXX OBJCC HOSTCC HOSTLD AS X86ASM AR LD STRIP CP WINDRES NVCC BIN2C
SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM SILENT = DEPCC DEPHOSTCC DEPAS DEPX86ASM RANLIB RM
MSG = $@ MSG = $@
@ -115,12 +115,6 @@ COMPILE_LASX = $(call COMPILE,CC,LASXFLAGS)
$(BIN2CEXE): ffbuild/bin2c_host.o $(BIN2CEXE): ffbuild/bin2c_host.o
$(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS) $(HOSTLD) $(HOSTLDFLAGS) $(HOSTLD_O) $^ $(HOSTEXTRALIBS)
RUN_BIN2C = $(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
RUN_GZIP = $(M)gzip -nc9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@
RUN_MINIFY = $(M)sed 's!/\\*.*\\*/!!g' $< | tr '\n' ' ' | tr -s ' ' | sed 's/^ //; s/ $$//' > $@
%.gz: TAG = GZIP
%.min: TAG = MINIFY
%.metal.air: %.metal %.metal.air: %.metal
$(METALCC) $< -o $@ $(METALCC) $< -o $@
@ -128,46 +122,21 @@ RUN_MINIFY = $(M)sed 's!/\\*.*\\*/!!g' $< | tr '\n' ' ' | tr -s ' ' | sed 's/^ /
$(METALLIB) --split-module-without-linking $< -o $@ $(METALLIB) --split-module-without-linking $< -o $@
%.metallib.c: %.metallib $(BIN2CEXE) %.metallib.c: %.metallib $(BIN2CEXE)
$(RUN_BIN2C) $(BIN2C) $< $@ $(subst .,_,$(basename $(notdir $@)))
%.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h %.ptx: %.cu $(SRC_PATH)/compat/cuda/cuda_runtime.h
$(COMPILE_NVCC) $(COMPILE_NVCC)
ifdef CONFIG_PTX_COMPRESSION ifdef CONFIG_PTX_COMPRESSION
%.ptx.gz: TAG = GZIP
%.ptx.gz: %.ptx %.ptx.gz: %.ptx
$(RUN_GZIP) $(M)gzip -nc9 $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) >$@
%.ptx.c: %.ptx.gz $(BIN2CEXE) %.ptx.c: %.ptx.gz $(BIN2CEXE)
$(RUN_BIN2C) $(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
else else
%.ptx.c: %.ptx $(BIN2CEXE) %.ptx.c: %.ptx $(BIN2CEXE)
$(RUN_BIN2C) $(BIN2C) $(patsubst $(SRC_PATH)/%,$(SRC_LINK)/%,$<) $@ $(subst .,_,$(basename $(notdir $@)))
endif
%.css.min: %.css
$(RUN_MINIFY)
ifdef CONFIG_RESOURCE_COMPRESSION
%.css.min.gz: %.css.min
$(RUN_GZIP)
%.css.c: %.css.min.gz $(BIN2CEXE)
$(RUN_BIN2C)
%.html.gz: %.html
$(RUN_GZIP)
%.html.c: %.html.gz $(BIN2CEXE)
$(RUN_BIN2C)
else # NO COMPRESSION
%.css.c: %.css.min $(BIN2CEXE)
$(RUN_BIN2C)
%.html.c: %.html $(BIN2CEXE)
$(RUN_BIN2C)
endif endif
clean:: clean::
@ -190,6 +159,7 @@ endif
include $(SRC_PATH)/ffbuild/arch.mak include $(SRC_PATH)/ffbuild/arch.mak
OBJS += $(OBJS-yes) OBJS += $(OBJS-yes)
SLIBOBJS += $(SLIBOBJS-yes)
SHLIBOBJS += $(SHLIBOBJS-yes) SHLIBOBJS += $(SHLIBOBJS-yes)
STLIBOBJS += $(STLIBOBJS-yes) STLIBOBJS += $(STLIBOBJS-yes)
FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS) FFLIBS := $($(NAME)_FFLIBS) $(FFLIBS-yes) $(FFLIBS)
@ -199,6 +169,7 @@ LDLIBS = $(FFLIBS:%=%$(BUILDSUF))
FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=EXTRALIBS-%),$($(lib))) $(EXTRALIBS) FFEXTRALIBS := $(LDLIBS:%=$(LD_LIB)) $(foreach lib,EXTRALIBS-$(NAME) $(FFLIBS:%=EXTRALIBS-%),$($(lib))) $(EXTRALIBS)
OBJS := $(sort $(OBJS:%=$(SUBDIR)%)) OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
SLIBOBJS := $(sort $(SLIBOBJS:%=$(SUBDIR)%))
SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%)) SHLIBOBJS := $(sort $(SHLIBOBJS:%=$(SUBDIR)%))
STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%)) STLIBOBJS := $(sort $(STLIBOBJS:%=$(SUBDIR)%))
TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o) TESTOBJS := $(TESTOBJS:%=$(SUBDIR)tests/%) $(TESTPROGS:%=$(SUBDIR)tests/%.o)
@ -223,6 +194,7 @@ PTXOBJS = $(filter %.ptx.o,$(OBJS))
$(HOBJS): CCFLAGS += $(CFLAGS_HEADERS) $(HOBJS): CCFLAGS += $(CFLAGS_HEADERS)
checkheaders: $(HOBJS) checkheaders: $(HOBJS)
.SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=.gz) $(PTXOBJS:.o=) .SECONDARY: $(HOBJS:.o=.c) $(PTXOBJS:.o=.c) $(PTXOBJS:.o=.gz) $(PTXOBJS:.o=)
alltools: $(TOOLS) alltools: $(TOOLS)
$(HOSTOBJS): %.o: %.c $(HOSTOBJS): %.o: %.c
@ -234,14 +206,15 @@ $(HOSTPROGS): %$(HOSTEXESUF): %.o
$(OBJS): | $(sort $(dir $(OBJS))) $(OBJS): | $(sort $(dir $(OBJS)))
$(HOBJS): | $(sort $(dir $(HOBJS))) $(HOBJS): | $(sort $(dir $(HOBJS)))
$(HOSTOBJS): | $(sort $(dir $(HOSTOBJS))) $(HOSTOBJS): | $(sort $(dir $(HOSTOBJS)))
$(SLIBOBJS): | $(sort $(dir $(SLIBOBJS)))
$(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS))) $(SHLIBOBJS): | $(sort $(dir $(SHLIBOBJS)))
$(STLIBOBJS): | $(sort $(dir $(STLIBOBJS))) $(STLIBOBJS): | $(sort $(dir $(STLIBOBJS)))
$(TESTOBJS): | $(sort $(dir $(TESTOBJS))) $(TESTOBJS): | $(sort $(dir $(TESTOBJS)))
$(TOOLOBJS): | tools $(TOOLOBJS): | tools
OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS)) OUTDIRS := $(OUTDIRS) $(dir $(OBJS) $(HOBJS) $(HOSTOBJS) $(SLIBOBJS) $(SHLIBOBJS) $(STLIBOBJS) $(TESTOBJS))
CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.objs *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *.html.gz *.html.c *.css.min.gz *.css.min *.css.c *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb CLEANSUFFIXES = *.d *.gcda *.gcno *.h.c *.ho *.map *.o *.pc *.ptx *.ptx.gz *.ptx.c *.ver *.version *$(DEFAULT_X86ASMD).asm *~ *.ilk *.pdb
LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a LIBSUFFIXES = *.a *.lib *.so *.so.* *.dylib *.dll *.def *.dll.a
define RULES define RULES
@ -251,4 +224,4 @@ endef
$(eval $(RULES)) $(eval $(RULES))
-include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d) -include $(wildcard $(OBJS:.o=.d) $(HOSTOBJS:.o=.d) $(TESTOBJS:.o=.d) $(HOBJS:.o=.d) $(SHLIBOBJS:.o=.d) $(STLIBOBJS:.o=.d) $(SLIBOBJS:.o=.d)) $(OBJS:.o=$(DEFAULT_X86ASMD).d)

View file

@ -26,7 +26,7 @@ ifdef CONFIG_SHARED
# for purely shared builds. # for purely shared builds.
# Test programs are always statically linked against their library # Test programs are always statically linked against their library
# to be able to access their library's internals, even with shared builds. # to be able to access their library's internals, even with shared builds.
# Yet linking against dependent libraries still uses dynamic linking. # Yet linking against dependend libraries still uses dynamic linking.
# This means that we are in the scenario described above. # This means that we are in the scenario described above.
# In case only static libs are used, the linker will only use # In case only static libs are used, the linker will only use
# one of these copies; this depends on the duplicated object files # one of these copies; this depends on the duplicated object files
@ -35,14 +35,8 @@ OBJS += $(SHLIBOBJS)
endif endif
$(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS) $(SUBDIR)$(LIBNAME): $(OBJS) $(STLIBOBJS)
$(RM) $@ $(RM) $@
ifeq ($(RESPONSE_FILES),yes)
$(Q)echo $^ > $@.objs
$(AR) $(ARFLAGS) $(AR_O) @$@.objs
else
$(AR) $(ARFLAGS) $(AR_O) $^ $(AR) $(ARFLAGS) $(AR_O) $^
endif
$(RANLIB) $@ $(RANLIB) $@
-$(RM) $@.objs
install-headers: install-lib$(NAME)-headers install-lib$(NAME)-pkgconfig install-headers: install-lib$(NAME)-headers install-lib$(NAME)-pkgconfig
@ -70,16 +64,10 @@ $(SUBDIR)lib$(NAME).ver: $(SUBDIR)lib$(NAME).v $(OBJS)
$(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR) $(SUBDIR)$(SLIBNAME): $(SUBDIR)$(SLIBNAME_WITH_MAJOR)
$(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME) $(Q)cd ./$(SUBDIR) && $(LN_S) $(SLIBNAME_WITH_MAJOR) $(SLIBNAME)
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SUBDIR)lib$(NAME).ver $(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(OBJS) $(SHLIBOBJS) $(SLIBOBJS) $(SUBDIR)lib$(NAME).ver
$(SLIB_CREATE_DEF_CMD) $(SLIB_CREATE_DEF_CMD)
ifeq ($(RESPONSE_FILES),yes)
$(Q)echo $$(filter %.o,$$^) > $$@.objs
$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) @$$@.objs $(FFEXTRALIBS)
else
$$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS) $$(LD) $(SHFLAGS) $(LDFLAGS) $(LDSOFLAGS) $$(LD_O) $$(filter %.o,$$^) $(FFEXTRALIBS)
endif
$(SLIB_EXTRA_CMD) $(SLIB_EXTRA_CMD)
-$(RM) $$@.objs
ifdef SUBDIR ifdef SUBDIR
$(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(DEP_LIBS) $(SUBDIR)$(SLIBNAME_WITH_MAJOR): $(DEP_LIBS)

View file

@ -50,7 +50,7 @@ includedir=${source_path}
prefix= prefix=
exec_prefix= exec_prefix=
libdir=\${pcfiledir}/../../../$name libdir=\${pcfiledir}/../../../$name
includedir=${includedir} includedir=${source_path}
Name: $fullname Name: $fullname
Description: $comment Description: $comment

View file

@ -9,8 +9,6 @@ AVBASENAMES = ffmpeg ffplay ffprobe
ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF)) ALLAVPROGS = $(AVBASENAMES:%=%$(PROGSSUF)$(EXESUF))
ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF)) ALLAVPROGS_G = $(AVBASENAMES:%=%$(PROGSSUF)_g$(EXESUF))
include $(SRC_PATH)/fftools/resources/Makefile
OBJS-ffmpeg += \ OBJS-ffmpeg += \
fftools/ffmpeg_dec.o \ fftools/ffmpeg_dec.o \
fftools/ffmpeg_demux.o \ fftools/ffmpeg_demux.o \
@ -21,35 +19,9 @@ OBJS-ffmpeg += \
fftools/ffmpeg_mux_init.o \ fftools/ffmpeg_mux_init.o \
fftools/ffmpeg_opt.o \ fftools/ffmpeg_opt.o \
fftools/ffmpeg_sched.o \ fftools/ffmpeg_sched.o \
fftools/graph/graphprint.o \ fftools/objpool.o \
fftools/sync_queue.o \ fftools/sync_queue.o \
fftools/thread_queue.o \ fftools/thread_queue.o \
fftools/textformat/avtextformat.o \
fftools/textformat/tf_compact.o \
fftools/textformat/tf_default.o \
fftools/textformat/tf_flat.o \
fftools/textformat/tf_ini.o \
fftools/textformat/tf_json.o \
fftools/textformat/tf_mermaid.o \
fftools/textformat/tf_xml.o \
fftools/textformat/tw_avio.o \
fftools/textformat/tw_buffer.o \
fftools/textformat/tw_stdout.o \
$(OBJS-resman) \
$(RESOBJS) \
OBJS-ffprobe += \
fftools/textformat/avtextformat.o \
fftools/textformat/tf_compact.o \
fftools/textformat/tf_default.o \
fftools/textformat/tf_flat.o \
fftools/textformat/tf_ini.o \
fftools/textformat/tf_json.o \
fftools/textformat/tf_mermaid.o \
fftools/textformat/tf_xml.o \
fftools/textformat/tw_avio.o \
fftools/textformat/tw_buffer.o \
fftools/textformat/tw_stdout.o \
OBJS-ffplay += fftools/ffplay_renderer.o OBJS-ffplay += fftools/ffplay_renderer.o
@ -59,7 +31,7 @@ ifdef HAVE_GNU_WINDRES
OBJS-$(1) += fftools/fftoolsres.o OBJS-$(1) += fftools/fftoolsres.o
endif endif
$(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1)) $(1)$(PROGSSUF)_g$(EXESUF): $$(OBJS-$(1))
$$(OBJS-$(1)): | fftools fftools/textformat fftools/resources fftools/graph $$(OBJS-$(1)): | fftools
$$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1)) $$(OBJS-$(1)): CFLAGS += $(CFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1)) $(1)$(PROGSSUF)_g$(EXESUF): LDFLAGS += $(LDFLAGS-$(1))
$(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(EXTRALIBS-$(1)) $(1)$(PROGSSUF)_g$(EXESUF): FF_EXTRALIBS += $(EXTRALIBS-$(1))
@ -72,9 +44,6 @@ all: $(AVPROGS)
fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools fftools/ffprobe.o fftools/cmdutils.o: libavutil/ffversion.h | fftools
OUTDIRS += fftools OUTDIRS += fftools
OUTDIRS += fftools/textformat
OUTDIRS += fftools/resources
OUTDIRS += fftools/graph
ifdef AVPROGS ifdef AVPROGS
install: install-progs install-data install: install-progs install-data
@ -93,4 +62,4 @@ uninstall-progs:
$(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS)) $(RM) $(addprefix "$(BINDIR)/", $(ALLAVPROGS))
clean:: clean::
$(RM) $(ALLAVPROGS) $(ALLAVPROGS_G) $(CLEANSUFFIXES:%=fftools/%) $(CLEANSUFFIXES:%=fftools/graph/%) $(CLEANSUFFIXES:%=fftools/textformat/%) $(RM) $(ALLAVPROGS) $(ALLAVPROGS_G) $(CLEANSUFFIXES:%=fftools/%)

View file

@ -255,10 +255,9 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
if (*opt == '/') { if (*opt == '/') {
opt++; opt++;
if (!opt_has_arg(po)) { if (po->type == OPT_TYPE_BOOL) {
av_log(NULL, AV_LOG_FATAL, av_log(NULL, AV_LOG_FATAL,
"Requested to load an argument from file for an option '%s'" "Requested to load an argument from file for a bool option '%s'\n",
" which does not take an argument\n",
po->name); po->name);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
@ -353,11 +352,9 @@ static int write_option(void *optctx, const OptionDef *po, const char *opt,
ret = po->u.func_arg(optctx, opt, arg); ret = po->u.func_arg(optctx, opt, arg);
if (ret < 0) { if (ret < 0) {
if ((strcmp(opt, "init_hw_device") != 0) || (strcmp(arg, "list") != 0)) { av_log(NULL, AV_LOG_ERROR,
av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s': %s\n",
"Failed to set value '%s' for option '%s': %s\n", arg, opt, av_err2str(ret));
arg, opt, av_err2str(ret));
}
goto finish; goto finish;
} }
} }
@ -495,9 +492,8 @@ int locate_option(int argc, char **argv, const OptionDef *options,
for (i = 1; i < argc; i++) { for (i = 1; i < argc; i++) {
const char *cur_opt = argv[i]; const char *cur_opt = argv[i];
if (!(cur_opt[0] == '-' && cur_opt[1])) if (*cur_opt++ != '-')
continue; continue;
cur_opt++;
po = find_option(options, cur_opt); po = find_option(options, cur_opt);
if (!po->name && cur_opt[0] == 'n' && cur_opt[1] == 'o') if (!po->name && cur_opt[0] == 'n' && cur_opt[1] == 'o')
@ -555,12 +551,11 @@ static void check_options(const OptionDef *po)
void parse_loglevel(int argc, char **argv, const OptionDef *options) void parse_loglevel(int argc, char **argv, const OptionDef *options)
{ {
int idx; int idx = locate_option(argc, argv, options, "loglevel");
char *env; char *env;
check_options(options); check_options(options);
idx = locate_option(argc, argv, options, "loglevel");
if (!idx) if (!idx)
idx = locate_option(argc, argv, options, "v"); idx = locate_option(argc, argv, options, "v");
if (idx && argv[idx + 1]) if (idx && argv[idx + 1])
@ -1471,12 +1466,9 @@ void *allocate_array_elem(void *ptr, size_t elem_size, int *nb_elems)
{ {
void *new_elem; void *new_elem;
new_elem = av_mallocz(elem_size); if (!(new_elem = av_mallocz(elem_size)) ||
if (!new_elem) av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0)
return NULL; return NULL;
if (av_dynarray_add_nofree(ptr, nb_elems, new_elem) < 0)
av_freep(&new_elem);
return new_elem; return new_elem;
} }

View file

@ -319,7 +319,7 @@ typedef struct Option {
} Option; } Option;
typedef struct OptionGroupDef { typedef struct OptionGroupDef {
/** group name */ /**< group name */
const char *name; const char *name;
/** /**
* Option to be used as group separator. Can be NULL for groups which * Option to be used as group separator. Can be NULL for groups which

View file

@ -81,7 +81,6 @@
#include "ffmpeg.h" #include "ffmpeg.h"
#include "ffmpeg_sched.h" #include "ffmpeg_sched.h"
#include "ffmpeg_utils.h" #include "ffmpeg_utils.h"
#include "graph/graphprint.h"
const char program_name[] = "ffmpeg"; const char program_name[] = "ffmpeg";
const int program_birth_year = 2000; const int program_birth_year = 2000;
@ -309,9 +308,6 @@ const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
static void ffmpeg_cleanup(int ret) static void ffmpeg_cleanup(int ret)
{ {
if ((print_graphs || print_graphs_file) && nb_output_files > 0)
print_filtergraphs(filtergraphs, nb_filtergraphs, input_files, nb_input_files, output_files, nb_output_files);
if (do_benchmark) { if (do_benchmark) {
int64_t maxrss = getmaxrss() / 1024; int64_t maxrss = getmaxrss() / 1024;
av_log(NULL, AV_LOG_INFO, "bench: maxrss=%"PRId64"KiB\n", maxrss); av_log(NULL, AV_LOG_INFO, "bench: maxrss=%"PRId64"KiB\n", maxrss);
@ -344,9 +340,6 @@ static void ffmpeg_cleanup(int ret)
av_freep(&filter_nbthreads); av_freep(&filter_nbthreads);
av_freep(&print_graphs_file);
av_freep(&print_graphs_format);
av_freep(&input_files); av_freep(&input_files);
av_freep(&output_files); av_freep(&output_files);
@ -562,7 +555,7 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
static int64_t last_time = -1; static int64_t last_time = -1;
static int first_report = 1; static int first_report = 1;
uint64_t nb_frames_dup = 0, nb_frames_drop = 0; uint64_t nb_frames_dup = 0, nb_frames_drop = 0;
int mins, secs, ms, us; int mins, secs, us;
int64_t hours; int64_t hours;
const char *hours_sign; const char *hours_sign;
int ret; int ret;
@ -586,7 +579,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
vid = 0; vid = 0;
av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC); av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC); av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) { for (OutputStream *ost = ost_iter(NULL); ost; ost = ost_iter(ost)) {
const float q = ost->enc ? atomic_load(&ost->quality) / (float) FF_QP2LAMBDA : -1; const float q = ost->enc ? atomic_load(&ost->quality) / (float) FF_QP2LAMBDA : -1;
@ -677,15 +669,6 @@ static void print_report(int is_last_report, int64_t timer_start, int64_t cur_ti
av_bprintf(&buf_script, "speed=%4.3gx\n", speed); av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
} }
secs = (int)t;
ms = (int)((t - secs) * 1000);
mins = secs / 60;
secs %= 60;
hours = mins / 60;
mins %= 60;
av_bprintf(&buf, " elapsed=%"PRId64":%02d:%02d.%02d", hours, mins, secs, ms / 10);
if (print_stats || is_last_report) { if (print_stats || is_last_report) {
const char end = is_last_report ? '\n' : '\r'; const char end = is_last_report ? '\n' : '\r';
if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) { if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
@ -745,7 +728,7 @@ static void print_stream_maps(void)
av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index); av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file->index, av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file->index,
ost->index, ost->enc->enc_ctx->codec->name); ost->index, ost->enc_ctx->codec->name);
continue; continue;
} }
@ -754,9 +737,9 @@ static void print_stream_maps(void)
ost->ist->index, ost->ist->index,
ost->file->index, ost->file->index,
ost->index); ost->index);
if (ost->enc) { if (ost->enc_ctx) {
const AVCodec *in_codec = ost->ist->dec; const AVCodec *in_codec = ost->ist->dec;
const AVCodec *out_codec = ost->enc->enc_ctx->codec; const AVCodec *out_codec = ost->enc_ctx->codec;
const char *decoder_name = "?"; const char *decoder_name = "?";
const char *in_codec_name = "?"; const char *in_codec_name = "?";
const char *encoder_name = "?"; const char *encoder_name = "?";
@ -1029,8 +1012,5 @@ finish:
sch_free(&sch); sch_free(&sch);
av_log(NULL, AV_LOG_VERBOSE, "\n");
av_log(NULL, AV_LOG_VERBOSE, "Exiting with exit code %d\n", ret);
return ret; return ret;
} }

View file

@ -39,7 +39,6 @@
#include "libavfilter/avfilter.h" #include "libavfilter/avfilter.h"
#include "libavutil/avutil.h" #include "libavutil/avutil.h"
#include "libavutil/bprint.h"
#include "libavutil/dict.h" #include "libavutil/dict.h"
#include "libavutil/eval.h" #include "libavutil/eval.h"
#include "libavutil/fifo.h" #include "libavutil/fifo.h"
@ -164,7 +163,6 @@ typedef struct OptionsContext {
int loop; int loop;
int rate_emu; int rate_emu;
float readrate; float readrate;
float readrate_catchup;
double readrate_initial_burst; double readrate_initial_burst;
int accurate_seek; int accurate_seek;
int thread_queue_size; int thread_queue_size;
@ -233,7 +231,6 @@ typedef struct OptionsContext {
SpecifierOptList filter_scripts; SpecifierOptList filter_scripts;
#endif #endif
SpecifierOptList reinit_filters; SpecifierOptList reinit_filters;
SpecifierOptList drop_changed;
SpecifierOptList fix_sub_duration; SpecifierOptList fix_sub_duration;
SpecifierOptList fix_sub_duration_heartbeat; SpecifierOptList fix_sub_duration_heartbeat;
SpecifierOptList canvas_sizes; SpecifierOptList canvas_sizes;
@ -264,7 +261,6 @@ enum IFilterFlags {
IFILTER_FLAG_REINIT = (1 << 1), IFILTER_FLAG_REINIT = (1 << 1),
IFILTER_FLAG_CFR = (1 << 2), IFILTER_FLAG_CFR = (1 << 2),
IFILTER_FLAG_CROP = (1 << 3), IFILTER_FLAG_CROP = (1 << 3),
IFILTER_FLAG_DROPCHANGED = (1 << 4),
}; };
typedef struct InputFilterOptions { typedef struct InputFilterOptions {
@ -320,7 +316,7 @@ typedef struct OutputFilterOptions {
AVDictionary *sws_opts; AVDictionary *sws_opts;
AVDictionary *swr_opts; AVDictionary *swr_opts;
int64_t nb_threads; const char *nb_threads;
// A combination of OFilterFlags. // A combination of OFilterFlags.
unsigned flags; unsigned flags;
@ -332,8 +328,6 @@ typedef struct OutputFilterOptions {
enum AVColorRange color_range; enum AVColorRange color_range;
enum VideoSyncMethod vsync_method; enum VideoSyncMethod vsync_method;
AVRational frame_rate;
AVRational max_frame_rate;
int sample_rate; int sample_rate;
AVChannelLayout ch_layout; AVChannelLayout ch_layout;
@ -353,18 +347,6 @@ typedef struct OutputFilterOptions {
typedef struct InputFilter { typedef struct InputFilter {
struct FilterGraph *graph; struct FilterGraph *graph;
uint8_t *name; uint8_t *name;
int index;
// filter data type
enum AVMediaType type;
AVFilterContext *filter;
char *input_name;
/* for filters that are not yet bound to an input stream,
* this stores the input linklabel, if any */
uint8_t *linklabel;
} InputFilter; } InputFilter;
typedef struct OutputFilter { typedef struct OutputFilter {
@ -372,11 +354,6 @@ typedef struct OutputFilter {
struct FilterGraph *graph; struct FilterGraph *graph;
uint8_t *name; uint8_t *name;
int index;
AVFilterContext *filter;
char *output_name;
/* for filters that are not yet bound to an output stream, /* for filters that are not yet bound to an output stream,
* this stores the output linklabel, if any */ * this stores the output linklabel, if any */
@ -399,9 +376,6 @@ typedef struct FilterGraph {
int nb_inputs; int nb_inputs;
OutputFilter **outputs; OutputFilter **outputs;
int nb_outputs; int nb_outputs;
const char *graph_desc;
struct AVBPrint graph_print_buf;
} FilterGraph; } FilterGraph;
enum DecoderFlags { enum DecoderFlags {
@ -487,6 +461,14 @@ typedef struct InputStream {
* currently video and audio only */ * currently video and audio only */
InputFilter **filters; InputFilter **filters;
int nb_filters; int nb_filters;
/*
* Output targets that do not go through lavfi, i.e. subtitles or
* streamcopy. Those two cases are distinguished by the OutputStream
* having an encoder or not.
*/
struct OutputStream **outputs;
int nb_outputs;
} InputStream; } InputStream;
typedef struct InputFile { typedef struct InputFile {
@ -563,6 +545,13 @@ typedef struct EncStats {
int lock_initialized; int lock_initialized;
} EncStats; } EncStats;
extern const char *const forced_keyframes_const_names[];
typedef enum {
ENCODER_FINISHED = 1,
MUXER_FINISHED = 2,
} OSTFinished ;
enum { enum {
KF_FORCE_SOURCE = 1, KF_FORCE_SOURCE = 1,
#if FFMPEG_OPT_FORCE_KF_SOURCE_NO_DROP #if FFMPEG_OPT_FORCE_KF_SOURCE_NO_DROP
@ -586,15 +575,7 @@ typedef struct KeyframeForceCtx {
int dropped_keyframe; int dropped_keyframe;
} KeyframeForceCtx; } KeyframeForceCtx;
typedef struct Encoder { typedef struct Encoder Encoder;
const AVClass *class;
AVCodecContext *enc_ctx;
// number of frames/samples sent to the encoder
uint64_t frames_encoded;
uint64_t samples_encoded;
} Encoder;
enum CroppingType { enum CroppingType {
CROP_DISABLED = 0, CROP_DISABLED = 0,
@ -613,6 +594,12 @@ typedef struct OutputStream {
int index; /* stream index in the output file */ int index; /* stream index in the output file */
/**
* Codec parameters for packets submitted to the muxer (i.e. before
* bitstream filtering, if any).
*/
AVCodecParameters *par_in;
/* input stream that is the source for this output stream; /* input stream that is the source for this output stream;
* may be NULL for streams with no well-defined source, e.g. * may be NULL for streams with no well-defined source, e.g.
* attachments or outputs from complex filtergraphs */ * attachments or outputs from complex filtergraphs */
@ -621,8 +608,12 @@ typedef struct OutputStream {
AVStream *st; /* stream in the output file */ AVStream *st; /* stream in the output file */
Encoder *enc; Encoder *enc;
AVCodecContext *enc_ctx;
/* video only */ /* video only */
AVRational frame_rate;
AVRational max_frame_rate;
int force_fps;
#if FFMPEG_OPT_TOP #if FFMPEG_OPT_TOP
int top_field_first; int top_field_first;
#endif #endif
@ -645,6 +636,9 @@ typedef struct OutputStream {
/* stats */ /* stats */
// number of packets send to the muxer // number of packets send to the muxer
atomic_uint_least64_t packets_written; atomic_uint_least64_t packets_written;
// number of frames/samples sent to the encoder
uint64_t frames_encoded;
uint64_t samples_encoded;
/* packet quality factor */ /* packet quality factor */
atomic_int quality; atomic_int quality;
@ -737,11 +731,7 @@ extern float max_error_rate;
extern char *filter_nbthreads; extern char *filter_nbthreads;
extern int filter_complex_nbthreads; extern int filter_complex_nbthreads;
extern int filter_buffered_frames;
extern int vstats_version; extern int vstats_version;
extern int print_graphs;
extern char *print_graphs_file;
extern char *print_graphs_format;
extern int auto_conversion_filters; extern int auto_conversion_filters;
extern const AVIOInterruptCB int_cb; extern const AVIOInterruptCB int_cb;
@ -772,11 +762,10 @@ int find_codec(void *logctx, const char *name,
int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global); int parse_and_set_vsync(const char *arg, int *vsync_var, int file_idx, int st_idx, int is_global);
int filtergraph_is_simple(const FilterGraph *fg); int filtergraph_is_simple(const FilterGraph *fg);
int fg_create_simple(FilterGraph **pfg, int init_simple_filtergraph(InputStream *ist, OutputStream *ost,
InputStream *ist, char *graph_desc,
char *graph_desc, Scheduler *sch, unsigned sch_idx_enc,
Scheduler *sch, unsigned sched_idx_enc, const OutputFilterOptions *opts);
const OutputFilterOptions *opts);
int fg_finalise_bindings(void); int fg_finalise_bindings(void);
/** /**
@ -790,7 +779,7 @@ const FrameData *frame_data_c(AVFrame *frame);
FrameData *packet_data (AVPacket *pkt); FrameData *packet_data (AVPacket *pkt);
const FrameData *packet_data_c(AVPacket *pkt); const FrameData *packet_data_c(AVPacket *pkt);
int ofilter_bind_enc(OutputFilter *ofilter, int ofilter_bind_ost(OutputFilter *ofilter, OutputStream *ost,
unsigned sched_idx_enc, unsigned sched_idx_enc,
const OutputFilterOptions *opts); const OutputFilterOptions *opts);
@ -869,7 +858,7 @@ int dec_request_view(Decoder *dec, const ViewSpecifier *vs,
SchedulerNode *src); SchedulerNode *src);
int enc_alloc(Encoder **penc, const AVCodec *codec, int enc_alloc(Encoder **penc, const AVCodec *codec,
Scheduler *sch, unsigned sch_idx, void *log_parent); Scheduler *sch, unsigned sch_idx);
void enc_free(Encoder **penc); void enc_free(Encoder **penc);
int enc_open(void *opaque, const AVFrame *frame); int enc_open(void *opaque, const AVFrame *frame);
@ -882,8 +871,7 @@ int enc_loopback(Encoder *enc);
* *
* Open the muxer once all the streams have been initialized. * Open the muxer once all the streams have been initialized.
*/ */
int of_stream_init(OutputFile *of, OutputStream *ost, int of_stream_init(OutputFile *of, OutputStream *ost);
const AVCodecContext *enc_ctx);
int of_write_trailer(OutputFile *of); int of_write_trailer(OutputFile *of);
int of_open(const OptionsContext *o, const char *filename, Scheduler *sch); int of_open(const OptionsContext *o, const char *filename, Scheduler *sch);
void of_free(OutputFile **pof); void of_free(OutputFile **pof);
@ -895,8 +883,7 @@ int64_t of_filesize(OutputFile *of);
int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch); int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch);
void ifile_close(InputFile **f); void ifile_close(InputFile **f);
int ist_use(InputStream *ist, int decoding_needed, int ist_output_add(InputStream *ist, OutputStream *ost);
const ViewSpecifier *vs, SchedulerNode *src);
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
const ViewSpecifier *vs, InputFilterOptions *opts, const ViewSpecifier *vs, InputFilterOptions *opts,
SchedulerNode *src); SchedulerNode *src);

View file

@ -733,12 +733,13 @@ static int packet_decode(DecoderPriv *dp, AVPacket *pkt, AVFrame *frame)
av_log(dp, AV_LOG_ERROR, "Error submitting %s to decoder: %s\n", av_log(dp, AV_LOG_ERROR, "Error submitting %s to decoder: %s\n",
pkt ? "packet" : "EOF", av_err2str(ret)); pkt ? "packet" : "EOF", av_err2str(ret));
if (ret == AVERROR_EOF) if (ret != AVERROR_EOF) {
return ret; dp->dec.decode_errors++;
if (!exit_on_error)
ret = 0;
}
dp->dec.decode_errors++; return ret;
if (exit_on_error)
return ret;
} }
while (1) { while (1) {
@ -1594,7 +1595,7 @@ static int dec_open(DecoderPriv *dp, AVDictionary **dec_opts,
if (o->flags & DECODER_FLAG_BITEXACT) if (o->flags & DECODER_FLAG_BITEXACT)
dp->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT; dp->dec_ctx->flags |= AV_CODEC_FLAG_BITEXACT;
// we apply cropping ourselves // we apply cropping outselves
dp->apply_cropping = dp->dec_ctx->apply_cropping; dp->apply_cropping = dp->dec_ctx->apply_cropping;
dp->dec_ctx->apply_cropping = 0; dp->dec_ctx->apply_cropping = 0;
@ -1637,11 +1638,6 @@ static int dec_open(DecoderPriv *dp, AVDictionary **dec_opts,
param_out->color_range = dp->dec_ctx->color_range; param_out->color_range = dp->dec_ctx->color_range;
} }
av_frame_side_data_free(&param_out->side_data, &param_out->nb_side_data);
ret = clone_side_data(&param_out->side_data, &param_out->nb_side_data,
dp->dec_ctx->decoded_side_data, dp->dec_ctx->nb_decoded_side_data, 0);
if (ret < 0)
return ret;
param_out->time_base = dp->dec_ctx->pkt_timebase; param_out->time_base = dp->dec_ctx->pkt_timebase;
} }

View file

@ -67,18 +67,17 @@ typedef struct DemuxStream {
int reinit_filters; int reinit_filters;
int autorotate; int autorotate;
int apply_cropping; int apply_cropping;
int drop_changed;
int wrap_correction_done; int wrap_correction_done;
int saw_first_ts; int saw_first_ts;
/// dts of the first packet read for this stream (in AV_TIME_BASE units) ///< dts of the first packet read for this stream (in AV_TIME_BASE units)
int64_t first_dts; int64_t first_dts;
/* predicted dts of the next packet read for this stream or (when there are /* predicted dts of the next packet read for this stream or (when there are
* several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */ * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
int64_t next_dts; int64_t next_dts;
/// dts of the last packet read for this stream (in AV_TIME_BASE units) ///< dts of the last packet read for this stream (in AV_TIME_BASE units)
int64_t dts; int64_t dts;
const AVCodecDescriptor *codec_desc; const AVCodecDescriptor *codec_desc;
@ -95,12 +94,6 @@ typedef struct DemuxStream {
uint64_t nb_packets; uint64_t nb_packets;
// combined size of all the packets read // combined size of all the packets read
uint64_t data_size; uint64_t data_size;
// latest wallclock time at which packet reading resumed after a stall - used for readrate
int64_t resume_wc;
// timestamp of first packet sent after the latest stall - used for readrate
int64_t resume_pts;
// measure of how far behind packet reading is against spceified readrate
int64_t lag;
} DemuxStream; } DemuxStream;
typedef struct Demuxer { typedef struct Demuxer {
@ -134,7 +127,6 @@ typedef struct Demuxer {
float readrate; float readrate;
double readrate_initial_burst; double readrate_initial_burst;
float readrate_catchup;
Scheduler *sch; Scheduler *sch;
@ -248,7 +240,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
} }
} else { } else {
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
av_log(ist, AV_LOG_WARNING, av_log(NULL, AV_LOG_WARNING,
"DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n",
pkt->dts, ds->next_dts, pkt->stream_index); pkt->dts, ds->next_dts, pkt->stream_index);
pkt->dts = AV_NOPTS_VALUE; pkt->dts = AV_NOPTS_VALUE;
@ -257,7 +249,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
int64_t pkt_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q); int64_t pkt_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q);
delta = pkt_pts - ds->next_dts; delta = pkt_pts - ds->next_dts;
if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) { if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
av_log(ist, AV_LOG_WARNING, av_log(NULL, AV_LOG_WARNING,
"PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n",
pkt->pts, ds->next_dts, pkt->stream_index); pkt->pts, ds->next_dts, pkt->stream_index);
pkt->pts = AV_NOPTS_VALUE; pkt->pts = AV_NOPTS_VALUE;
@ -269,7 +261,7 @@ static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
int64_t delta = pkt_dts - d->last_ts; int64_t delta = pkt_dts - d->last_ts;
if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) { if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
d->ts_offset_discont -= delta; d->ts_offset_discont -= delta;
av_log(ist, AV_LOG_DEBUG, av_log(NULL, AV_LOG_DEBUG,
"Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n", "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
delta, d->ts_offset_discont); delta, d->ts_offset_discont);
pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base); pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
@ -484,7 +476,7 @@ static int input_packet_process(Demuxer *d, AVPacket *pkt, unsigned *send_flags)
fd->wallclock[LATENCY_PROBE_DEMUX] = av_gettime_relative(); fd->wallclock[LATENCY_PROBE_DEMUX] = av_gettime_relative();
if (debug_ts) { if (debug_ts) {
av_log(ist, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n", av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
f->index, pkt->stream_index, f->index, pkt->stream_index,
av_get_media_type_string(ist->par->codec_type), av_get_media_type_string(ist->par->codec_type),
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base), av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base),
@ -503,42 +495,16 @@ static void readrate_sleep(Demuxer *d)
(f->start_time_effective != AV_NOPTS_VALUE ? f->start_time_effective * !start_at_zero : 0) + (f->start_time_effective != AV_NOPTS_VALUE ? f->start_time_effective * !start_at_zero : 0) +
(f->start_time != AV_NOPTS_VALUE ? f->start_time : 0) (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
); );
int64_t initial_burst = AV_TIME_BASE * d->readrate_initial_burst; int64_t burst_until = AV_TIME_BASE * d->readrate_initial_burst;
int resume_warn = 0;
for (int i = 0; i < f->nb_streams; i++) { for (int i = 0; i < f->nb_streams; i++) {
InputStream *ist = f->streams[i]; InputStream *ist = f->streams[i];
DemuxStream *ds = ds_from_ist(ist); DemuxStream *ds = ds_from_ist(ist);
int64_t stream_ts_offset, pts, now, wc_elapsed, elapsed, lag, max_pts, limit_pts; int64_t stream_ts_offset, pts, now;
if (ds->discard) continue;
stream_ts_offset = FFMAX(ds->first_dts != AV_NOPTS_VALUE ? ds->first_dts : 0, file_start); stream_ts_offset = FFMAX(ds->first_dts != AV_NOPTS_VALUE ? ds->first_dts : 0, file_start);
pts = av_rescale(ds->dts, 1000000, AV_TIME_BASE); pts = av_rescale(ds->dts, 1000000, AV_TIME_BASE);
now = av_gettime_relative(); now = (av_gettime_relative() - d->wallclock_start) * d->readrate + stream_ts_offset;
wc_elapsed = now - d->wallclock_start; if (pts - burst_until > now)
max_pts = stream_ts_offset + initial_burst + wc_elapsed * d->readrate; av_usleep(pts - burst_until - now);
lag = FFMAX(max_pts - pts, 0);
if ( (!ds->lag && lag > 0.3 * AV_TIME_BASE) || ( lag > ds->lag + 0.3 * AV_TIME_BASE) ) {
ds->lag = lag;
ds->resume_wc = now;
ds->resume_pts = pts;
av_log_once(ds, AV_LOG_WARNING, AV_LOG_DEBUG, &resume_warn,
"Resumed reading at pts %0.3f with rate %0.3f after a lag of %0.3fs\n",
(float)pts/AV_TIME_BASE, d->readrate_catchup, (float)lag/AV_TIME_BASE);
}
if (ds->lag && !lag)
ds->lag = ds->resume_wc = ds->resume_pts = 0;
if (ds->resume_wc) {
elapsed = now - ds->resume_wc;
limit_pts = ds->resume_pts + elapsed * d->readrate_catchup;
} else {
elapsed = wc_elapsed;
limit_pts = max_pts;
}
if (pts > limit_pts)
av_usleep(pts - limit_pts);
} }
} }
@ -874,6 +840,7 @@ static void ist_free(InputStream **pist)
av_dict_free(&ds->decoder_opts); av_dict_free(&ds->decoder_opts);
av_freep(&ist->filters); av_freep(&ist->filters);
av_freep(&ist->outputs);
av_freep(&ds->dec_opts.hwaccel_device); av_freep(&ds->dec_opts.hwaccel_device);
avcodec_parameters_free(&ist->par); avcodec_parameters_free(&ist->par);
@ -907,8 +874,8 @@ void ifile_close(InputFile **pf)
av_freep(pf); av_freep(pf);
} }
int ist_use(InputStream *ist, int decoding_needed, static int ist_use(InputStream *ist, int decoding_needed,
const ViewSpecifier *vs, SchedulerNode *src) const ViewSpecifier *vs, SchedulerNode *src)
{ {
Demuxer *d = demuxer_from_ifile(ist->file); Demuxer *d = demuxer_from_ifile(ist->file);
DemuxStream *ds = ds_from_ist(ist); DemuxStream *ds = ds_from_ist(ist);
@ -945,18 +912,9 @@ int ist_use(InputStream *ist, int decoding_needed,
if (decoding_needed && ds->sch_idx_dec < 0) { if (decoding_needed && ds->sch_idx_dec < 0) {
int is_audio = ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO; int is_audio = ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
int is_unreliable = !!(d->f.ctx->iformat->flags & AVFMT_NOTIMESTAMPS);
int64_t use_wallclock_as_timestamps;
ret = av_opt_get_int(d->f.ctx, "use_wallclock_as_timestamps", 0, &use_wallclock_as_timestamps);
if (ret < 0)
return ret;
if (use_wallclock_as_timestamps)
is_unreliable = 0;
ds->dec_opts.flags |= (!!ist->fix_sub_duration * DECODER_FLAG_FIX_SUB_DURATION) | ds->dec_opts.flags |= (!!ist->fix_sub_duration * DECODER_FLAG_FIX_SUB_DURATION) |
(!!is_unreliable * DECODER_FLAG_TS_UNRELIABLE) | (!!(d->f.ctx->iformat->flags & AVFMT_NOTIMESTAMPS) * DECODER_FLAG_TS_UNRELIABLE) |
(!!(d->loop && is_audio) * DECODER_FLAG_SEND_END_TS) (!!(d->loop && is_audio) * DECODER_FLAG_SEND_END_TS)
#if FFMPEG_OPT_TOP #if FFMPEG_OPT_TOP
| ((ist->top_field_first >= 0) * DECODER_FLAG_TOP_FIELD_FIRST) | ((ist->top_field_first >= 0) * DECODER_FLAG_TOP_FIELD_FIRST)
@ -1017,6 +975,25 @@ int ist_use(InputStream *ist, int decoding_needed,
return 0; return 0;
} }
int ist_output_add(InputStream *ist, OutputStream *ost)
{
DemuxStream *ds = ds_from_ist(ist);
SchedulerNode src;
int ret;
ret = ist_use(ist, ost->enc ? DECODING_FOR_OST : 0, NULL, &src);
if (ret < 0)
return ret;
ret = GROW_ARRAY(ist->outputs, ist->nb_outputs);
if (ret < 0)
return ret;
ist->outputs[ist->nb_outputs - 1] = ost;
return ost->enc ? ds->sch_idx_dec : ds->sch_idx_stream;
}
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple, int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
const ViewSpecifier *vs, InputFilterOptions *opts, const ViewSpecifier *vs, InputFilterOptions *opts,
SchedulerNode *src) SchedulerNode *src)
@ -1109,8 +1086,7 @@ int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
opts->flags |= IFILTER_FLAG_AUTOROTATE * !!(ds->autorotate) | opts->flags |= IFILTER_FLAG_AUTOROTATE * !!(ds->autorotate) |
IFILTER_FLAG_REINIT * !!(ds->reinit_filters) | IFILTER_FLAG_REINIT * !!(ds->reinit_filters);
IFILTER_FLAG_DROPCHANGED* !!(ds->drop_changed);
return 0; return 0;
} }
@ -1148,7 +1124,7 @@ static int choose_decoder(const OptionsContext *o, void *logctx,
for (int j = 0; config = avcodec_get_hw_config(c, j); j++) { for (int j = 0; config = avcodec_get_hw_config(c, j); j++) {
if (config->device_type == hwaccel_device_type) { if (config->device_type == hwaccel_device_type) {
av_log(logctx, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n", av_log(NULL, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n",
c->name, av_hwdevice_get_type_name(hwaccel_device_type)); c->name, av_hwdevice_get_type_name(hwaccel_device_type));
*pcodec = c; *pcodec = c;
return 0; return 0;
@ -1421,17 +1397,6 @@ static int ist_add(const OptionsContext *o, Demuxer *d, AVStream *st, AVDictiona
ds->reinit_filters = -1; ds->reinit_filters = -1;
opt_match_per_stream_int(ist, &o->reinit_filters, ic, st, &ds->reinit_filters); opt_match_per_stream_int(ist, &o->reinit_filters, ic, st, &ds->reinit_filters);
ds->drop_changed = 0;
opt_match_per_stream_int(ist, &o->drop_changed, ic, st, &ds->drop_changed);
if (ds->drop_changed && ds->reinit_filters) {
if (ds->reinit_filters > 0) {
av_log(ist, AV_LOG_ERROR, "drop_changed and reinit_filters both enabled. These are mutually exclusive.\n");
return AVERROR(EINVAL);
}
ds->reinit_filters = 0;
}
ist->user_set_discard = AVDISCARD_NONE; ist->user_set_discard = AVDISCARD_NONE;
if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) || if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ||
@ -1781,9 +1746,8 @@ int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch)
/* open the input file with generic avformat function */ /* open the input file with generic avformat function */
err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts); err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
if (err < 0) { if (err < 0) {
if (err != AVERROR_EXIT) av_log(d, AV_LOG_ERROR,
av_log(d, AV_LOG_ERROR, "Error opening input: %s\n", av_err2str(err));
"Error opening input: %s\n", av_err2str(err));
if (err == AVERROR_PROTOCOL_NOT_FOUND) if (err == AVERROR_PROTOCOL_NOT_FOUND)
av_log(d, AV_LOG_ERROR, "Did you mean file:%s?\n", filename); av_log(d, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
return err; return err;
@ -1914,22 +1878,9 @@ int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch)
d->readrate_initial_burst); d->readrate_initial_burst);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
d->readrate_catchup = o->readrate_catchup ? o->readrate_catchup : d->readrate * 1.05; } else if (o->readrate_initial_burst) {
if (d->readrate_catchup < d->readrate) { av_log(d, AV_LOG_WARNING, "Option -readrate_initial_burst ignored "
av_log(d, AV_LOG_ERROR, "since neither -readrate nor -re were given\n");
"Option -readrate_catchup is %0.3f; it must be at least equal to %0.3f.\n",
d->readrate_catchup, d->readrate);
return AVERROR(EINVAL);
}
} else {
if (o->readrate_initial_burst) {
av_log(d, AV_LOG_WARNING, "Option -readrate_initial_burst ignored "
"since neither -readrate nor -re were given\n");
}
if (o->readrate_catchup) {
av_log(d, AV_LOG_WARNING, "Option -readrate_catchup ignored "
"since neither -readrate nor -re were given\n");
}
} }
/* Add all the streams from the given input file to the demuxer */ /* Add all the streams from the given input file to the demuxer */

View file

@ -38,12 +38,7 @@
#include "libavcodec/avcodec.h" #include "libavcodec/avcodec.h"
typedef struct EncoderPriv { struct Encoder {
Encoder e;
void *log_parent;
char log_name[32];
// combined size of all the packets received from the encoder // combined size of all the packets received from the encoder
uint64_t data_size; uint64_t data_size;
@ -55,12 +50,7 @@ typedef struct EncoderPriv {
Scheduler *sch; Scheduler *sch;
unsigned sch_idx; unsigned sch_idx;
} EncoderPriv; };
static EncoderPriv *ep_from_enc(Encoder *enc)
{
return (EncoderPriv*)enc;
}
// data that is local to the decoder thread and not visible outside of it // data that is local to the decoder thread and not visible outside of it
typedef struct EncoderThread { typedef struct EncoderThread {
@ -75,90 +65,56 @@ void enc_free(Encoder **penc)
if (!enc) if (!enc)
return; return;
if (enc->enc_ctx)
av_freep(&enc->enc_ctx->stats_in);
avcodec_free_context(&enc->enc_ctx);
av_freep(penc); av_freep(penc);
} }
static const char *enc_item_name(void *obj)
{
const EncoderPriv *ep = obj;
return ep->log_name;
}
static const AVClass enc_class = {
.class_name = "Encoder",
.version = LIBAVUTIL_VERSION_INT,
.parent_log_context_offset = offsetof(EncoderPriv, log_parent),
.item_name = enc_item_name,
};
int enc_alloc(Encoder **penc, const AVCodec *codec, int enc_alloc(Encoder **penc, const AVCodec *codec,
Scheduler *sch, unsigned sch_idx, void *log_parent) Scheduler *sch, unsigned sch_idx)
{ {
EncoderPriv *ep; Encoder *enc;
int ret = 0;
*penc = NULL; *penc = NULL;
ep = av_mallocz(sizeof(*ep)); enc = av_mallocz(sizeof(*enc));
if (!ep) if (!enc)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
ep->e.class = &enc_class; enc->sch = sch;
ep->log_parent = log_parent; enc->sch_idx = sch_idx;
ep->sch = sch; *penc = enc;
ep->sch_idx = sch_idx;
snprintf(ep->log_name, sizeof(ep->log_name), "enc:%s", codec->name);
ep->e.enc_ctx = avcodec_alloc_context3(codec);
if (!ep->e.enc_ctx) {
ret = AVERROR(ENOMEM);
goto fail;
}
*penc = &ep->e;
return 0; return 0;
fail:
enc_free((Encoder**)&ep);
return ret;
} }
static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx, static int hw_device_setup_for_encode(OutputStream *ost, AVBufferRef *frames_ref)
AVBufferRef *frames_ref)
{ {
const AVCodecHWConfig *config; const AVCodecHWConfig *config;
HWDevice *dev = NULL; HWDevice *dev = NULL;
if (frames_ref && if (frames_ref &&
((AVHWFramesContext*)frames_ref->data)->format == ((AVHWFramesContext*)frames_ref->data)->format ==
enc_ctx->pix_fmt) { ost->enc_ctx->pix_fmt) {
// Matching format, will try to use hw_frames_ctx. // Matching format, will try to use hw_frames_ctx.
} else { } else {
frames_ref = NULL; frames_ref = NULL;
} }
for (int i = 0;; i++) { for (int i = 0;; i++) {
config = avcodec_get_hw_config(enc_ctx->codec, i); config = avcodec_get_hw_config(ost->enc_ctx->codec, i);
if (!config) if (!config)
break; break;
if (frames_ref && if (frames_ref &&
config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX && config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
(config->pix_fmt == AV_PIX_FMT_NONE || (config->pix_fmt == AV_PIX_FMT_NONE ||
config->pix_fmt == enc_ctx->pix_fmt)) { config->pix_fmt == ost->enc_ctx->pix_fmt)) {
av_log(e, AV_LOG_VERBOSE, "Using input " av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using input "
"frames context (format %s) with %s encoder.\n", "frames context (format %s) with %s encoder.\n",
av_get_pix_fmt_name(enc_ctx->pix_fmt), av_get_pix_fmt_name(ost->enc_ctx->pix_fmt),
enc_ctx->codec->name); ost->enc_ctx->codec->name);
enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref); ost->enc_ctx->hw_frames_ctx = av_buffer_ref(frames_ref);
if (!enc_ctx->hw_frames_ctx) if (!ost->enc_ctx->hw_frames_ctx)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
return 0; return 0;
} }
@ -169,11 +125,11 @@ static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
} }
if (dev) { if (dev) {
av_log(e, AV_LOG_VERBOSE, "Using device %s " av_log(ost->enc_ctx, AV_LOG_VERBOSE, "Using device %s "
"(type %s) with %s encoder.\n", dev->name, "(type %s) with %s encoder.\n", dev->name,
av_hwdevice_get_type_name(dev->type), enc_ctx->codec->name); av_hwdevice_get_type_name(dev->type), ost->enc_ctx->codec->name);
enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref); ost->enc_ctx->hw_device_ctx = av_buffer_ref(dev->device_ref);
if (!enc_ctx->hw_device_ctx) if (!ost->enc_ctx->hw_device_ctx)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} else { } else {
// No device required, or no device available. // No device required, or no device available.
@ -181,13 +137,37 @@ static int hw_device_setup_for_encode(Encoder *e, AVCodecContext *enc_ctx,
return 0; return 0;
} }
static int set_encoder_id(OutputFile *of, OutputStream *ost)
{
const char *cname = ost->enc_ctx->codec->name;
uint8_t *encoder_string;
int encoder_string_len;
if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
return 0;
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
encoder_string = av_mallocz(encoder_string_len);
if (!encoder_string)
return AVERROR(ENOMEM);
if (!of->bitexact && !ost->bitexact)
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
else
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
av_strlcat(encoder_string, cname, encoder_string_len);
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
return 0;
}
int enc_open(void *opaque, const AVFrame *frame) int enc_open(void *opaque, const AVFrame *frame)
{ {
OutputStream *ost = opaque; OutputStream *ost = opaque;
InputStream *ist = ost->ist; InputStream *ist = ost->ist;
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e); AVCodecContext *enc_ctx = ost->enc_ctx;
AVCodecContext *enc_ctx = e->enc_ctx;
Decoder *dec = NULL; Decoder *dec = NULL;
const AVCodec *enc = enc_ctx->codec; const AVCodec *enc = enc_ctx->codec;
OutputFile *of = ost->file; OutputFile *of = ost->file;
@ -195,7 +175,7 @@ int enc_open(void *opaque, const AVFrame *frame)
int frame_samples = 0; int frame_samples = 0;
int ret; int ret;
if (ep->opened) if (e->opened)
return 0; return 0;
// frame is always non-NULL for audio and video // frame is always non-NULL for audio and video
@ -220,6 +200,10 @@ int enc_open(void *opaque, const AVFrame *frame)
} }
} }
ret = set_encoder_id(of, ost);
if (ret < 0)
return ret;
if (ist) if (ist)
dec = ist->decoder; dec = ist->decoder;
@ -227,6 +211,7 @@ int enc_open(void *opaque, const AVFrame *frame)
if (ost->type == AVMEDIA_TYPE_AUDIO || ost->type == AVMEDIA_TYPE_VIDEO) { if (ost->type == AVMEDIA_TYPE_AUDIO || ost->type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->time_base = frame->time_base; enc_ctx->time_base = frame->time_base;
enc_ctx->framerate = fd->frame_rate_filter; enc_ctx->framerate = fd->frame_rate_filter;
ost->st->avg_frame_rate = fd->frame_rate_filter;
} }
switch (enc_ctx->codec_type) { switch (enc_ctx->codec_type) {
@ -253,7 +238,7 @@ int enc_open(void *opaque, const AVFrame *frame)
frame->height > 0); frame->height > 0);
enc_ctx->width = frame->width; enc_ctx->width = frame->width;
enc_ctx->height = frame->height; enc_ctx->height = frame->height;
enc_ctx->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) : av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
frame->sample_aspect_ratio; frame->sample_aspect_ratio;
@ -266,26 +251,11 @@ int enc_open(void *opaque, const AVFrame *frame)
enc_ctx->bits_per_raw_sample = FFMIN(fd->bits_per_raw_sample, enc_ctx->bits_per_raw_sample = FFMIN(fd->bits_per_raw_sample,
av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth); av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
/**
* The video color properties should always be in sync with the user-
* requested values, since we forward them to the filter graph.
*/
enc_ctx->color_range = frame->color_range; enc_ctx->color_range = frame->color_range;
enc_ctx->color_primaries = frame->color_primaries; enc_ctx->color_primaries = frame->color_primaries;
enc_ctx->color_trc = frame->color_trc; enc_ctx->color_trc = frame->color_trc;
enc_ctx->colorspace = frame->colorspace; enc_ctx->colorspace = frame->colorspace;
enc_ctx->chroma_sample_location = frame->chroma_location;
/* Video properties which are not part of filter graph negotiation */
if (enc_ctx->chroma_sample_location == AVCHROMA_LOC_UNSPECIFIED) {
enc_ctx->chroma_sample_location = frame->chroma_location;
} else if (enc_ctx->chroma_sample_location != frame->chroma_location &&
frame->chroma_location != AVCHROMA_LOC_UNSPECIFIED) {
av_log(e, AV_LOG_WARNING,
"Requested chroma sample location '%s' does not match the "
"frame tagged sample location '%s'; result may be incorrect.\n",
av_chroma_location_name(enc_ctx->chroma_sample_location),
av_chroma_location_name(frame->chroma_location));
}
if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) || if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) ||
(frame->flags & AV_FRAME_FLAG_INTERLACED) (frame->flags & AV_FRAME_FLAG_INTERLACED)
@ -342,31 +312,42 @@ int enc_open(void *opaque, const AVFrame *frame)
enc_ctx->flags |= AV_CODEC_FLAG_FRAME_DURATION; enc_ctx->flags |= AV_CODEC_FLAG_FRAME_DURATION;
ret = hw_device_setup_for_encode(e, enc_ctx, frame ? frame->hw_frames_ctx : NULL); ret = hw_device_setup_for_encode(ost, frame ? frame->hw_frames_ctx : NULL);
if (ret < 0) { if (ret < 0) {
av_log(e, AV_LOG_ERROR, av_log(ost, AV_LOG_ERROR,
"Encoding hardware device setup failed: %s\n", av_err2str(ret)); "Encoding hardware device setup failed: %s\n", av_err2str(ret));
return ret; return ret;
} }
if ((ret = avcodec_open2(enc_ctx, enc, NULL)) < 0) { if ((ret = avcodec_open2(ost->enc_ctx, enc, NULL)) < 0) {
if (ret != AVERROR_EXPERIMENTAL) if (ret != AVERROR_EXPERIMENTAL)
av_log(e, AV_LOG_ERROR, "Error while opening encoder - maybe " av_log(ost, AV_LOG_ERROR, "Error while opening encoder - maybe "
"incorrect parameters such as bit_rate, rate, width or height.\n"); "incorrect parameters such as bit_rate, rate, width or height.\n");
return ret; return ret;
} }
ep->opened = 1; e->opened = 1;
if (enc_ctx->frame_size) if (ost->enc_ctx->frame_size)
frame_samples = enc_ctx->frame_size; frame_samples = ost->enc_ctx->frame_size;
if (enc_ctx->bit_rate && enc_ctx->bit_rate < 1000 && if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */) ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
av_log(e, AV_LOG_WARNING, "The bitrate parameter is set too low." av_log(ost, AV_LOG_WARNING, "The bitrate parameter is set too low."
" It takes bits/s as argument, not kbits/s\n"); " It takes bits/s as argument, not kbits/s\n");
ret = of_stream_init(of, ost, enc_ctx); ret = avcodec_parameters_from_context(ost->par_in, ost->enc_ctx);
if (ret < 0) {
av_log(ost, AV_LOG_FATAL,
"Error initializing the output stream codec context.\n");
return ret;
}
// copy timebase while removing common factors
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
ret = of_stream_init(of, ost);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -388,20 +369,19 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
AVPacket *pkt) AVPacket *pkt)
{ {
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e);
int subtitle_out_max_size = 1024 * 1024; int subtitle_out_max_size = 1024 * 1024;
int subtitle_out_size, nb, i, ret; int subtitle_out_size, nb, i, ret;
AVCodecContext *enc; AVCodecContext *enc;
int64_t pts; int64_t pts;
if (sub->pts == AV_NOPTS_VALUE) { if (sub->pts == AV_NOPTS_VALUE) {
av_log(e, AV_LOG_ERROR, "Subtitle packets must have a pts\n"); av_log(ost, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
return exit_on_error ? AVERROR(EINVAL) : 0; return exit_on_error ? AVERROR(EINVAL) : 0;
} }
if ((of->start_time != AV_NOPTS_VALUE && sub->pts < of->start_time)) if ((of->start_time != AV_NOPTS_VALUE && sub->pts < of->start_time))
return 0; return 0;
enc = e->enc_ctx; enc = ost->enc_ctx;
/* Note: DVB subtitle need one packet to draw them and one other /* Note: DVB subtitle need one packet to draw them and one other
packet to clear them */ packet to clear them */
@ -440,11 +420,11 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
local_sub.rects += i; local_sub.rects += i;
} }
e->frames_encoded++; ost->frames_encoded++;
subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, &local_sub); subtitle_out_size = avcodec_encode_subtitle(enc, pkt->data, pkt->size, &local_sub);
if (subtitle_out_size < 0) { if (subtitle_out_size < 0) {
av_log(e, AV_LOG_FATAL, "Subtitle encoding failed\n"); av_log(ost, AV_LOG_FATAL, "Subtitle encoding failed\n");
return subtitle_out_size; return subtitle_out_size;
} }
@ -462,7 +442,7 @@ static int do_subtitle_out(OutputFile *of, OutputStream *ost, const AVSubtitle *
} }
pkt->dts = pkt->pts; pkt->dts = pkt->pts;
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt); ret = sch_enc_send(e->sch, e->sch_idx, pkt);
if (ret < 0) { if (ret < 0) {
av_packet_unref(pkt); av_packet_unref(pkt);
return ret; return ret;
@ -477,7 +457,6 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
uint64_t frame_num) uint64_t frame_num)
{ {
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e);
AVIOContext *io = es->io; AVIOContext *io = es->io;
AVRational tb = frame ? frame->time_base : pkt->time_base; AVRational tb = frame ? frame->time_base : pkt->time_base;
int64_t pts = frame ? frame->pts : pkt->pts; int64_t pts = frame ? frame->pts : pkt->pts;
@ -515,7 +494,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
if (frame) { if (frame) {
switch (c->type) { switch (c->type) {
case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, e->samples_encoded); continue; case ENC_STATS_SAMPLE_NUM: avio_printf(io, "%"PRIu64, ost->samples_encoded); continue;
case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue; case ENC_STATS_NB_SAMPLES: avio_printf(io, "%d", frame->nb_samples); continue;
default: av_assert0(0); default: av_assert0(0);
} }
@ -533,7 +512,7 @@ void enc_stats_write(OutputStream *ost, EncStats *es,
} }
case ENC_STATS_AVG_BITRATE: { case ENC_STATS_AVG_BITRATE: {
double duration = pkt->dts * av_q2d(tb); double duration = pkt->dts * av_q2d(tb);
avio_printf(io, "%g", duration > 0 ? 8.0 * ep->data_size / duration : -1.); avio_printf(io, "%g", duration > 0 ? 8.0 * e->data_size / duration : -1.);
continue; continue;
} }
default: av_assert0(0); default: av_assert0(0);
@ -554,10 +533,9 @@ static inline double psnr(double d)
static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats) static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
{ {
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e);
const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS, const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
NULL); NULL);
AVCodecContext *enc = e->enc_ctx; AVCodecContext *enc = ost->enc_ctx;
enum AVPictureType pict_type; enum AVPictureType pict_type;
int64_t frame_number; int64_t frame_number;
double ti1, bitrate, avg_bitrate; double ti1, bitrate, avg_bitrate;
@ -588,7 +566,7 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
} }
} }
frame_number = ep->packets_encoded; frame_number = e->packets_encoded;
if (vstats_version <= 1) { if (vstats_version <= 1) {
fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number, fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
quality / (float)FF_QP2LAMBDA); quality / (float)FF_QP2LAMBDA);
@ -608,9 +586,9 @@ static int update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_
ti1 = 0.01; ti1 = 0.01;
bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0; bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
avg_bitrate = (double)(ep->data_size * 8) / ti1 / 1000.0; avg_bitrate = (double)(e->data_size * 8) / ti1 / 1000.0;
fprintf(vstats_file, "s_size= %8.0fKiB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ", fprintf(vstats_file, "s_size= %8.0fKiB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
(double)ep->data_size / 1024, ti1, bitrate, avg_bitrate); (double)e->data_size / 1024, ti1, bitrate, avg_bitrate);
fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(pict_type)); fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(pict_type));
return 0; return 0;
@ -620,8 +598,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
AVPacket *pkt) AVPacket *pkt)
{ {
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e); AVCodecContext *enc = ost->enc_ctx;
AVCodecContext *enc = e->enc_ctx;
const char *type_desc = av_get_media_type_string(enc->codec_type); const char *type_desc = av_get_media_type_string(enc->codec_type);
const char *action = frame ? "encode" : "flush"; const char *action = frame ? "encode" : "flush";
int ret; int ret;
@ -636,13 +613,13 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
if (ost->enc_stats_pre.io) if (ost->enc_stats_pre.io)
enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL, enc_stats_write(ost, &ost->enc_stats_pre, frame, NULL,
e->frames_encoded); ost->frames_encoded);
e->frames_encoded++; ost->frames_encoded++;
e->samples_encoded += frame->nb_samples; ost->samples_encoded += frame->nb_samples;
if (debug_ts) { if (debug_ts) {
av_log(e, AV_LOG_INFO, "encoder <- type:%s " av_log(ost, AV_LOG_INFO, "encoder <- type:%s "
"frame_pts:%s frame_pts_time:%s time_base:%d/%d\n", "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
type_desc, type_desc,
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base), av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
@ -657,7 +634,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
ret = avcodec_send_frame(enc, frame); ret = avcodec_send_frame(enc, frame);
if (ret < 0 && !(ret == AVERROR_EOF && !frame)) { if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
av_log(e, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n", av_log(ost, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
type_desc); type_desc);
return ret; return ret;
} }
@ -682,7 +659,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
return 0; return 0;
} else if (ret < 0) { } else if (ret < 0) {
if (ret != AVERROR_EOF) if (ret != AVERROR_EOF)
av_log(e, AV_LOG_ERROR, "%s encoding failed\n", type_desc); av_log(ost, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
return ret; return ret;
} }
@ -693,7 +670,7 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
// attach stream parameters to first packet if requested // attach stream parameters to first packet if requested
avcodec_parameters_free(&fd->par_enc); avcodec_parameters_free(&fd->par_enc);
if (ep->attach_par && !ep->packets_encoded) { if (e->attach_par && !e->packets_encoded) {
fd->par_enc = avcodec_parameters_alloc(); fd->par_enc = avcodec_parameters_alloc();
if (!fd->par_enc) if (!fd->par_enc)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@ -713,10 +690,10 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
if (ost->enc_stats_post.io) if (ost->enc_stats_post.io)
enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt, enc_stats_write(ost, &ost->enc_stats_post, NULL, pkt,
ep->packets_encoded); e->packets_encoded);
if (debug_ts) { if (debug_ts) {
av_log(e, AV_LOG_INFO, "encoder -> type:%s " av_log(ost, AV_LOG_INFO, "encoder -> type:%s "
"pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s " "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
"duration:%s duration_time:%s\n", "duration:%s duration_time:%s\n",
type_desc, type_desc,
@ -725,11 +702,11 @@ static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame,
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base)); av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
} }
ep->data_size += pkt->size; e->data_size += pkt->size;
ep->packets_encoded++; e->packets_encoded++;
ret = sch_enc_send(ep->sch, ep->sch_idx, pkt); ret = sch_enc_send(e->sch, e->sch_idx, pkt);
if (ret < 0) { if (ret < 0) {
av_packet_unref(pkt); av_packet_unref(pkt);
return ret; return ret;
@ -787,7 +764,6 @@ force_keyframe:
static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt) static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
{ {
Encoder *e = ost->enc;
OutputFile *of = ost->file; OutputFile *of = ost->file;
enum AVMediaType type = ost->type; enum AVMediaType type = ost->type;
@ -805,8 +781,8 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
return AVERROR_EOF; return AVERROR_EOF;
if (type == AVMEDIA_TYPE_VIDEO) { if (type == AVMEDIA_TYPE_VIDEO) {
frame->quality = e->enc_ctx->global_quality; frame->quality = ost->enc_ctx->global_quality;
frame->pict_type = forced_kf_apply(e, &ost->kf, frame); frame->pict_type = forced_kf_apply(ost, &ost->kf, frame);
#if FFMPEG_OPT_TOP #if FFMPEG_OPT_TOP
if (ost->top_field_first >= 0) { if (ost->top_field_first >= 0) {
@ -815,9 +791,9 @@ static int frame_encode(OutputStream *ost, AVFrame *frame, AVPacket *pkt)
} }
#endif #endif
} else { } else {
if (!(e->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) && if (!(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
e->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) { ost->enc_ctx->ch_layout.nb_channels != frame->ch_layout.nb_channels) {
av_log(e, AV_LOG_ERROR, av_log(ost, AV_LOG_ERROR,
"Audio channel count changed and encoder does not support parameter changes\n"); "Audio channel count changed and encoder does not support parameter changes\n");
return 0; return 0;
} }
@ -831,7 +807,7 @@ static void enc_thread_set_name(const OutputStream *ost)
{ {
char name[16]; char name[16];
snprintf(name, sizeof(name), "enc%d:%d:%s", ost->file->index, ost->index, snprintf(name, sizeof(name), "enc%d:%d:%s", ost->file->index, ost->index,
ost->enc->enc_ctx->codec->name); ost->enc_ctx->codec->name);
ff_thread_setname(name); ff_thread_setname(name);
} }
@ -866,7 +842,6 @@ int encoder_thread(void *arg)
{ {
OutputStream *ost = arg; OutputStream *ost = arg;
Encoder *e = ost->enc; Encoder *e = ost->enc;
EncoderPriv *ep = ep_from_enc(e);
EncoderThread et; EncoderThread et;
int ret = 0, input_status = 0; int ret = 0, input_status = 0;
int name_set = 0; int name_set = 0;
@ -889,17 +864,17 @@ int encoder_thread(void *arg)
} }
while (!input_status) { while (!input_status) {
input_status = sch_enc_receive(ep->sch, ep->sch_idx, et.frame); input_status = sch_enc_receive(e->sch, e->sch_idx, et.frame);
if (input_status < 0) { if (input_status < 0) {
if (input_status == AVERROR_EOF) { if (input_status == AVERROR_EOF) {
av_log(e, AV_LOG_VERBOSE, "Encoder thread received EOF\n"); av_log(ost, AV_LOG_VERBOSE, "Encoder thread received EOF\n");
if (ep->opened) if (e->opened)
break; break;
av_log(e, AV_LOG_ERROR, "Could not open encoder before EOF\n"); av_log(ost, AV_LOG_ERROR, "Could not open encoder before EOF\n");
ret = AVERROR(EINVAL); ret = AVERROR(EINVAL);
} else { } else {
av_log(e, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n", av_log(ost, AV_LOG_ERROR, "Error receiving a frame for encoding: %s\n",
av_err2str(ret)); av_err2str(ret));
ret = input_status; ret = input_status;
} }
@ -918,9 +893,9 @@ int encoder_thread(void *arg)
if (ret < 0) { if (ret < 0) {
if (ret == AVERROR_EOF) if (ret == AVERROR_EOF)
av_log(e, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n"); av_log(ost, AV_LOG_VERBOSE, "Encoder returned EOF, finishing\n");
else else
av_log(e, AV_LOG_ERROR, "Error encoding a frame: %s\n", av_log(ost, AV_LOG_ERROR, "Error encoding a frame: %s\n",
av_err2str(ret)); av_err2str(ret));
break; break;
} }
@ -930,7 +905,7 @@ int encoder_thread(void *arg)
if (ret == 0 || ret == AVERROR_EOF) { if (ret == 0 || ret == AVERROR_EOF) {
ret = frame_encode(ost, NULL, et.pkt); ret = frame_encode(ost, NULL, et.pkt);
if (ret < 0 && ret != AVERROR_EOF) if (ret < 0 && ret != AVERROR_EOF)
av_log(e, AV_LOG_ERROR, "Error flushing encoder: %s\n", av_log(ost, AV_LOG_ERROR, "Error flushing encoder: %s\n",
av_err2str(ret)); av_err2str(ret));
} }
@ -946,7 +921,6 @@ finish:
int enc_loopback(Encoder *enc) int enc_loopback(Encoder *enc)
{ {
EncoderPriv *ep = ep_from_enc(enc); enc->attach_par = 1;
ep->attach_par = 1; return enc->sch_idx;
return ep->sch_idx;
} }

File diff suppressed because it is too large Load diff

View file

@ -581,9 +581,9 @@ static int bsf_init(MuxStream *ms)
int ret; int ret;
if (!ctx) if (!ctx)
return avcodec_parameters_copy(ost->st->codecpar, ms->par_in); return avcodec_parameters_copy(ost->st->codecpar, ost->par_in);
ret = avcodec_parameters_copy(ctx->par_in, ms->par_in); ret = avcodec_parameters_copy(ctx->par_in, ost->par_in);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -608,29 +608,12 @@ static int bsf_init(MuxStream *ms)
return 0; return 0;
} }
int of_stream_init(OutputFile *of, OutputStream *ost, int of_stream_init(OutputFile *of, OutputStream *ost)
const AVCodecContext *enc_ctx)
{ {
Muxer *mux = mux_from_of(of); Muxer *mux = mux_from_of(of);
MuxStream *ms = ms_from_ost(ost); MuxStream *ms = ms_from_ost(ost);
int ret; int ret;
if (enc_ctx) {
// use upstream time base unless it has been overridden previously
if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
ost->st->time_base = av_add_q(enc_ctx->time_base, (AVRational){0, 1});
ost->st->avg_frame_rate = enc_ctx->framerate;
ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio;
ret = avcodec_parameters_from_context(ms->par_in, enc_ctx);
if (ret < 0) {
av_log(ost, AV_LOG_FATAL,
"Error initializing the output stream codec parameters.\n");
return ret;
}
}
/* initialize bitstream filters for the output stream /* initialize bitstream filters for the output stream
* needs to be done here, because the codec id for streamcopy is not * needs to be done here, because the codec id for streamcopy is not
* known until now */ * known until now */
@ -661,8 +644,8 @@ static int check_written(OutputFile *of)
total_packets_written += packets_written; total_packets_written += packets_written;
if (ost->enc && if (ost->enc_ctx &&
(ost->enc->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
!= AV_CODEC_FLAG_PASS1) != AV_CODEC_FLAG_PASS1)
pass1_used = 0; pass1_used = 0;
@ -723,9 +706,9 @@ static void mux_final_stats(Muxer *mux)
of->index, j, av_get_media_type_string(type)); of->index, j, av_get_media_type_string(type));
if (ost->enc) { if (ost->enc) {
av_log(of, AV_LOG_VERBOSE, "%"PRIu64" frames encoded", av_log(of, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
ost->enc->frames_encoded); ost->frames_encoded);
if (type == AVMEDIA_TYPE_AUDIO) if (type == AVMEDIA_TYPE_AUDIO)
av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->enc->samples_encoded); av_log(of, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
av_log(of, AV_LOG_VERBOSE, "; "); av_log(of, AV_LOG_VERBOSE, "; ");
} }
@ -823,7 +806,7 @@ static void ost_free(OutputStream **post)
ost->logfile = NULL; ost->logfile = NULL;
} }
avcodec_parameters_free(&ms->par_in); avcodec_parameters_free(&ost->par_in);
av_bsf_free(&ms->bsf_ctx); av_bsf_free(&ms->bsf_ctx);
av_packet_free(&ms->bsf_pkt); av_packet_free(&ms->bsf_pkt);
@ -837,6 +820,10 @@ static void ost_free(OutputStream **post)
av_freep(&ost->attachment_filename); av_freep(&ost->attachment_filename);
if (ost->enc_ctx)
av_freep(&ost->enc_ctx->stats_in);
avcodec_free_context(&ost->enc_ctx);
enc_stats_uninit(&ost->enc_stats_pre); enc_stats_uninit(&ost->enc_stats_pre);
enc_stats_uninit(&ost->enc_stats_post); enc_stats_uninit(&ost->enc_stats_post);
enc_stats_uninit(&ms->stats); enc_stats_uninit(&ms->stats);

View file

@ -36,12 +36,6 @@
typedef struct MuxStream { typedef struct MuxStream {
OutputStream ost; OutputStream ost;
/**
* Codec parameters for packets submitted to the muxer (i.e. before
* bitstream filtering, if any).
*/
AVCodecParameters *par_in;
// name used for logging // name used for logging
char log_name[32]; char log_name[32];
@ -85,10 +79,6 @@ typedef struct MuxStream {
int ts_drop; int ts_drop;
#endif #endif
AVRational frame_rate;
AVRational max_frame_rate;
int force_fps;
const char *apad; const char *apad;
} MuxStream; } MuxStream;
@ -123,7 +113,7 @@ typedef struct Muxer {
int mux_check_init(void *arg); int mux_check_init(void *arg);
static inline MuxStream *ms_from_ost(OutputStream *ost) static MuxStream *ms_from_ost(OutputStream *ost)
{ {
return (MuxStream*)ost; return (MuxStream*)ost;
} }

View file

@ -67,9 +67,8 @@ static int check_opt_bitexact(void *ctx, const AVDictionary *opts,
} }
static int choose_encoder(const OptionsContext *o, AVFormatContext *s, static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
MuxStream *ms, const AVCodec **enc) OutputStream *ost, const AVCodec **enc)
{ {
OutputStream *ost = &ms->ost;
enum AVMediaType type = ost->type; enum AVMediaType type = ost->type;
const char *codec_name = NULL; const char *codec_name = NULL;
@ -91,20 +90,20 @@ static int choose_encoder(const OptionsContext *o, AVFormatContext *s,
} }
if (!codec_name) { if (!codec_name) {
ms->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type); ost->par_in->codec_id = av_guess_codec(s->oformat, NULL, s->url, NULL, ost->type);
*enc = avcodec_find_encoder(ms->par_in->codec_id); *enc = avcodec_find_encoder(ost->par_in->codec_id);
if (!*enc) { if (!*enc) {
av_log(ost, AV_LOG_FATAL, "Automatic encoder selection failed " av_log(ost, AV_LOG_FATAL, "Automatic encoder selection failed "
"Default encoder for format %s (codec %s) is " "Default encoder for format %s (codec %s) is "
"probably disabled. Please choose an encoder manually.\n", "probably disabled. Please choose an encoder manually.\n",
s->oformat->name, avcodec_get_name(ms->par_in->codec_id)); s->oformat->name, avcodec_get_name(ost->par_in->codec_id));
return AVERROR_ENCODER_NOT_FOUND; return AVERROR_ENCODER_NOT_FOUND;
} }
} else if (strcmp(codec_name, "copy")) { } else if (strcmp(codec_name, "copy")) {
int ret = find_codec(ost, codec_name, ost->type, 1, enc); int ret = find_codec(ost, codec_name, ost->type, 1, enc);
if (ret < 0) if (ret < 0)
return ret; return ret;
ms->par_in->codec_id = (*enc)->id; ost->par_in->codec_id = (*enc)->id;
} }
return 0; return 0;
@ -424,6 +423,27 @@ static int ost_get_filters(const OptionsContext *o, AVFormatContext *oc,
#endif #endif
opt_match_per_stream_str(ost, &o->filters, oc, ost->st, &filters); opt_match_per_stream_str(ost, &o->filters, oc, ost->st, &filters);
if (!ost->enc) {
if (
#if FFMPEG_OPT_FILTER_SCRIPT
filters_script ||
#endif
filters) {
av_log(ost, AV_LOG_ERROR,
"%s '%s' was specified, but codec copy was selected. "
"Filtering and streamcopy cannot be used together.\n",
#if FFMPEG_OPT_FILTER_SCRIPT
filters ? "Filtergraph" : "Filtergraph script",
filters ? filters : filters_script
#else
"Filtergraph", filters
#endif
);
return AVERROR(ENOSYS);
}
return 0;
}
if (!ost->ist) { if (!ost->ist) {
if ( if (
#if FFMPEG_OPT_FILTER_SCRIPT #if FFMPEG_OPT_FILTER_SCRIPT
@ -534,7 +554,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
return AV_PIX_FMT_NONE; return AV_PIX_FMT_NONE;
} }
ret = avcodec_get_supported_config(ost->enc->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT, ret = avcodec_get_supported_config(ost->enc_ctx, NULL, AV_CODEC_CONFIG_PIX_FORMAT,
0, (const void **) &fmts, NULL); 0, (const void **) &fmts, NULL);
if (ret < 0) if (ret < 0)
return AV_PIX_FMT_NONE; return AV_PIX_FMT_NONE;
@ -566,7 +586,7 @@ static enum AVPixelFormat pix_fmt_parse(OutputStream *ost, const char *name)
} }
if (fmts && !fmt_in_list(fmts, fmt)) if (fmts && !fmt_in_list(fmts, fmt))
fmt = choose_pixel_fmt(ost->enc->enc_ctx, fmt); fmt = choose_pixel_fmt(ost->enc_ctx, fmt);
return fmt; return fmt;
} }
@ -584,13 +604,13 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
st = ost->st; st = ost->st;
opt_match_per_stream_str(ost, &o->frame_rates, oc, st, &frame_rate); opt_match_per_stream_str(ost, &o->frame_rates, oc, st, &frame_rate);
if (frame_rate && av_parse_video_rate(&ms->frame_rate, frame_rate) < 0) { if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
av_log(ost, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate); av_log(ost, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
opt_match_per_stream_str(ost, &o->max_frame_rates, oc, st, &max_frame_rate); opt_match_per_stream_str(ost, &o->max_frame_rates, oc, st, &max_frame_rate);
if (max_frame_rate && av_parse_video_rate(&ms->max_frame_rate, max_frame_rate) < 0) { if (max_frame_rate && av_parse_video_rate(&ost->max_frame_rate, max_frame_rate) < 0) {
av_log(ost, AV_LOG_FATAL, "Invalid maximum framerate value: %s\n", max_frame_rate); av_log(ost, AV_LOG_FATAL, "Invalid maximum framerate value: %s\n", max_frame_rate);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
@ -611,8 +631,8 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
ost->frame_aspect_ratio = q; ost->frame_aspect_ratio = q;
} }
if (ost->enc) { if (ost->enc_ctx) {
AVCodecContext *video_enc = ost->enc->enc_ctx; AVCodecContext *video_enc = ost->enc_ctx;
const char *p = NULL, *fps_mode = NULL; const char *p = NULL, *fps_mode = NULL;
const char *frame_size = NULL; const char *frame_size = NULL;
const char *frame_pix_fmt = NULL; const char *frame_pix_fmt = NULL;
@ -725,10 +745,10 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
ost->logfile_prefix ? ost->logfile_prefix : ost->logfile_prefix ? ost->logfile_prefix :
DEFAULT_PASS_LOGFILENAME_PREFIX, DEFAULT_PASS_LOGFILENAME_PREFIX,
ost_idx); ost_idx);
if (!strcmp(video_enc->codec->name, "libx264") || !strcmp(video_enc->codec->name, "libvvenc")) { if (!strcmp(ost->enc_ctx->codec->name, "libx264") || !strcmp(ost->enc_ctx->codec->name, "libvvenc")) {
if (av_opt_is_set_to_default_by_name(video_enc, "stats", if (av_opt_is_set_to_default_by_name(ost->enc_ctx, "stats",
AV_OPT_SEARCH_CHILDREN) > 0) AV_OPT_SEARCH_CHILDREN) > 0)
av_opt_set(video_enc, "stats", logfilename, av_opt_set(ost->enc_ctx, "stats", logfilename,
AV_OPT_SEARCH_CHILDREN); AV_OPT_SEARCH_CHILDREN);
} else { } else {
if (video_enc->flags & AV_CODEC_FLAG_PASS2) { if (video_enc->flags & AV_CODEC_FLAG_PASS2) {
@ -754,7 +774,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
} }
} }
opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ms->force_fps); opt_match_per_stream_int(ost, &o->force_fps, oc, st, &ost->force_fps);
#if FFMPEG_OPT_TOP #if FFMPEG_OPT_TOP
ost->top_field_first = -1; ost->top_field_first = -1;
@ -775,7 +795,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
return ret; return ret;
} }
if ((ms->frame_rate.num || ms->max_frame_rate.num) && if ((ost->frame_rate.num || ost->max_frame_rate.num) &&
!(*vsync_method == VSYNC_AUTO || !(*vsync_method == VSYNC_AUTO ||
*vsync_method == VSYNC_CFR || *vsync_method == VSYNC_VSCFR)) { *vsync_method == VSYNC_CFR || *vsync_method == VSYNC_VSCFR)) {
av_log(ost, AV_LOG_FATAL, "One of -r/-fpsmax was specified " av_log(ost, AV_LOG_FATAL, "One of -r/-fpsmax was specified "
@ -784,7 +804,7 @@ static int new_stream_video(Muxer *mux, const OptionsContext *o,
} }
if (*vsync_method == VSYNC_AUTO) { if (*vsync_method == VSYNC_AUTO) {
if (ms->frame_rate.num || ms->max_frame_rate.num) { if (ost->frame_rate.num || ost->max_frame_rate.num) {
*vsync_method = VSYNC_CFR; *vsync_method = VSYNC_CFR;
} else if (!strcmp(oc->oformat->name, "avi")) { } else if (!strcmp(oc->oformat->name, "avi")) {
*vsync_method = VSYNC_VFR; *vsync_method = VSYNC_VFR;
@ -821,8 +841,8 @@ static int new_stream_audio(Muxer *mux, const OptionsContext *o,
AVFormatContext *oc = mux->fc; AVFormatContext *oc = mux->fc;
AVStream *st = ost->st; AVStream *st = ost->st;
if (ost->enc) { if (ost->enc_ctx) {
AVCodecContext *audio_enc = ost->enc->enc_ctx; AVCodecContext *audio_enc = ost->enc_ctx;
int channels = 0; int channels = 0;
const char *layout = NULL; const char *layout = NULL;
const char *sample_fmt = NULL; const char *sample_fmt = NULL;
@ -860,8 +880,8 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
st = ost->st; st = ost->st;
if (ost->enc) { if (ost->enc_ctx) {
AVCodecContext *subtitle_enc = ost->enc->enc_ctx; AVCodecContext *subtitle_enc = ost->enc_ctx;
AVCodecDescriptor const *input_descriptor = AVCodecDescriptor const *input_descriptor =
avcodec_descriptor_get(ost->ist->par->codec_id); avcodec_descriptor_get(ost->ist->par->codec_id);
@ -896,16 +916,14 @@ static int new_stream_subtitle(Muxer *mux, const OptionsContext *o,
static int static int
ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter, ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
const OptionsContext *o, const OptionsContext *o, char *filters,
AVRational enc_tb, enum VideoSyncMethod vsync_method, AVRational enc_tb, enum VideoSyncMethod vsync_method,
int keep_pix_fmt, int autoscale, int threads_manual, int keep_pix_fmt, int autoscale, int threads_manual,
const ViewSpecifier *vs, const ViewSpecifier *vs)
SchedulerNode *src)
{ {
OutputStream *ost = &ms->ost; OutputStream *ost = &ms->ost;
AVCodecContext *enc_ctx = ost->enc->enc_ctx; AVCodecContext *enc_ctx = ost->enc_ctx;
char name[16]; char name[16];
char *filters = NULL;
int ret; int ret;
OutputFilterOptions opts = { OutputFilterOptions opts = {
@ -918,8 +936,6 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
.color_space = enc_ctx->colorspace, .color_space = enc_ctx->colorspace,
.color_range = enc_ctx->color_range, .color_range = enc_ctx->color_range,
.vsync_method = vsync_method, .vsync_method = vsync_method,
.frame_rate = ms->frame_rate,
.max_frame_rate = ms->max_frame_rate,
.sample_rate = enc_ctx->sample_rate, .sample_rate = enc_ctx->sample_rate,
.ch_layout = enc_ctx->ch_layout, .ch_layout = enc_ctx->ch_layout,
.sws_opts = o->g->sws_dict, .sws_opts = o->g->sws_dict,
@ -930,7 +946,6 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
.ts_offset = mux->of.start_time == AV_NOPTS_VALUE ? .ts_offset = mux->of.start_time == AV_NOPTS_VALUE ?
0 : mux->of.start_time, 0 : mux->of.start_time,
.vs = vs, .vs = vs,
.nb_threads = -1,
.flags = OFILTER_FLAG_DISABLE_CONVERT * !!keep_pix_fmt | .flags = OFILTER_FLAG_DISABLE_CONVERT * !!keep_pix_fmt |
OFILTER_FLAG_AUTOSCALE * !!autoscale | OFILTER_FLAG_AUTOSCALE * !!autoscale |
@ -947,7 +962,7 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
if (!ms->force_fps) { if (!ost->force_fps) {
ret = avcodec_get_supported_config(enc_ctx, NULL, ret = avcodec_get_supported_config(enc_ctx, NULL,
AV_CODEC_CONFIG_FRAME_RATE, 0, AV_CODEC_CONFIG_FRAME_RATE, 0,
(const void **) &opts.frame_rates, NULL); (const void **) &opts.frame_rates, NULL);
@ -983,77 +998,46 @@ ost_bind_filter(const Muxer *mux, MuxStream *ms, OutputFilter *ofilter,
} }
if (threads_manual) { if (threads_manual) {
ret = av_opt_get_int(enc_ctx, "threads", 0, &opts.nb_threads); ret = av_opt_get(enc_ctx, "threads", 0, (uint8_t**)&opts.nb_threads);
if (ret < 0) if (ret < 0)
return ret; return ret;
} }
ret = ost_get_filters(o, mux->fc, ost, &filters);
if (ret < 0)
return ret;
if (ofilter) { if (ofilter) {
av_assert0(!filters);
ost->filter = ofilter; ost->filter = ofilter;
ret = ofilter_bind_enc(ofilter, ms->sch_idx_enc, &opts); ret = ofilter_bind_ost(ofilter, ost, ms->sch_idx_enc, &opts);
} else { } else {
ret = fg_create_simple(&ost->fg_simple, ost->ist, filters, ret = init_simple_filtergraph(ost->ist, ost, filters,
mux->sch, ms->sch_idx_enc, &opts); mux->sch, ms->sch_idx_enc, &opts);
if (ret >= 0)
ost->filter = ost->fg_simple->outputs[0];
} }
av_freep(&opts.nb_threads);
if (ret < 0) if (ret < 0)
return ret; return ret;
*src = SCH_ENC(ms->sch_idx_enc); ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
if (ret < 0)
return ret;
return 0; return ret;
} }
static int streamcopy_init(const OptionsContext *o, const Muxer *mux, static int streamcopy_init(const Muxer *mux, OutputStream *ost, AVDictionary **encoder_opts)
OutputStream *ost, AVDictionary **encoder_opts)
{ {
MuxStream *ms = ms_from_ost(ost); MuxStream *ms = ms_from_ost(ost);
const InputStream *ist = ost->ist; const InputStream *ist = ost->ist;
const InputFile *ifile = ist->file; const InputFile *ifile = ist->file;
AVCodecParameters *par = ms->par_in; AVCodecParameters *par = ost->par_in;
uint32_t codec_tag = par->codec_tag; uint32_t codec_tag = par->codec_tag;
AVCodecContext *codec_ctx = NULL; AVCodecContext *codec_ctx = NULL;
AVRational fr = ms->frame_rate; AVRational fr = ost->frame_rate;
int ret = 0; int ret = 0;
const char *filters = NULL;
#if FFMPEG_OPT_FILTER_SCRIPT
const char *filters_script = NULL;
opt_match_per_stream_str(ost, &o->filter_scripts, mux->fc, ost->st, &filters_script);
#endif
opt_match_per_stream_str(ost, &o->filters, mux->fc, ost->st, &filters);
if (
#if FFMPEG_OPT_FILTER_SCRIPT
filters_script ||
#endif
filters) {
av_log(ost, AV_LOG_ERROR,
"%s '%s' was specified, but codec copy was selected. "
"Filtering and streamcopy cannot be used together.\n",
#if FFMPEG_OPT_FILTER_SCRIPT
filters ? "Filtergraph" : "Filtergraph script",
filters ? filters : filters_script
#else
"Filtergraph", filters
#endif
);
return AVERROR(EINVAL);
}
codec_ctx = avcodec_alloc_context3(NULL); codec_ctx = avcodec_alloc_context3(NULL);
if (!codec_ctx) if (!codec_ctx)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
@ -1155,28 +1139,6 @@ fail:
return ret; return ret;
} }
static int set_encoder_id(OutputStream *ost, const AVCodec *codec)
{
const char *cname = codec->name;
uint8_t *encoder_string;
int encoder_string_len;
encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(cname) + 2;
encoder_string = av_mallocz(encoder_string_len);
if (!encoder_string)
return AVERROR(ENOMEM);
if (!ost->file->bitexact && !ost->bitexact)
av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
else
av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
av_strlcat(encoder_string, cname, encoder_string_len);
av_dict_set(&ost->st->metadata, "encoder", encoder_string,
AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
return 0;
}
static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type, static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
InputStream *ist, OutputFilter *ofilter, const ViewSpecifier *vs, InputStream *ist, OutputFilter *ofilter, const ViewSpecifier *vs,
OutputStream **post) OutputStream **post)
@ -1186,14 +1148,13 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
OutputStream *ost; OutputStream *ost;
const AVCodec *enc; const AVCodec *enc;
AVStream *st; AVStream *st;
SchedulerNode src = { .type = SCH_NODE_TYPE_NONE };
AVDictionary *encoder_opts = NULL; AVDictionary *encoder_opts = NULL;
int ret = 0, keep_pix_fmt = 0, autoscale = 1; int ret = 0, keep_pix_fmt = 0, autoscale = 1;
int threads_manual = 0; int threads_manual = 0;
AVRational enc_tb = { 0, 0 }; AVRational enc_tb = { 0, 0 };
enum VideoSyncMethod vsync_method = VSYNC_AUTO; enum VideoSyncMethod vsync_method = VSYNC_AUTO;
const char *bsfs = NULL, *time_base = NULL, *codec_tag = NULL; const char *bsfs = NULL, *time_base = NULL, *codec_tag = NULL;
char *next; char *filters = NULL, *next;
double qscale = -1; double qscale = -1;
st = avformat_new_stream(oc, NULL); st = avformat_new_stream(oc, NULL);
@ -1237,8 +1198,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
} }
} }
ms->par_in = avcodec_parameters_alloc(); ost->par_in = avcodec_parameters_alloc();
if (!ms->par_in) if (!ost->par_in)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
ms->last_mux_dts = AV_NOPTS_VALUE; ms->last_mux_dts = AV_NOPTS_VALUE;
@ -1246,23 +1207,27 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
ost->st = st; ost->st = st;
ost->ist = ist; ost->ist = ist;
ost->kf.ref_pts = AV_NOPTS_VALUE; ost->kf.ref_pts = AV_NOPTS_VALUE;
ms->par_in->codec_type = type; ost->par_in->codec_type = type;
st->codecpar->codec_type = type; st->codecpar->codec_type = type;
ret = choose_encoder(o, oc, ms, &enc); ret = choose_encoder(o, oc, ost, &enc);
if (ret < 0) { if (ret < 0) {
av_log(ost, AV_LOG_FATAL, "Error selecting an encoder\n"); av_log(ost, AV_LOG_FATAL, "Error selecting an encoder\n");
return ret; return ret;
} }
if (enc) { if (enc) {
ost->enc_ctx = avcodec_alloc_context3(enc);
if (!ost->enc_ctx)
return AVERROR(ENOMEM);
ret = sch_add_enc(mux->sch, encoder_thread, ost, ret = sch_add_enc(mux->sch, encoder_thread, ost,
ost->type == AVMEDIA_TYPE_SUBTITLE ? NULL : enc_open); ost->type == AVMEDIA_TYPE_SUBTITLE ? NULL : enc_open);
if (ret < 0) if (ret < 0)
return ret; return ret;
ms->sch_idx_enc = ret; ms->sch_idx_enc = ret;
ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc, ost); ret = enc_alloc(&ost->enc, enc, mux->sch, ms->sch_idx_enc);
if (ret < 0) if (ret < 0)
return ret; return ret;
@ -1297,21 +1262,21 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
if (!ms->pkt) if (!ms->pkt)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (ost->enc) { if (ost->enc_ctx) {
AVIOContext *s = NULL; AVIOContext *s = NULL;
char *buf = NULL, *arg = NULL; char *buf = NULL, *arg = NULL;
const char *enc_stats_pre = NULL, *enc_stats_post = NULL, *mux_stats = NULL; const char *enc_stats_pre = NULL, *enc_stats_post = NULL, *mux_stats = NULL;
const char *enc_time_base = NULL, *preset = NULL; const char *enc_time_base = NULL, *preset = NULL;
ret = filter_codec_opts(o->g->codec_opts, enc->id, ret = filter_codec_opts(o->g->codec_opts, ost->enc_ctx->codec_id,
oc, st, enc, &encoder_opts, oc, st, ost->enc_ctx->codec, &encoder_opts,
&mux->enc_opts_used); &mux->enc_opts_used);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
opt_match_per_stream_str(ost, &o->presets, oc, st, &preset); opt_match_per_stream_str(ost, &o->presets, oc, st, &preset);
opt_match_per_stream_int(ost, &o->autoscale, oc, st, &autoscale); opt_match_per_stream_int(ost, &o->autoscale, oc, st, &autoscale);
if (preset && (!(ret = get_preset_file_2(preset, enc->name, &s)))) { if (preset && (!(ret = get_preset_file_2(preset, ost->enc_ctx->codec->name, &s)))) {
AVBPrint bprint; AVBPrint bprint;
av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED); av_bprint_init(&bprint, 0, AV_BPRINT_SIZE_UNLIMITED);
do { do {
@ -1411,7 +1376,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
threads_manual = !!av_dict_get(encoder_opts, "threads", NULL, 0); threads_manual = !!av_dict_get(encoder_opts, "threads", NULL, 0);
ret = av_opt_set_dict2(ost->enc->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN); ret = av_opt_set_dict2(ost->enc_ctx, &encoder_opts, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) { if (ret < 0) {
av_log(ost, AV_LOG_ERROR, "Error applying encoder options: %s\n", av_log(ost, AV_LOG_ERROR, "Error applying encoder options: %s\n",
av_err2str(ret)); av_err2str(ret));
@ -1424,7 +1389,7 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
// default to automatic thread count // default to automatic thread count
if (!threads_manual) if (!threads_manual)
ost->enc->enc_ctx->thread_count = 0; ost->enc_ctx->thread_count = 0;
} else { } else {
ret = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st, ret = filter_codec_opts(o->g->codec_opts, AV_CODEC_ID_NONE, oc, st,
NULL, &encoder_opts, NULL, &encoder_opts,
@ -1436,14 +1401,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
if (o->bitexact) { if (o->bitexact) {
ost->bitexact = 1; ost->bitexact = 1;
} else if (ost->enc) { } else if (ost->enc_ctx) {
ost->bitexact = !!(ost->enc->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT); ost->bitexact = !!(ost->enc_ctx->flags & AV_CODEC_FLAG_BITEXACT);
}
if (enc) {
ret = set_encoder_id(ost, enc);
if (ret < 0)
return ret;
} }
opt_match_per_stream_str(ost, &o->time_bases, oc, st, &time_base); opt_match_per_stream_str(ost, &o->time_bases, oc, st, &time_base);
@ -1488,15 +1447,15 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
tag = AV_RL32(buf); tag = AV_RL32(buf);
} }
ost->st->codecpar->codec_tag = tag; ost->st->codecpar->codec_tag = tag;
ms->par_in->codec_tag = tag; ost->par_in->codec_tag = tag;
if (ost->enc) if (ost->enc_ctx)
ost->enc->enc_ctx->codec_tag = tag; ost->enc_ctx->codec_tag = tag;
} }
opt_match_per_stream_dbl(ost, &o->qscale, oc, st, &qscale); opt_match_per_stream_dbl(ost, &o->qscale, oc, st, &qscale);
if (ost->enc && qscale >= 0) { if (ost->enc_ctx && qscale >= 0) {
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE; ost->enc_ctx->flags |= AV_CODEC_FLAG_QSCALE;
ost->enc->enc_ctx->global_quality = FF_QP2LAMBDA * qscale; ost->enc_ctx->global_quality = FF_QP2LAMBDA * qscale;
} }
if (ms->sch_idx >= 0) { if (ms->sch_idx >= 0) {
@ -1518,8 +1477,8 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
opt_match_per_stream_int(ost, &o->fix_sub_duration_heartbeat, opt_match_per_stream_int(ost, &o->fix_sub_duration_heartbeat,
oc, st, &ost->fix_sub_duration_heartbeat); oc, st, &ost->fix_sub_duration_heartbeat);
if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc) if (oc->oformat->flags & AVFMT_GLOBALHEADER && ost->enc_ctx)
ost->enc->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER; ost->enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
opt_match_per_stream_int(ost, &o->copy_initial_nonkeyframes, opt_match_per_stream_int(ost, &o->copy_initial_nonkeyframes,
oc, st, &ms->copy_initial_nonkeyframes); oc, st, &ms->copy_initial_nonkeyframes);
@ -1531,43 +1490,48 @@ static int ost_add(Muxer *mux, const OptionsContext *o, enum AVMediaType type,
if (ret < 0) if (ret < 0)
goto fail; goto fail;
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO) {
ret = ost_get_filters(o, oc, ost, &filters);
if (ret < 0)
goto fail;
}
if (ost->enc && if (ost->enc &&
(type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) { (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) {
ret = ost_bind_filter(mux, ms, ofilter, o, enc_tb, vsync_method, ret = ost_bind_filter(mux, ms, ofilter, o, filters, enc_tb, vsync_method,
keep_pix_fmt, autoscale, threads_manual, vs, &src); keep_pix_fmt, autoscale, threads_manual, vs);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
} else if (ost->ist) { } else if (ost->ist) {
ret = ist_use(ost->ist, !!ost->enc, NULL, &src); int sched_idx = ist_output_add(ost->ist, ost);
if (ret < 0) { if (sched_idx < 0) {
av_log(ost, AV_LOG_ERROR, av_log(ost, AV_LOG_ERROR,
"Error binding an input stream\n"); "Error binding an input stream\n");
ret = sched_idx;
goto fail; goto fail;
} }
ms->sch_idx_src = src.idx; ms->sch_idx_src = sched_idx;
// src refers to a decoder for transcoding, demux stream otherwise
if (ost->enc) { if (ost->enc) {
ret = sch_connect(mux->sch, ret = sch_connect(mux->sch, SCH_DEC_OUT(sched_idx, 0),
src, SCH_ENC(ms->sch_idx_enc)); SCH_ENC(ms->sch_idx_enc));
if (ret < 0) if (ret < 0)
goto fail; goto fail;
src = SCH_ENC(ms->sch_idx_enc);
}
}
if (src.type != SCH_NODE_TYPE_NONE) { ret = sch_connect(mux->sch, SCH_ENC(ms->sch_idx_enc),
ret = sch_connect(mux->sch, SCH_MSTREAM(mux->sch_idx, ms->sch_idx));
src, SCH_MSTREAM(mux->sch_idx, ms->sch_idx)); if (ret < 0)
if (ret < 0) goto fail;
goto fail; } else {
} else { ret = sch_connect(mux->sch, SCH_DSTREAM(ost->ist->file->index, sched_idx),
// only attachment streams don't have a source SCH_MSTREAM(ost->file->index, ms->sch_idx));
av_assert0(type == AVMEDIA_TYPE_ATTACHMENT && ms->sch_idx < 0); if (ret < 0)
goto fail;
}
} }
if (ost->ist && !ost->enc) { if (ost->ist && !ost->enc) {
ret = streamcopy_init(o, mux, ost, &encoder_opts); ret = streamcopy_init(mux, ost, &encoder_opts);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
} }
@ -1593,7 +1557,7 @@ static int map_auto_video(Muxer *mux, const OptionsContext *o)
{ {
AVFormatContext *oc = mux->fc; AVFormatContext *oc = mux->fc;
InputStream *best_ist = NULL; InputStream *best_ist = NULL;
int64_t best_score = 0; int best_score = 0;
int qcr; int qcr;
/* video: highest resolution */ /* video: highest resolution */
@ -1604,16 +1568,16 @@ static int map_auto_video(Muxer *mux, const OptionsContext *o)
for (int j = 0; j < nb_input_files; j++) { for (int j = 0; j < nb_input_files; j++) {
InputFile *ifile = input_files[j]; InputFile *ifile = input_files[j];
InputStream *file_best_ist = NULL; InputStream *file_best_ist = NULL;
int64_t file_best_score = 0; int file_best_score = 0;
for (int i = 0; i < ifile->nb_streams; i++) { for (int i = 0; i < ifile->nb_streams; i++) {
InputStream *ist = ifile->streams[i]; InputStream *ist = ifile->streams[i];
int64_t score; int score;
if (ist->user_set_discard == AVDISCARD_ALL || if (ist->user_set_discard == AVDISCARD_ALL ||
ist->st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) ist->st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
continue; continue;
score = ist->st->codecpar->width * (int64_t)ist->st->codecpar->height score = ist->st->codecpar->width * ist->st->codecpar->height
+ 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS) + 100000000 * !!(ist->st->event_flags & AVSTREAM_EVENT_FLAG_NEW_PACKETS)
+ 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT); + 5000000*!!(ist->st->disposition & AV_DISPOSITION_DEFAULT);
if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)) if((qcr!=MKTAG('A', 'P', 'I', 'C')) && (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC))
@ -1835,7 +1799,6 @@ loop_end:
static int of_add_attachments(Muxer *mux, const OptionsContext *o) static int of_add_attachments(Muxer *mux, const OptionsContext *o)
{ {
MuxStream *ms;
OutputStream *ost; OutputStream *ost;
int err; int err;
@ -1903,11 +1866,9 @@ read_fail:
return err; return err;
} }
ms = ms_from_ost(ost);
ost->attachment_filename = attachment_filename; ost->attachment_filename = attachment_filename;
ms->par_in->extradata = attachment; ost->par_in->extradata = attachment;
ms->par_in->extradata_size = len; ost->par_in->extradata_size = len;
p = strrchr(o->attachments[i], '/'); p = strrchr(o->attachments[i], '/');
av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE); av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
@ -2048,7 +2009,7 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
int limit_frames = 0, limit_frames_av_enc = 0; int limit_frames = 0, limit_frames_av_enc = 0;
#define IS_AV_ENC(ost, type) \ #define IS_AV_ENC(ost, type) \
(ost->enc && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)) (ost->enc_ctx && (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO))
#define IS_INTERLEAVED(type) (type != AVMEDIA_TYPE_ATTACHMENT) #define IS_INTERLEAVED(type) (type != AVMEDIA_TYPE_ATTACHMENT)
for (int i = 0; i < oc->nb_streams; i++) { for (int i = 0; i < oc->nb_streams; i++) {
@ -2060,8 +2021,8 @@ static int setup_sync_queues(Muxer *mux, AVFormatContext *oc,
nb_interleaved += IS_INTERLEAVED(type); nb_interleaved += IS_INTERLEAVED(type);
nb_av_enc += IS_AV_ENC(ost, type); nb_av_enc += IS_AV_ENC(ost, type);
nb_audio_fs += (ost->enc && type == AVMEDIA_TYPE_AUDIO && nb_audio_fs += (ost->enc_ctx && type == AVMEDIA_TYPE_AUDIO &&
!(ost->enc->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)); !(ost->enc_ctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE));
limit_frames |= ms->max_frames < INT64_MAX; limit_frames |= ms->max_frames < INT64_MAX;
limit_frames_av_enc |= (ms->max_frames < INT64_MAX) && IS_AV_ENC(ost, type); limit_frames_av_enc |= (ms->max_frames < INT64_MAX) && IS_AV_ENC(ost, type);
@ -3016,6 +2977,9 @@ static int copy_meta(Muxer *mux, const OptionsContext *o)
if (!ost->ist) /* this is true e.g. for attached files */ if (!ost->ist) /* this is true e.g. for attached files */
continue; continue;
av_dict_copy(&ost->st->metadata, ost->ist->st->metadata, AV_DICT_DONT_OVERWRITE); av_dict_copy(&ost->st->metadata, ost->ist->st->metadata, AV_DICT_DONT_OVERWRITE);
if (ost->enc_ctx) {
av_dict_set(&ost->st->metadata, "encoder", NULL, 0);
}
} }
return 0; return 0;
@ -3092,7 +3056,7 @@ finish:
return ret; return ret;
} }
static const char *const forced_keyframes_const_names[] = { const char *const forced_keyframes_const_names[] = {
"n", "n",
"n_forced", "n_forced",
"prev_forced_n", "prev_forced_n",
@ -3194,7 +3158,7 @@ static int process_forced_keyframes(Muxer *mux, const OptionsContext *o)
mux->fc, ost->st, &forced_keyframes); mux->fc, ost->st, &forced_keyframes);
if (!(ost->type == AVMEDIA_TYPE_VIDEO && if (!(ost->type == AVMEDIA_TYPE_VIDEO &&
ost->enc && forced_keyframes)) ost->enc_ctx && forced_keyframes))
continue; continue;
if (!strncmp(forced_keyframes, "expr:", 5)) { if (!strncmp(forced_keyframes, "expr:", 5)) {
@ -3421,7 +3385,7 @@ int of_open(const OptionsContext *o, const char *filename, Scheduler *sch)
OutputStream *ost = of->streams[i]; OutputStream *ost = of->streams[i];
if (!ost->enc) { if (!ost->enc) {
err = of_stream_init(of, ost, NULL); err = of_stream_init(of, ost);
if (err < 0) if (err < 0)
return err; return err;
} }

View file

@ -47,12 +47,12 @@
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/parseutils.h" #include "libavutil/parseutils.h"
#include "libavutil/stereo3d.h" #include "libavutil/stereo3d.h"
#include "graph/graphprint.h"
HWDevice *filter_hw_device; HWDevice *filter_hw_device;
char *vstats_filename; char *vstats_filename;
float audio_drift_threshold = 0.1;
float dts_delta_threshold = 10; float dts_delta_threshold = 10;
float dts_error_threshold = 3600*30; float dts_error_threshold = 3600*30;
@ -75,11 +75,7 @@ int stdin_interaction = 1;
float max_error_rate = 2.0/3; float max_error_rate = 2.0/3;
char *filter_nbthreads; char *filter_nbthreads;
int filter_complex_nbthreads = 0; int filter_complex_nbthreads = 0;
int filter_buffered_frames = 0;
int vstats_version = 2; int vstats_version = 2;
int print_graphs = 0;
char *print_graphs_file = NULL;
char *print_graphs_format = NULL;
int auto_conversion_filters = 1; int auto_conversion_filters = 1;
int64_t stats_period = 500000; int64_t stats_period = 500000;
@ -90,15 +86,6 @@ int ignore_unknown_streams = 0;
int copy_unknown_streams = 0; int copy_unknown_streams = 0;
int recast_media = 0; int recast_media = 0;
// this struct is passed as the optctx argument
// to func_arg() for global options
typedef struct GlobalOptionsContext {
Scheduler *sch;
char **filtergraphs;
int nb_filtergraphs;
} GlobalOptionsContext;
static void uninit_options(OptionsContext *o) static void uninit_options(OptionsContext *o)
{ {
/* all OPT_SPEC and OPT_TYPE_STRING can be freed in generic way */ /* all OPT_SPEC and OPT_TYPE_STRING can be freed in generic way */
@ -358,7 +345,7 @@ static void correct_input_start_times(void)
if (copy_ts && start_at_zero) if (copy_ts && start_at_zero)
ifile->ts_offset = -new_start_time; ifile->ts_offset = -new_start_time;
else if (!copy_ts) { else if (!copy_ts) {
abs_start_seek = is->start_time + ((ifile->start_time != AV_NOPTS_VALUE) ? ifile->start_time : 0); abs_start_seek = is->start_time + (ifile->start_time != AV_NOPTS_VALUE) ? ifile->start_time : 0;
ifile->ts_offset = abs_start_seek > new_start_time ? -abs_start_seek : -new_start_time; ifile->ts_offset = abs_start_seek > new_start_time ? -abs_start_seek : -new_start_time;
} else if (copy_ts) } else if (copy_ts)
ifile->ts_offset = 0; ifile->ts_offset = 0;
@ -624,8 +611,8 @@ static int opt_attach(void *optctx, const char *opt, const char *arg)
static int opt_sdp_file(void *optctx, const char *opt, const char *arg) static int opt_sdp_file(void *optctx, const char *opt, const char *arg)
{ {
GlobalOptionsContext *go = optctx; Scheduler *sch = optctx;
return sch_sdp_filename(go->sch, arg); return sch_sdp_filename(sch, arg);
} }
#if CONFIG_VAAPI #if CONFIG_VAAPI
@ -1163,46 +1150,26 @@ static int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
static int opt_filter_complex(void *optctx, const char *opt, const char *arg) static int opt_filter_complex(void *optctx, const char *opt, const char *arg)
{ {
GlobalOptionsContext *go = optctx; Scheduler *sch = optctx;
char *graph_desc; char *graph_desc = av_strdup(arg);
int ret;
graph_desc = av_strdup(arg);
if (!graph_desc) if (!graph_desc)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
ret = GROW_ARRAY(go->filtergraphs, go->nb_filtergraphs); return fg_create(NULL, graph_desc, sch);
if (ret < 0) {
av_freep(&graph_desc);
return ret;
}
go->filtergraphs[go->nb_filtergraphs - 1] = graph_desc;
return 0;
} }
#if FFMPEG_OPT_FILTER_SCRIPT #if FFMPEG_OPT_FILTER_SCRIPT
static int opt_filter_complex_script(void *optctx, const char *opt, const char *arg) static int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
{ {
GlobalOptionsContext *go = optctx; Scheduler *sch = optctx;
char *graph_desc; char *graph_desc = file_read(arg);
int ret;
graph_desc = file_read(arg);
if (!graph_desc) if (!graph_desc)
return AVERROR(EINVAL); return AVERROR(EINVAL);
av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -/filter_complex %s instead\n", av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -/filter_complex %s instead\n",
opt, arg); opt, arg);
ret = GROW_ARRAY(go->filtergraphs, go->nb_filtergraphs); return fg_create(NULL, graph_desc, sch);
if (ret < 0) {
av_freep(&graph_desc);
return ret;
}
go->filtergraphs[go->nb_filtergraphs - 1] = graph_desc;
return 0;
} }
#endif #endif
@ -1379,7 +1346,6 @@ static int open_files(OptionGroupList *l, const char *inout, Scheduler *sch,
int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch) int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
{ {
GlobalOptionsContext go = { .sch = sch };
OptionParseContext octx; OptionParseContext octx;
const char *errmsg = NULL; const char *errmsg = NULL;
int ret; int ret;
@ -1395,7 +1361,7 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
} }
/* apply global options */ /* apply global options */
ret = parse_optgroup(&go, &octx.global_opts, options); ret = parse_optgroup(sch, &octx.global_opts, options);
if (ret < 0) { if (ret < 0) {
errmsg = "parsing global options"; errmsg = "parsing global options";
goto fail; goto fail;
@ -1404,14 +1370,6 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
/* configure terminal and setup signal handlers */ /* configure terminal and setup signal handlers */
term_init(); term_init();
/* create complex filtergraphs */
for (int i = 0; i < go.nb_filtergraphs; i++) {
ret = fg_create(NULL, go.filtergraphs[i], sch);
go.filtergraphs[i] = NULL;
if (ret < 0)
goto fail;
}
/* open input files */ /* open input files */
ret = open_files(&octx.groups[GROUP_INFILE], "input", sch, ifile_open); ret = open_files(&octx.groups[GROUP_INFILE], "input", sch, ifile_open);
if (ret < 0) { if (ret < 0) {
@ -1447,10 +1405,6 @@ int ffmpeg_parse_options(int argc, char **argv, Scheduler *sch)
goto fail; goto fail;
fail: fail:
for (int i = 0; i < go.nb_filtergraphs; i++)
av_freep(&go.filtergraphs[i]);
av_freep(&go.filtergraphs);
uninit_parse_context(&octx); uninit_parse_context(&octx);
if (ret < 0 && ret != AVERROR_EXIT) { if (ret < 0 && ret != AVERROR_EXIT) {
av_log(NULL, AV_LOG_FATAL, "Error %s: %s\n", av_log(NULL, AV_LOG_FATAL, "Error %s: %s\n",
@ -1512,6 +1466,7 @@ static int opt_adrift_threshold(void *optctx, const char *opt, const char *arg)
} }
#endif #endif
static const char *const alt_bsf[] = { "absf", "vbsf", NULL };
static const char *const alt_channel_layout[] = { "ch_layout", NULL}; static const char *const alt_channel_layout[] = { "ch_layout", NULL};
static const char *const alt_codec[] = { "c", "acodec", "vcodec", "scodec", "dcodec", NULL }; static const char *const alt_codec[] = { "c", "acodec", "vcodec", "scodec", "dcodec", NULL };
static const char *const alt_filter[] = { "af", "vf", NULL }; static const char *const alt_filter[] = { "af", "vf", NULL };
@ -1643,9 +1598,6 @@ const OptionDef options[] = {
{ "readrate_initial_burst", OPT_TYPE_DOUBLE, OPT_OFFSET | OPT_EXPERT | OPT_INPUT, { "readrate_initial_burst", OPT_TYPE_DOUBLE, OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
{ .off = OFFSET(readrate_initial_burst) }, { .off = OFFSET(readrate_initial_burst) },
"The initial amount of input to burst read before imposing any readrate", "seconds" }, "The initial amount of input to burst read before imposing any readrate", "seconds" },
{ "readrate_catchup", OPT_TYPE_FLOAT, OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
{ .off = OFFSET(readrate_catchup) },
"Temporary readrate used to catch up if an input lags behind the specified readrate", "speed" },
{ "target", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_PERFILE | OPT_EXPERT | OPT_OUTPUT, { "target", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_PERFILE | OPT_EXPERT | OPT_OUTPUT,
{ .func_arg = opt_target }, { .func_arg = opt_target },
"specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" " "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
@ -1715,9 +1667,6 @@ const OptionDef options[] = {
{ "filter_threads", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { "filter_threads", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT,
{ .func_arg = opt_filter_threads }, { .func_arg = opt_filter_threads },
"number of non-complex filter threads" }, "number of non-complex filter threads" },
{ "filter_buffered_frames", OPT_TYPE_INT, OPT_EXPERT,
{ &filter_buffered_frames },
"maximum number of buffered frames in a filter graph" },
#if FFMPEG_OPT_FILTER_SCRIPT #if FFMPEG_OPT_FILTER_SCRIPT
{ "filter_script", OPT_TYPE_STRING, OPT_PERSTREAM | OPT_EXPERT | OPT_OUTPUT, { "filter_script", OPT_TYPE_STRING, OPT_PERSTREAM | OPT_EXPERT | OPT_OUTPUT,
{ .off = OFFSET(filter_scripts) }, { .off = OFFSET(filter_scripts) },
@ -1726,9 +1675,6 @@ const OptionDef options[] = {
{ "reinit_filter", OPT_TYPE_INT, OPT_PERSTREAM | OPT_INPUT | OPT_EXPERT, { "reinit_filter", OPT_TYPE_INT, OPT_PERSTREAM | OPT_INPUT | OPT_EXPERT,
{ .off = OFFSET(reinit_filters) }, { .off = OFFSET(reinit_filters) },
"reinit filtergraph on input parameter changes", "" }, "reinit filtergraph on input parameter changes", "" },
{ "drop_changed", OPT_TYPE_INT, OPT_PERSTREAM | OPT_INPUT | OPT_EXPERT,
{ .off = OFFSET(drop_changed) },
"drop frame instead of reiniting filtergraph on input parameter changes", "" },
{ "filter_complex", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT, { "filter_complex", OPT_TYPE_FUNC, OPT_FUNC_ARG | OPT_EXPERT,
{ .func_arg = opt_filter_complex }, { .func_arg = opt_filter_complex },
"create a complex filtergraph", "graph_description" }, "create a complex filtergraph", "graph_description" },
@ -1743,15 +1689,6 @@ const OptionDef options[] = {
{ .func_arg = opt_filter_complex_script }, { .func_arg = opt_filter_complex_script },
"deprecated, use -/filter_complex instead", "filename" }, "deprecated, use -/filter_complex instead", "filename" },
#endif #endif
{ "print_graphs", OPT_TYPE_BOOL, 0,
{ &print_graphs },
"print execution graph data to stderr" },
{ "print_graphs_file", OPT_TYPE_STRING, 0,
{ &print_graphs_file },
"write execution graph data to the specified file", "filename" },
{ "print_graphs_format", OPT_TYPE_STRING, 0,
{ &print_graphs_format },
"set the output printing format (available formats are: default, compact, csv, flat, ini, json, xml, mermaid, mermaidhtml)", "format" },
{ "auto_conversion_filters", OPT_TYPE_BOOL, OPT_EXPERT, { "auto_conversion_filters", OPT_TYPE_BOOL, OPT_EXPERT,
{ &auto_conversion_filters }, { &auto_conversion_filters },
"enable automatic conversion filters globally" }, "enable automatic conversion filters globally" },

View file

@ -285,9 +285,8 @@ struct Scheduler {
pthread_mutex_t mux_ready_lock; pthread_mutex_t mux_ready_lock;
unsigned nb_mux_done; unsigned nb_mux_done;
unsigned task_failed; pthread_mutex_t mux_done_lock;
pthread_mutex_t finish_lock; pthread_cond_t mux_done_cond;
pthread_cond_t finish_cond;
SchDec *dec; SchDec *dec;
@ -307,6 +306,7 @@ struct Scheduler {
enum SchedulerState state; enum SchedulerState state;
atomic_int terminate; atomic_int terminate;
atomic_int task_failed;
pthread_mutex_t schedule_lock; pthread_mutex_t schedule_lock;
@ -375,6 +375,7 @@ static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_si
enum QueueType type) enum QueueType type)
{ {
ThreadQueue *tq; ThreadQueue *tq;
ObjPool *op;
if (queue_size <= 0) { if (queue_size <= 0) {
if (type == QUEUE_FRAMES) if (type == QUEUE_FRAMES)
@ -392,11 +393,18 @@ static int queue_alloc(ThreadQueue **ptq, unsigned nb_streams, unsigned queue_si
av_assert0(queue_size == DEFAULT_FRAME_THREAD_QUEUE_SIZE); av_assert0(queue_size == DEFAULT_FRAME_THREAD_QUEUE_SIZE);
} }
tq = tq_alloc(nb_streams, queue_size, op = (type == QUEUE_PACKETS) ? objpool_alloc_packets() :
(type == QUEUE_PACKETS) ? THREAD_QUEUE_PACKETS : THREAD_QUEUE_FRAMES); objpool_alloc_frames();
if (!tq) if (!op)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
tq = tq_alloc(nb_streams, queue_size, op,
(type == QUEUE_PACKETS) ? pkt_move : frame_move);
if (!tq) {
objpool_free(&op);
return AVERROR(ENOMEM);
}
*ptq = tq; *ptq = tq;
return 0; return 0;
} }
@ -563,8 +571,8 @@ void sch_free(Scheduler **psch)
pthread_mutex_destroy(&sch->mux_ready_lock); pthread_mutex_destroy(&sch->mux_ready_lock);
pthread_mutex_destroy(&sch->finish_lock); pthread_mutex_destroy(&sch->mux_done_lock);
pthread_cond_destroy(&sch->finish_cond); pthread_cond_destroy(&sch->mux_done_cond);
av_freep(psch); av_freep(psch);
} }
@ -594,11 +602,11 @@ Scheduler *sch_alloc(void)
if (ret) if (ret)
goto fail; goto fail;
ret = pthread_mutex_init(&sch->finish_lock, NULL); ret = pthread_mutex_init(&sch->mux_done_lock, NULL);
if (ret) if (ret)
goto fail; goto fail;
ret = pthread_cond_init(&sch->finish_cond, NULL); ret = pthread_cond_init(&sch->mux_done_cond, NULL);
if (ret) if (ret)
goto fail; goto fail;
@ -1103,52 +1111,22 @@ static int mux_task_start(SchMux *mux)
return ret; return ret;
/* flush the pre-muxing queues */ /* flush the pre-muxing queues */
while (1) { for (unsigned i = 0; i < mux->nb_streams; i++) {
int min_stream = -1; SchMuxStream *ms = &mux->streams[i];
Timestamp min_ts = { .ts = AV_NOPTS_VALUE };
AVPacket *pkt; AVPacket *pkt;
// find the stream with the earliest dts or EOF in pre-muxing queue while (av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1) >= 0) {
for (unsigned i = 0; i < mux->nb_streams; i++) {
SchMuxStream *ms = &mux->streams[i];
if (av_fifo_peek(ms->pre_mux_queue.fifo, &pkt, 1, 0) < 0)
continue;
if (!pkt || pkt->dts == AV_NOPTS_VALUE) {
min_stream = i;
break;
}
if (min_ts.ts == AV_NOPTS_VALUE ||
av_compare_ts(min_ts.ts, min_ts.tb, pkt->dts, pkt->time_base) > 0) {
min_stream = i;
min_ts = (Timestamp){ .ts = pkt->dts, .tb = pkt->time_base };
}
}
if (min_stream >= 0) {
SchMuxStream *ms = &mux->streams[min_stream];
ret = av_fifo_read(ms->pre_mux_queue.fifo, &pkt, 1);
av_assert0(ret >= 0);
if (pkt) { if (pkt) {
if (!ms->init_eof) if (!ms->init_eof)
ret = tq_send(mux->queue, min_stream, pkt); ret = tq_send(mux->queue, i, pkt);
av_packet_free(&pkt); av_packet_free(&pkt);
if (ret == AVERROR_EOF) if (ret == AVERROR_EOF)
ms->init_eof = 1; ms->init_eof = 1;
else if (ret < 0) else if (ret < 0)
return ret; return ret;
} else } else
tq_send_finish(mux->queue, min_stream); tq_send_finish(mux->queue, i);
continue;
} }
break;
} }
atomic_store(&mux->mux_started, 1); atomic_store(&mux->mux_started, 1);
@ -1656,27 +1634,29 @@ fail:
int sch_wait(Scheduler *sch, uint64_t timeout_us, int64_t *transcode_ts) int sch_wait(Scheduler *sch, uint64_t timeout_us, int64_t *transcode_ts)
{ {
int ret; int ret, err;
// convert delay to absolute timestamp // convert delay to absolute timestamp
timeout_us += av_gettime(); timeout_us += av_gettime();
pthread_mutex_lock(&sch->finish_lock); pthread_mutex_lock(&sch->mux_done_lock);
if (sch->nb_mux_done < sch->nb_mux) { if (sch->nb_mux_done < sch->nb_mux) {
struct timespec tv = { .tv_sec = timeout_us / 1000000, struct timespec tv = { .tv_sec = timeout_us / 1000000,
.tv_nsec = (timeout_us % 1000000) * 1000 }; .tv_nsec = (timeout_us % 1000000) * 1000 };
pthread_cond_timedwait(&sch->finish_cond, &sch->finish_lock, &tv); pthread_cond_timedwait(&sch->mux_done_cond, &sch->mux_done_lock, &tv);
} }
// abort transcoding if any task failed ret = sch->nb_mux_done == sch->nb_mux;
ret = sch->nb_mux_done == sch->nb_mux || sch->task_failed;
pthread_mutex_unlock(&sch->finish_lock); pthread_mutex_unlock(&sch->mux_done_lock);
*transcode_ts = atomic_load(&sch->last_dts); *transcode_ts = atomic_load(&sch->last_dts);
return ret; // abort transcoding if any task failed
err = atomic_load(&sch->task_failed);
return ret || err;
} }
static int enc_open(Scheduler *sch, SchEnc *enc, const AVFrame *frame) static int enc_open(Scheduler *sch, SchEnc *enc, const AVFrame *frame)
@ -1836,7 +1816,7 @@ static int mux_queue_packet(SchMux *mux, SchMuxStream *ms, AVPacket *pkt)
if (new_size <= packets) { if (new_size <= packets) {
av_log(mux, AV_LOG_ERROR, av_log(mux, AV_LOG_ERROR,
"Too many packets buffered for output stream.\n"); "Too many packets buffered for output stream.\n");
return AVERROR_BUFFER_TOO_SMALL; return AVERROR(ENOSPC);
} }
ret = av_fifo_grow2(q->fifo, new_size - packets); ret = av_fifo_grow2(q->fifo, new_size - packets);
if (ret < 0) if (ret < 0)
@ -1899,7 +1879,7 @@ static int send_to_mux(Scheduler *sch, SchMux *mux, unsigned stream_idx,
update_schedule: update_schedule:
// TODO: use atomics to check whether this changes trailing dts // TODO: use atomics to check whether this changes trailing dts
// to avoid locking unnecessarily // to avoid locking unnecessarily
if (dts != AV_NOPTS_VALUE || !pkt) { if (dts != AV_NOPTS_VALUE || !pkt) {
pthread_mutex_lock(&sch->schedule_lock); pthread_mutex_lock(&sch->schedule_lock);
@ -2142,14 +2122,14 @@ static int mux_done(Scheduler *sch, unsigned mux_idx)
pthread_mutex_unlock(&sch->schedule_lock); pthread_mutex_unlock(&sch->schedule_lock);
pthread_mutex_lock(&sch->finish_lock); pthread_mutex_lock(&sch->mux_done_lock);
av_assert0(sch->nb_mux_done < sch->nb_mux); av_assert0(sch->nb_mux_done < sch->nb_mux);
sch->nb_mux_done++; sch->nb_mux_done++;
pthread_cond_signal(&sch->finish_cond); pthread_cond_signal(&sch->mux_done_cond);
pthread_mutex_unlock(&sch->finish_lock); pthread_mutex_unlock(&sch->mux_done_lock);
return 0; return 0;
} }
@ -2542,12 +2522,8 @@ static void *task_wrapper(void *arg)
// EOF is considered normal termination // EOF is considered normal termination
if (ret == AVERROR_EOF) if (ret == AVERROR_EOF)
ret = 0; ret = 0;
if (ret < 0) { if (ret < 0)
pthread_mutex_lock(&sch->finish_lock); atomic_store(&sch->task_failed, 1);
sch->task_failed = 1;
pthread_cond_signal(&sch->finish_cond);
pthread_mutex_unlock(&sch->finish_lock);
}
av_log(task->func_arg, ret < 0 ? AV_LOG_ERROR : AV_LOG_VERBOSE, av_log(task->func_arg, ret < 0 ? AV_LOG_ERROR : AV_LOG_VERBOSE,
"Terminating thread with return code %d (%s)\n", ret, "Terminating thread with return code %d (%s)\n", ret,

View file

@ -355,7 +355,7 @@ enum DemuxSendFlags {
* @retval "non-negative value" success * @retval "non-negative value" success
* @retval AVERROR_EOF all consumers for the stream are done * @retval AVERROR_EOF all consumers for the stream are done
* @retval AVERROR_EXIT all consumers are done, should terminate demuxing * @retval AVERROR_EXIT all consumers are done, should terminate demuxing
* @retval "another negative error code" other failure * @retval "another negative error code" other failure
*/ */
int sch_demux_send(Scheduler *sch, unsigned demux_idx, struct AVPacket *pkt, int sch_demux_send(Scheduler *sch, unsigned demux_idx, struct AVPacket *pkt,
unsigned flags); unsigned flags);
@ -436,7 +436,7 @@ void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx)
* *
* @retval "non-negative value" success * @retval "non-negative value" success
* @retval AVERROR_EOF all consumers are done * @retval AVERROR_EOF all consumers are done
* @retval "another negative error code" other failure * @retval "another negative error code" other failure
*/ */
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx,
struct AVFrame *frame); struct AVFrame *frame);

View file

@ -44,20 +44,14 @@ static inline int err_merge(int err0, int err1)
return (err0 < 0) ? err0 : FFMIN(err1, 0); return (err0 < 0) ? err0 : FFMIN(err1, 0);
} }
/** static inline void pkt_move(void *dst, void *src)
* Wrapper calling av_frame_side_data_clone() in a loop for all source entries.
* It does not clear dst beforehand. */
static inline int clone_side_data(AVFrameSideData ***dst, int *nb_dst,
AVFrameSideData * const *src, int nb_src,
unsigned int flags)
{ {
for (int i = 0; i < nb_src; i++) { av_packet_move_ref(dst, src);
int ret = av_frame_side_data_clone(dst, nb_dst, src[i], flags); }
if (ret < 0)
return ret;
}
return 0; static inline void frame_move(void *dst, void *src)
{
av_frame_move_ref(dst, src);
} }
#endif // FFTOOLS_FFMPEG_UTILS_H #endif // FFTOOLS_FFMPEG_UTILS_H

View file

@ -388,6 +388,7 @@ static const struct TextureFormatEntry {
{ AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV }, { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
{ AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 }, { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
{ AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY }, { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
{ AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
}; };
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg) static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
@ -894,7 +895,7 @@ static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_B
format == AV_PIX_FMT_BGR32 || format == AV_PIX_FMT_BGR32 ||
format == AV_PIX_FMT_BGR32_1) format == AV_PIX_FMT_BGR32_1)
*sdl_blendmode = SDL_BLENDMODE_BLEND; *sdl_blendmode = SDL_BLENDMODE_BLEND;
for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map); i++) { for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
if (format == sdl_texture_format_map[i].format) { if (format == sdl_texture_format_map[i].format) {
*sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt; *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
return; return;
@ -940,6 +941,7 @@ static enum AVColorSpace sdl_supported_color_spaces[] = {
AVCOL_SPC_BT709, AVCOL_SPC_BT709,
AVCOL_SPC_BT470BG, AVCOL_SPC_BT470BG,
AVCOL_SPC_SMPTE170M, AVCOL_SPC_SMPTE170M,
AVCOL_SPC_UNSPECIFIED,
}; };
static void set_sdl_yuv_conversion_mode(AVFrame *frame) static void set_sdl_yuv_conversion_mode(AVFrame *frame)
@ -1859,6 +1861,7 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
{ {
enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)]; enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
char sws_flags_str[512] = ""; char sws_flags_str[512] = "";
char buffersrc_args[256];
int ret; int ret;
AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL; AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
AVCodecParameters *codecpar = is->video_st->codecpar; AVCodecParameters *codecpar = is->video_st->codecpar;
@ -1872,13 +1875,14 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
for (i = 0; i < renderer_info.num_texture_formats; i++) { for (i = 0; i < renderer_info.num_texture_formats; i++) {
for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map); j++) { for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) { if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format; pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
break; break;
} }
} }
} }
pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
while ((e = av_dict_iterate(sws_dict, e))) { while ((e = av_dict_iterate(sws_dict, e))) {
if (!strcmp(e->key, "sws_flags")) { if (!strcmp(e->key, "sws_flags")) {
@ -1891,49 +1895,36 @@ static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const c
graph->scale_sws_opts = av_strdup(sws_flags_str); graph->scale_sws_opts = av_strdup(sws_flags_str);
snprintf(buffersrc_args, sizeof(buffersrc_args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d:"
"colorspace=%d:range=%d",
frame->width, frame->height, frame->format,
is->video_st->time_base.num, is->video_st->time_base.den,
codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1),
frame->colorspace, frame->color_range);
if (fr.num && fr.den)
av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
filt_src = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffer"), if ((ret = avfilter_graph_create_filter(&filt_src,
"ffplay_buffer"); avfilter_get_by_name("buffer"),
if (!filt_src) { "ffplay_buffer", buffersrc_args, NULL,
ret = AVERROR(ENOMEM); graph)) < 0)
goto fail; goto fail;
}
par->format = frame->format;
par->time_base = is->video_st->time_base;
par->width = frame->width;
par->height = frame->height;
par->sample_aspect_ratio = codecpar->sample_aspect_ratio;
par->color_space = frame->colorspace;
par->color_range = frame->color_range;
par->frame_rate = fr;
par->hw_frames_ctx = frame->hw_frames_ctx; par->hw_frames_ctx = frame->hw_frames_ctx;
ret = av_buffersrc_parameters_set(filt_src, par); ret = av_buffersrc_parameters_set(filt_src, par);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
ret = avfilter_init_dict(filt_src, NULL); ret = avfilter_graph_create_filter(&filt_out,
avfilter_get_by_name("buffersink"),
"ffplay_buffersink", NULL, NULL, graph);
if (ret < 0) if (ret < 0)
goto fail; goto fail;
filt_out = avfilter_graph_alloc_filter(graph, avfilter_get_by_name("buffersink"), if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
"ffplay_buffersink");
if (!filt_out) {
ret = AVERROR(ENOMEM);
goto fail;
}
if ((ret = av_opt_set_array(filt_out, "pixel_formats", AV_OPT_SEARCH_CHILDREN,
0, nb_pix_fmts, AV_OPT_TYPE_PIXEL_FMT, pix_fmts)) < 0)
goto fail; goto fail;
if (!vk_renderer && if (!vk_renderer &&
(ret = av_opt_set_array(filt_out, "colorspaces", AV_OPT_SEARCH_CHILDREN, (ret = av_opt_set_int_list(filt_out, "color_spaces", sdl_supported_color_spaces, AVCOL_SPC_UNSPECIFIED, AV_OPT_SEARCH_CHILDREN)) < 0)
0, FF_ARRAY_ELEMS(sdl_supported_color_spaces),
AV_OPT_TYPE_INT, sdl_supported_color_spaces)) < 0)
goto fail;
ret = avfilter_init_dict(filt_out, NULL);
if (ret < 0)
goto fail; goto fail;
last_filter = filt_out; last_filter = filt_out;
@ -2003,6 +1994,8 @@ fail:
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format) static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
{ {
static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
int sample_rates[2] = { 0, -1 };
AVFilterContext *filt_asrc = NULL, *filt_asink = NULL; AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
char aresample_swr_opts[512] = ""; char aresample_swr_opts[512] = "";
const AVDictionaryEntry *e = NULL; const AVDictionaryEntry *e = NULL;
@ -2036,28 +2029,30 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
if (ret < 0) if (ret < 0)
goto end; goto end;
filt_asink = avfilter_graph_alloc_filter(is->agraph, avfilter_get_by_name("abuffersink"),
"ffplay_abuffersink");
if (!filt_asink) {
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = av_opt_set(filt_asink, "sample_formats", "s16", AV_OPT_SEARCH_CHILDREN)) < 0) ret = avfilter_graph_create_filter(&filt_asink,
avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
NULL, NULL, is->agraph);
if (ret < 0)
goto end;
if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
goto end;
if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
goto end; goto end;
if (force_output_format) { if (force_output_format) {
if ((ret = av_opt_set_array(filt_asink, "channel_layouts", AV_OPT_SEARCH_CHILDREN, av_bprint_clear(&bp);
0, 1, AV_OPT_TYPE_CHLAYOUT, &is->audio_tgt.ch_layout)) < 0) av_channel_layout_describe_bprint(&is->audio_tgt.ch_layout, &bp);
sample_rates [0] = is->audio_tgt.freq;
if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
goto end; goto end;
if ((ret = av_opt_set_array(filt_asink, "samplerates", AV_OPT_SEARCH_CHILDREN, if ((ret = av_opt_set(filt_asink, "ch_layouts", bp.str, AV_OPT_SEARCH_CHILDREN)) < 0)
0, 1, AV_OPT_TYPE_INT, &is->audio_tgt.freq)) < 0) goto end;
if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
goto end; goto end;
} }
ret = avfilter_init_dict(filt_asink, NULL);
if (ret < 0)
goto end;
if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0) if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
goto end; goto end;
@ -2612,11 +2607,6 @@ static int create_hwaccel(AVBufferRef **device_ctx)
if (type == AV_HWDEVICE_TYPE_NONE) if (type == AV_HWDEVICE_TYPE_NONE)
return AVERROR(ENOTSUP); return AVERROR(ENOTSUP);
if (!vk_renderer) {
av_log(NULL, AV_LOG_ERROR, "Vulkan renderer is not available\n");
return AVERROR(ENOTSUP);
}
ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev); ret = vk_renderer_get_hw_dev(vk_renderer, &vk_dev);
if (ret < 0) if (ret < 0)
return ret; return ret;

View file

@ -259,8 +259,8 @@ static int create_vk_by_hwcontext(VkRenderer *renderer,
ctx->get_proc_addr = hwctx->get_proc_addr; ctx->get_proc_addr = hwctx->get_proc_addr;
ctx->inst = hwctx->inst; ctx->inst = hwctx->inst;
ctx->placebo_vulkan = pl_vulkan_import(ctx->vk_log,
struct pl_vulkan_import_params import_params = { pl_vulkan_import_params(
.instance = hwctx->inst, .instance = hwctx->inst,
.get_proc_addr = hwctx->get_proc_addr, .get_proc_addr = hwctx->get_proc_addr,
.phys_device = hwctx->phys_dev, .phys_device = hwctx->phys_dev,
@ -272,36 +272,18 @@ static int create_vk_by_hwcontext(VkRenderer *renderer,
.unlock_queue = hwctx_unlock_queue, .unlock_queue = hwctx_unlock_queue,
.queue_ctx = dev, .queue_ctx = dev,
.queue_graphics = { .queue_graphics = {
.index = VK_QUEUE_FAMILY_IGNORED, .index = hwctx->queue_family_index,
.count = 0, .count = hwctx->nb_graphics_queues,
}, },
.queue_compute = { .queue_compute = {
.index = VK_QUEUE_FAMILY_IGNORED, .index = hwctx->queue_family_comp_index,
.count = 0, .count = hwctx->nb_comp_queues,
}, },
.queue_transfer = { .queue_transfer = {
.index = VK_QUEUE_FAMILY_IGNORED, .index = hwctx->queue_family_tx_index,
.count = 0, .count = hwctx->nb_tx_queues,
}, },
}; ));
for (int i = 0; i < hwctx->nb_qf; i++) {
const AVVulkanDeviceQueueFamily *qf = &hwctx->qf[i];
if (qf->flags & VK_QUEUE_GRAPHICS_BIT) {
import_params.queue_graphics.index = qf->idx;
import_params.queue_graphics.count = qf->num;
}
if (qf->flags & VK_QUEUE_COMPUTE_BIT) {
import_params.queue_compute.index = qf->idx;
import_params.queue_compute.count = qf->num;
}
if (qf->flags & VK_QUEUE_TRANSFER_BIT) {
import_params.queue_transfer.index = qf->idx;
import_params.queue_transfer.count = qf->num;
}
}
ctx->placebo_vulkan = pl_vulkan_import(ctx->vk_log, &import_params);
if (!ctx->placebo_vulkan) if (!ctx->placebo_vulkan)
return AVERROR_EXTERNAL; return AVERROR_EXTERNAL;
@ -409,8 +391,8 @@ static int create_vk_by_placebo(VkRenderer *renderer,
device_ctx->user_opaque = ctx; device_ctx->user_opaque = ctx;
vk_dev_ctx = device_ctx->hwctx; vk_dev_ctx = device_ctx->hwctx;
vk_dev_ctx->lock_queue = placebo_lock_queue; vk_dev_ctx->lock_queue = placebo_lock_queue,
vk_dev_ctx->unlock_queue = placebo_unlock_queue; vk_dev_ctx->unlock_queue = placebo_unlock_queue;
vk_dev_ctx->get_proc_addr = ctx->placebo_instance->get_proc_addr; vk_dev_ctx->get_proc_addr = ctx->placebo_instance->get_proc_addr;
@ -426,38 +408,21 @@ static int create_vk_by_placebo(VkRenderer *renderer,
vk_dev_ctx->enabled_dev_extensions = ctx->placebo_vulkan->extensions; vk_dev_ctx->enabled_dev_extensions = ctx->placebo_vulkan->extensions;
vk_dev_ctx->nb_enabled_dev_extensions = ctx->placebo_vulkan->num_extensions; vk_dev_ctx->nb_enabled_dev_extensions = ctx->placebo_vulkan->num_extensions;
int nb_qf = 0; vk_dev_ctx->queue_family_index = ctx->placebo_vulkan->queue_graphics.index;
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) { vk_dev_ctx->nb_graphics_queues = ctx->placebo_vulkan->queue_graphics.count;
.idx = ctx->placebo_vulkan->queue_graphics.index,
.num = ctx->placebo_vulkan->queue_graphics.count, vk_dev_ctx->queue_family_tx_index = ctx->placebo_vulkan->queue_transfer.index;
.flags = VK_QUEUE_GRAPHICS_BIT, vk_dev_ctx->nb_tx_queues = ctx->placebo_vulkan->queue_transfer.count;
};
nb_qf++; vk_dev_ctx->queue_family_comp_index = ctx->placebo_vulkan->queue_compute.index;
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) { vk_dev_ctx->nb_comp_queues = ctx->placebo_vulkan->queue_compute.count;
.idx = ctx->placebo_vulkan->queue_transfer.index,
.num = ctx->placebo_vulkan->queue_transfer.count,
.flags = VK_QUEUE_TRANSFER_BIT,
};
nb_qf++;
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) {
.idx = ctx->placebo_vulkan->queue_compute.index,
.num = ctx->placebo_vulkan->queue_compute.count,
.flags = VK_QUEUE_COMPUTE_BIT,
};
nb_qf++;
ret = get_decode_queue(renderer, &decode_index, &decode_count); ret = get_decode_queue(renderer, &decode_index, &decode_count);
if (ret < 0) if (ret < 0)
return ret; return ret;
vk_dev_ctx->qf[nb_qf] = (AVVulkanDeviceQueueFamily) { vk_dev_ctx->queue_family_decode_index = decode_index;
.idx = decode_index, vk_dev_ctx->nb_decode_queues = decode_count;
.num = decode_count,
.flags = VK_QUEUE_VIDEO_DECODE_BIT_KHR,
};
nb_qf++;
vk_dev_ctx->nb_qf = nb_qf;
ret = av_hwdevice_ctx_init(ctx->hw_device_ref); ret = av_hwdevice_ctx_init(ctx->hw_device_ref);
if (ret < 0) if (ret < 0)
@ -732,7 +697,6 @@ static int display(VkRenderer *renderer, AVFrame *frame)
struct pl_frame target = {0}; struct pl_frame target = {0};
RendererContext *ctx = (RendererContext *) renderer; RendererContext *ctx = (RendererContext *) renderer;
int ret = 0; int ret = 0;
struct pl_color_space hint = {0};
ret = convert_frame(renderer, frame); ret = convert_frame(renderer, frame);
if (ret < 0) if (ret < 0)
@ -745,8 +709,6 @@ static int display(VkRenderer *renderer, AVFrame *frame)
return AVERROR_EXTERNAL; return AVERROR_EXTERNAL;
} }
pl_color_space_from_avframe(&hint, frame);
pl_swapchain_colorspace_hint(ctx->swapchain, &hint);
if (!pl_swapchain_start_frame(ctx->swapchain, &swap_frame)) { if (!pl_swapchain_start_frame(ctx->swapchain, &swap_frame)) {
av_log(NULL, AV_LOG_ERROR, "start frame failed\n"); av_log(NULL, AV_LOG_ERROR, "start frame failed\n");
ret = AVERROR_EXTERNAL; ret = AVERROR_EXTERNAL;

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,30 +0,0 @@
/*
* Copyright (c) 2018-2025 - softworkz
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef FFTOOLS_GRAPH_GRAPHPRINT_H
#define FFTOOLS_GRAPH_GRAPHPRINT_H
#include "fftools/ffmpeg.h"
int print_filtergraphs(FilterGraph **graphs, int nb_graphs, InputFile **ifiles, int nb_ifiles, OutputFile **ofiles, int nb_ofiles);
int print_filtergraph(FilterGraph *fg, AVFilterGraph *graph);
#endif /* FFTOOLS_GRAPH_GRAPHPRINT_H */

131
fftools/objpool.c Normal file
View file

@ -0,0 +1,131 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavcodec/packet.h"
#include "libavutil/common.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "objpool.h"
struct ObjPool {
void *pool[32];
unsigned int pool_count;
ObjPoolCBAlloc alloc;
ObjPoolCBReset reset;
ObjPoolCBFree free;
};
ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
ObjPoolCBFree cb_free)
{
ObjPool *op = av_mallocz(sizeof(*op));
if (!op)
return NULL;
op->alloc = cb_alloc;
op->reset = cb_reset;
op->free = cb_free;
return op;
}
void objpool_free(ObjPool **pop)
{
ObjPool *op = *pop;
if (!op)
return;
for (unsigned int i = 0; i < op->pool_count; i++)
op->free(&op->pool[i]);
av_freep(pop);
}
int objpool_get(ObjPool *op, void **obj)
{
if (op->pool_count) {
*obj = op->pool[--op->pool_count];
op->pool[op->pool_count] = NULL;
} else
*obj = op->alloc();
return *obj ? 0 : AVERROR(ENOMEM);
}
void objpool_release(ObjPool *op, void **obj)
{
if (!*obj)
return;
op->reset(*obj);
if (op->pool_count < FF_ARRAY_ELEMS(op->pool))
op->pool[op->pool_count++] = *obj;
else
op->free(obj);
*obj = NULL;
}
static void *alloc_packet(void)
{
return av_packet_alloc();
}
static void *alloc_frame(void)
{
return av_frame_alloc();
}
static void reset_packet(void *obj)
{
av_packet_unref(obj);
}
static void reset_frame(void *obj)
{
av_frame_unref(obj);
}
static void free_packet(void **obj)
{
AVPacket *pkt = *obj;
av_packet_free(&pkt);
*obj = NULL;
}
static void free_frame(void **obj)
{
AVFrame *frame = *obj;
av_frame_free(&frame);
*obj = NULL;
}
ObjPool *objpool_alloc_packets(void)
{
return objpool_alloc(alloc_packet, reset_packet, free_packet);
}
ObjPool *objpool_alloc_frames(void)
{
return objpool_alloc(alloc_frame, reset_frame, free_frame);
}

View file

@ -16,24 +16,22 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/ */
#ifndef AVCODEC_APV_DSP_H #ifndef FFTOOLS_OBJPOOL_H
#define AVCODEC_APV_DSP_H #define FFTOOLS_OBJPOOL_H
#include <stddef.h> typedef struct ObjPool ObjPool;
#include <stdint.h>
typedef void* (*ObjPoolCBAlloc)(void);
typedef void (*ObjPoolCBReset)(void *);
typedef void (*ObjPoolCBFree)(void **);
typedef struct APVDSPContext { void objpool_free(ObjPool **op);
void (*decode_transquant)(void *output, ObjPool *objpool_alloc(ObjPoolCBAlloc cb_alloc, ObjPoolCBReset cb_reset,
ptrdiff_t pitch, ObjPoolCBFree cb_free);
const int16_t *input, ObjPool *objpool_alloc_packets(void);
const int16_t *qmatrix, ObjPool *objpool_alloc_frames(void);
int bit_depth,
int qp_shift);
} APVDSPContext;
void ff_apv_dsp_init(APVDSPContext *dsp); int objpool_get(ObjPool *op, void **obj);
void objpool_release(ObjPool *op, void **obj);
void ff_apv_dsp_init_x86_64(APVDSPContext *dsp); #endif // FFTOOLS_OBJPOOL_H
#endif /* AVCODEC_APV_DSP_H */

Some files were not shown because too many files have changed in this diff Show more