Merge branch 'ytdl-org:master' into fix-npo-support

Bart Broere 2024-02-25 09:42:26 +01:00 committed by GitHub
commit c409a8c54b
72 changed files with 6368 additions and 1389 deletions


@ -1,81 +1,476 @@
name: CI
env:
all-cpython-versions: 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7, 3.8, 3.9, 3.10, 3.11, 3.12
main-cpython-versions: 2.7, 3.2, 3.5, 3.9, 3.11
pypy-versions: pypy-2.7, pypy-3.6, pypy-3.7
cpython-versions: main
test-set: core
# Python beta version to be built using pyenv before setup-python support
# Must also be included in all-cpython-versions
next: 3.13
on:
push:
# push inputs aren't known to GitHub
inputs:
cpython-versions:
type: string
default: all
test-set:
type: string
default: core
pull_request:
# pull_request inputs aren't known to GitHub
inputs:
cpython-versions:
type: string
default: main
test-set:
type: string
default: both
workflow_dispatch:
inputs:
cpython-versions:
type: choice
description: CPython versions (main = 2.7, 3.2, 3.5, 3.9, 3.11)
options:
- all
- main
required: true
default: main
test-set:
type: choice
description: core, download
options:
- both
- core
- download
required: true
default: both
permissions:
contents: read
jobs:
select:
name: Select tests from inputs
runs-on: ubuntu-latest
outputs:
cpython-versions: ${{ steps.run.outputs.cpython-versions }}
test-set: ${{ steps.run.outputs.test-set }}
own-pip-versions: ${{ steps.run.outputs.own-pip-versions }}
steps:
# push and pull_request inputs aren't known to GitHub (pt3)
- name: Set push defaults
if: ${{ github.event_name == 'push' }}
env:
cpython-versions: all
test-set: core
run: |
echo "cpython-versions=${{env.cpython-versions}}" >> "$GITHUB_ENV"
echo "test_set=${{env.test_set}}" >> "$GITHUB_ENV"
- name: Get pull_request inputs
if: ${{ github.event_name == 'pull_request' }}
env:
cpython-versions: main
test-set: both
run: |
echo "cpython-versions=${{env.cpython-versions}}" >> "$GITHUB_ENV"
echo "test_set=${{env.test_set}}" >> "$GITHUB_ENV"
- name: Make version array
id: run
run: |
# Make a JSON Array from comma/space-separated string (no extra escaping)
json_list() { \
ret=""; IFS="${IFS},"; set -- $*; \
for a in "$@"; do \
ret=$(printf '%s"%s"' "${ret}${ret:+, }" "$a"); \
done; \
printf '[%s]' "$ret"; }
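# e.g.: json_list 2.7, 3.5 -> ["2.7", "3.5"] (illustrative)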
tests="${{ inputs.test-set || env.test-set }}"
[ $tests = both ] && tests="core download"
printf 'test-set=%s\n' "$(json_list $tests)" >> "$GITHUB_OUTPUT"
versions="${{ inputs.cpython-versions || env.cpython-versions }}"
if [ "$versions" = all ]; then \
versions="${{ env.all-cpython-versions }}"; else \
versions="${{ env.main-cpython-versions }}"; \
fi
printf 'cpython-versions=%s\n' \
"$(json_list ${versions}${versions:+, }${{ env.pypy-versions }})" >> "$GITHUB_OUTPUT"
# versions with a special get-pip.py in a per-version subdirectory
printf 'own-pip-versions=%s\n' \
"$(json_list 2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6)" >> "$GITHUB_OUTPUT"
tests:
name: Run tests
needs: select
permissions:
contents: read
packages: write
runs-on: ${{ matrix.os }}
env:
PIP: python -m pip
PIP_DISABLE_PIP_VERSION_CHECK: true
PIP_NO_PYTHON_VERSION_WARNING: true
strategy:
fail-fast: true
matrix:
os: [ubuntu-20.04]
python-version: ${{ fromJSON(needs.select.outputs.cpython-versions) }}
python-impl: [cpython]
ytdl-test-set: ${{ fromJSON(needs.select.outputs.test-set) }}
run-tests-ext: [sh]
include:
# python 3.2 is only available on windows via setup-python
- os: windows-2019
python-version: 3.4
python-impl: cpython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
run-tests-ext: bat
- os: windows-2019
python-version: 3.4
python-impl: cpython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
run-tests-ext: bat
# jython
- os: ubuntu-20.04
python-version: 2.7
python-impl: jython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'core') && 'core' || 'nocore' }}
run-tests-ext: sh
- os: ubuntu-20.04
python-version: 2.7
python-impl: jython
ytdl-test-set: ${{ contains(needs.select.outputs.test-set, 'download') && 'download' || 'nodownload' }}
run-tests-ext: sh
steps:
- name: Prepare Linux
if: ${{ startswith(matrix.os, 'ubuntu') }}
shell: bash
run: |
# apt in runner, if needed, may not be up-to-date
sudo apt-get update
- name: Checkout
uses: actions/checkout@v3
#-------- Python 3 -----
- name: Set up supported Python ${{ matrix.python-version }}
id: setup-python
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version != '2.6' && matrix.python-version != '2.7' && matrix.python-version != env.next }}
# wrap broken actions/setup-python@v4
# NB may run apt-get install in Linux
uses: ytdl-org/setup-python@v1
with:
python-version: ${{ matrix.python-version }}
cache-build: true
allow-build: info
- name: Locate supported Python ${{ matrix.python-version }}
if: ${{ env.pythonLocation }}
shell: bash
run: |
echo "PYTHONHOME=${pythonLocation}" >> "$GITHUB_ENV"
export expected="${{ steps.setup-python.outputs.python-path }}"
dirname() { printf '%s\n' \
'import os, sys' \
'print(os.path.dirname(sys.argv[1]))' \
| ${expected} - "$1"; }
expd="$(dirname "$expected")"
export python="$(command -v python)"
[ "$expd" = "$(dirname "$python")" ] || echo "PATH=$expd:${PATH}" >> "$GITHUB_ENV"
[ -x "$python" ] || printf '%s\n' \
'import os' \
'exp = os.environ["expected"]' \
'python = os.environ["python"]' \
'exps = os.path.split(exp)' \
'if python and (os.path.dirname(python) == exp[0]):' \
' exit(0)' \
'exps[1] = "python" + os.path.splitext(exps[1])[1]' \
'python = os.path.join(*exps)' \
'try:' \
' os.symlink(exp, python)' \
'except AttributeError:' \
' os.rename(exp, python)' \
| ${expected} -
printf '%s\n' \
'import sys' \
'print(sys.path)' \
| ${expected} -
#-------- Python next (was 3.12) -
- name: Set up CPython 3.next environment
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
shell: bash
run: |
PYENV_ROOT=$HOME/.local/share/pyenv
echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
- name: Cache Python 3.next
id: cachenext
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
uses: actions/cache@v3
with:
key: python-${{ env.next }}
path: |
${{ env.PYENV_ROOT }}
- name: Build and set up Python 3.next
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next && ! steps.cachenext.outputs.cache-hit }}
# dl and build locally
shell: bash
run: |
# Install build environment
sudo apt-get install -y build-essential llvm libssl-dev tk-dev \
libncursesw5-dev libreadline-dev libsqlite3-dev \
libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
# Download PyEnv from its GitHub repository.
export PYENV_ROOT=${{ env.PYENV_ROOT }}
export PATH=$PYENV_ROOT/bin:$PATH
git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
pyenv install ${{ env.next }}
- name: Locate Python 3.next
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == env.next }}
shell: bash
run: |
PYTHONHOME="$(echo "${{ env.PYENV_ROOT }}/versions/${{ env.next }}."*)"
test -n "$PYTHONHOME"
echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
#-------- Python 2.7 --
- name: Set up Python 2.7
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.7' }}
# install 2.7
shell: bash
run: |
sudo apt-get install -y python2 python-is-python2
echo "PYTHONHOME=/usr" >> "$GITHUB_ENV"
#-------- Python 2.6 --
- name: Set up Python 2.6 environment
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
shell: bash
run: |
openssl_name=openssl-1.0.2u
echo "openssl_name=${openssl_name}" >> "$GITHUB_ENV"
openssl_dir=$HOME/.local/opt/$openssl_name
echo "openssl_dir=${openssl_dir}" >> "$GITHUB_ENV"
PYENV_ROOT=$HOME/.local/share/pyenv
echo "PYENV_ROOT=${PYENV_ROOT}" >> "$GITHUB_ENV"
sudo apt-get install -y openssl ca-certificates
- name: Cache Python 2.6
id: cache26
if: ${{ matrix.python-version == '2.6' }}
uses: actions/cache@v3
with:
key: python-2.6.9
path: |
${{ env.openssl_dir }}
${{ env.PYENV_ROOT }}
- name: Build and set up Python 2.6
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' && ! steps.cache26.outputs.cache-hit }}
# dl and build locally
shell: bash
run: |
# Install build environment
sudo apt-get install -y build-essential llvm libssl-dev tk-dev \
libncursesw5-dev libreadline-dev libsqlite3-dev \
libffi-dev xz-utils zlib1g-dev libbz2-dev liblzma-dev
# Download and install OpenSSL 1.0.2, back in time
openssl_name=${{ env.openssl_name }}
openssl_targz=${openssl_name}.tar.gz
openssl_dir=${{ env.openssl_dir }}
openssl_inc=$openssl_dir/include
openssl_lib=$openssl_dir/lib
openssl_ssl=$openssl_dir/ssl
curl -L "https://www.openssl.org/source/$openssl_targz" -o $openssl_targz
tar -xf $openssl_targz
( cd $openssl_name; \
./config --prefix=$openssl_dir --openssldir=${openssl_dir}/ssl \
--libdir=lib -Wl,-rpath=${openssl_dir}/lib shared zlib-dynamic && \
make && \
make install )
rm -rf $openssl_name
rmdir $openssl_ssl/certs && ln -s /etc/ssl/certs $openssl_ssl/certs
# Download PyEnv from its GitHub repository.
export PYENV_ROOT=${{ env.PYENV_ROOT }}
export PATH=$PYENV_ROOT/bin:$PATH
git clone "https://github.com/pyenv/pyenv.git" "$PYENV_ROOT"
# Prevent pyenv build trying (and failing) to update pip
export GET_PIP=get-pip-2.6.py
echo 'import sys; sys.exit(0)' > ${GET_PIP}
GET_PIP=$(realpath $GET_PIP)
# Build and install Python
export CFLAGS="-I$openssl_inc"
export LDFLAGS="-L$openssl_lib"
export LD_LIBRARY_PATH="$openssl_lib"
pyenv install 2.6.9
- name: Locate Python 2.6
if: ${{ matrix.python-impl == 'cpython' && matrix.python-version == '2.6' }}
shell: bash
run: |
PYTHONHOME="${{ env.PYENV_ROOT }}/versions/2.6.9"
echo "PYTHONHOME=$PYTHONHOME" >> "$GITHUB_ENV"
echo "PATH=${PYTHONHOME}/bin:$PATH" >> "$GITHUB_ENV"
echo "LD_LIBRARY_PATH=${{ env.openssl_dir }}/lib${LD_LIBRARY_PATH:+:}${LD_LIBRARY_PATH}" >> "$GITHUB_ENV"
#-------- Jython ------
- name: Set up Java 8
if: ${{ matrix.python-impl == 'jython' }}
uses: actions/setup-java@v3
with:
java-version: 8
distribution: 'zulu'
- name: Setup Jython environment
if: ${{ matrix.python-impl == 'jython' }}
shell: bash
run: |
echo "JYTHON_ROOT=${HOME}/jython" >> "$GITHUB_ENV"
echo "PIP=pip" >> "$GITHUB_ENV"
- name: Cache Jython
id: cachejy
if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' }}
uses: actions/cache@v3
with:
# 2.7.3 now available, may solve SNI issue
key: jython-2.7.1
path: |
${{ env.JYTHON_ROOT }}
- name: Install Jython
if: ${{ matrix.python-impl == 'jython' && matrix.python-version == '2.7' && ! steps.cachejy.outputs.cache-hit }}
shell: bash
run: |
JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
curl -L "https://repo1.maven.org/maven2/org/python/jython-installer/2.7.1/jython-installer-2.7.1.jar" -o jython-installer.jar
java -jar jython-installer.jar -s -d "${JYTHON_ROOT}"
echo "${JYTHON_ROOT}/bin" >> "$GITHUB_PATH"
- name: Set up cached Jython
if: ${{ steps.cachejy.outputs.cache-hit }}
shell: bash
run: |
JYTHON_ROOT="${{ env.JYTHON_ROOT }}"
echo "${JYTHON_ROOT}/bin" >> $GITHUB_PATH
- name: Install supporting Python 2.7 if possible
if: ${{ steps.cachejy.outputs.cache-hit }}
shell: bash
run: |
sudo apt-get install -y python2.7 || true
#-------- pip ---------
- name: Set up supported Python ${{ matrix.python-version }} pip
if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || matrix.python-version == '2.7' }}
# This step may run in either Linux or Windows
shell: bash
run: |
echo "$PATH"
echo "$PYTHONHOME"
# curl is available on both Windows and Linux, -L follows redirects, -O gets name
python -m ensurepip || python -m pip --version || { \
get_pip="${{ contains(needs.select.outputs.own-pip-versions, matrix.python-version) && format('{0}/', matrix.python-version) || '' }}"; \
curl -L -O "https://bootstrap.pypa.io/pip/${get_pip}get-pip.py"; \
python get-pip.py; }
- name: Set up Python 2.6 pip
if: ${{ matrix.python-version == '2.6' }}
shell: bash
run: |
python -m pip --version || { \
curl -L -O "https://bootstrap.pypa.io/pip/2.6/get-pip.py"; \
curl -L -O "https://files.pythonhosted.org/packages/ac/95/a05b56bb975efa78d3557efa36acaf9cf5d2fd0ee0062060493687432e03/pip-9.0.3-py2.py3-none-any.whl"; \
python get-pip.py --no-setuptools --no-wheel pip-9.0.3-py2.py3-none-any.whl; }
# work-around to invoke pip module on 2.6: https://bugs.python.org/issue2751
echo "PIP=python -m pip.__main__" >> "$GITHUB_ENV"
- name: Set up other Python ${{ matrix.python-version }} pip
if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
shell: bash
run: |
python -m pip --version || { \
curl -L -O "https://bootstrap.pypa.io/pip/3.2/get-pip.py"; \
curl -L -O "https://files.pythonhosted.org/packages/b2/d0/cd115fe345dd6f07ec1c780020a7dfe74966fceeb171e0f20d1d4905b0b7/pip-7.1.2-py2.py3-none-any.whl"; \
python get-pip.py --no-setuptools --no-wheel pip-7.1.2-py2.py3-none-any.whl; }
#-------- unittest ----
- name: Upgrade Unittest for Python 2.6
if: ${{ matrix.python-version == '2.6' }}
shell: bash
run: |
# Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
$PIP -qq show unittest2 || { \
for u in "65/26/32b8464df2a97e6dd1b656ed26b2c194606c16fe163c695a992b36c11cdf/six-1.13.0-py2.py3-none-any.whl" \
"f2/94/3af39d34be01a24a6e65433d19e107099374224905f1e0cc6bbe1fd22a2f/argparse-1.4.0-py2.py3-none-any.whl" \
"c7/a3/c5da2a44c85bfbb6eebcfc1dde24933f8704441b98fdde6528f4831757a6/linecache2-1.0.0-py2.py3-none-any.whl" \
"17/0a/6ac05a3723017a967193456a2efa0aa9ac4b51456891af1e2353bb9de21e/traceback2-1.4.0-py2.py3-none-any.whl" \
"72/20/7f0f433060a962200b7272b8c12ba90ef5b903e218174301d0abfd523813/unittest2-1.1.0-py2.py3-none-any.whl"; do \
curl -L -O "https://files.pythonhosted.org/packages/${u}"; \
$PIP install ${u##*/}; \
done; }
# make tests use unittest2
for test in ./test/test_*.py ./test/helper.py; do
sed -r -i -e '/^import unittest$/s/test/test2 as unittest/' "$test"
done
#-------- nose --------
- name: Install nose for Python ${{ matrix.python-version }}
if: ${{ (matrix.python-version != '3.2' && steps.setup-python.outputs.python-path) || (matrix.python-impl == 'cpython' && (matrix.python-version == '2.7' || matrix.python-version == env.next)) }}
shell: bash
run: |
echo "$PATH"
echo "$PYTHONHOME"
# Use PyNose for recent Pythons instead of Nose
py3ver="${{ matrix.python-version }}"
py3ver=${py3ver#3.}
[ "$py3ver" != "${{ matrix.python-version }}" ] && py3ver=${py3ver%.*} || py3ver=0
[ "$py3ver" -ge 9 ] && nose=pynose || nose=nose
$PIP -qq show $nose || $PIP install $nose
- name: Install nose for other Python 2
if: ${{ matrix.python-impl == 'jython' || (matrix.python-impl == 'cpython' && matrix.python-version == '2.6') }}
shell: bash
run: |
# Work around deprecation of support for non-SNI clients at PyPI CDN (see https://status.python.org/incidents/hzmjhqsdjqgb)
$PIP -qq show nose || { \
curl -L -O "https://files.pythonhosted.org/packages/99/4f/13fb671119e65c4dce97c60e67d3fd9e6f7f809f2b307e2611f4701205cb/nose-1.3.7-py2-none-any.whl"; \
$PIP install nose-1.3.7-py2-none-any.whl; }
- name: Install nose for other Python 3
if: ${{ matrix.python-version == '3.2' && steps.setup-python.outputs.python-path }}
shell: bash
run: |
$PIP -qq show nose || { \
curl -L -O "https://files.pythonhosted.org/packages/15/d8/dd071918c040f50fa1cf80da16423af51ff8ce4a0f2399b7bf8de45ac3d9/nose-1.3.7-py3-none-any.whl"; \
$PIP install nose-1.3.7-py3-none-any.whl; }
- name: Set up nosetest test
if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
shell: bash
run: |
# set PYTHON_VER
PYTHON_VER=${{ matrix.python-version }}
[ "${PYTHON_VER#*-}" != "$PYTHON_VER" ] || PYTHON_VER="${{ matrix.python-impl }}-${PYTHON_VER}"
echo "PYTHON_VER=$PYTHON_VER" >> "$GITHUB_ENV"
echo "PYTHON_IMPL=${{ matrix.python-impl }}" >> "$GITHUB_ENV"
# define a test to validate the Python version used by nosetests
printf '%s\n' \
'from __future__ import unicode_literals' \
'import sys, os, platform' \
'try:' \
' import unittest2 as unittest' \
'except ImportError:' \
' import unittest' \
'class TestPython(unittest.TestCase):' \
' def setUp(self):' \
' self.ver = os.environ["PYTHON_VER"].split("-")' \
' def test_python_ver(self):' \
' self.assertEqual(["%d" % v for v in sys.version_info[:2]], self.ver[-1].split(".")[:2])' \
' self.assertTrue(sys.version.startswith(self.ver[-1]))' \
' self.assertIn(self.ver[0], ",".join((sys.version, platform.python_implementation())).lower())' \
' def test_python_impl(self):' \
' self.assertIn(platform.python_implementation().lower(), (os.environ["PYTHON_IMPL"], self.ver[0]))' \
> test/test_python.py
#-------- TESTS -------
- name: Run tests
if: ${{ contains(needs.select.outputs.test-set, matrix.ytdl-test-set ) }}
continue-on-error: ${{ matrix.ytdl-test-set == 'download' || matrix.python-impl == 'jython' }}
env:
YTDL_TEST_SET: ${{ matrix.ytdl-test-set }}
run: |
./devscripts/run_tests.${{ matrix.run-tests-ext }}
flake8:
name: Linter
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: 3.9
- name: Install flake8
run: pip install flake8
- name: Run flake8
run: flake8 .


@ -1000,6 +1000,8 @@ To run the test, simply invoke your favorite test runner, or execute a test file
python test/test_download.py
nosetests
For Python versions 3.6 and later, you can use [pynose](https://pypi.org/project/pynose/) to implement `nosetests`. The original [nose](https://pypi.org/project/nose/) has not been upgraded for 3.10 and later.
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
If you want to create a build of youtube-dl yourself, you'll need
@ -1091,7 +1093,7 @@ In any case, thank you very much for your contributions!
## youtube-dl coding conventions
This section introduces guidelines for writing idiomatic, robust and future-proof extractor code.
Extractors are very fragile by nature since they depend on the layout of the source data provided by 3rd party media hosters out of your control and this layout tends to change. As an extractor implementer your task is not only to write code that will extract media links and metadata correctly but also to minimize dependency on the source's layout and even to make the code foresee potential future changes and be ready for that. This is important because it will allow the extractor not to break on minor layout changes thus keeping old youtube-dl versions working. Even though this breakage issue is easily fixed by emitting a new version of youtube-dl with a fix incorporated, all the previous versions become broken in all repositories and distros' packages that may not be so prompt in fetching the update from us. Needless to say, some non rolling release distros may never receive an update at all.
@ -1331,7 +1333,7 @@ Wrap all extracted numeric data into safe functions from [`youtube_dl/utils.py`]
Use `url_or_none` for safe URL processing.
Use `traverse_obj` for safe metadata extraction from parsed JSON.
Use `unified_strdate` for uniform `upload_date` or any `YYYYMMDD` meta field extraction, `unified_timestamp` for uniform `timestamp` extraction, `parse_filesize` for `filesize` extraction, `parse_count` for count meta fields extraction, `parse_resolution`, `parse_duration` for `duration` extraction, `parse_age_limit` for `age_limit` extraction.
@ -1340,18 +1342,105 @@ Explore [`youtube_dl/utils.py`](https://github.com/ytdl-org/youtube-dl/blob/mast
#### More examples
##### Safely extract optional description from parsed JSON
When processing complex JSON, as often returned by site API requests or stashed in web pages for "hydration", you can use the `traverse_obj()` utility function to handle multiple fallback values and to ensure the expected type of metadata items. The function's docstring defines how the function works; also review usage in the codebase for more examples.
In this example, a text `description`, or `None`, is pulled from the `.result.video[0].summary` member of the parsed JSON `response`, if available.
```python
description = traverse_obj(response, ('result', 'video', 0, 'summary', T(compat_str)))
```
`T(...)` is a shorthand for a set literal; if you hate people who still run Python 2.6, `T(type_or_transformation)` could be written as a set literal `{type_or_transformation}`.
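As a minimal illustration (using the same `response` as above), these two calls are equivalent wherever set-literal syntax is available (Python 2.7 and 3.x):

```python
description = traverse_obj(response, ('result', 'video', 0, 'summary', T(compat_str)))
description = traverse_obj(response, ('result', 'video', 0, 'summary', {compat_str}))
```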
Some extractors use the older and less capable `try_get()` function in the same way.
```python
description = try_get(response, lambda x: x['result']['video'][0]['summary'], compat_str)
```
##### Safely extract more optional metadata
In this example, various optional metadata values are extracted from the `.result.video[0]` member of the parsed JSON `response`, which is expected to be a JS object, parsed into a `dict`, with no crash if that isn't so, or if any of the target values are missing or invalid.
```python
video = traverse_obj(response, ('result', 'video', 0, T(dict))) or {}
# formerly:
# video = try_get(response, lambda x: x['result']['video'][0], dict) or {}
description = video.get('summary')
duration = float_or_none(video.get('durationMs'), scale=1000)
view_count = int_or_none(video.get('views'))
```
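As a side note, `float_or_none(..., scale=1000)` converts a millisecond count into seconds and yields `None` for missing or malformed input, so under that assumption the `duration` line above behaves like:

```python
duration = float(video['durationMs']) / 1000  # when durationMs is present and numeric, else None
```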
##### Safely extract nested lists
Suppose you've extracted JSON like this into a Python data structure named `media_json` using, say, the `_download_json()` or `_parse_json()` methods of `InfoExtractor`:
```json
{
"title": "Example video",
"comment": "try extracting this",
"media": [{
"type": "bad",
"size": 320,
"url": "https://some.cdn.site/bad.mp4"
}, {
"type": "streaming",
"url": "https://some.cdn.site/hls.m3u8"
}, {
"type": "super",
"size": 1280,
"url": "https://some.cdn.site/good.webm"
}],
"moreStuff": "more values",
...
}
```
Then extractor code like this can collect the various fields of the JSON:
```python
...
from ..utils import (
determine_ext,
int_or_none,
T,
traverse_obj,
txt_or_none,
url_or_none,
)
...
...
info_dict = {}
# extract title and description if valid and not empty
info_dict.update(traverse_obj(media_json, {
'title': ('title', T(txt_or_none)),
'description': ('comment', T(txt_or_none)),
}))
# extract any recognisable media formats
fmts = []
# traverse into "media" list, extract `dict`s with desired keys
for fmt in traverse_obj(media_json, ('media', Ellipsis, {
'format_id': ('type', T(txt_or_none)),
'url': ('url', T(url_or_none)),
'width': ('size', T(int_or_none)), })):
# bad `fmt` values were `None` and removed
if 'url' not in fmt:
continue
fmt_url = fmt['url'] # known to be valid URL
ext = determine_ext(fmt_url)
if ext == 'm3u8':
fmts.extend(self._extract_m3u8_formats(fmt_url, video_id, 'mp4', fatal=False))
else:
fmt['ext'] = ext
fmts.append(fmt)
# sort, raise if no formats
self._sort_formats(fmts)
info_dict['formats'] = fmts
...
```
This way the extractor raises a clear exception, rather than crashing unpredictably, if the JSON structure changes so that no formats are found.
# EMBEDDING YOUTUBE-DL
youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/ytdl-org/youtube-dl/issues/new).

devscripts/__init__.py Normal file

@ -0,0 +1 @@
# Empty file needed to make devscripts.utils properly importable from outside


@ -5,8 +5,12 @@ import os
from os.path import dirname as dirn
import sys
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl
from youtube_dl.compat import compat_open as open
from utils import read_file
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
@ -18,9 +22,8 @@ def build_completion(opt_parser):
for option in group.option_list:
# for every long flag
opts_flag.append(option.get_opt_string())
template = read_file(BASH_COMPLETION_TEMPLATE)
with open(BASH_COMPLETION_FILE, "w", encoding='utf-8') as f:
# just using the special char
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
f.write(filled_template)


@ -49,15 +49,34 @@ def cli_to_api(*opts):
# from https://github.com/yt-dlp/yt-dlp/issues/5859#issuecomment-1363938900
default = parsed_options([])
def neq_opt(a, b):
if a == b:
return False
if a is None and repr(type(b)).endswith(".utils.DateRange'>"):
return '0001-01-01 - 9999-12-31' != '{0}'.format(b)
return a != b
diff = dict((k, v) for k, v in parsed_options(opts).items() if neq_opt(default[k], v))
if 'postprocessors' in diff:
diff['postprocessors'] = [pp for pp in diff['postprocessors'] if pp not in default['postprocessors']]
return diff
def main():
from pprint import PrettyPrinter
pprint = PrettyPrinter()
super_format = pprint.format
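# patch this PrettyPrinter instance so DateRange values render compactly instead of as a bare object repr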
def format(object, context, maxlevels, level):
if repr(type(object)).endswith(".utils.DateRange'>"):
return '{0}: {1}>'.format(repr(object)[:-2], object), True, False
return super_format(object, context, maxlevels, level)
pprint.format = format
pprint.pprint(cli_to_api(*sys.argv))
if __name__ == '__main__':


@ -1,7 +1,6 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import json
import mimetypes
import netrc
@ -10,7 +9,9 @@ import os
import re
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_basestring,
@ -22,6 +23,7 @@ from youtube_dl.utils import (
make_HTTPS_handler,
sanitized_Request,
)
from utils import read_file
class GitHubReleaser(object):
@ -89,8 +91,7 @@ def main():
changelog_file, version, build_path = args
changelog = read_file(changelog_file)
mobj = re.search(r'(?s)version %s\n{2}(.+?)\n{3}' % version, changelog)
body = mobj.group(1) if mobj else ''


@ -6,10 +6,13 @@ import os
from os.path import dirname as dirn
import sys
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl
from youtube_dl.utils import shell_quote
from utils import read_file, write_file
FISH_COMPLETION_FILE = 'youtube-dl.fish'
FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
@ -38,11 +41,9 @@ def build_completion(opt_parser):
complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
commands.append(shell_quote(complete_cmd))
template = read_file(FISH_COMPLETION_TEMPLATE)
filled_template = template.replace('{{commands}}', '\n'.join(commands))
write_file(FISH_COMPLETION_FILE, filled_template)
parser = youtube_dl.parseOpts()[0]


@ -6,16 +6,21 @@ import sys
import hashlib
import os.path
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
from devscripts.utils import read_file, write_file
from youtube_dl.compat import compat_open as open
if len(sys.argv) <= 1:
print('Specify the version number as parameter')
sys.exit()
version = sys.argv[1]
write_file('update/LATEST_VERSION', version)
versions_info = json.loads(read_file('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
@ -39,5 +44,5 @@ for key, filename in filenames.items():
versions_info['versions'][version] = new_version
versions_info['latest'] = version
with open('update/versions.json', 'w', encoding='utf-8') as jsonf:
json.dump(versions_info, jsonf, indent=4, sort_keys=True)


@ -2,14 +2,21 @@
from __future__ import unicode_literals
import json
import os.path
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
from utils import read_file, write_file
versions_info = json.loads(read_file('update/versions.json'))
version = versions_info['latest']
version_dict = versions_info['versions'][version]
# Read template page
template = read_file('download.html.in')
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', version_dict['bin'][0])
@ -18,5 +25,5 @@ template = template.replace('@EXE_URL@', version_dict['exe'][0])
template = template.replace('@EXE_SHA256SUM@', version_dict['exe'][1])
template = template.replace('@TAR_URL@', version_dict['tar'][0])
template = template.replace('@TAR_SHA256SUM@', version_dict['tar'][1])
write_file('download.html', template)


@ -5,17 +5,22 @@ from __future__ import with_statement, unicode_literals
import datetime
import glob
import os
import re
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
from devscripts.utils import read_file, write_file
from youtube_dl import compat_str
year = compat_str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
content = read_file(fn)
newc = re.sub(r'(?P<copyright>Copyright © 2011-)(?P<year>[0-9]{4})', 'Copyright © 2011-' + year, content)
if content != newc:
tmpFn = fn + '.part'
write_file(tmpFn, newc)
os.rename(tmpFn, fn)


@ -2,10 +2,16 @@
from __future__ import unicode_literals
import datetime
import json
import os.path
import textwrap
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
from utils import write_file
atom_template = textwrap.dedent("""\
<?xml version="1.0" encoding="utf-8"?>
@ -72,5 +78,4 @@ for v in versions:
entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)
write_file('update/releases.atom', atom_template)


@ -5,15 +5,17 @@ import sys
import os
import textwrap
dirn = os.path.dirname
# We must be able to import youtube_dl
sys.path.insert(0, dirn(dirn(dirn(os.path.abspath(__file__)))))
import youtube_dl
from devscripts.utils import read_file, write_file
def main():
template = read_file('supportedsites.html.in')
ie_htmls = []
for ie in youtube_dl.list_extractors(age_limit=None):
@ -29,8 +31,7 @@ def main():
template = template.replace('@SITES@', textwrap.indent('\n'.join(ie_htmls), '\t'))
write_file('supportedsites.html', template)
if __name__ == '__main__':


@ -1,10 +1,11 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import optparse
import re
from utils import read_file, write_file
def main():
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
@ -14,8 +15,7 @@ def main():
infile, outfile = args
readme = read_file(infile)
bug_text = re.search(
r'(?s)#\s*BUGS\s*[^\n]*\s*(.*?)#\s*COPYRIGHT', readme).group(1)
@ -25,8 +25,7 @@ def main():
out = bug_text + dev_text
write_file(outfile, out)
if __name__ == '__main__':


@ -1,8 +1,11 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import optparse
import os.path
import sys
from utils import read_file, read_version, write_file
def main():
@ -13,17 +16,11 @@ def main():
infile, outfile = args
issue_template_tmpl = read_file(infile)
out = issue_template_tmpl % {'version': read_version()}
write_file(outfile, out)
if __name__ == '__main__':
main()


@ -1,35 +1,48 @@
from __future__ import unicode_literals, print_function
from inspect import getsource
import os
from os.path import dirname as dirn
import re
import sys
print('WARNING: Lazy loading extractors is an experimental feature that may not always work', file=sys.stderr)
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
lazy_extractors_filename = sys.argv[1]
if os.path.exists(lazy_extractors_filename):
os.remove(lazy_extractors_filename)
# Py2: may be confused by leftover lazy_extractors.pyc
if sys.version_info[0] < 3:
for c in ('c', 'o'):
try:
os.remove(lazy_extractors_filename + c)
except OSError:
pass
from devscripts.utils import read_file, write_file
from youtube_dl.compat import compat_register_utf8
compat_register_utf8()
from youtube_dl.extractor import _ALL_CLASSES
from youtube_dl.extractor.common import InfoExtractor, SearchInfoExtractor
module_template = read_file('devscripts/lazy_load_template.py')
def get_source(m):
return re.sub(r'(?m)^\s*#.*\n', '', getsource(m))
module_contents = [
module_template,
get_source(InfoExtractor.suitable),
get_source(InfoExtractor._match_valid_url) + '\n',
'class LazyLoadSearchExtractor(LazyLoadExtractor):\n pass\n',
# needed for suitable() methods of Youtube extractor (see #28780)
'from youtube_dl.utils import parse_qs, variadic\n',
]
ie_template = '''
@ -62,7 +75,7 @@ def build_lazy_ie(ie, name):
valid_url=valid_url,
module=ie.__module__)
if ie.suitable.__func__ is not InfoExtractor.suitable.__func__:
s += '\n' + get_source(ie.suitable)
if hasattr(ie, '_make_valid_url'):
# search extractors
s += make_valid_template.format(valid_url=ie._make_valid_url())
@ -102,7 +115,17 @@ for ie in ordered_cls:
module_contents.append(
'_ALL_CLASSES = [{0}]'.format(', '.join(names)))
module_src = '\n'.join(module_contents)
write_file(lazy_extractors_filename, module_src + '\n')
# work around JVM byte code module limit in Jython
if sys.platform.startswith('java') and sys.version_info[:2] == (2, 7):
import subprocess
from youtube_dl.compat import compat_subprocess_get_DEVNULL
# if Python 2.7 is available, use it to compile the module for Jython
try:
subprocess.check_call(['python2.7', '-m', 'py_compile', lazy_extractors_filename], stdout=compat_subprocess_get_DEVNULL())
except Exception:
pass


@ -1,8 +1,14 @@
from __future__ import unicode_literals
import os.path
import re
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
from utils import read_file
from youtube_dl.compat import compat_open as open
README_FILE = 'README.md'
helptext = sys.stdin.read()
@ -10,8 +16,7 @@ helptext = sys.stdin.read()
if isinstance(helptext, bytes):
helptext = helptext.decode('utf-8')
oldreadme = read_file(README_FILE)
header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
@ -20,7 +25,7 @@ options = helptext[helptext.index(' General Options:') + 19:]
options = re.sub(r'(?m)^ (\w.+)$', r'## \1', options)
options = '# OPTIONS\n' + options + '\n'
with open(README_FILE, 'w', encoding='utf-8') as f:
f.write(header)
f.write(options)
f.write(footer)


@ -1,17 +1,19 @@
#!/usr/bin/env python
from __future__ import unicode_literals
import optparse
import os.path
import sys
# Import youtube_dl
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import youtube_dl
from utils import write_file
def main():
parser = optparse.OptionParser(usage='%prog OUTFILE.md')
@ -38,8 +40,7 @@ def main():
' - ' + md + '\n'
for md in gen_ies_md(ies))
write_file(outfile, out)
if __name__ == '__main__':


@ -1,13 +1,13 @@
from __future__ import unicode_literals
import optparse
import os.path
import re
from utils import read_file, write_file
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
README_FILE = os.path.join(ROOT_DIR, 'README.md')
PREFIX = r'''%YOUTUBE-DL(1)
# NAME
@ -29,8 +29,7 @@ def main():
outfile, = args
readme = read_file(README_FILE)
readme = re.sub(r'(?s)^.*?(?=# DESCRIPTION)', '', readme)
readme = re.sub(r'\s+youtube-dl \[OPTIONS\] URL \[URL\.\.\.\]', '', readme)
@ -38,8 +37,7 @@ def main():
readme = filter_options(readme)
write_file(outfile, readme)
def filter_options(readme):

devscripts/utils.py Normal file

@ -0,0 +1,62 @@
# coding: utf-8
from __future__ import unicode_literals
import argparse
import functools
import os.path
import subprocess
import sys
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
from youtube_dl.compat import (
compat_kwargs,
compat_open as open,
)
def read_file(fname):
with open(fname, encoding='utf-8') as f:
return f.read()
def write_file(fname, content, mode='w'):
with open(fname, mode, encoding='utf-8') as f:
return f.write(content)
def read_version(fname='youtube_dl/version.py'):
"""Get the version without importing the package"""
exec(compile(read_file(fname), fname, 'exec'))
return locals()['__version__']
def get_filename_args(has_infile=False, default_outfile=None):
parser = argparse.ArgumentParser()
if has_infile:
parser.add_argument('infile', help='Input file')
kwargs = {'nargs': '?', 'default': default_outfile} if default_outfile else {}
kwargs['help'] = 'Output file'
parser.add_argument('outfile', **compat_kwargs(kwargs))
opts = parser.parse_args()
if has_infile:
return opts.infile, opts.outfile
return opts.outfile
def compose_functions(*functions):
return lambda x: functools.reduce(lambda y, f: f(y), functions, x)
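# e.g. compose_functions(f, g)(x) == g(f(x)): functions are applied left to right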
def run_process(*args, **kwargs):
kwargs.setdefault('text', True)
kwargs.setdefault('check', True)
kwargs.setdefault('capture_output', True)
if kwargs['text']:
kwargs.setdefault('encoding', 'utf-8')
kwargs.setdefault('errors', 'replace')
kwargs = compat_kwargs(kwargs)
return subprocess.run(args, **kwargs)
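# usage sketch: run_process('git', 'rev-parse', 'HEAD').stdout; raises CalledProcessError on failure (check=True)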


@ -7,6 +7,8 @@ import sys
sys.path.insert(0, dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
from utils import read_file, write_file
ZSH_COMPLETION_FILE = "youtube-dl.zsh"
ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
@ -34,15 +36,13 @@ def build_completion(opt_parser):
flags = [opt.get_opt_string() for opt in opts]
template = read_file(ZSH_COMPLETION_TEMPLATE)
template = template.replace("{{fileopts}}", "|".join(fileopts))
template = template.replace("{{diropts}}", "|".join(diropts))
template = template.replace("{{flags}}", " ".join(flags))
write_file(ZSH_COMPLETION_FILE, template)
parser = youtube_dl.parseOpts()[0]


@ -1,7 +1,6 @@
from __future__ import unicode_literals
import errno
import hashlib
import json
import os.path
@ -9,14 +8,17 @@ import re
import types
import ssl
import sys
import unittest
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
compat_open as open,
compat_os_name,
compat_str,
)
from youtube_dl.utils import (
IDENTITY,
preferredencoding,
write_string,
)
@ -27,10 +29,10 @@ def get_params(override=None):
"parameters.json") "parameters.json")
LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), LOCAL_PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"local_parameters.json") "local_parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf: with open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf) parameters = json.load(pf)
if os.path.exists(LOCAL_PARAMETERS_FILE): if os.path.exists(LOCAL_PARAMETERS_FILE):
with io.open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf: with open(LOCAL_PARAMETERS_FILE, encoding='utf-8') as pf:
parameters.update(json.load(pf)) parameters.update(json.load(pf))
if override: if override:
parameters.update(override) parameters.update(override)
@ -72,7 +74,8 @@ class FakeYDL(YoutubeDL):
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, *args, **kwargs):
s = args[0] if len(args) > 0 else kwargs.get('message', 'Missing message')
raise Exception(s)
def download(self, x):
@ -139,7 +142,7 @@ def expect_value(self, got, expected, field):
self.assertTrue(
contains_str in got,
'field %s (value: %r) should contain %r' % (field, got, contains_str))
elif isinstance(expected, compat_str) and re.match(r'lambda \w+:', expected):
fn = eval(expected)
suite = expected.split(':', 1)[1].strip()
self.assertTrue(
@ -297,3 +300,7 @@ def http_server_port(httpd):
else:
sock = httpd.socket
return sock.getsockname()[1]
def expectedFailureIf(cond):
return unittest.expectedFailure if cond else IDENTITY
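# usage: @expectedFailureIf(sys.version_info < (3, 0)) - marks the test as an expected failure only when cond is true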


@ -3,19 +3,37 @@
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import threading
from test.helper import (
expect_dict,
expect_value,
FakeYDL,
http_server_port,
)
from youtube_dl.compat import (
compat_etree_fromstring,
compat_http_server,
compat_open as open,
)
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import (
get_info_extractor,
YoutubeIE,
)
from youtube_dl.utils import (
encode_data_uri,
ExtractorError,
RegexNotFoundError,
strip_jsonp,
)
TEAPOT_RESPONSE_STATUS = 418
TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
@ -100,6 +118,71 @@ class TestInfoExtractor(unittest.TestCase):
self.assertRaises(RegexNotFoundError, ie._html_search_meta, 'z', html, None, fatal=True)
self.assertRaises(RegexNotFoundError, ie._html_search_meta, ('z', 'x'), html, None, fatal=True)
def test_search_nextjs_data(self):
html = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content=
"text/html; charset=utf-8">
<meta name="viewport" content="width=device-width">
<title>Test _search_nextjs_data()</title>
</head>
<body>
<div id="__next">
<div style="background-color:#17171E" class="FU" dir="ltr">
<div class="sc-93de261d-0 dyzzYE">
<div>
<header class="HD"></header>
<main class="MN">
<div style="height:0" class="HT0">
<div style="width:NaN%" data-testid=
"stream-container" class="WDN"></div>
</div>
</main>
</div>
<footer class="sc-6e5faf91-0 dEGaHS"></footer>
</div>
</div>
</div>
<script id="__NEXT_DATA__" type="application/json">
{"props":{"pageProps":{"video":{"id":"testid"}}}}
</script>
</body>
</html>
'''
search = self.ie._search_nextjs_data(html, 'testID')
self.assertEqual(search['props']['pageProps']['video']['id'], 'testid')
def test_search_nuxt_data(self):
html = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content=
"text/html; charset=utf-8">
<title>Nuxt.js Test Page</title>
<meta name="viewport" content=
"width=device-width, initial-scale=1">
<meta data-hid="robots" name="robots" content="all">
</head>
<body class="BD">
<div id="__layout">
<h1 class="H1">Example heading</h1>
<div class="IN">
<p>Decoy text</p>
</div>
</div>
<script>
window.__NUXT__=(function(a,b,c,d,e,f,g,h){return {decoy:" default",data:[{track:{id:f,title:g}}]}}(null,null,"c",null,null,"testid","Nuxt.js title",null));
</script>
<script src="/_nuxt/a12345b.js" defer="defer"></script>
</body>
</html>
'''
search = self.ie._search_nuxt_data(html, 'testID')
self.assertEqual(search['track']['id'], 'testid')
def test_search_json_ld_realworld(self): def test_search_json_ld_realworld(self):
# https://github.com/ytdl-org/youtube-dl/issues/23306 # https://github.com/ytdl-org/youtube-dl/issues/23306
expect_dict( expect_dict(
@ -348,6 +431,24 @@ class TestInfoExtractor(unittest.TestCase):
}], }],
}) })
# from https://0000.studio/
# with type attribute but without extension in URL
expect_dict(
self,
self.ie._parse_html5_media_entries(
'https://0000.studio',
r'''
<video src="https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92"
controls="controls" type="video/mp4" preload="metadata" autoplay="autoplay" playsinline class="object-contain">
</video>
''', None)[0],
{
'formats': [{
'url': 'https://d1ggyt9m8pwf3g.cloudfront.net/protected/ap-northeast-1:1864af40-28d5-492b-b739-b32314b1a527/archive/clip/838db6a7-8973-4cd6-840d-8517e4093c92',
'ext': 'mp4',
}],
})
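
The new test case checks that _parse_html5_media_entries() falls back to the <video> tag's type attribute when the src URL carries no file extension. A rough sketch of that fallback, with an assumed (much smaller) MIME table than youtube-dl's utils.mimetype2ext():

import re

_MIME2EXT = {'video/mp4': 'mp4', 'video/webm': 'webm'}  # assumed subset

def html5_video_format(html):
    # illustrative only; the real parser also handles <source> children, posters etc.
    src = re.search(r'<video[^>]+src=["\']([^"\']+)', html)
    mime = re.search(r'<video[^>]+type=["\']([^"\']+)', html)
    if not src:
        return None
    url = src.group(1)
    last_segment = url.rpartition('/')[2]
    ext = last_segment.rpartition('.')[2] if '.' in last_segment else None
    # no extension in the URL: fall back to the declared MIME type
    return {'url': url, 'ext': ext or _MIME2EXT.get(mime.group(1) if mime else '')}
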
def test_extract_jwplayer_data_realworld(self): def test_extract_jwplayer_data_realworld(self):
# from http://www.suffolk.edu/sjc/ # from http://www.suffolk.edu/sjc/
expect_dict( expect_dict(
@ -801,8 +902,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
] ]
for m3u8_file, m3u8_url, expected_formats in _TEST_CASES: for m3u8_file, m3u8_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/m3u8/%s.m3u8' % m3u8_file, with open('./test/testdata/m3u8/%s.m3u8' % m3u8_file,
mode='r', encoding='utf-8') as f: mode='r', encoding='utf-8') as f:
formats = self.ie._parse_m3u8_formats( formats = self.ie._parse_m3u8_formats(
f.read(), m3u8_url, ext='mp4') f.read(), m3u8_url, ext='mp4')
self.ie._sort_formats(formats) self.ie._sort_formats(formats)
@ -892,7 +993,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
'tbr': 5997.485, 'tbr': 5997.485,
'width': 1920, 'width': 1920,
'height': 1080, 'height': 1080,
}] }],
{},
), ( ), (
# https://github.com/ytdl-org/youtube-dl/pull/14844 # https://github.com/ytdl-org/youtube-dl/pull/14844
'urls_only', 'urls_only',
@ -975,7 +1077,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
'tbr': 4400, 'tbr': 4400,
'width': 1920, 'width': 1920,
'height': 1080, 'height': 1080,
}] }],
{},
), ( ), (
# https://github.com/ytdl-org/youtube-dl/issues/20346 # https://github.com/ytdl-org/youtube-dl/issues/20346
# Media considered unfragmented even though it contains # Media considered unfragmented even though it contains
@ -1021,18 +1124,185 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
'width': 360, 'width': 360,
'height': 360, 'height': 360,
'fps': 30, 'fps': 30,
}] }],
{},
), (
# https://github.com/ytdl-org/youtube-dl/issues/30235
# Bento4 generated test mpd
# mp4dash --mpd-name=manifest.mpd --no-split --use-segment-list mediafiles
'url_and_range',
'http://unknown/manifest.mpd', # mpd_url
'http://unknown/', # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'fragment_base_url': 'http://unknown/',
'ext': 'm4a',
'format_id': 'audio-und-mp4a.40.2',
'format_note': 'DASH audio',
'container': 'm4a_dash',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 98.808,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'fragment_base_url': 'http://unknown/',
'ext': 'mp4',
'format_id': 'video-avc1',
'format_note': 'DASH video',
'container': 'mp4_dash',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4D401E',
'tbr': 699.597,
'width': 768,
'height': 432
}],
{},
), (
# https://github.com/ytdl-org/youtube-dl/issues/27575
# GPAC generated test mpd
# MP4Box -dash 10000 -single-file -out manifest.mpd mediafiles
'range_only',
'http://unknown/manifest.mpd', # mpd_url
'http://unknown/', # mpd_base_url
[{
'manifest_url': 'http://unknown/manifest.mpd',
'fragment_base_url': 'http://unknown/audio_dashinit.mp4',
'ext': 'm4a',
'format_id': '2',
'format_note': 'DASH audio',
'container': 'm4a_dash',
'protocol': 'http_dash_segments',
'acodec': 'mp4a.40.2',
'vcodec': 'none',
'tbr': 98.096,
}, {
'manifest_url': 'http://unknown/manifest.mpd',
'fragment_base_url': 'http://unknown/video_dashinit.mp4',
'ext': 'mp4',
'format_id': '1',
'format_note': 'DASH video',
'container': 'mp4_dash',
'protocol': 'http_dash_segments',
'acodec': 'none',
'vcodec': 'avc1.4D401E',
'tbr': 526.987,
'width': 768,
'height': 432
}],
{},
), (
'subtitles',
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/',
[{
'format_id': 'audio=128001',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'm4a',
'tbr': 128.001,
'asr': 48000,
'format_note': 'DASH audio',
'container': 'm4a_dash',
'vcodec': 'none',
'acodec': 'mp4a.40.2',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}, {
'format_id': 'video=100000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 336,
'height': 144,
'tbr': 100,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}, {
'format_id': 'video=326000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 562,
'height': 240,
'tbr': 326,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}, {
'format_id': 'video=698000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 844,
'height': 360,
'tbr': 698,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}, {
'format_id': 'video=1493000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 1126,
'height': 480,
'tbr': 1493,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}, {
'format_id': 'video=4482000',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'ext': 'mp4',
'width': 1688,
'height': 720,
'tbr': 4482,
'format_note': 'DASH video',
'container': 'mp4_dash',
'vcodec': 'avc1.4D401F',
'acodec': 'none',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}],
{
'en': [
{
'ext': 'mp4',
'manifest_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/manifest.mpd',
'fragment_base_url': 'https://sdn-global-streaming-cache-3qsdn.akamaized.net/stream/3144/files/17/07/672975/3144-kZT4LWMQw6Rh7Kpd.ism/dash/',
'protocol': 'http_dash_segments',
}
]
},
) )
] ]
for mpd_file, mpd_url, mpd_base_url, expected_formats in _TEST_CASES: for mpd_file, mpd_url, mpd_base_url, expected_formats, expected_subtitles in _TEST_CASES:
with io.open('./test/testdata/mpd/%s.mpd' % mpd_file, with open('./test/testdata/mpd/%s.mpd' % mpd_file,
mode='r', encoding='utf-8') as f: mode='r', encoding='utf-8') as f:
formats = self.ie._parse_mpd_formats( formats, subtitles = self.ie._parse_mpd_formats_and_subtitles(
compat_etree_fromstring(f.read().encode('utf-8')), compat_etree_fromstring(f.read().encode('utf-8')),
mpd_base_url=mpd_base_url, mpd_url=mpd_url) mpd_base_url=mpd_base_url, mpd_url=mpd_url)
self.ie._sort_formats(formats) self.ie._sort_formats(formats)
expect_value(self, formats, expected_formats, None) expect_value(self, formats, expected_formats, None)
expect_value(self, subtitles, expected_subtitles, None)
def test_parse_f4m_formats(self): def test_parse_f4m_formats(self):
_TEST_CASES = [ _TEST_CASES = [
@ -1053,8 +1323,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
] ]
for f4m_file, f4m_url, expected_formats in _TEST_CASES: for f4m_file, f4m_url, expected_formats in _TEST_CASES:
with io.open('./test/testdata/f4m/%s.f4m' % f4m_file, with open('./test/testdata/f4m/%s.f4m' % f4m_file,
mode='r', encoding='utf-8') as f: mode='r', encoding='utf-8') as f:
formats = self.ie._parse_f4m_formats( formats = self.ie._parse_f4m_formats(
compat_etree_fromstring(f.read().encode('utf-8')), compat_etree_fromstring(f.read().encode('utf-8')),
f4m_url, None) f4m_url, None)
@ -1101,8 +1371,8 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
] ]
for xspf_file, xspf_url, expected_entries in _TEST_CASES: for xspf_file, xspf_url, expected_entries in _TEST_CASES:
with io.open('./test/testdata/xspf/%s.xspf' % xspf_file, with open('./test/testdata/xspf/%s.xspf' % xspf_file,
mode='r', encoding='utf-8') as f: mode='r', encoding='utf-8') as f:
entries = self.ie._parse_xspf( entries = self.ie._parse_xspf(
compat_etree_fromstring(f.read().encode('utf-8')), compat_etree_fromstring(f.read().encode('utf-8')),
xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url) xspf_file, xspf_url=xspf_url, xspf_base_url=xspf_url)

View File

@ -10,14 +10,31 @@ import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy import copy
import json
from test.helper import FakeYDL, assertRegexpMatches from test.helper import (
FakeYDL,
assertRegexpMatches,
try_rm,
)
from youtube_dl import YoutubeDL from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_str, compat_urllib_error from youtube_dl.compat import (
compat_http_cookiejar_Cookie,
compat_http_cookies_SimpleCookie,
compat_kwargs,
compat_open as open,
compat_str,
compat_urllib_error,
)
from youtube_dl.extractor import YoutubeIE from youtube_dl.extractor import YoutubeIE
from youtube_dl.extractor.common import InfoExtractor from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.postprocessor.common import PostProcessor from youtube_dl.postprocessor.common import PostProcessor
from youtube_dl.utils import ExtractorError, match_filter_func from youtube_dl.utils import (
ExtractorError,
match_filter_func,
traverse_obj,
)
TEST_URL = 'http://localhost/sample.mp4' TEST_URL = 'http://localhost/sample.mp4'
@ -29,11 +46,14 @@ class YDL(FakeYDL):
self.msgs = [] self.msgs = []
def process_info(self, info_dict): def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict) self.downloaded_info_dicts.append(info_dict.copy())
def to_screen(self, msg): def to_screen(self, msg):
self.msgs.append(msg) self.msgs.append(msg)
def dl(self, *args, **kwargs):
assert False, 'Downloader must not be invoked for test_YoutubeDL'
def _make_result(formats, **kwargs): def _make_result(formats, **kwargs):
res = { res = {
@ -42,8 +62,9 @@ def _make_result(formats, **kwargs):
'title': 'testttitle', 'title': 'testttitle',
'extractor': 'testex', 'extractor': 'testex',
'extractor_key': 'TestEx', 'extractor_key': 'TestEx',
'webpage_url': 'http://example.com/watch?v=shenanigans',
} }
res.update(**kwargs) res.update(**compat_kwargs(kwargs))
return res return res
@ -681,12 +702,12 @@ class TestYoutubeDL(unittest.TestCase):
class SimplePP(PostProcessor): class SimplePP(PostProcessor):
def run(self, info): def run(self, info):
with open(audiofile, 'wt') as f: with open(audiofile, 'w') as f:
f.write('EXAMPLE') f.write('EXAMPLE')
return [info['filepath']], info return [info['filepath']], info
def run_pp(params, PP): def run_pp(params, PP):
with open(filename, 'wt') as f: with open(filename, 'w') as f:
f.write('EXAMPLE') f.write('EXAMPLE')
ydl = YoutubeDL(params) ydl = YoutubeDL(params)
ydl.add_post_processor(PP()) ydl.add_post_processor(PP())
@ -705,7 +726,7 @@ class TestYoutubeDL(unittest.TestCase):
class ModifierPP(PostProcessor): class ModifierPP(PostProcessor):
def run(self, info): def run(self, info):
with open(info['filepath'], 'wt') as f: with open(info['filepath'], 'w') as f:
f.write('MODIFIED') f.write('MODIFIED')
return [], info return [], info
@ -930,17 +951,11 @@ class TestYoutubeDL(unittest.TestCase):
# Test case for https://github.com/ytdl-org/youtube-dl/issues/27064 # Test case for https://github.com/ytdl-org/youtube-dl/issues/27064
def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self): def test_ignoreerrors_for_playlist_with_url_transparent_iterable_entries(self):
class _YDL(YDL): ydl = YDL({
def __init__(self, *args, **kwargs):
super(_YDL, self).__init__(*args, **kwargs)
def trouble(self, s, tb=None):
pass
ydl = _YDL({
'format': 'extra', 'format': 'extra',
'ignoreerrors': True, 'ignoreerrors': True,
}) })
ydl.trouble = lambda *_, **__: None
class VideoIE(InfoExtractor): class VideoIE(InfoExtractor):
_VALID_URL = r'video:(?P<id>\d+)' _VALID_URL = r'video:(?P<id>\d+)'
@ -1017,5 +1032,160 @@ class TestYoutubeDL(unittest.TestCase):
self.assertEqual(out_info['release_date'], '20210930') self.assertEqual(out_info['release_date'], '20210930')
class TestYoutubeDLCookies(unittest.TestCase):
@staticmethod
def encode_cookie(cookie):
if not isinstance(cookie, dict):
cookie = vars(cookie)
for name, value in cookie.items():
yield name, compat_str(value)
@classmethod
def comparable_cookies(cls, cookies):
# Work around cookiejar cookies not being unicode strings
return sorted(map(tuple, map(sorted, map(cls.encode_cookie, cookies))))
def assertSameCookies(self, c1, c2, msg=None):
return self.assertEqual(
*map(self.comparable_cookies, (c1, c2)),
msg=msg)
def assertSameCookieStrings(self, c1, c2, msg=None):
return self.assertSameCookies(
*map(lambda c: compat_http_cookies_SimpleCookie(c).values(), (c1, c2)),
msg=msg)
def test_header_cookies(self):
ydl = FakeYDL()
ydl.report_warning = lambda *_, **__: None
def cookie(name, value, version=None, domain='', path='', secure=False, expires=None):
return compat_http_cookiejar_Cookie(
version or 0, name, value, None, False,
domain, bool(domain), bool(domain), path, bool(path),
secure, expires, False, None, None, rest={})
test_url, test_domain = (t % ('yt.dl',) for t in ('https://%s/test', '.%s'))
def test(encoded_cookies, cookies, headers=False, round_trip=None, error_re=None):
def _test():
ydl.cookiejar.clear()
ydl._load_cookies(encoded_cookies, autoscope=headers)
if headers:
ydl._apply_header_cookies(test_url)
data = {'url': test_url}
ydl._calc_headers(data)
self.assertSameCookies(
cookies, ydl.cookiejar,
'Extracted cookiejar.Cookie is not the same')
if not headers:
self.assertSameCookieStrings(
data.get('cookies'), round_trip or encoded_cookies,
msg='Cookie is not the same as round trip')
ydl.__dict__['_YoutubeDL__header_cookies'] = []
try:
_test()
except AssertionError:
raise
except Exception as e:
if not error_re:
raise
assertRegexpMatches(self, e.args[0], error_re.join(('.*',) * 2))
test('test=value; Domain=' + test_domain, [cookie('test', 'value', domain=test_domain)])
test('test=value', [cookie('test', 'value')], error_re='Unscoped cookies are not allowed')
test('cookie1=value1; Domain={0}; Path=/test; cookie2=value2; Domain={0}; Path=/'.format(test_domain), [
cookie('cookie1', 'value1', domain=test_domain, path='/test'),
cookie('cookie2', 'value2', domain=test_domain, path='/')])
cookie_kw = compat_kwargs(
{'domain': test_domain, 'path': '/test', 'secure': True, 'expires': '9999999999', })
test('test=value; Domain={domain}; Path={path}; Secure; Expires={expires}'.format(**cookie_kw), [
cookie('test', 'value', **cookie_kw)])
test('test="value; "; path=/test; domain=' + test_domain, [
cookie('test', 'value; ', domain=test_domain, path='/test')],
round_trip='test="value\\073 "; Domain={0}; Path=/test'.format(test_domain))
test('name=; Domain=' + test_domain, [cookie('name', '', domain=test_domain)],
round_trip='name=""; Domain=' + test_domain)
test('test=value', [cookie('test', 'value', domain=test_domain)], headers=True)
test('cookie1=value; Domain={0}; cookie2=value'.format(test_domain), [],
headers=True, error_re='Invalid syntax')
ydl.report_warning = ydl.report_error
test('test=value', [], headers=True, error_re='Passing cookies as a header is a potential security risk')
def test_infojson_cookies(self):
TEST_FILE = 'test_infojson_cookies.info.json'
TEST_URL = 'https://example.com/example.mp4'
COOKIES = 'a=b; Domain=.example.com; c=d; Domain=.example.com'
COOKIE_HEADER = {'Cookie': 'a=b; c=d'}
ydl = FakeYDL()
ydl.process_info = lambda x: ydl._write_info_json('test', x, TEST_FILE)
def make_info(info_header_cookies=False, fmts_header_cookies=False, cookies_field=False):
fmt = {'url': TEST_URL}
if fmts_header_cookies:
fmt['http_headers'] = COOKIE_HEADER
if cookies_field:
fmt['cookies'] = COOKIES
return _make_result([fmt], http_headers=COOKIE_HEADER if info_header_cookies else None)
def test(initial_info, note):
def failure_msg(why):
return ' when '.join((why, note))
result = {}
result['processed'] = ydl.process_ie_result(initial_info)
self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
msg=failure_msg('No cookies set in cookiejar after initial process'))
ydl.cookiejar.clear()
with open(TEST_FILE) as infojson:
result['loaded'] = ydl.sanitize_info(json.load(infojson), True)
result['final'] = ydl.process_ie_result(result['loaded'].copy(), download=False)
self.assertTrue(ydl.cookiejar.get_cookies_for_url(TEST_URL),
msg=failure_msg('No cookies set in cookiejar after final process'))
ydl.cookiejar.clear()
for key in ('processed', 'loaded', 'final'):
info = result[key]
self.assertIsNone(
traverse_obj(info, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False),
msg=failure_msg('Cookie header not removed in {0} result'.format(key)))
self.assertSameCookieStrings(
traverse_obj(info, ((None, ('formats', 0)), 'cookies'), get_all=False), COOKIES,
msg=failure_msg('No cookies field found in {0} result'.format(key)))
test({'url': TEST_URL, 'http_headers': COOKIE_HEADER, 'id': '1', 'title': 'x'}, 'no formats field')
test(make_info(info_header_cookies=True), 'info_dict header cookies')
test(make_info(fmts_header_cookies=True), 'format header cookies')
test(make_info(info_header_cookies=True, fmts_header_cookies=True), 'info_dict and format header cookies')
test(make_info(info_header_cookies=True, fmts_header_cookies=True, cookies_field=True), 'all cookies fields')
test(make_info(cookies_field=True), 'cookies format field')
test({'url': TEST_URL, 'cookies': COOKIES, 'id': '1', 'title': 'x'}, 'info_dict cookies field only')
try_rm(TEST_FILE)
def test_add_headers_cookie(self):
def check_for_cookie_header(result):
return traverse_obj(result, ((None, ('formats', 0)), 'http_headers', 'Cookie'), casesense=False, get_all=False)
ydl = FakeYDL({'http_headers': {'Cookie': 'a=b'}})
ydl._apply_header_cookies(_make_result([])['webpage_url']) # Scope to input webpage URL: .example.com
fmt = {'url': 'https://example.com/video.mp4'}
result = ydl.process_ie_result(_make_result([fmt]), download=False)
self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies in result info_dict')
self.assertEqual(result.get('cookies'), 'a=b; Domain=.example.com', msg='No cookies were set in cookies field')
self.assertIn('a=b', ydl.cookiejar.get_cookie_header(fmt['url']), msg='No cookies were set in cookiejar')
fmt = {'url': 'https://wrong.com/video.mp4'}
result = ydl.process_ie_result(_make_result([fmt]), download=False)
self.assertIsNone(check_for_cookie_header(result), msg='http_headers cookies for wrong domain')
self.assertFalse(result.get('cookies'), msg='Cookies set in cookies field for wrong domain')
self.assertFalse(ydl.cookiejar.get_cookie_header(fmt['url']), msg='Cookies set in cookiejar for wrong domain')
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -46,6 +46,20 @@ class TestYoutubeDLCookieJar(unittest.TestCase):
# will be ignored # will be ignored
self.assertFalse(cookiejar._cookies) self.assertFalse(cookiejar._cookies)
def test_get_cookie_header(self):
cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/httponly_cookies.txt')
cookiejar.load(ignore_discard=True, ignore_expires=True)
header = cookiejar.get_cookie_header('https://www.foobar.foobar')
self.assertIn('HTTPONLY_COOKIE', header)
def test_get_cookies_for_url(self):
cookiejar = YoutubeDLCookieJar('./test/testdata/cookies/session_cookies.txt')
cookiejar.load(ignore_discard=True, ignore_expires=True)
cookies = cookiejar.get_cookies_for_url('https://www.foobar.foobar/')
self.assertEqual(len(cookies), 2)
cookies = cookiejar.get_cookies_for_url('https://foobar.foobar/')
self.assertFalse(cookies)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -23,6 +23,7 @@ from youtube_dl.compat import (
compat_urllib_parse_unquote, compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus, compat_urllib_parse_unquote_plus,
compat_urllib_parse_urlencode, compat_urllib_parse_urlencode,
compat_urllib_request,
) )
@ -135,6 +136,19 @@ class TestCompat(unittest.TestCase):
self.assertEqual(compat_casefold('\u03a3'), '\u03c3') self.assertEqual(compat_casefold('\u03a3'), '\u03c3')
self.assertEqual(compat_casefold('A\u0345\u03a3'), 'a\u03b9\u03c3') self.assertEqual(compat_casefold('A\u0345\u03a3'), 'a\u03b9\u03c3')
def test_compat_urllib_request_Request(self):
self.assertEqual(
compat_urllib_request.Request('http://127.0.0.1', method='PUT').get_method(),
'PUT')
class PUTrequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
self.assertEqual(
PUTrequest('http://127.0.0.1').get_method(),
'PUT')
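
test_compat_urllib_request_Request() covers both spellings: the Python 3 method= keyword and the classic Python 2 idiom of overriding get_method(). A sketch, under assumed names, of how such a compat shim can backport the keyword (youtube-dl's actual shim lives in youtube_dl/compat.py):

try:
    from urllib.request import Request as _BaseRequest  # Python 3
except ImportError:
    from urllib2 import Request as _BaseRequest  # Python 2

class CompatRequest(_BaseRequest):
    # accept method= on both Pythons and honour it in get_method()
    def __init__(self, url, data=None, headers={}, method=None, **kwargs):
        self._method = method
        _BaseRequest.__init__(self, url, data=data, headers=headers, **kwargs)

    def get_method(self):
        return self._method or _BaseRequest.get_method(self)

assert CompatRequest('http://127.0.0.1', method='PUT').get_method() == 'PUT'
assert CompatRequest('http://127.0.0.1').get_method() == 'GET'
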
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -20,15 +20,15 @@ from test.helper import (
import hashlib import hashlib
import io
import json import json
import socket import socket
import youtube_dl.YoutubeDL import youtube_dl.YoutubeDL
from youtube_dl.compat import ( from youtube_dl.compat import (
compat_http_client, compat_http_client,
compat_urllib_error,
compat_HTTPError, compat_HTTPError,
compat_open as open,
compat_urllib_error,
) )
from youtube_dl.utils import ( from youtube_dl.utils import (
DownloadError, DownloadError,
@ -245,7 +245,7 @@ def generator(test_case, tname):
self.assertTrue( self.assertTrue(
os.path.exists(info_json_fn), os.path.exists(info_json_fn),
'Missing info file %s' % info_json_fn) 'Missing info file %s' % info_json_fn)
with io.open(info_json_fn, encoding='utf-8') as infof: with open(info_json_fn, encoding='utf-8') as infof:
info_dict = json.load(infof) info_dict = json.load(infof)
expect_info_dict(self, info_dict, tc.get('info_dict', {})) expect_info_dict(self, info_dict, tc.get('info_dict', {}))
finally: finally:

View File

@ -12,20 +12,65 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import ( from test.helper import (
FakeLogger, FakeLogger,
FakeYDL,
http_server_port, http_server_port,
try_rm, try_rm,
) )
from youtube_dl import YoutubeDL from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_http_server from youtube_dl.compat import (
from youtube_dl.utils import encodeFilename compat_http_cookiejar_Cookie,
from youtube_dl.downloader.external import Aria2pFD compat_http_server,
compat_kwargs,
)
from youtube_dl.utils import (
encodeFilename,
join_nonempty,
)
from youtube_dl.downloader.external import (
Aria2cFD,
Aria2pFD,
AxelFD,
CurlFD,
FFmpegFD,
HttpieFD,
WgetFD,
)
import threading import threading
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_SIZE = 10 * 1024 TEST_SIZE = 10 * 1024
TEST_COOKIE = {
'version': 0,
'name': 'test',
'value': 'ytdlp',
'port': None,
'port_specified': False,
'domain': '.example.com',
'domain_specified': True,
'domain_initial_dot': False,
'path': '/',
'path_specified': True,
'secure': False,
'expires': None,
'discard': False,
'comment': None,
'comment_url': None,
'rest': {},
}
TEST_COOKIE_VALUE = join_nonempty('name', 'value', delim='=', from_dict=TEST_COOKIE)
TEST_INFO = {'url': 'http://www.example.com/'}
def cookiejar_Cookie(**cookie_args):
return compat_http_cookiejar_Cookie(**compat_kwargs(cookie_args))
def ifExternalFDAvailable(externalFD):
return unittest.skipUnless(externalFD.available(),
externalFD.get_basename() + ' not found')
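
TEST_COOKIE_VALUE above relies on join_nonempty() with its from_dict mode. A minimal sketch of that utility's assumed behaviour (the real one is in youtube_dl/utils.py and supports more options):

def join_nonempty(*values, **kwargs):
    # **kwargs rather than keyword-only arguments, keeping Python 2 compatibility
    delim = kwargs.get('delim', '-')
    from_dict = kwargs.get('from_dict')
    if from_dict is not None:
        values = [from_dict.get(v) for v in values]
    return delim.join(str(v) for v in values if v)

# join_nonempty('name', 'value', delim='=', from_dict=TEST_COOKIE) == 'test=ytdlp'
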
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler): class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
def log_message(self, format, *args): def log_message(self, format, *args):
@ -70,7 +115,7 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
assert False, 'unrecognised server path' assert False, 'unrecognised server path'
@unittest.skipUnless(Aria2pFD.available(), 'aria2p module not found') @ifExternalFDAvailable(Aria2pFD)
class TestAria2pFD(unittest.TestCase): class TestAria2pFD(unittest.TestCase):
def setUp(self): def setUp(self):
self.httpd = compat_http_server.HTTPServer( self.httpd = compat_http_server.HTTPServer(
@ -111,5 +156,103 @@ class TestAria2pFD(unittest.TestCase):
}) })
@ifExternalFDAvailable(HttpieFD)
class TestHttpieFD(unittest.TestCase):
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = HttpieFD(ydl, {})
self.assertEqual(
downloader._make_cmd('test', TEST_INFO),
['http', '--download', '--output', 'test', 'http://www.example.com/'])
# Test cookie header is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
self.assertEqual(
downloader._make_cmd('test', TEST_INFO),
['http', '--download', '--output', 'test',
'http://www.example.com/', 'Cookie:' + TEST_COOKIE_VALUE])
@ifExternalFDAvailable(AxelFD)
class TestAxelFD(unittest.TestCase):
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = AxelFD(ydl, {})
self.assertEqual(
downloader._make_cmd('test', TEST_INFO),
['axel', '-o', 'test', '--', 'http://www.example.com/'])
# Test cookie header is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
self.assertEqual(
downloader._make_cmd('test', TEST_INFO),
['axel', '-o', 'test', '-H', 'Cookie: ' + TEST_COOKIE_VALUE,
'--max-redirect=0', '--', 'http://www.example.com/'])
@ifExternalFDAvailable(WgetFD)
class TestWgetFD(unittest.TestCase):
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = WgetFD(ydl, {})
self.assertNotIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
# Test cookiejar tempfile arg is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
self.assertIn('--load-cookies', downloader._make_cmd('test', TEST_INFO))
@ifExternalFDAvailable(CurlFD)
class TestCurlFD(unittest.TestCase):
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = CurlFD(ydl, {})
self.assertNotIn('--cookie', downloader._make_cmd('test', TEST_INFO))
# Test cookie header is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
self.assertIn('--cookie', downloader._make_cmd('test', TEST_INFO))
self.assertIn(TEST_COOKIE_VALUE, downloader._make_cmd('test', TEST_INFO))
@ifExternalFDAvailable(Aria2cFD)
class TestAria2cFD(unittest.TestCase):
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = Aria2cFD(ydl, {})
downloader._make_cmd('test', TEST_INFO)
self.assertFalse(hasattr(downloader, '_cookies_tempfile'))
# Test cookiejar tempfile arg is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
cmd = downloader._make_cmd('test', TEST_INFO)
self.assertIn('--load-cookies=%s' % downloader._cookies_tempfile, cmd)
@ifExternalFDAvailable(FFmpegFD)
class TestFFmpegFD(unittest.TestCase):
_args = []
def _test_cmd(self, args):
self._args = args
def test_make_cmd(self):
with FakeYDL() as ydl:
downloader = FFmpegFD(ydl, {})
downloader._debug_cmd = self._test_cmd
info_dict = TEST_INFO.copy()
info_dict['ext'] = 'mp4'
downloader._call_downloader('test', info_dict)
self.assertEqual(self._args, [
'ffmpeg', '-y', '-i', 'http://www.example.com/',
'-c', 'copy', '-f', 'mp4', 'file:test'])
# Test cookies arg is added
ydl.cookiejar.set_cookie(cookiejar_Cookie(**TEST_COOKIE))
downloader._call_downloader('test', info_dict)
self.assertEqual(self._args, [
'ffmpeg', '-y', '-cookies', TEST_COOKIE_VALUE + '; path=/; domain=.example.com;\r\n',
'-i', 'http://www.example.com/', '-c', 'copy', '-f', 'mp4', 'file:test'])
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -88,7 +88,7 @@ class TestHttpFD(unittest.TestCase):
self.assertTrue(downloader.real_download(filename, { self.assertTrue(downloader.real_download(filename, {
'url': 'http://127.0.0.1:%d/%s' % (self.port, ep), 'url': 'http://127.0.0.1:%d/%s' % (self.port, ep),
})) }))
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE) self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep)
try_rm(encodeFilename(filename)) try_rm(encodeFilename(filename))
def download_all(self, params): def download_all(self, params):

View File

@ -8,47 +8,53 @@ import unittest
import sys import sys
import os import os
import subprocess import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.utils import encodeArgument
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, rootDir)
try: from youtube_dl.compat import compat_register_utf8, compat_subprocess_get_DEVNULL
_DEV_NULL = subprocess.DEVNULL from youtube_dl.utils import encodeArgument
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb') compat_register_utf8()
_DEV_NULL = compat_subprocess_get_DEVNULL()
class TestExecution(unittest.TestCase): class TestExecution(unittest.TestCase):
def setUp(self):
self.module = 'youtube_dl'
if sys.version_info < (2, 7):
self.module += '.__main__'
def test_import(self): def test_import(self):
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir) subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
def test_module_exec(self): def test_module_exec(self):
if sys.version_info >= (2, 7): # Python 2.6 doesn't support package execution subprocess.check_call([sys.executable, '-m', self.module, '--version'], cwd=rootDir, stdout=_DEV_NULL)
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_main_exec(self): def test_main_exec(self):
subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL) subprocess.check_call([sys.executable, os.path.normpath('youtube_dl/__main__.py'), '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_cmdline_umlauts(self): def test_cmdline_umlauts(self):
os.environ['PYTHONIOENCODING'] = 'utf-8'
p = subprocess.Popen( p = subprocess.Popen(
[sys.executable, 'youtube_dl/__main__.py', encodeArgument('ä'), '--version'], [sys.executable, '-m', self.module, encodeArgument('ä'), '--version'],
cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE) cwd=rootDir, stdout=_DEV_NULL, stderr=subprocess.PIPE)
_, stderr = p.communicate() _, stderr = p.communicate()
self.assertFalse(stderr) self.assertFalse(stderr)
def test_lazy_extractors(self): def test_lazy_extractors(self):
lazy_extractors = 'youtube_dl/extractor/lazy_extractors.py' lazy_extractors = os.path.normpath('youtube_dl/extractor/lazy_extractors.py')
try: try:
subprocess.check_call([sys.executable, 'devscripts/make_lazy_extractors.py', lazy_extractors], cwd=rootDir, stdout=_DEV_NULL) subprocess.check_call([sys.executable, os.path.normpath('devscripts/make_lazy_extractors.py'), lazy_extractors], cwd=rootDir, stdout=_DEV_NULL)
subprocess.check_call([sys.executable, 'test/test_all_urls.py'], cwd=rootDir, stdout=_DEV_NULL) subprocess.check_call([sys.executable, os.path.normpath('test/test_all_urls.py')], cwd=rootDir, stdout=_DEV_NULL)
finally: finally:
for x in ['', 'c'] if sys.version_info[0] < 3 else ['']: for x in ('', 'c') if sys.version_info[0] < 3 else ('',):
try: try:
os.remove(lazy_extractors + x) os.remove(lazy_extractors + x)
except (IOError, OSError): except OSError:
pass pass

View File

@ -8,33 +8,163 @@ import sys
import unittest import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import contextlib
import gzip
import io
import ssl
import tempfile
import threading
import zlib
# avoid deprecated alias assertRaisesRegexp
if hasattr(unittest.TestCase, 'assertRaisesRegex'):
unittest.TestCase.assertRaisesRegexp = unittest.TestCase.assertRaisesRegex
try:
import brotli
except ImportError:
brotli = None
try:
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from youtube_dl.compat import (
compat_http_cookiejar_Cookie,
compat_http_server,
compat_str as str,
compat_urllib_error,
compat_urllib_HTTPError,
compat_urllib_parse,
compat_urllib_request,
)
from youtube_dl.utils import (
sanitized_Request,
update_Request,
urlencode_postdata,
)
from test.helper import ( from test.helper import (
expectedFailureIf,
FakeYDL,
FakeLogger, FakeLogger,
http_server_port, http_server_port,
) )
from youtube_dl import YoutubeDL from youtube_dl import YoutubeDL
from youtube_dl.compat import compat_http_server, compat_urllib_request
import ssl
import threading
TEST_DIR = os.path.dirname(os.path.abspath(__file__)) TEST_DIR = os.path.dirname(os.path.abspath(__file__))
class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler): class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
protocol_version = 'HTTP/1.1'
# work around old-/new-style class inheritance
def super(self, meth_name, *args, **kwargs):
from types import MethodType
try:
super()
fn = lambda s, m, *a, **k: getattr(super(), m)(*a, **k)
except TypeError:
fn = lambda s, m, *a, **k: getattr(compat_http_server.BaseHTTPRequestHandler, m)(s, *a, **k)
self.super = MethodType(fn, self)
return self.super(meth_name, *args, **kwargs)
def log_message(self, format, *args): def log_message(self, format, *args):
pass pass
def _headers(self):
payload = str(self.headers).encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', str(len(payload)))
self.end_headers()
self.wfile.write(payload)
def _redirect(self):
self.send_response(int(self.path[len('/redirect_'):]))
self.send_header('Location', '/method')
self.send_header('Content-Length', '0')
self.end_headers()
def _method(self, method, payload=None):
self.send_response(200)
self.send_header('Content-Length', str(len(payload or '')))
self.send_header('Method', method)
self.end_headers()
if payload:
self.wfile.write(payload)
def _status(self, status):
payload = '<html>{0} NOT FOUND</html>'.format(status).encode('utf-8')
self.send_response(int(status))
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.send_header('Content-Length', str(len(payload)))
self.end_headers()
self.wfile.write(payload)
def _read_data(self):
if 'Content-Length' in self.headers:
return self.rfile.read(int(self.headers['Content-Length']))
def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
return '{0}://{1}:{2}/{3}'.format(
scheme, host,
port if port is not None
else http_server_port(self.server), path)
def do_POST(self):
data = self._read_data()
if self.path.startswith('/redirect_'):
self._redirect()
elif self.path.startswith('/method'):
self._method('POST', data)
elif self.path.startswith('/headers'):
self._headers()
else:
self._status(404)
def do_HEAD(self):
if self.path.startswith('/redirect_'):
self._redirect()
elif self.path.startswith('/method'):
self._method('HEAD')
else:
self._status(404)
def do_PUT(self):
data = self._read_data()
if self.path.startswith('/redirect_'):
self._redirect()
elif self.path.startswith('/method'):
self._method('PUT', data)
else:
self._status(404)
def do_GET(self): def do_GET(self):
def respond(payload=b'<html><video src="/vid.mp4" /></html>',
payload_type='text/html; charset=utf-8',
payload_encoding=None,
resp_code=200):
self.send_response(resp_code)
self.send_header('Content-Type', payload_type)
if payload_encoding:
self.send_header('Content-Encoding', payload_encoding)
self.send_header('Content-Length', str(len(payload))) # required for persistent connections
self.end_headers()
self.wfile.write(payload)
def gzip_compress(p):
buf = io.BytesIO()
with contextlib.closing(gzip.GzipFile(fileobj=buf, mode='wb')) as f:
f.write(p)
return buf.getvalue()
if self.path == '/video.html': if self.path == '/video.html':
self.send_response(200) respond()
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(b'<html><video src="/vid.mp4" /></html>')
elif self.path == '/vid.mp4': elif self.path == '/vid.mp4':
self.send_response(200) respond(b'\x00\x00\x00\x00\x20\x66\x74[video]', 'video/mp4')
self.send_header('Content-Type', 'video/mp4')
self.end_headers()
self.wfile.write(b'\x00\x00\x00\x00\x20\x66\x74[video]')
elif self.path == '/302': elif self.path == '/302':
if sys.version_info[0] == 3: if sys.version_info[0] == 3:
# XXX: Python 3 http server does not allow non-ASCII header values # XXX: Python 3 http server does not allow non-ASCII header values
@ -42,60 +172,336 @@ class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
self.end_headers() self.end_headers()
return return
new_url = 'http://127.0.0.1:%d/中文.html' % http_server_port(self.server) new_url = self._test_url('中文.html')
self.send_response(302) self.send_response(302)
self.send_header(b'Location', new_url.encode('utf-8')) self.send_header(b'Location', new_url.encode('utf-8'))
self.end_headers() self.end_headers()
elif self.path == '/%E4%B8%AD%E6%96%87.html': elif self.path == '/%E4%B8%AD%E6%96%87.html':
self.send_response(200) respond()
self.send_header('Content-Type', 'text/html; charset=utf-8') elif self.path == '/%c7%9f':
respond()
elif self.path == '/redirect_dotsegments':
self.send_response(301)
# redirect to /headers, but with dot segments in the path
self.send_header('Location', '/a/b/./../../headers')
self.send_header('Content-Length', '0')
self.end_headers() self.end_headers()
self.wfile.write(b'<html><video src="/vid.mp4" /></html>') elif self.path.startswith('/redirect_'):
self._redirect()
elif self.path.startswith('/method'):
self._method('GET')
elif self.path.startswith('/headers'):
self._headers()
elif self.path.startswith('/308-to-headers'):
self.send_response(308)
self.send_header('Location', '/headers')
self.send_header('Content-Length', '0')
self.end_headers()
elif self.path == '/trailing_garbage':
payload = b'<html><video src="/vid.mp4" /></html>'
compressed = gzip_compress(payload) + b'trailing garbage'
respond(compressed, payload_encoding='gzip')
elif self.path == '/302-non-ascii-redirect':
new_url = self._test_url('中文.html')
# actually respond with permanent redirect
self.send_response(301)
self.send_header('Location', new_url)
self.send_header('Content-Length', '0')
self.end_headers()
elif self.path == '/content-encoding':
encodings = self.headers.get('ytdl-encoding', '')
payload = b'<html><video src="/vid.mp4" /></html>'
for encoding in filter(None, (e.strip() for e in encodings.split(','))):
if encoding == 'br' and brotli:
payload = brotli.compress(payload)
elif encoding == 'gzip':
payload = gzip_compress(payload)
elif encoding == 'deflate':
payload = zlib.compress(payload)
elif encoding == 'unsupported':
payload = b'raw'
break
else:
self._status(415)
return
respond(payload, payload_encoding=encodings)
else: else:
assert False self._status(404)
def send_header(self, keyword, value):
"""
Forcibly allow HTTP server to send non-percent-encoded non-ASCII characters in headers.
This is against what is defined in RFC 3986, but we need to test that we support this
since some sites incorrectly do this.
"""
if keyword.lower() == 'connection':
return self.super('send_header', keyword, value)
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append('{0}: {1}\r\n'.format(keyword, value).encode('utf-8'))
def end_headers(self):
if hasattr(self, '_headers_buffer'):
self.wfile.write(b''.join(self._headers_buffer))
self._headers_buffer = []
self.super('end_headers')
class TestHTTP(unittest.TestCase): class TestHTTP(unittest.TestCase):
# when does it make sense to check the SSL certificate?
_check_cert = (
sys.version_info >= (3, 2)
or (sys.version_info[0] == 2 and sys.version_info[1:] >= (7, 19)))
def setUp(self): def setUp(self):
self.httpd = compat_http_server.HTTPServer( # HTTP server
self.http_httpd = compat_http_server.HTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler) ('127.0.0.1', 0), HTTPTestRequestHandler)
self.port = http_server_port(self.httpd) self.http_port = http_server_port(self.http_httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True self.http_server_thread = threading.Thread(target=self.http_httpd.serve_forever)
self.server_thread.start() self.http_server_thread.daemon = True
self.http_server_thread.start()
try:
from http.server import ThreadingHTTPServer
except ImportError:
try:
from socketserver import ThreadingMixIn
except ImportError:
from SocketServer import ThreadingMixIn
class ThreadingHTTPServer(ThreadingMixIn, compat_http_server.HTTPServer):
pass
# HTTPS server
certfn = os.path.join(TEST_DIR, 'testcert.pem')
self.https_httpd = ThreadingHTTPServer(
('127.0.0.1', 0), HTTPTestRequestHandler)
try:
sslctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslctx.verify_mode = ssl.CERT_NONE
sslctx.check_hostname = False
sslctx.load_cert_chain(certfn, None)
self.https_httpd.socket = sslctx.wrap_socket(
self.https_httpd.socket, server_side=True)
except AttributeError:
self.https_httpd.socket = ssl.wrap_socket(
self.https_httpd.socket, certfile=certfn, server_side=True)
self.https_port = http_server_port(self.https_httpd)
self.https_server_thread = threading.Thread(target=self.https_httpd.serve_forever)
self.https_server_thread.daemon = True
self.https_server_thread.start()
def tearDown(self):
def closer(svr):
def _closer():
svr.shutdown()
svr.server_close()
return _closer
shutdown_thread = threading.Thread(target=closer(self.http_httpd))
shutdown_thread.start()
self.http_server_thread.join(2.0)
shutdown_thread = threading.Thread(target=closer(self.https_httpd))
shutdown_thread.start()
self.https_server_thread.join(2.0)
def _test_url(self, path, host='127.0.0.1', scheme='http', port=None):
return '{0}://{1}:{2}/{3}'.format(
scheme, host,
port if port is not None
else self.https_port if scheme == 'https'
else self.http_port, path)
@unittest.skipUnless(_check_cert, 'No support for certificate check in SSL')
def test_nocheckcertificate(self):
with FakeYDL({'logger': FakeLogger()}) as ydl:
with self.assertRaises(compat_urllib_error.URLError):
ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
with FakeYDL({'logger': FakeLogger(), 'nocheckcertificate': True}) as ydl:
r = ydl.urlopen(sanitized_Request(self._test_url('headers', scheme='https')))
self.assertEqual(r.getcode(), 200)
r.close()
def test_percent_encode(self):
with FakeYDL() as ydl:
# Unicode characters should be encoded with uppercase percent-encoding
res = ydl.urlopen(sanitized_Request(self._test_url('中文.html')))
self.assertEqual(res.getcode(), 200)
res.close()
# don't normalize existing percent encodings
res = ydl.urlopen(sanitized_Request(self._test_url('%c7%9f')))
self.assertEqual(res.getcode(), 200)
res.close()
def test_unicode_path_redirection(self): def test_unicode_path_redirection(self):
# XXX: Python 3 http server does not allow non-ASCII header values with FakeYDL() as ydl:
if sys.version_info[0] == 3: r = ydl.urlopen(sanitized_Request(self._test_url('302-non-ascii-redirect')))
return self.assertEqual(r.url, self._test_url('%E4%B8%AD%E6%96%87.html'))
r.close()
ydl = YoutubeDL({'logger': FakeLogger()}) def test_redirect(self):
r = ydl.extract_info('http://127.0.0.1:%d/302' % self.port) with FakeYDL() as ydl:
self.assertEqual(r['entries'][0]['url'], 'http://127.0.0.1:%d/vid.mp4' % self.port) def do_req(redirect_status, method, check_no_content=False):
data = b'testdata' if method in ('POST', 'PUT') else None
res = ydl.urlopen(sanitized_Request(
self._test_url('redirect_{0}'.format(redirect_status)),
method=method, data=data))
if check_no_content:
self.assertNotIn('Content-Type', res.headers)
return res.read().decode('utf-8'), res.headers.get('method', '')
# A 303 must either use GET or HEAD for the subsequent request
self.assertEqual(do_req(303, 'POST'), ('', 'GET'))
self.assertEqual(do_req(303, 'HEAD'), ('', 'HEAD'))
self.assertEqual(do_req(303, 'PUT'), ('', 'GET'))
class TestHTTPS(unittest.TestCase): # 301 and 302 turn POST only into a GET, with no Content-Type
def setUp(self): self.assertEqual(do_req(301, 'POST', True), ('', 'GET'))
certfn = os.path.join(TEST_DIR, 'testcert.pem') self.assertEqual(do_req(301, 'HEAD'), ('', 'HEAD'))
self.httpd = compat_http_server.HTTPServer( self.assertEqual(do_req(302, 'POST', True), ('', 'GET'))
('127.0.0.1', 0), HTTPTestRequestHandler) self.assertEqual(do_req(302, 'HEAD'), ('', 'HEAD'))
self.httpd.socket = ssl.wrap_socket(
self.httpd.socket, certfile=certfn, server_side=True)
self.port = http_server_port(self.httpd)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
def test_nocheckcertificate(self): self.assertEqual(do_req(301, 'PUT'), ('testdata', 'PUT'))
if sys.version_info >= (2, 7, 9): # No certificate checking anyways self.assertEqual(do_req(302, 'PUT'), ('testdata', 'PUT'))
ydl = YoutubeDL({'logger': FakeLogger()})
self.assertRaises(
Exception,
ydl.extract_info, 'https://127.0.0.1:%d/video.html' % self.port)
ydl = YoutubeDL({'logger': FakeLogger(), 'nocheckcertificate': True}) # 307 and 308 should not change method
r = ydl.extract_info('https://127.0.0.1:%d/video.html' % self.port) for m in ('POST', 'PUT'):
self.assertEqual(r['entries'][0]['url'], 'https://127.0.0.1:%d/vid.mp4' % self.port) self.assertEqual(do_req(307, m), ('testdata', m))
self.assertEqual(do_req(308, m), ('testdata', m))
self.assertEqual(do_req(307, 'HEAD'), ('', 'HEAD'))
self.assertEqual(do_req(308, 'HEAD'), ('', 'HEAD'))
# These should not redirect and instead raise an HTTPError
for code in (300, 304, 305, 306):
with self.assertRaises(compat_urllib_HTTPError):
do_req(code, 'GET')
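
Collected in one place, the method-rewrite rules that test_redirect() asserts. This is a summary of the test's expectations, not youtube-dl's actual redirect handler:

def redirected_method(status, method):
    if status == 303 and method != 'HEAD':
        return 'GET'  # 303: anything but HEAD becomes GET
    if status in (301, 302) and method == 'POST':
        return 'GET'  # 301/302: only POST is rewritten (and loses its body)
    return method     # 307/308 never change the method

assert redirected_method(303, 'PUT') == 'GET'
assert redirected_method(301, 'PUT') == 'PUT'
assert redirected_method(308, 'POST') == 'POST'
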
# Jython 2.7.1 times out for some reason
@expectedFailureIf(sys.platform.startswith('java') and sys.version_info < (2, 7, 2))
def test_content_type(self):
# https://github.com/yt-dlp/yt-dlp/commit/379a4f161d4ad3e40932dcf5aca6e6fb9715ab28
with FakeYDL({'nocheckcertificate': True}) as ydl:
# method should be auto-detected as POST
r = sanitized_Request(self._test_url('headers', scheme='https'), data=urlencode_postdata({'test': 'test'}))
headers = ydl.urlopen(r).read().decode('utf-8')
self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
# test http
r = sanitized_Request(self._test_url('headers'), data=urlencode_postdata({'test': 'test'}))
headers = ydl.urlopen(r).read().decode('utf-8')
self.assertIn('Content-Type: application/x-www-form-urlencoded', headers)
def test_update_req(self):
req = sanitized_Request('http://example.com')
assert req.data is None
assert req.get_method() == 'GET'
assert not req.has_header('Content-Type')
# Test that zero-byte payloads will be sent
req = update_Request(req, data=b'')
assert req.data == b''
assert req.get_method() == 'POST'
# yt-dl expects data to be encoded and Content-Type to be added by sender
# assert req.get_header('Content-Type') == 'application/x-www-form-urlencoded'
def test_cookiejar(self):
with FakeYDL() as ydl:
ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
0, 'test', 'ytdl', None, False, '127.0.0.1', True,
False, '/headers', True, False, None, False, None, None, {}))
data = ydl.urlopen(sanitized_Request(
self._test_url('headers'))).read().decode('utf-8')
self.assertIn('Cookie: test=ytdl', data)
def test_passed_cookie_header(self):
# We should accept a Cookie header passed in like any other header and handle it appropriately.
with FakeYDL() as ydl:
# Specified Cookie header should be used
res = ydl.urlopen(sanitized_Request(
self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
self.assertIn('Cookie: test=test', res)
# Specified Cookie header should be removed on any redirect
res = ydl.urlopen(sanitized_Request(
self._test_url('308-to-headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
self.assertNotIn('Cookie: test=test', res)
# Specified Cookie header should override global cookiejar for that request
ydl.cookiejar.set_cookie(compat_http_cookiejar_Cookie(
0, 'test', 'ytdlp', None, False, '127.0.0.1', True,
False, '/headers', True, False, None, False, None, None, {}))
data = ydl.urlopen(sanitized_Request(
self._test_url('headers'), headers={'Cookie': 'test=test'})).read().decode('utf-8')
self.assertNotIn('Cookie: test=ytdlp', data)
self.assertIn('Cookie: test=test', data)
def test_no_compression_compat_header(self):
with FakeYDL() as ydl:
data = ydl.urlopen(
sanitized_Request(
self._test_url('headers'),
headers={'Youtubedl-no-compression': True})).read()
self.assertIn(b'Accept-Encoding: identity', data)
self.assertNotIn(b'youtubedl-no-compression', data.lower())
def test_gzip_trailing_garbage(self):
# https://github.com/ytdl-org/youtube-dl/commit/aa3e950764337ef9800c936f4de89b31c00dfcf5
# https://github.com/ytdl-org/youtube-dl/commit/6f2ec15cee79d35dba065677cad9da7491ec6e6f
with FakeYDL() as ydl:
data = ydl.urlopen(sanitized_Request(self._test_url('trailing_garbage'))).read().decode('utf-8')
self.assertEqual(data, '<html><video src="/vid.mp4" /></html>')
def __test_compression(self, encoding):
with FakeYDL() as ydl:
res = ydl.urlopen(
sanitized_Request(
self._test_url('content-encoding'),
headers={'ytdl-encoding': encoding}))
# decoded encodings are removed: only check for valid decompressed data
self.assertEqual(res.read(), b'<html><video src="/vid.mp4" /></html>')
@unittest.skipUnless(brotli, 'brotli support is not installed')
def test_brotli(self):
self.__test_compression('br')
def test_deflate(self):
self.__test_compression('deflate')
def test_gzip(self):
self.__test_compression('gzip')
def test_multiple_encodings(self):
# https://www.rfc-editor.org/rfc/rfc9110.html#section-8.4
for pair in ('gzip,deflate', 'deflate, gzip', 'gzip, gzip', 'deflate, deflate'):
self.__test_compression(pair)
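
A minimal sketch of the client-side decoding these compression tests exercise: apply each Content-Encoding token right to left, with the common raw-deflate fallback. The helper name is assumed; the real logic lives in youtube-dl's HTTP handler:

import gzip
import io
import zlib

def decode_content(data, content_encoding):
    # 'gzip, deflate' means deflate was applied last, so decode right-to-left
    for enc in reversed([e.strip() for e in content_encoding.split(',') if e.strip()]):
        if enc == 'gzip':
            data = gzip.GzipFile(fileobj=io.BytesIO(data), mode='rb').read()
        elif enc == 'deflate':
            try:
                data = zlib.decompress(data)
            except zlib.error:
                # some servers send raw deflate without the zlib header
                data = zlib.decompress(data, -zlib.MAX_WBITS)
    return data

assert decode_content(zlib.compress(b'payload'), 'deflate') == b'payload'
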
def test_unsupported_encoding(self):
# it should return the raw content
with FakeYDL() as ydl:
res = ydl.urlopen(
sanitized_Request(
self._test_url('content-encoding'),
headers={'ytdl-encoding': 'unsupported'}))
self.assertEqual(res.headers.get('Content-Encoding'), 'unsupported')
self.assertEqual(res.read(), b'raw')
def test_remove_dot_segments(self):
with FakeYDL() as ydl:
res = ydl.urlopen(sanitized_Request(self._test_url('a/b/./../../headers')))
self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
res = ydl.urlopen(sanitized_Request(self._test_url('redirect_dotsegments')))
self.assertEqual(compat_urllib_parse.urlparse(res.geturl()).path, '/headers')
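
test_remove_dot_segments() checks RFC 3986 section 5.2.4 path normalisation, both directly and via a redirect. A compact sketch of the algorithm for absolute paths (illustrative, not the code under test):

def remove_dot_segments(path):
    output = []
    for segment in path.split('/'):
        if segment == '..':
            if len(output) > 1:
                output.pop()  # never pop the leading '' that marks the root
        elif segment != '.':
            output.append(segment)
    # a trailing '.' or '..' leaves a trailing slash
    if path.rpartition('/')[2] in ('.', '..'):
        output.append('')
    return '/'.join(output)

assert remove_dot_segments('/a/b/./../../headers') == '/headers'
assert remove_dot_segments('/a/b/..') == '/a/'
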
def _build_proxy_handler(name): def _build_proxy_handler(name):
@ -109,7 +515,7 @@ def _build_proxy_handler(name):
self.send_response(200) self.send_response(200)
self.send_header('Content-Type', 'text/plain; charset=utf-8') self.send_header('Content-Type', 'text/plain; charset=utf-8')
self.end_headers() self.end_headers()
self.wfile.write('{self.proxy_name}: {self.path}'.format(self=self).encode('utf-8')) self.wfile.write('{0}: {1}'.format(self.proxy_name, self.path).encode('utf-8'))
return HTTPTestRequestHandler return HTTPTestRequestHandler
@ -129,10 +535,30 @@ class TestProxy(unittest.TestCase):
self.geo_proxy_thread.daemon = True self.geo_proxy_thread.daemon = True
self.geo_proxy_thread.start() self.geo_proxy_thread.start()
def tearDown(self):
def closer(svr):
def _closer():
svr.shutdown()
svr.server_close()
return _closer
shutdown_thread = threading.Thread(target=closer(self.proxy))
shutdown_thread.start()
self.proxy_thread.join(2.0)
shutdown_thread = threading.Thread(target=closer(self.geo_proxy))
shutdown_thread.start()
self.geo_proxy_thread.join(2.0)
def _test_proxy(self, host='127.0.0.1', port=None):
return '{0}:{1}'.format(
host, port if port is not None else self.port)
def test_proxy(self): def test_proxy(self):
geo_proxy = '127.0.0.1:{0}'.format(self.geo_port) geo_proxy = self._test_proxy(port=self.geo_port)
ydl = YoutubeDL({ ydl = YoutubeDL({
'proxy': '127.0.0.1:{0}'.format(self.port), 'proxy': self._test_proxy(),
'geo_verification_proxy': geo_proxy, 'geo_verification_proxy': geo_proxy,
}) })
url = 'http://foo.com/bar' url = 'http://foo.com/bar'
@ -146,7 +572,7 @@ class TestProxy(unittest.TestCase):
def test_proxy_with_idn(self): def test_proxy_with_idn(self):
ydl = YoutubeDL({ ydl = YoutubeDL({
'proxy': '127.0.0.1:{0}'.format(self.port), 'proxy': self._test_proxy(),
}) })
url = 'http://中文.tw/' url = 'http://中文.tw/'
response = ydl.urlopen(url).read().decode('utf-8') response = ydl.urlopen(url).read().decode('utf-8')
@ -154,5 +580,25 @@ class TestProxy(unittest.TestCase):
self.assertEqual(response, 'normal: http://xn--fiq228c.tw/') self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
class TestFileURL(unittest.TestCase):
# See https://github.com/ytdl-org/youtube-dl/issues/8227
def test_file_urls(self):
tf = tempfile.NamedTemporaryFile(delete=False)
tf.write(b'foobar')
tf.close()
url = compat_urllib_parse.urljoin('file://', pathname2url(tf.name))
with FakeYDL() as ydl:
self.assertRaisesRegexp(
compat_urllib_error.URLError, 'file:// scheme is explicitly disabled in youtube-dl for security reasons', ydl.urlopen, url)
# not yet implemented
"""
with FakeYDL({'enable_file_urls': True}) as ydl:
res = ydl.urlopen(url)
self.assertEqual(res.read(), b'foobar')
res.close()
"""
os.unlink(tf.name)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()

View File

@ -18,6 +18,7 @@ class TestJSInterpreter(unittest.TestCase):
def test_basic(self): def test_basic(self):
jsi = JSInterpreter('function x(){;}') jsi = JSInterpreter('function x(){;}')
self.assertEqual(jsi.call_function('x'), None) self.assertEqual(jsi.call_function('x'), None)
self.assertEqual(repr(jsi.extract_function('x')), 'F<x>')
jsi = JSInterpreter('function x3(){return 42;}') jsi = JSInterpreter('function x3(){return 42;}')
self.assertEqual(jsi.call_function('x3'), 42) self.assertEqual(jsi.call_function('x3'), 42)
@ -32,6 +33,55 @@ class TestJSInterpreter(unittest.TestCase):
jsi = JSInterpreter('function x4(a){return 2*a+1;}') jsi = JSInterpreter('function x4(a){return 2*a+1;}')
self.assertEqual(jsi.call_function('x4', 3), 7) self.assertEqual(jsi.call_function('x4', 3), 7)
def test_add(self):
jsi = JSInterpreter('function f(){return 42 + 7;}')
self.assertEqual(jsi.call_function('f'), 49)
jsi = JSInterpreter('function f(){return 42 + undefined;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
jsi = JSInterpreter('function f(){return 42 + null;}')
self.assertEqual(jsi.call_function('f'), 42)
def test_sub(self):
jsi = JSInterpreter('function f(){return 42 - 7;}')
self.assertEqual(jsi.call_function('f'), 35)
jsi = JSInterpreter('function f(){return 42 - undefined;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
jsi = JSInterpreter('function f(){return 42 - null;}')
self.assertEqual(jsi.call_function('f'), 42)
def test_mul(self):
jsi = JSInterpreter('function f(){return 42 * 7;}')
self.assertEqual(jsi.call_function('f'), 294)
jsi = JSInterpreter('function f(){return 42 * undefined;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
jsi = JSInterpreter('function f(){return 42 * null;}')
self.assertEqual(jsi.call_function('f'), 0)
def test_div(self):
jsi = JSInterpreter('function f(a, b){return a / b;}')
self.assertTrue(math.isnan(jsi.call_function('f', 0, 0)))
self.assertTrue(math.isnan(jsi.call_function('f', JS_Undefined, 1)))
self.assertTrue(math.isinf(jsi.call_function('f', 2, 0)))
self.assertEqual(jsi.call_function('f', 0, 3), 0)
def test_mod(self):
jsi = JSInterpreter('function f(){return 42 % 7;}')
self.assertEqual(jsi.call_function('f'), 0)
jsi = JSInterpreter('function f(){return 42 % 0;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
jsi = JSInterpreter('function f(){return 42 % undefined;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
def test_exp(self):
jsi = JSInterpreter('function f(){return 42 ** 2;}')
self.assertEqual(jsi.call_function('f'), 1764)
jsi = JSInterpreter('function f(){return 42 ** undefined;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
jsi = JSInterpreter('function f(){return 42 ** null;}')
self.assertEqual(jsi.call_function('f'), 1)
jsi = JSInterpreter('function f(){return undefined ** 42;}')
self.assertTrue(math.isnan(jsi.call_function('f')))
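These arithmetic tests pin down the JavaScript coercion rules the interpreter is expected to model: `undefined` poisons arithmetic to NaN, `null` coerces to 0 (so `42 + null` is 42 and `42 * null` is 0), and `x ** null` is `x ** 0`, i.e. 1. A minimal sketch of that coercion in plain Python (the helper names `to_number`, `js_add` and `js_mul` are illustrative, not the interpreter's real internals; the sentinel here stands in for the `JS_Undefined` imported above):

import math

JS_Undefined = object()  # stand-in for the interpreter's `undefined` sentinel

def to_number(v):
    # illustrative ToNumber for the cases exercised by these tests
    if v is None:            # JS null coerces to +0
        return 0
    if v is JS_Undefined:    # JS undefined coerces to NaN
        return float('nan')
    return v

def js_add(a, b):
    return to_number(a) + to_number(b)

def js_mul(a, b):
    return to_number(a) * to_number(b)

assert js_add(42, None) == 42
assert js_mul(42, None) == 0
assert math.isnan(js_add(42, JS_Undefined))
assert math.isnan(js_mul(42, JS_Undefined))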
def test_empty_return(self): def test_empty_return(self):
jsi = JSInterpreter('function f(){return; y()}') jsi = JSInterpreter('function f(){return; y()}')
self.assertEqual(jsi.call_function('f'), None) self.assertEqual(jsi.call_function('f'), None)
@ -442,10 +492,12 @@ class TestJSInterpreter(unittest.TestCase):
jsi = JSInterpreter(''' jsi = JSInterpreter('''
function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; } function x() { let a=/,,[/,913,/](,)}/; "".replace(a, ""); return a; }
''') ''')
attrs = set(('findall', 'finditer', 'flags', 'groupindex', attrs = set(('findall', 'finditer', 'match', 'scanner', 'search',
'groups', 'match', 'pattern', 'scanner', 'split', 'sub', 'subn'))
'search', 'split', 'sub', 'subn')) if sys.version_info >= (2, 7):
self.assertTrue(set(dir(jsi.call_function('x'))) > attrs) # documented for 2.6 but may not be found
attrs.update(('flags', 'groupindex', 'groups', 'pattern'))
self.assertSetEqual(set(dir(jsi.call_function('x'))) & attrs, attrs)
jsi = JSInterpreter(''' jsi = JSInterpreter('''
function x() { let a=/,,[/,913,/](,)}/i; return a; } function x() { let a=/,,[/,913,/](,)}/i; return a; }
@ -505,6 +557,30 @@ class TestJSInterpreter(unittest.TestCase):
jsi = JSInterpreter('function x(){return 1236566549 << 5}') jsi = JSInterpreter('function x(){return 1236566549 << 5}')
self.assertEqual(jsi.call_function('x'), 915423904) self.assertEqual(jsi.call_function('x'), 915423904)
def test_bitwise_operators_madness(self):
jsi = JSInterpreter('function x(){return null << 5}')
self.assertEqual(jsi.call_function('x'), 0)
jsi = JSInterpreter('function x(){return undefined >> 5}')
self.assertEqual(jsi.call_function('x'), 0)
jsi = JSInterpreter('function x(){return 42 << NaN}')
self.assertEqual(jsi.call_function('x'), 42)
jsi = JSInterpreter('function x(){return 42 << Infinity}')
self.assertEqual(jsi.call_function('x'), 42)
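The shift tests above encode ECMAScript's ToInt32/ToUint32 coercion: `null`, `undefined`, NaN and Infinity all coerce to 0, and the shift count is taken modulo 32, which is why `42 << NaN` stays 42. A rough standalone model (the function names are illustrative):

import math

def to_int32(v):
    # illustrative ToInt32: null/undefined/NaN/Infinity all become 0
    try:
        f = float(v)
    except (TypeError, ValueError):
        return 0
    if math.isnan(f) or math.isinf(f):
        return 0
    n = int(f) & 0xFFFFFFFF
    return n - 0x100000000 if n >= 0x80000000 else n

def js_shl(a, b):
    # shift count is the low 5 bits of ToUint32(b)
    return to_int32(to_int32(a) << (to_int32(b) & 31))

assert js_shl(None, 5) == 0
assert js_shl(42, float('nan')) == 42
assert js_shl(1236566549, 5) == 915423904  # matches the test above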
def test_32066(self):
jsi = JSInterpreter("function x(){return Math.pow(3, 5) + new Date('1970-01-01T08:01:42.000+08:00') / 1000 * -239 - -24205;}")
self.assertEqual(jsi.call_function('x'), 70)
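The expected value of 70 checks out by hand: Math.pow(3, 5) is 243; the Date is 08:01:42 at UTC+8, i.e. 00:01:42 UTC or 102000 ms after the epoch, so dividing by 1000 gives 102; and 243 + 102 * (-239) - (-24205) = 243 - 24378 + 24205 = 70.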
def test_unary_operators(self):
jsi = JSInterpreter('function f(){return 2 - - - 2;}')
self.assertEqual(jsi.call_function('f'), 0)
# fails
# jsi = JSInterpreter('function f(){return 2 + - + - - 2;}')
# self.assertEqual(jsi.call_function('f'), 0)
""" # fails so far """ # fails so far
def test_packed(self): def test_packed(self):
jsi = JSInterpreter('''function x(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''') jsi = JSInterpreter('''function x(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}''')
@ -295,6 +295,7 @@ class TestNRKSubtitles(BaseTestSubtitles):
def test_allsubtitles(self): def test_allsubtitles(self):
self.DL.params['writesubtitles'] = True self.DL.params['writesubtitles'] = True
self.DL.params['allsubtitles'] = True self.DL.params['allsubtitles'] = True
self.DL.params['format'] = 'best/bestvideo'
subtitles = self.getSubtitles() subtitles = self.getSubtitles()
self.assertEqual(set(subtitles.keys()), set(['nb-ttv'])) self.assertEqual(set(subtitles.keys()), set(['nb-ttv']))
self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149') self.assertEqual(md5(subtitles['nb-ttv']), '67e06ff02d0deaf975e68f6cb8f6a149')
@ -5,16 +5,18 @@ from __future__ import unicode_literals
import os import os
import sys import sys
import unittest import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
dirn = os.path.dirname
sys.path.insert(0, dirn(dirn(os.path.abspath(__file__))))
import errno import errno
import io
import json import json
import re import re
import subprocess import subprocess
from youtube_dl.swfinterp import SWFInterpreter from youtube_dl.swfinterp import SWFInterpreter
from youtube_dl.compat import compat_open as open
TEST_DIR = os.path.join( TEST_DIR = os.path.join(
@ -43,7 +45,7 @@ def _make_testfunc(testfile):
'-static-link-runtime-shared-libraries', as_file]) '-static-link-runtime-shared-libraries', as_file])
except OSError as ose: except OSError as ose:
if ose.errno == errno.ENOENT: if ose.errno == errno.ENOENT:
print('mxmlc not found! Skipping test.') self.skipTest('mxmlc not found!')
return return
raise raise
@ -51,7 +53,7 @@ def _make_testfunc(testfile):
swf_content = swf_f.read() swf_content = swf_f.read()
swfi = SWFInterpreter(swf_content) swfi = SWFInterpreter(swf_content)
with io.open(as_file, 'r', encoding='utf-8') as as_f: with open(as_file, 'r', encoding='utf-8') as as_f:
as_content = as_f.read() as_content = as_f.read()
def _find_spec(key): def _find_spec(key):
@ -2,19 +2,21 @@ from __future__ import unicode_literals
# Allow direct execution # Allow direct execution
import os import os
import re
import sys import sys
import unittest import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io dirn = os.path.dirname
import re
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) rootDir = dirn(dirn(os.path.abspath(__file__)))
sys.path.insert(0, rootDir)
IGNORED_FILES = [ IGNORED_FILES = [
'setup.py', # http://bugs.python.org/issue13943 'setup.py', # http://bugs.python.org/issue13943
'conf.py', 'conf.py',
'buildserver.py', 'buildserver.py',
'get-pip.py',
] ]
IGNORED_DIRS = [ IGNORED_DIRS = [
@ -23,6 +25,7 @@ IGNORED_DIRS = [
] ]
from test.helper import assertRegexpMatches from test.helper import assertRegexpMatches
from youtube_dl.compat import compat_open as open
class TestUnicodeLiterals(unittest.TestCase): class TestUnicodeLiterals(unittest.TestCase):
@ -40,7 +43,7 @@ class TestUnicodeLiterals(unittest.TestCase):
continue continue
fn = os.path.join(dirpath, basename) fn = os.path.join(dirpath, basename)
with io.open(fn, encoding='utf-8') as inf: with open(fn, encoding='utf-8') as inf:
code = inf.read() code = inf.read()
if "'" not in code and '"' not in code: if "'" not in code and '"' not in code:
@ -20,7 +20,7 @@ import xml.etree.ElementTree
from youtube_dl.utils import ( from youtube_dl.utils import (
age_restricted, age_restricted,
args_to_str, args_to_str,
encode_base_n, base_url,
caesar, caesar,
clean_html, clean_html,
clean_podcast_url, clean_podcast_url,
@ -29,10 +29,12 @@ from youtube_dl.utils import (
detect_exe_version, detect_exe_version,
determine_ext, determine_ext,
dict_get, dict_get,
encode_base_n,
encode_compat_str, encode_compat_str,
encodeFilename, encodeFilename,
escape_rfc3986, escape_rfc3986,
escape_url, escape_url,
expand_path,
extract_attributes, extract_attributes,
ExtractorError, ExtractorError,
find_xpath_attr, find_xpath_attr,
@ -51,6 +53,7 @@ from youtube_dl.utils import (
js_to_json, js_to_json,
LazyList, LazyList,
limit_length, limit_length,
lowercase_escape,
merge_dicts, merge_dicts,
mimetype2ext, mimetype2ext,
month_by_name, month_by_name,
@ -59,30 +62,33 @@ from youtube_dl.utils import (
OnDemandPagedList, OnDemandPagedList,
orderedSet, orderedSet,
parse_age_limit, parse_age_limit,
parse_bitrate,
parse_duration, parse_duration,
parse_filesize, parse_filesize,
parse_codecs,
parse_count, parse_count,
parse_iso8601, parse_iso8601,
parse_resolution, parse_resolution,
parse_bitrate, parse_qs,
pkcs1pad, pkcs1pad,
read_batch_urls,
sanitize_filename,
sanitize_path,
sanitize_url,
expand_path,
prepend_extension, prepend_extension,
replace_extension, read_batch_urls,
remove_start, remove_start,
remove_end, remove_end,
remove_quotes, remove_quotes,
replace_extension,
rot47, rot47,
sanitize_filename,
sanitize_path,
sanitize_url,
shell_quote, shell_quote,
smuggle_url, smuggle_url,
str_or_none,
str_to_int, str_to_int,
strip_jsonp, strip_jsonp,
strip_or_none, strip_or_none,
subtitles_filename, subtitles_filename,
T,
timeconvert, timeconvert,
traverse_obj, traverse_obj,
try_call, try_call,
@ -91,10 +97,8 @@ from youtube_dl.utils import (
unified_timestamp, unified_timestamp,
unsmuggle_url, unsmuggle_url,
uppercase_escape, uppercase_escape,
lowercase_escape,
url_basename, url_basename,
url_or_none, url_or_none,
base_url,
urljoin, urljoin,
urlencode_postdata, urlencode_postdata,
urshift, urshift,
@ -112,7 +116,7 @@ from youtube_dl.utils import (
cli_option, cli_option,
cli_valueless_option, cli_valueless_option,
cli_bool_option, cli_bool_option,
parse_codecs, YoutubeDLHandler,
) )
from youtube_dl.compat import ( from youtube_dl.compat import (
compat_chr, compat_chr,
@ -122,7 +126,6 @@ from youtube_dl.compat import (
compat_setenv, compat_setenv,
compat_str, compat_str,
compat_urlparse, compat_urlparse,
compat_parse_qs,
) )
@ -680,38 +683,36 @@ class TestUtil(unittest.TestCase):
self.assertTrue(isinstance(data, bytes)) self.assertTrue(isinstance(data, bytes))
def test_update_url_query(self): def test_update_url_query(self):
def query_dict(url): self.assertEqual(parse_qs(update_url_query(
return compat_parse_qs(compat_urlparse.urlparse(url).query)
self.assertEqual(query_dict(update_url_query(
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})), 'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
query_dict('http://example.com/path?quality=HD&format=mp4')) parse_qs('http://example.com/path?quality=HD&format=mp4'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})), 'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
query_dict('http://example.com/path?system=LINUX&system=WINDOWS')) parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': 'id,formats,subtitles'})), 'http://example.com/path', {'fields': 'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles')) parse_qs('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})), 'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails')) parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path?manifest=f4m', {'manifest': []})), 'http://example.com/path?manifest=f4m', {'manifest': []})),
query_dict('http://example.com/path')) parse_qs('http://example.com/path'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})), 'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
query_dict('http://example.com/path?system=LINUX')) parse_qs('http://example.com/path?system=LINUX'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': b'id,formats,subtitles'})), 'http://example.com/path', {'fields': b'id,formats,subtitles'})),
query_dict('http://example.com/path?fields=id,formats,subtitles')) parse_qs('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'width': 1080, 'height': 720})), 'http://example.com/path', {'width': 1080, 'height': 720})),
query_dict('http://example.com/path?width=1080&height=720')) parse_qs('http://example.com/path?width=1080&height=720'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'bitrate': 5020.43})), 'http://example.com/path', {'bitrate': 5020.43})),
query_dict('http://example.com/path?bitrate=5020.43')) parse_qs('http://example.com/path?bitrate=5020.43'))
self.assertEqual(query_dict(update_url_query( self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'test': '第二行тест'})), 'http://example.com/path', {'test': '第二行тест'})),
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82')) parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
def test_multipart_encode(self): def test_multipart_encode(self):
self.assertEqual( self.assertEqual(
@ -903,6 +904,111 @@ class TestUtil(unittest.TestCase):
) )
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0') self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_remove_dot_segments(self):
def remove_dot_segments(p):
q = '' if p.startswith('/') else '/'
p = 'http://example.com' + q + p
p = compat_urlparse.urlsplit(YoutubeDLHandler._fix_path(p)).path
return p[1:] if q else p
self.assertEqual(remove_dot_segments('/a/b/c/./../../g'), '/a/g')
self.assertEqual(remove_dot_segments('mid/content=5/../6'), 'mid/6')
self.assertEqual(remove_dot_segments('/ad/../cd'), '/cd')
self.assertEqual(remove_dot_segments('/ad/../cd/'), '/cd/')
self.assertEqual(remove_dot_segments('/..'), '/')
self.assertEqual(remove_dot_segments('/./'), '/')
self.assertEqual(remove_dot_segments('/./a'), '/a')
self.assertEqual(remove_dot_segments('/abc/./.././d/././e/.././f/./../../ghi'), '/ghi')
self.assertEqual(remove_dot_segments('/'), '/')
self.assertEqual(remove_dot_segments('/t'), '/t')
self.assertEqual(remove_dot_segments('t'), 't')
self.assertEqual(remove_dot_segments(''), '')
self.assertEqual(remove_dot_segments('/../a/b/c'), '/a/b/c')
self.assertEqual(remove_dot_segments('../a'), 'a')
self.assertEqual(remove_dot_segments('./a'), 'a')
self.assertEqual(remove_dot_segments('.'), '')
self.assertEqual(remove_dot_segments('////'), '////')
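The expected values above follow the remove_dot_segments algorithm of RFC 3986 section 5.2.4. A standalone list-based version that reproduces every case in this test (a sketch only; in the code under test the logic lives in YoutubeDLHandler._fix_path):

def remove_dot_segments(path):
    # RFC 3986 sec. 5.2.4 "Remove Dot Segments", on '/'-split segments
    output = []
    segments = path.split('/')
    for s in segments:
        if s == '.':
            continue
        elif s == '..':
            if output:
                output.pop()
        else:
            output.append(s)
    # restore the leading slash if '..' consumed the root marker
    if not segments[0] and (not output or output[0]):
        output.insert(0, '')
    # a trailing '.' or '..' leaves a trailing slash
    if segments[-1] in ('.', '..'):
        output.append('')
    return '/'.join(output)

assert remove_dot_segments('/a/b/c/./../../g') == '/a/g'
assert remove_dot_segments('mid/content=5/../6') == 'mid/6'
assert remove_dot_segments('/..') == '/'
assert remove_dot_segments('////') == '////'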
def test_js_to_json_vars_strings(self):
self.assertDictEqual(
json.loads(js_to_json(
'''{
'null': a,
'nullStr': b,
'true': c,
'trueStr': d,
'false': e,
'falseStr': f,
'unresolvedVar': g,
}''',
{
'a': 'null',
'b': '"null"',
'c': 'true',
'd': '"true"',
'e': 'false',
'f': '"false"',
'g': 'var',
}
)),
{
'null': None,
'nullStr': 'null',
'true': True,
'trueStr': 'true',
'false': False,
'falseStr': 'false',
'unresolvedVar': 'var'
}
)
self.assertDictEqual(
json.loads(js_to_json(
'''{
'int': a,
'intStr': b,
'float': c,
'floatStr': d,
}''',
{
'a': '123',
'b': '"123"',
'c': '1.23',
'd': '"1.23"',
}
)),
{
'int': 123,
'intStr': '123',
'float': 1.23,
'floatStr': '1.23',
}
)
self.assertDictEqual(
json.loads(js_to_json(
'''{
'object': a,
'objectStr': b,
'array': c,
'arrayStr': d,
}''',
{
'a': '{}',
'b': '"{}"',
'c': '[]',
'd': '"[]"',
}
)),
{
'object': {},
'objectStr': '{}',
'array': [],
'arrayStr': '[]',
}
)
def test_js_to_json_realworld(self): def test_js_to_json_realworld(self):
inp = '''{ inp = '''{
'clip':{'provider':'pseudo'} 'clip':{'provider':'pseudo'}
@ -973,10 +1079,10 @@ class TestUtil(unittest.TestCase):
!42: 42 !42: 42
}''') }''')
self.assertEqual(json.loads(on), { self.assertEqual(json.loads(on), {
'a': 0, 'a': True,
'b': 1, 'b': False,
'c': 0, 'c': False,
'd': 42.42, 'd': True,
'e': [], 'e': [],
'f': "abc", 'f': "abc",
'g': "", 'g': "",
@ -1046,10 +1152,26 @@ class TestUtil(unittest.TestCase):
on = js_to_json('{ "040": "040" }') on = js_to_json('{ "040": "040" }')
self.assertEqual(json.loads(on), {'040': '040'}) self.assertEqual(json.loads(on), {'040': '040'})
on = js_to_json('[1,//{},\n2]')
self.assertEqual(json.loads(on), [1, 2])
on = js_to_json(r'"\^\$\#"')
self.assertEqual(json.loads(on), R'^$#', msg='Unnecessary escapes should be stripped')
on = js_to_json('\'"\\""\'')
self.assertEqual(json.loads(on), '"""', msg='Unnecessary quote escape should be escaped')
def test_js_to_json_malformed(self): def test_js_to_json_malformed(self):
self.assertEqual(js_to_json('42a1'), '42"a1"') self.assertEqual(js_to_json('42a1'), '42"a1"')
self.assertEqual(js_to_json('42a-1'), '42"a"-1') self.assertEqual(js_to_json('42a-1'), '42"a"-1')
def test_js_to_json_template_literal(self):
self.assertEqual(js_to_json('`Hello ${name}`', {'name': '"world"'}), '"Hello world"')
self.assertEqual(js_to_json('`${name}${name}`', {'name': '"X"'}), '"XX"')
self.assertEqual(js_to_json('`${name}${name}`', {'name': '5'}), '"55"')
self.assertEqual(js_to_json('`${name}"${name}"`', {'name': '5'}), '"5\\"5\\""')
self.assertEqual(js_to_json('`${name}`', {}), '"name"')
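Both features combine naturally with json.loads: variables are supplied as already-JSON-encoded replacement strings, and template literals are resolved before the result is parsed. For example (mirroring the assertions above):

import json
from youtube_dl.utils import js_to_json

assert json.loads(js_to_json('`Hello ${name}`', {'name': '"world"'})) == 'Hello world'
assert json.loads(js_to_json("{'value': a}", {'a': '123'})) == {'value': 123}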
def test_extract_attributes(self): def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'}) self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'}) self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
@ -1563,8 +1685,10 @@ Line 1
self.assertEqual(variadic(None), (None, )) self.assertEqual(variadic(None), (None, ))
self.assertEqual(variadic('spam'), ('spam', )) self.assertEqual(variadic('spam'), ('spam', ))
self.assertEqual(variadic('spam', allowed_types=dict), 'spam') self.assertEqual(variadic('spam', allowed_types=dict), 'spam')
self.assertEqual(variadic('spam', allowed_types=[dict]), 'spam')
def test_traverse_obj(self): def test_traverse_obj(self):
str = compat_str
_TEST_DATA = { _TEST_DATA = {
100: 100, 100: 100,
1.2: 1.2, 1.2: 1.2,
@ -1582,6 +1706,11 @@ Line 1
'dict': {}, 'dict': {},
} }
# define a pukka Iterable
def iter_range(stop):
for from_ in range(stop):
yield from_
# Test base functionality # Test base functionality
self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str', self.assertEqual(traverse_obj(_TEST_DATA, ('str',)), 'str',
msg='allow tuple path') msg='allow tuple path')
@ -1597,22 +1726,60 @@ Line 1
# Test Ellipsis behavior # Test Ellipsis behavior
self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis), self.assertCountEqual(traverse_obj(_TEST_DATA, Ellipsis),
(item for item in _TEST_DATA.values() if item is not None), (item for item in _TEST_DATA.values() if item not in (None, {})),
msg='`...` should give all values except `None`') msg='`...` should give all non-discarded values')
self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(), self.assertCountEqual(traverse_obj(_TEST_DATA, ('urls', 0, Ellipsis)), _TEST_DATA['urls'][0].values(),
msg='`...` selection for dicts should select all values') msg='`...` selection for dicts should select all values')
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')), self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'url')),
['https://www.example.com/0', 'https://www.example.com/1'], ['https://www.example.com/0', 'https://www.example.com/1'],
msg='nested `...` queries should work') msg='nested `...` queries should work')
self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), range(4), self.assertCountEqual(traverse_obj(_TEST_DATA, (Ellipsis, Ellipsis, 'index')), iter_range(4),
msg='`...` query result should be flattened') msg='`...` query result should be flattened')
self.assertEqual(traverse_obj(iter(range(4)), Ellipsis), list(range(4)),
msg='`...` should accept iterables')
# Test function as key # Test function as key
self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)), self.assertEqual(traverse_obj(_TEST_DATA, lambda x, y: x == 'urls' and isinstance(y, list)),
[_TEST_DATA['urls']], [_TEST_DATA['urls']],
msg='function as query key should perform a filter based on (key, value)') msg='function as query key should perform a filter based on (key, value)')
self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], compat_str)), {'str'}, self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), set(('str',)),
msg='exceptions in the query function should be caught') msg='exceptions in the query function should be caught')
self.assertEqual(traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0), [0, 2],
msg='function key should accept iterables')
if __debug__:
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
traverse_obj(_TEST_DATA, lambda a: Ellipsis)
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
traverse_obj(_TEST_DATA, lambda a, b, c: Ellipsis)
# Test set as key (transformation/type, like `expected_type`)
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper), )), ['STR'],
msg='Function in set should be a transformation')
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str))), ['str'],
msg='Type in set should be a type filter')
self.assertEqual(traverse_obj(_TEST_DATA, T(dict)), _TEST_DATA,
msg='A single set should be wrapped into a path')
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str.upper))), ['STR'],
msg='Transformation function should not raise')
self.assertEqual(traverse_obj(_TEST_DATA, (Ellipsis, T(str_or_none))),
[item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
msg='Function in set should be a transformation')
if __debug__:
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
traverse_obj(_TEST_DATA, set())
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
traverse_obj(_TEST_DATA, set((str.upper, str)))
# Test `slice` as a key
_SLICE_DATA = [0, 1, 2, 3, 4]
self.assertEqual(traverse_obj(_TEST_DATA, ('dict', slice(1))), None,
msg='slice on a dictionary should not throw')
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1)), _SLICE_DATA[:1],
msg='slice key should apply slice to sequence')
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 2)), _SLICE_DATA[1:2],
msg='slice key should apply slice to sequence')
self.assertEqual(traverse_obj(_SLICE_DATA, slice(1, 4, 2)), _SLICE_DATA[1:4:2],
msg='slice key should apply slice to sequence')
# Test alternative paths # Test alternative paths
self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str', self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
@ -1658,15 +1825,23 @@ Line 1
{0: ['https://www.example.com/1', 'https://www.example.com/0']}, {0: ['https://www.example.com/1', 'https://www.example.com/0']},
msg='triple nesting in dict path should be treated as branches') msg='triple nesting in dict path should be treated as branches')
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {}, self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}), {},
msg='remove `None` values when dict key') msg='remove `None` values when top level dict key fails')
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis}, self.assertEqual(traverse_obj(_TEST_DATA, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
msg='do not remove `None` values if `default`') msg='use `default` if key fails and `default`')
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {0: {}}, self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}), {},
msg='do not remove empty values when dict key') msg='remove empty values when dict key')
self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: {}}, self.assertEqual(traverse_obj(_TEST_DATA, {0: 'dict'}, default=Ellipsis), {0: Ellipsis},
msg='do not remove empty values when dict key and a default') msg='use `default` when dict key and a default')
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {0: []}, self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}), {},
msg='if branch in dict key not successful, return `[]`') msg='remove empty values when nested dict key fails')
self.assertEqual(traverse_obj(None, {0: 'fail'}), {},
msg='default to dict if pruned')
self.assertEqual(traverse_obj(None, {0: 'fail'}, default=Ellipsis), {0: Ellipsis},
msg='default to dict if pruned and default is given')
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=Ellipsis), {0: {0: Ellipsis}},
msg='use nested `default` when nested dict key fails and `default`')
self.assertEqual(traverse_obj(_TEST_DATA, {0: ('dict', Ellipsis)}), {},
msg='remove key if branch in dict key not successful')
# Testing default parameter behavior # Testing default parameter behavior
_DEFAULT_DATA = {'None': None, 'int': 0, 'list': []} _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}
@ -1690,20 +1865,55 @@ Line 1
msg='if branched but not successful return `[]`, not `default`') msg='if branched but not successful return `[]`, not `default`')
self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [], self.assertEqual(traverse_obj(_DEFAULT_DATA, ('list', Ellipsis)), [],
msg='if branched but object is empty return `[]`, not `default`') msg='if branched but object is empty return `[]`, not `default`')
self.assertEqual(traverse_obj(None, Ellipsis), [],
msg='if branched but object is `None` return `[]`, not `default`')
self.assertEqual(traverse_obj({0: None}, (0, Ellipsis)), [],
msg='if branched but state is `None` return `[]`, not `default`')
branching_paths = [
('fail', Ellipsis),
(Ellipsis, 'fail'),
100 * ('fail',) + (Ellipsis,),
(Ellipsis,) + 100 * ('fail',),
]
for branching_path in branching_paths:
self.assertEqual(traverse_obj({}, branching_path), [],
msg='if branched but state is `None`, return `[]` (not `default`)')
self.assertEqual(traverse_obj({}, 'fail', branching_path), [],
msg='if branching in last alternative and previous did not match, return `[]` (not `default`)')
self.assertEqual(traverse_obj({0: 'x'}, 0, branching_path), 'x',
msg='if branching in last alternative and previous did match, return single value')
self.assertEqual(traverse_obj({0: 'x'}, branching_path, 0), 'x',
msg='if branching in first alternative and non-branching path does match, return single value')
self.assertEqual(traverse_obj({}, branching_path, 'fail'), None,
msg='if branching in first alternative and non-branching path does not match, return `default`')
# Testing expected_type behavior # Testing expected_type behavior
_EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0} _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=compat_str), 'str', self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str),
msg='accept matching `expected_type` type') 'str', msg='accept matching `expected_type` type')
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int), None, self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int),
msg='reject non matching `expected_type` type') None, msg='reject non-matching `expected_type` type')
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: compat_str(x)), '0', self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)),
msg='transform type using type function') '0', msg='transform type using type function')
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0),
expected_type=lambda _: 1 / 0), None, None, msg='wrap expected_type function in try_call')
msg='wrap expected_type function in try_call') self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=str),
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, Ellipsis, expected_type=compat_str), ['str'], ['str'], msg='eliminate items that expected_type fails on')
msg='eliminate items that expected_type fails on') self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int),
{0: 100}, msg='type as expected_type should filter dict values')
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none),
{0: '100', 1: '1.2'}, msg='function as expected_type should transform dict values')
self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, set((int_or_none,))), expected_type=int),
1, msg='expected_type should not filter non-final dict values')
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int),
{0: {0: 100}}, msg='expected_type should transform deep dict values')
self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(Ellipsis)),
[{0: Ellipsis}, {0: Ellipsis}], msg='expected_type should transform branched dict values')
self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int),
[4], msg='expected_type regression for type matching in tuple branching')
self.assertEqual(traverse_obj(_TEST_DATA, ['data', Ellipsis], expected_type=int),
[], msg='expected_type regression for type matching in dict result')
# Test get_all behavior # Test get_all behavior
_GET_ALL_DATA = {'key': [0, 1, 2]} _GET_ALL_DATA = {'key': [0, 1, 2]}
@ -1748,14 +1958,23 @@ Line 1
_traverse_string=True), '.', _traverse_string=True), '.',
msg='traverse into converted data if `traverse_string`') msg='traverse into converted data if `traverse_string`')
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis), self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', Ellipsis),
_traverse_string=True), list('str'), _traverse_string=True), 'str',
msg='`...` branching into string should result in list') msg='`...` should result in string (same value) if `traverse_string`')
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)),
_traverse_string=True), 'sr',
msg='`slice` should result in string if `traverse_string`')
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == 's'),
_traverse_string=True), 'str',
msg='function should result in string if `traverse_string`')
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)),
_traverse_string=True), ['s', 'r'], _traverse_string=True), ['s', 'r'],
msg='branching into string should result in list') msg='branching should result in list if `traverse_string`')
self.assertEqual(traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda _, x: x), self.assertEqual(traverse_obj({}, (0, Ellipsis), _traverse_string=True), [],
_traverse_string=True), list('str'), msg='branching should result in list if `traverse_string`')
msg='function branching into string should result in list') self.assertEqual(traverse_obj({}, (0, lambda x, y: True), _traverse_string=True), [],
msg='branching should result in list if `traverse_string`')
self.assertEqual(traverse_obj({}, (0, slice(1)), _traverse_string=True), [],
msg='branching should result in list if `traverse_string`')
# Test is_user_input behavior # Test is_user_input behavior
_IS_USER_INPUT_DATA = {'range8': list(range(8))} _IS_USER_INPUT_DATA = {'range8': list(range(8))}
@ -1792,6 +2011,8 @@ Line 1
msg='failing str key on a `re.Match` should return `default`') msg='failing str key on a `re.Match` should return `default`')
self.assertEqual(traverse_obj(mobj, 8), None, self.assertEqual(traverse_obj(mobj, 8), None,
msg='failing int key on a `re.Match` should return `default`') msg='failing int key on a `re.Match` should return `default`')
self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
msg='function on a `re.Match` should give group name as well')
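A compact summary of the traverse_obj behaviour these tests pin down, as a usage sketch (the data is illustrative):

from youtube_dl.utils import T, traverse_obj

data = {'formats': [{'url': 'https://a/hd', 'height': 720},
                    {'url': 'https://a/sd'}]}

traverse_obj(data, ('formats', 0, 'url'))             # 'https://a/hd'
traverse_obj(data, ('formats', Ellipsis, 'height'))   # [720]: branch, drop misses
traverse_obj(data, ('formats', 0, 'height', T(str)))  # None: T(type) is a type filter
traverse_obj(data, ('formats', 0, T(dict)))           # the first format dict itself
traverse_obj(data, 'missing', ('formats', 1, 'url'))  # 'https://a/sd': alternatives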
def test_get_first(self): def test_get_first(self):
self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam') self.assertEqual(get_first([{'a': None}, {'a': 'spam'}], 'a'), 'spam')
@ -11,12 +11,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import get_params, try_rm from test.helper import get_params, try_rm
import io
import xml.etree.ElementTree import xml.etree.ElementTree
import youtube_dl.YoutubeDL import youtube_dl.YoutubeDL
import youtube_dl.extractor import youtube_dl.extractor
from youtube_dl.compat import compat_open as open
class YoutubeDL(youtube_dl.YoutubeDL): class YoutubeDL(youtube_dl.YoutubeDL):
@ -51,7 +50,7 @@ class TestAnnotations(unittest.TestCase):
ydl.download([TEST_ID]) ydl.download([TEST_ID])
self.assertTrue(os.path.exists(ANNOTATIONS_FILE)) self.assertTrue(os.path.exists(ANNOTATIONS_FILE))
annoxml = None annoxml = None
with io.open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof: with open(ANNOTATIONS_FILE, 'r', encoding='utf-8') as annof:
annoxml = xml.etree.ElementTree.parse(annof) annoxml = xml.etree.ElementTree.parse(annof)
self.assertTrue(annoxml is not None, 'Failed to parse annotations XML') self.assertTrue(annoxml is not None, 'Failed to parse annotations XML')
root = annoxml.getroot() root = annoxml.getroot()
@ -8,11 +8,14 @@ import sys
import unittest import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import io
import re import re
import string import string
from youtube_dl.compat import compat_str, compat_urlretrieve from youtube_dl.compat import (
compat_open as open,
compat_str,
compat_urlretrieve,
)
from test.helper import FakeYDL from test.helper import FakeYDL
from youtube_dl.extractor import YoutubeIE from youtube_dl.extractor import YoutubeIE
@ -143,6 +146,18 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js', 'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js',
'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A', 'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A',
), ),
(
'https://www.youtube.com/s/player/6f20102c/player_ias.vflset/en_US/base.js',
'lE8DhoDmKqnmJJ', 'pJTTX6XyJP2BYw',
),
(
'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
),
(
'https://www.youtube.com/s/player/b7910ca8/player_ias.vflset/en_US/base.js',
'_hXMCwMt9qE310D', 'LoZMgkkofRMCZQ',
),
] ]
@ -196,7 +211,7 @@ def t_factory(name, sig_func, url_pattern):
if not os.path.exists(fn): if not os.path.exists(fn):
compat_urlretrieve(url, fn) compat_urlretrieve(url, fn)
with io.open(fn, encoding='utf-8') as testf: with open(fn, encoding='utf-8') as testf:
jscode = testf.read() jscode = testf.read()
self.assertEqual(sig_func(jscode, sig_input), expected_sig) self.assertEqual(sig_func(jscode, sig_input), expected_sig)
test/testdata/mpd/range_only.mpd
@ -0,0 +1,35 @@
<?xml version="1.0"?>
<!-- MPD file Generated with GPAC version 1.0.1-revrelease at 2021-11-27T20:53:11.690Z -->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" minBufferTime="PT1.500S" type="static" mediaPresentationDuration="PT0H0M30.196S" maxSegmentDuration="PT0H0M10.027S" profiles="urn:mpeg:dash:profile:full:2011">
<ProgramInformation moreInformationURL="http://gpac.io">
<Title>manifest.mpd generated by GPAC</Title>
</ProgramInformation>
<Period duration="PT0H0M30.196S">
<AdaptationSet segmentAlignment="true" maxWidth="768" maxHeight="432" maxFrameRate="30000/1001" par="16:9" lang="und" startWithSAP="1">
<Representation id="1" mimeType="video/mp4" codecs="avc1.4D401E" width="768" height="432" frameRate="30000/1001" sar="1:1" bandwidth="526987">
<BaseURL>video_dashinit.mp4</BaseURL>
<SegmentList timescale="90000" duration="900000">
<Initialization range="0-881"/>
<SegmentURL mediaRange="882-876094" indexRange="882-925"/>
<SegmentURL mediaRange="876095-1466732" indexRange="876095-876138"/>
<SegmentURL mediaRange="1466733-1953615" indexRange="1466733-1466776"/>
<SegmentURL mediaRange="1953616-1994211" indexRange="1953616-1953659"/>
</SegmentList>
</Representation>
</AdaptationSet>
<AdaptationSet segmentAlignment="true" lang="und" startWithSAP="1">
<Representation id="2" mimeType="audio/mp4" codecs="mp4a.40.2" audioSamplingRate="48000" bandwidth="98096">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>audio_dashinit.mp4</BaseURL>
<SegmentList timescale="48000" duration="480000">
<Initialization range="0-752"/>
<SegmentURL mediaRange="753-124129" indexRange="753-796"/>
<SegmentURL mediaRange="124130-250544" indexRange="124130-124173"/>
<SegmentURL mediaRange="250545-374929" indexRange="250545-250588"/>
</SegmentList>
</Representation>
</AdaptationSet>
</Period>
</MPD>
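This fixture exercises SegmentList addressing: every segment is a byte range into a single file, so a client resolves BaseURL against the MPD's own location and turns each mediaRange (and the Initialization range) into an HTTP Range request. Roughly, with a hypothetical helper (video_dashinit.mp4 is relative and would first be resolved to an absolute URL):

try:
    from urllib.request import Request, urlopen  # Python 3
except ImportError:
    from urllib2 import Request, urlopen  # Python 2

def fetch_range(url, byte_range):
    # a mediaRange such as '882-876094' maps directly onto a Range header
    return urlopen(Request(url, headers={'Range': 'bytes=' + byte_range})).read()

# init = fetch_range(video_url, '0-881')        # Initialization@range
# seg1 = fetch_range(video_url, '882-876094')   # first SegmentURL@mediaRange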
test/testdata/mpd/subtitles.mpd
@ -0,0 +1,351 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Created with Unified Streaming Platform (version=1.10.18-20255) -->
<MPD
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="urn:mpeg:dash:schema:mpd:2011"
xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd"
type="static"
mediaPresentationDuration="PT14M48S"
maxSegmentDuration="PT1M"
minBufferTime="PT10S"
profiles="urn:mpeg:dash:profile:isoff-live:2011">
<Period
id="1"
duration="PT14M48S">
<BaseURL>dash/</BaseURL>
<AdaptationSet
id="1"
group="1"
contentType="audio"
segmentAlignment="true"
audioSamplingRate="48000"
mimeType="audio/mp4"
codecs="mp4a.40.2"
startWithSAP="1">
<AudioChannelConfiguration
schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011"
value="2" />
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
<SegmentTemplate
timescale="48000"
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
<SegmentTimeline>
<S t="0" d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="96256" r="2" />
<S d="95232" />
<S d="3584" />
</SegmentTimeline>
</SegmentTemplate>
<Representation
id="audio=128001"
bandwidth="128001">
</Representation>
</AdaptationSet>
<AdaptationSet
id="2"
group="3"
contentType="text"
lang="en"
mimeType="application/mp4"
codecs="stpp"
startWithSAP="1">
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle" />
<SegmentTemplate
timescale="1000"
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
<SegmentTimeline>
<S t="0" d="60000" r="9" />
<S d="24000" />
</SegmentTimeline>
</SegmentTemplate>
<Representation
id="textstream_eng=1000"
bandwidth="1000">
</Representation>
</AdaptationSet>
<AdaptationSet
id="3"
group="2"
contentType="video"
par="960:409"
minBandwidth="100000"
maxBandwidth="4482000"
maxWidth="1689"
maxHeight="720"
segmentAlignment="true"
mimeType="video/mp4"
codecs="avc1.4D401F"
startWithSAP="1">
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="main" />
<SegmentTemplate
timescale="12288"
initialization="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$.dash"
media="3144-kZT4LWMQw6Rh7Kpd-$RepresentationID$-$Time$.dash">
<SegmentTimeline>
<S t="0" d="24576" r="443" />
</SegmentTimeline>
</SegmentTemplate>
<Representation
id="video=100000"
bandwidth="100000"
width="336"
height="144"
sar="2880:2863"
scanType="progressive">
</Representation>
<Representation
id="video=326000"
bandwidth="326000"
width="562"
height="240"
sar="115200:114929"
scanType="progressive">
</Representation>
<Representation
id="video=698000"
bandwidth="698000"
width="844"
height="360"
sar="86400:86299"
scanType="progressive">
</Representation>
<Representation
id="video=1493000"
bandwidth="1493000"
width="1126"
height="480"
sar="230400:230267"
scanType="progressive">
</Representation>
<Representation
id="video=4482000"
bandwidth="4482000"
width="1688"
height="720"
sar="86400:86299"
scanType="progressive">
</Representation>
</AdaptationSet>
</Period>
</MPD>
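A consistency check on the video timing above: the single S entry with d="24576" and r="443" describes 1 + 443 = 444 segments of 24576 ticks at a timescale of 12288, i.e. 2 seconds each, so the track runs 444 * 24576 / 12288 = 888 seconds = PT14M48S, matching the declared mediaPresentationDuration.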
test/testdata/mpd/url_and_range.mpd
@ -0,0 +1,32 @@
<?xml version="1.0" ?>
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" profiles="urn:mpeg:dash:profile:isoff-live:2011" minBufferTime="PT10.01S" mediaPresentationDuration="PT30.097S" type="static">
<!-- Created with Bento4 mp4-dash.py, VERSION=2.0.0-639 -->
<Period>
<!-- Video -->
<AdaptationSet mimeType="video/mp4" segmentAlignment="true" startWithSAP="1" maxWidth="768" maxHeight="432">
<Representation id="video-avc1" codecs="avc1.4D401E" width="768" height="432" scanType="progressive" frameRate="30000/1001" bandwidth="699597">
<SegmentList timescale="1000" duration="10010">
<Initialization sourceURL="video-frag.mp4" range="36-746"/>
<SegmentURL media="video-frag.mp4" mediaRange="747-876117"/>
<SegmentURL media="video-frag.mp4" mediaRange="876118-1466913"/>
<SegmentURL media="video-frag.mp4" mediaRange="1466914-1953954"/>
<SegmentURL media="video-frag.mp4" mediaRange="1953955-1994652"/>
</SegmentList>
</Representation>
</AdaptationSet>
<!-- Audio -->
<AdaptationSet mimeType="audio/mp4" startWithSAP="1" segmentAlignment="true">
<Representation id="audio-und-mp4a.40.2" codecs="mp4a.40.2" bandwidth="98808" audioSamplingRate="48000">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:mpegB:cicp:ChannelConfiguration" value="2"/>
<SegmentList timescale="1000" duration="10010">
<Initialization sourceURL="audio-frag.mp4" range="32-623"/>
<SegmentURL media="audio-frag.mp4" mediaRange="624-124199"/>
<SegmentURL media="audio-frag.mp4" mediaRange="124200-250303"/>
<SegmentURL media="audio-frag.mp4" mediaRange="250304-374365"/>
<SegmentURL media="audio-frag.mp4" mediaRange="374366-374836"/>
</SegmentList>
</Representation>
</AdaptationSet>
</Period>
</MPD>
@ -4,11 +4,9 @@
from __future__ import absolute_import, unicode_literals from __future__ import absolute_import, unicode_literals
import collections import collections
import contextlib
import copy import copy
import datetime import datetime
import errno import errno
import fileinput
import io import io
import itertools import itertools
import json import json
@ -26,15 +24,26 @@ import tokenize
import traceback import traceback
import random import random
try:
from ssl import OPENSSL_VERSION
except ImportError:
# Must be Python 2.6, should be built against 1.0.2
OPENSSL_VERSION = 'OpenSSL 1.0.2(?)'
from string import ascii_letters from string import ascii_letters
from .compat import ( from .compat import (
compat_basestring, compat_basestring,
compat_cookiejar, compat_collections_chain_map as ChainMap,
compat_filter as filter,
compat_get_terminal_size, compat_get_terminal_size,
compat_http_client, compat_http_client,
compat_http_cookiejar_Cookie,
compat_http_cookies_SimpleCookie,
compat_integer_types,
compat_kwargs, compat_kwargs,
compat_map as map,
compat_numeric_types, compat_numeric_types,
compat_open as open,
compat_os_name, compat_os_name,
compat_str, compat_str,
compat_tokenize_tokenize, compat_tokenize_tokenize,
@ -46,6 +55,7 @@ from .compat import (
from .utils import ( from .utils import (
age_restricted, age_restricted,
args_to_str, args_to_str,
bug_reports_message,
ContentTooShortError, ContentTooShortError,
date_from_str, date_from_str,
DateRange, DateRange,
@ -61,10 +71,11 @@ from .utils import (
format_bytes, format_bytes,
formatSeconds, formatSeconds,
GeoRestrictedError, GeoRestrictedError,
HEADRequest,
int_or_none, int_or_none,
ISO3166Utils, ISO3166Utils,
join_nonempty,
locked_file, locked_file,
LazyList,
make_HTTPS_handler, make_HTTPS_handler,
MaxDownloadsReached, MaxDownloadsReached,
orderedSet, orderedSet,
@ -76,7 +87,6 @@ from .utils import (
preferredencoding, preferredencoding,
prepend_extension, prepend_extension,
process_communicate_or_kill, process_communicate_or_kill,
PUTRequest,
register_socks_protocols, register_socks_protocols,
render_table, render_table,
replace_extension, replace_extension,
@ -88,6 +98,7 @@ from .utils import (
std_headers, std_headers,
str_or_none, str_or_none,
subtitles_filename, subtitles_filename,
traverse_obj,
UnavailableVideoError, UnavailableVideoError,
url_basename, url_basename,
version_tuple, version_tuple,
@ -97,6 +108,7 @@ from .utils import (
YoutubeDLCookieProcessor, YoutubeDLCookieProcessor,
YoutubeDLHandler, YoutubeDLHandler,
YoutubeDLRedirectHandler, YoutubeDLRedirectHandler,
ytdl_is_updateable,
) )
from .cache import Cache from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
@ -366,6 +378,9 @@ class YoutubeDL(object):
self.params.update(params) self.params.update(params)
self.cache = Cache(self) self.cache = Cache(self)
self._header_cookies = []
self._load_cookies_from_headers(self.params.get('http_headers'))
def check_deprecated(param, option, suggestion): def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None: if self.params.get(param) is not None:
self.report_warning( self.report_warning(
@ -572,7 +587,7 @@ class YoutubeDL(object):
if self.params.get('cookiefile') is not None: if self.params.get('cookiefile') is not None:
self.cookiejar.save(ignore_discard=True, ignore_expires=True) self.cookiejar.save(ignore_discard=True, ignore_expires=True)
def trouble(self, message=None, tb=None): def trouble(self, *args, **kwargs):
"""Determine action to take when a download problem appears. """Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore Depending on if the downloader has been configured to ignore
@ -581,6 +596,11 @@ class YoutubeDL(object):
tb, if given, is additional traceback information. tb, if given, is additional traceback information.
""" """
# message=None, tb=None, is_error=True
message = args[0] if len(args) > 0 else kwargs.get('message', None)
tb = args[1] if len(args) > 1 else kwargs.get('tb', None)
is_error = args[2] if len(args) > 2 else kwargs.get('is_error', True)
if message is not None: if message is not None:
self.to_stderr(message) self.to_stderr(message)
if self.params.get('verbose'): if self.params.get('verbose'):
@ -593,7 +613,10 @@ class YoutubeDL(object):
else: else:
tb_data = traceback.format_list(traceback.extract_stack()) tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data) tb = ''.join(tb_data)
self.to_stderr(tb) if tb:
self.to_stderr(tb)
if not is_error:
return
if not self.params.get('ignoreerrors', False): if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]: if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info exc_info = sys.exc_info()[1].exc_info
@ -602,11 +625,18 @@ class YoutubeDL(object):
raise DownloadError(message, exc_info) raise DownloadError(message, exc_info)
self._download_retcode = 1 self._download_retcode = 1
def report_warning(self, message): def report_warning(self, message, only_once=False, _cache={}):
''' '''
Print the message to stderr, it will be prefixed with 'WARNING:' Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored If stderr is a tty file the 'WARNING:' will be colored
''' '''
if only_once:
m_hash = hash((self, message))
m_cnt = _cache.setdefault(m_hash, 0)
_cache[m_hash] = m_cnt + 1
if m_cnt > 0:
return
if self.params.get('logger') is not None: if self.params.get('logger') is not None:
self.params['logger'].warning(message) self.params['logger'].warning(message)
else: else:
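The only_once path above relies on a deliberately mutable default argument: _cache persists across calls and memoizes hash((self, message)), so a repeated warning is emitted once per instance per process. A toy reproduction of the pattern (not the actual class):

class Reporter(object):
    def warn(self, message, only_once=False, _cache={}):
        # the mutable default dict is shared by every call, as above
        if only_once:
            m_hash = hash((self, message))
            m_cnt = _cache.setdefault(m_hash, 0)
            _cache[m_hash] = m_cnt + 1
            if m_cnt > 0:
                return
        print('WARNING: %s' % message)

r = Reporter()
r.warn('slow connection', only_once=True)  # printed
r.warn('slow connection', only_once=True)  # suppressed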
@ -619,7 +649,7 @@ class YoutubeDL(object):
warning_message = '%s %s' % (_msg_header, message) warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message) self.to_stderr(warning_message)
def report_error(self, message, tb=None): def report_error(self, message, *args, **kwargs):
''' '''
Do the same as trouble, but prefixes the message with 'ERROR:', colored Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file. in red if stderr is a tty file.
@ -628,8 +658,18 @@ class YoutubeDL(object):
_msg_header = '\033[0;31mERROR:\033[0m' _msg_header = '\033[0;31mERROR:\033[0m'
else: else:
_msg_header = 'ERROR:' _msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message) kwargs['message'] = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb) self.trouble(*args, **kwargs)
def report_unscoped_cookies(self, *args, **kwargs):
# message=None, tb=False, is_error=False
if len(args) <= 2:
kwargs.setdefault('is_error', False)
if len(args) <= 0:
kwargs.setdefault(
'message',
'Unscoped cookies are not allowed: please specify some sort of scoping')
self.report_error(*args, **kwargs)
def report_file_already_downloaded(self, file_name): def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded.""" """Report file has already been fully downloaded."""
@ -825,7 +865,7 @@ class YoutubeDL(object):
msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.' msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
self.report_error(msg) self.report_error(msg)
except ExtractorError as e: # An error we somewhat expected except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback()) self.report_error(compat_str(e), tb=e.format_traceback())
except MaxDownloadsReached: except MaxDownloadsReached:
raise raise
except Exception as e: except Exception as e:
@ -835,8 +875,83 @@ class YoutubeDL(object):
raise raise
return wrapper return wrapper
def _remove_cookie_header(self, http_headers):
"""Filters out `Cookie` header from an `http_headers` dict
The `Cookie` header is removed to prevent leaks as a result of unscoped cookies.
See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
@param http_headers An `http_headers` dict from which any `Cookie` header
should be removed, or None
"""
return dict(filter(lambda pair: pair[0].lower() != 'cookie', (http_headers or {}).items()))
def _load_cookies(self, data, **kwargs):
"""Loads cookies from a `Cookie` header
This tries to work around the security vulnerability of passing cookies to every domain.
@param data The Cookie header as a string to load the cookies from
@param autoscope If `False`, scope cookies using Set-Cookie syntax and raise an error for cookies without domains
If `True`, save cookies for later to be stored in the jar with a limited scope
If a URL, save cookies in the jar with the domain of the URL
"""
# autoscope=True (kw-only)
autoscope = kwargs.get('autoscope', True)
for cookie in compat_http_cookies_SimpleCookie(data).values() if data else []:
if autoscope and any(cookie.values()):
raise ValueError('Invalid syntax in Cookie Header')
domain = cookie.get('domain') or ''
expiry = cookie.get('expires')
if expiry == '': # 0 is valid so we check for `''` explicitly
expiry = None
prepared_cookie = compat_http_cookiejar_Cookie(
cookie.get('version') or 0, cookie.key, cookie.value, None, False,
domain, True, True, cookie.get('path') or '', bool(cookie.get('path')),
bool(cookie.get('secure')), expiry, False, None, None, {})
if domain:
self.cookiejar.set_cookie(prepared_cookie)
elif autoscope is True:
self.report_warning(
'Passing cookies as a header is a potential security risk; '
'they will be scoped to the domain of the downloaded urls. '
'Please consider loading cookies from a file or browser instead.',
only_once=True)
self._header_cookies.append(prepared_cookie)
elif autoscope:
self.report_warning(
'The extractor result contains an unscoped cookie as an HTTP header. '
'If you are specifying an input URL, ' + bug_reports_message(),
only_once=True)
self._apply_header_cookies(autoscope, [prepared_cookie])
else:
self.report_unscoped_cookies()
def _load_cookies_from_headers(self, headers):
self._load_cookies(traverse_obj(headers, 'cookie', casesense=False))
def _apply_header_cookies(self, url, cookies=None):
"""This method applies stray header cookies to the provided url
This loads header cookies and scopes them to the domain provided in `url`.
While this is not ideal, it helps reduce the risk of them being sent to
an unintended destination.
"""
parsed = compat_urllib_parse.urlparse(url)
if not parsed.hostname:
return
for cookie in map(copy.copy, cookies or self._header_cookies):
cookie.domain = '.' + parsed.hostname
self.cookiejar.set_cookie(cookie)
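For illustration only, the scoping step above amounts to stamping each stray cookie with the download host before it enters the jar; a standalone sketch under that assumption (function name invented):

import copy
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def scope_cookies(jar, stray_cookies, url):
    hostname = urlparse(url).hostname
    if not hostname:
        return
    for cookie in map(copy.copy, stray_cookies):
        cookie.domain = '.' + hostname  # restrict where the cookie may travel
        jar.set_cookie(cookie)          # jar: an http.cookiejar/cookielib CookieJar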
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
+ # Compat with passing cookies in http headers
+ self._apply_header_cookies(url)
+
ie_result = ie.extract(url)
if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
return
@@ -862,7 +977,7 @@ class YoutubeDL(object):
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
- Take the result of the ie(may be modified) and resolve all unresolved
+ Take the result of the ie (may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
@@ -1389,17 +1504,16 @@ class YoutubeDL(object):
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
- video_selector, audio_selector = map(_build_selector_function, selector.selector)

def selector_function(ctx):
- for pair in itertools.product(
- video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
+ selector_fn = lambda x: _build_selector_function(x)(ctx)
+ for pair in itertools.product(*map(selector_fn, selector.selector)):
yield _merge(pair)

filters = [self._build_format_filter(f) for f in selector.filters]

def final_selector(ctx):
- ctx_copy = copy.deepcopy(ctx)
+ ctx_copy = dict(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
@@ -1434,23 +1548,45 @@ class YoutubeDL(object):
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)

- def _calc_headers(self, info_dict):
- res = std_headers.copy()
+ def _calc_headers(self, info_dict, load_cookies=False):
+ if load_cookies:  # For --load-info-json
+ # load cookies from http_headers in legacy info.json
+ self._load_cookies(traverse_obj(info_dict, ('http_headers', 'Cookie'), casesense=False),
+ autoscope=info_dict['url'])
+ # load scoped cookies from info.json
+ self._load_cookies(info_dict.get('cookies'), autoscope=False)

- add_headers = info_dict.get('http_headers')
- if add_headers:
- res.update(add_headers)
- cookies = self._calc_cookies(info_dict)
+ cookies = self.cookiejar.get_cookies_for_url(info_dict['url'])
if cookies:
- res['Cookie'] = cookies
+ # Make a string like name1=val1; attr1=a_val1; ...name2=val2; ...
+ # By convention a cookie name can't be a well-known attribute name
+ # so this syntax is unambiguous and can be parsed by (eg) SimpleCookie
+ encoder = compat_http_cookies_SimpleCookie()
+ values = []
+ attributes = (('Domain', '='), ('Path', '='), ('Secure',), ('Expires', '='), ('Version', '='))
+ attributes = tuple([x[0].lower()] + list(x) for x in attributes)
+ for cookie in cookies:
+ _, value = encoder.value_encode(cookie.value)
+ # Py 2 '' --> '', Py 3 '' --> '""'
+ if value == '':
+ value = '""'
+ values.append('='.join((cookie.name, value)))
+ for attr in attributes:
+ value = getattr(cookie, attr[0], None)
+ if value:
+ values.append('%s%s' % (''.join(attr[1:]), value if len(attr) == 3 else ''))
+ info_dict['cookies'] = '; '.join(values)

+ res = std_headers.copy()
+ res.update(info_dict.get('http_headers') or {})
+ res = self._remove_cookie_header(res)

if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip

- return res
+ return res or None
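The round-trip that the comment above relies on can be sketched with SimpleCookie alone (illustrative values, not from the patch):

try:
    from http.cookies import SimpleCookie  # Python 3
except ImportError:
    from Cookie import SimpleCookie        # Python 2

# The "name=value; Attr=val" string parses back unambiguously.
jar = SimpleCookie()
jar.load('id=42; Domain=.example.com; Path=/')
assert jar['id'].value == '42'
assert jar['id']['domain'] == '.example.com'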
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
@@ -1629,10 +1765,13 @@ class YoutubeDL(object):
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
- full_format_info = info_dict.copy()
- full_format_info.update(format)
- format['http_headers'] = self._calc_headers(full_format_info)
- # Remove private housekeeping stuff
+ format['http_headers'] = self._calc_headers(ChainMap(format, info_dict), load_cookies=True)
+
+ # Safeguard against old/insecure infojson when using --load-info-json
+ info_dict['http_headers'] = self._remove_cookie_header(
+ info_dict.get('http_headers') or {}) or None
+
+ # Remove private housekeeping stuff (copied to http_headers in _calc_headers())
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
@@ -1775,7 +1914,7 @@ class YoutubeDL(object):
self.to_stdout(formatSeconds(info_dict['duration']))
print_mandatory('format')
if self.params.get('forcejson', False):
- self.to_stdout(json.dumps(info_dict))
+ self.to_stdout(json.dumps(self.sanitize_info(info_dict)))

def process_info(self, info_dict):
"""Process a single resolved IE result."""
@@ -1835,7 +1974,7 @@ class YoutubeDL(object):
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
- with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
+ with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
@@ -1850,7 +1989,7 @@ class YoutubeDL(object):
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
- with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
+ with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
@@ -1877,7 +2016,7 @@ class YoutubeDL(object):
try:
# Use newline='' to prevent conversion of newline characters
# See https://github.com/ytdl-org/youtube-dl/issues/10268
- with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
+ with open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_info['data'])
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
@@ -1886,24 +2025,16 @@ class YoutubeDL(object):
try:
sub_data = ie._request_webpage(
sub_info['url'], info_dict['id'], note=False).read()
- with io.open(encodeFilename(sub_filename), 'wb') as subfile:
+ with open(encodeFilename(sub_filename), 'wb') as subfile:
subfile.write(sub_data)
except (ExtractorError, IOError, OSError, ValueError) as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err)))
continue

- if self.params.get('writeinfojson', False):
- infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
- if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
- self.to_screen('[info] Video description metadata is already present')
- else:
- self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
- try:
- write_json_file(self.filter_requested_info(info_dict), infofn)
- except (OSError, IOError):
- self.report_error('Cannot write metadata to JSON file ' + infofn)
- return
+ self._write_info_json(
+ 'video description', info_dict,
+ replace_extension(filename, 'info.json', info_dict.get('ext')))

self._write_thumbnails(info_dict, filename)
@@ -1924,7 +2055,11 @@ class YoutubeDL(object):
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_screen('[debug] Invoking downloader on %r' % info.get('url'))
- return fd.download(name, info)
+
+ new_info = dict((k, v) for k, v in info.items() if not k.startswith('__p'))
+ new_info['http_headers'] = self._calc_headers(new_info)
+
+ return fd.download(name, new_info)
if info_dict.get('requested_formats') is not None:
downloaded = []
@@ -2089,16 +2224,13 @@ class YoutubeDL(object):
raise
else:
if self.params.get('dump_single_json', False):
- self.to_stdout(json.dumps(res))
+ self.to_stdout(json.dumps(self.sanitize_info(res)))

return self._download_retcode
def download_with_info_file(self, info_filename):
- with contextlib.closing(fileinput.FileInput(
- [info_filename], mode='r',
- openhook=fileinput.hook_encoded('utf-8'))) as f:
- # FileInput doesn't have a read method, we can't call json.load
- info = self.filter_requested_info(json.loads('\n'.join(f)))
+ with open(info_filename, encoding='utf-8') as f:
+ info = self.filter_requested_info(json.load(f))
try:
self.process_ie_result(info, download=True)
except DownloadError:
@@ -2111,10 +2243,36 @@ class YoutubeDL(object):
return self._download_retcode

@staticmethod
- def filter_requested_info(info_dict):
- return dict(
- (k, v) for k, v in info_dict.items()
- if k not in ['requested_formats', 'requested_subtitles'])
+ def sanitize_info(info_dict, remove_private_keys=False):
+ ''' Sanitize the infodict for converting to json '''
+ if info_dict is None:
+ return info_dict
+
+ if remove_private_keys:
+ reject = lambda k, v: (v is None
+ or k.startswith('__')
+ or k in ('requested_formats',
+ 'requested_subtitles'))
+ else:
+ reject = lambda k, v: False
+
+ def filter_fn(obj):
+ if isinstance(obj, dict):
+ return dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v))
+ elif isinstance(obj, (list, tuple, set, LazyList)):
+ return list(map(filter_fn, obj))
+ elif obj is None or any(isinstance(obj, c)
+ for c in (compat_integer_types,
+ (compat_str, float, bool))):
+ return obj
+ else:
+ return repr(obj)
+
+ return filter_fn(info_dict)
+
+ @classmethod
+ def filter_requested_info(cls, info_dict):
+ return cls.sanitize_info(info_dict, True)
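A sketch of what the new sanitizer does to an infodict (values invented; assumes a youtube_dl build that includes this change):

from youtube_dl import YoutubeDL

info = {'__x_forwarded_for_ip': '10.0.0.1', 'title': 'clip', 'fragments': iter(())}
clean = YoutubeDL.sanitize_info(info, remove_private_keys=True)
assert clean['title'] == 'clip' and '__x_forwarded_for_ip' not in clean
# non-JSON-serialisable values fall back to their repr()
assert isinstance(clean['fragments'], str)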
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
@@ -2300,27 +2458,6 @@ class YoutubeDL(object):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
- # an embedded /../ sequence is not automatically handled by urllib2
- # see https://github.com/yt-dlp/yt-dlp/issues/3355
- url = req.get_full_url()
- parts = url.partition('/../')
- if parts[1]:
- url = compat_urllib_parse.urljoin(parts[0] + parts[1][:1], parts[1][1:] + parts[2])
- if url:
- # worse, URL path may have initial /../ against RFCs: work-around
- # by stripping such prefixes, like eg Firefox
- parts = compat_urllib_parse.urlsplit(url)
- path = parts.path
- while path.startswith('/../'):
- path = path[3:]
- url = parts._replace(path=path).geturl()
- # get a new Request with the munged URL
- if url != req.get_full_url():
- req_type = {'HEAD': HEADRequest, 'PUT': PUTRequest}.get(
- req.get_method(), compat_urllib_request.Request)
- req = req_type(
- url, data=req.data, headers=dict(req.header_items()),
- origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
@@ -2342,9 +2479,12 @@
self.get_encoding()))
write_string(encoding_str, encoding=None)

- self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
+ writeln_debug = lambda *s: self._write_string('[debug] %s\n' % (''.join(s), ))
+ writeln_debug('youtube-dl version ', __version__)
if _LAZY_LOADER:
- self._write_string('[debug] Lazy loading extractors enabled' + '\n')
+ writeln_debug('Lazy loading extractors enabled')
+ if ytdl_is_updateable():
+ writeln_debug('Single file build')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
@@ -2353,7 +2493,7 @@
out, err = process_communicate_or_kill(sp)
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
- self._write_string('[debug] Git HEAD: ' + out + '\n')
+ writeln_debug('Git HEAD: ', out)
except Exception:
try:
sys.exc_clear()
@@ -2366,9 +2506,22 @@
return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
return impl_name

- self._write_string('[debug] Python version %s (%s) - %s\n' % (
- platform.python_version(), python_implementation(),
- platform_name()))
+ def libc_ver():
+ try:
+ return platform.libc_ver()
+ except OSError:  # We may not have access to the executable
+ return []
+
+ libc = join_nonempty(*libc_ver(), delim=' ')
+ writeln_debug('Python %s (%s %s %s) - %s - %s%s' % (
+ platform.python_version(),
+ python_implementation(),
+ platform.machine(),
+ platform.architecture()[0],
+ platform_name(),
+ OPENSSL_VERSION,
+ (' - %s' % (libc, )) if libc else ''
+ ))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
@@ -2380,17 +2533,17 @@
)
if not exe_str:
exe_str = 'none'
- self._write_string('[debug] exe versions: %s\n' % exe_str)
+ writeln_debug('exe versions: %s' % (exe_str, ))

proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
- self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
+ writeln_debug('Proxy map: ', compat_str(proxy_map))

if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
- self._write_string('[debug] Public IP address: %s\n' % ipaddr)
+ writeln_debug('Public IP address: %s' % (ipaddr, ))
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
@@ -2407,7 +2560,7 @@
opts_proxy = self.params.get('proxy')

if opts_cookiefile is None:
- self.cookiejar = compat_cookiejar.CookieJar()
+ self.cookiejar = YoutubeDLCookieJar()
else:
opts_cookiefile = expand_path(opts_cookiefile)
self.cookiejar = YoutubeDLCookieJar(opts_cookiefile)
@@ -2468,6 +2621,28 @@
encoding = preferredencoding()
return encoding
def _write_info_json(self, label, info_dict, infofn, overwrite=None):
if not self.params.get('writeinfojson', False):
return False
def msg(fmt, lbl):
return fmt % (lbl + ' metadata',)
if overwrite is None:
overwrite = not self.params.get('nooverwrites', False)
if not overwrite and os.path.exists(encodeFilename(infofn)):
self.to_screen(msg('[info] %s is already present', label.title()))
return 'exists'
else:
self.to_screen(msg('[info] Writing %s as JSON to: ', label) + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
return True
except (OSError, IOError):
self.report_error(msg('Cannot write %s to JSON file ', label) + infofn)
return
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
View File
@@ -5,7 +5,6 @@ from __future__ import unicode_literals
__license__ = 'Public Domain'

- import codecs
import io
import os
import random
@@ -17,6 +16,7 @@ from .options import (
)
from .compat import (
compat_getpass,
+ compat_register_utf8,
compat_shlex_split,
workaround_optparse_bug9161,
)
@@ -46,10 +46,8 @@ from .YoutubeDL import YoutubeDL
def _real_main(argv=None):
- # Compatibility fixes for Windows
- if sys.platform == 'win32':
- # https://github.com/ytdl-org/youtube-dl/issues/820
- codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
+ # Compatibility fix for Windows
+ compat_register_utf8()

workaround_optparse_bug9161()
View File
@@ -1,14 +1,16 @@
from __future__ import unicode_literals

import errno
- import io
import json
import os
import re
import shutil
import traceback

- from .compat import compat_getenv
+ from .compat import (
+ compat_getenv,
+ compat_open as open,
+ )
from .utils import (
error_to_compat_str,
expand_path,
@@ -83,7 +85,7 @@ class Cache(object):
cache_fn = self._get_cache_fn(section, key, dtype)
try:
try:
- with io.open(cache_fn, 'r', encoding='utf-8') as cachef:
+ with open(cache_fn, 'r', encoding='utf-8') as cachef:
return self._validate(json.load(cachef), min_ver)
except ValueError:
try:
View File
@@ -1663,5 +1663,5 @@ def casefold(s):
__all__ = [
- casefold
+ 'casefold',
]
View File
@@ -1,10 +1,12 @@
# coding: utf-8
from __future__ import unicode_literals
+ from __future__ import division

import base64
import binascii
import collections
import ctypes
+ import datetime
import email
import getpass
import io
@@ -19,6 +21,7 @@ import socket
import struct
import subprocess
import sys
+ import types
import xml.etree.ElementTree

# naming convention
@@ -31,13 +34,17 @@ try:
compat_str, compat_basestring, compat_chr = (
unicode, basestring, unichr
)
- from .casefold import casefold as compat_casefold
except NameError:
compat_str, compat_basestring, compat_chr = (
- str, str, chr
+ str, (str, bytes), chr
)
+
+ # casefold
+ try:
+ compat_str.casefold
compat_casefold = lambda s: s.casefold()
+ except AttributeError:
+ from .casefold import casefold as compat_casefold
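The fallback logic above can be checked quickly; 'ß' is the classic case where casefold() and lower() differ (example values, not from the patch):

# str.casefold is probed once; the backport is imported only when absent.
assert compat_casefold(u'Straße') == u'strasse'  # .lower() would give u'straße'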
try:
import collections.abc as compat_collections_abc
@@ -49,6 +56,29 @@ try:
except ImportError:  # Python 2
import urllib2 as compat_urllib_request
# Also fix up lack of method arg in old Pythons
try:
type(compat_urllib_request.Request('http://127.0.0.1', method='GET'))
except TypeError:
def _add_init_method_arg(cls):
init = cls.__init__
def wrapped_init(self, *args, **kwargs):
method = kwargs.pop('method', 'GET')
init(self, *args, **kwargs)
if any(callable(x.__dict__.get('get_method')) for x in (self.__class__, self) if x != cls):
# allow instance or its subclass to override get_method()
return
if self.has_data() and method == 'GET':
method = 'POST'
self.get_method = types.MethodType(lambda _: method, self)
cls.__init__ = wrapped_init
_add_init_method_arg(compat_urllib_request.Request)
del _add_init_method_arg
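With the shim above in place, callers can pass method= uniformly; a sketch of the intended behaviour (loopback URL is a placeholder, assertions are illustrative):

# After patching, method= is honoured even where Request.__init__() lacks it.
req = compat_urllib_request.Request('http://127.0.0.1/', method='PUT')
assert req.get_method() == 'PUT'
# Without an explicit method, the old data-based heuristic still applies:
req = compat_urllib_request.Request('http://127.0.0.1/', data=b'x')
assert req.get_method() == 'POST'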
try:
import urllib.error as compat_urllib_error
except ImportError:  # Python 2
@@ -73,6 +103,12 @@ try:
except ImportError:  # Python 2
import urllib as compat_urllib_response
try:
compat_urllib_response.addinfourl.status
except AttributeError:
# .getcode() is deprecated in Py 3.
compat_urllib_response.addinfourl.status = property(lambda self: self.getcode())
try:
import http.cookiejar as compat_cookiejar
except ImportError:  # Python 2
@@ -97,12 +133,24 @@ except ImportError: # Python 2
import Cookie as compat_cookies
compat_http_cookies = compat_cookies
- if sys.version_info[0] == 2:
+ if sys.version_info[0] == 2 or sys.version_info < (3, 3):
class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
def load(self, rawdata):
- if isinstance(rawdata, compat_str):
- rawdata = str(rawdata)
- return super(compat_cookies_SimpleCookie, self).load(rawdata)
+ must_have_value = 0
+ if not isinstance(rawdata, dict):
+ if sys.version_info[:2] != (2, 7) or sys.platform.startswith('java'):
+ # attribute must have value for parsing
+ rawdata, must_have_value = re.subn(
+ r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', rawdata)
+ if sys.version_info[0] == 2:
+ if isinstance(rawdata, compat_str):
+ rawdata = str(rawdata)
+ super(compat_cookies_SimpleCookie, self).load(rawdata)
+ if must_have_value > 0:
+ for morsel in self.values():
+ for attr in ('secure', 'httponly'):
+ if morsel.get(attr):
+ morsel[attr] = True
else:
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie
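The re.subn() trick above rewrites valueless attributes so old parsers accept them; sketched on its own with an invented cookie string:

import re

# 'Secure' / 'HttpOnly' gain a dummy value before parsing; the morsels are
# later patched back to boolean True.
raw = 'id=42; Secure; Path=/; HttpOnly'
fixed, n = re.subn(r'(?i)(;\s*)(secure|httponly)(\s*(?:;|$))', r'\1\2=\2\3', raw)
assert n == 2
assert fixed == 'id=42; Secure=Secure; Path=/; HttpOnly=HttpOnly'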
@@ -2354,6 +2402,11 @@ try:
import http.client as compat_http_client
except ImportError:  # Python 2
import httplib as compat_http_client
try:
compat_http_client.HTTPResponse.getcode
except AttributeError:
# Py < 3.1
compat_http_client.HTTPResponse.getcode = lambda self: self.status
try:
from urllib.error import HTTPError as compat_HTTPError
@@ -3127,6 +3180,46 @@ else:
return ctypes.WINFUNCTYPE(*args, **kwargs)
if sys.version_info < (3, 0):
# open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None
def compat_open(file_, *args, **kwargs):
if len(args) > 6 or 'opener' in kwargs:
raise ValueError('open: unsupported argument "opener"')
return io.open(file_, *args, **kwargs)
else:
compat_open = open
# compat_register_utf8
def compat_register_utf8():
if sys.platform == 'win32':
# https://github.com/ytdl-org/youtube-dl/issues/820
from codecs import register, lookup
register(
lambda name: lookup('utf-8') if name == 'cp65001' else None)
# compat_datetime_timedelta_total_seconds
try:
compat_datetime_timedelta_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
# Py 2.6
def compat_datetime_timedelta_total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
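The Py2.6 fallback above is the textbook expansion of total_seconds(); a quick check (values invented):

# 1 day + 1 second + 1 microsecond, computed without timedelta.total_seconds()
td = datetime.timedelta(days=1, seconds=1, microseconds=1)
assert abs(compat_datetime_timedelta_total_seconds(td) - 86401.000001) < 1e-6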
# optional decompression packages
# PyPi brotli package implements 'br' Content-Encoding
try:
import brotli as compat_brotli
except ImportError:
compat_brotli = None
# PyPi ncompress package implements 'compress' Content-Encoding
try:
import ncompress as compat_ncompress
except ImportError:
compat_ncompress = None
legacy = [
'compat_HTMLParseError',
'compat_HTMLParser',
@@ -3160,10 +3253,12 @@ __all__ = [
'compat_Struct',
'compat_base64_b64decode',
'compat_basestring',
+ 'compat_brotli',
'compat_casefold',
'compat_chr',
'compat_collections_abc',
'compat_collections_chain_map',
+ 'compat_datetime_timedelta_total_seconds',
'compat_http_cookiejar',
'compat_http_cookiejar_Cookie',
'compat_http_cookies',
@@ -3184,7 +3279,9 @@ __all__ = [
'compat_itertools_zip_longest',
'compat_kwargs',
'compat_map',
+ 'compat_ncompress',
'compat_numeric_types',
+ 'compat_open',
'compat_ord',
'compat_os_name',
'compat_os_path_expanduser',
@@ -3192,6 +3289,7 @@ __all__ = [
'compat_print',
'compat_re_Match',
'compat_re_Pattern',
+ 'compat_register_utf8',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
View File
@@ -96,7 +96,7 @@ class FileDownloader(object):
return None
return int(float(remaining) / rate)
start, now = (start_or_rate, now_or_remaining)
- total, current = args
+ total, current = args[:2]
if total is None:
return None
if now is None:
@@ -339,6 +339,10 @@ class FileDownloader(object):
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
+
+ This method filters the `Cookie` header from the info_dict to prevent leaks.
+ Downloaders have their own way of handling cookies.
+ See: https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-v8mc-9377-rwjj
"""
nooverwrites_and_exists = (
View File
@@ -1,5 +1,7 @@
from __future__ import unicode_literals

+ import itertools
+
from .fragment import FragmentFD
from ..compat import compat_urllib_error
from ..utils import (
@@ -30,25 +32,28 @@ class DashSegmentsFD(FragmentFD):
fragment_retries = self.params.get('fragment_retries', 0)
skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)

- frag_index = 0
- for i, fragment in enumerate(fragments):
- frag_index += 1
+ for frag_index, fragment in enumerate(fragments, 1):
if frag_index <= ctx['fragment_index']:
continue
+ success = False
# In DASH, the first segment contains necessary headers to
# generate a valid MP4 file, so always abort for the first segment
- fatal = i == 0 or not skip_unavailable_fragments
- for count in range(fragment_retries + 1):
+ fatal = frag_index == 1 or not skip_unavailable_fragments
+ fragment_url = fragment.get('url')
+ if not fragment_url:
+ assert fragment_base_url
+ fragment_url = urljoin(fragment_base_url, fragment['path'])
+ headers = info_dict.get('http_headers')
+ fragment_range = fragment.get('range')
+ if fragment_range:
+ headers = headers.copy() if headers else {}
+ headers['Range'] = 'bytes=%s' % (fragment_range,)
+ for count in itertools.count():
try:
- fragment_url = fragment.get('url')
- if not fragment_url:
- assert fragment_base_url
- fragment_url = urljoin(fragment_base_url, fragment['path'])
- success, frag_content = self._download_fragment(ctx, fragment_url, info_dict)
+ success, frag_content = self._download_fragment(ctx, fragment_url, info_dict, headers)
if not success:
return False
self._append_fragment(ctx, frag_content)
+ break
except compat_urllib_error.HTTPError as err:
# YouTube may often return 404 HTTP error for a fragment causing the
# whole download to fail. However if the same fragment is immediately
@@ -58,19 +63,19 @@ class DashSegmentsFD(FragmentFD):
# HTTP error.
if count < fragment_retries:
self.report_retry_fragment(err, frag_index, count + 1, fragment_retries)
+ continue
except DownloadError:
# Don't retry fragment if error occurred during HTTP downloading
- # itself since it has own retry settings
- if not fatal:
- self.report_skip_fragment(frag_index)
- break
- raise
+ # itself since it has its own retry settings
+ if fatal:
+ raise
break

- if count >= fragment_retries:
+ if not success:
if not fatal:
self.report_skip_fragment(frag_index)
continue
- self.report_error('giving up after %s fragment retries' % fragment_retries)
+ self.report_error('giving up after %s fragment retries' % count)
return False

self._finish_frag_download(ctx)
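The reworked control flow above reduces to a small skeleton, sketched here with placeholder names (not from the patch):

import itertools

def download_fragment_with_retries(download, fragment_retries):
    # download() returns True on success and may raise an HTTP-ish error
    success = False
    for count in itertools.count():
        try:
            success = download()
            break
        except IOError:  # stand-in for compat_urllib_error.HTTPError
            if count < fragment_retries:
                continue  # retry the same fragment
            break
    # the caller then decides: skip the fragment (non-fatal) or give up,
    # reporting `count` as the number of retries actually spent
    return success, count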
View File
@@ -1,9 +1,10 @@
from __future__ import unicode_literals

- import os.path
+ import os
import re
import subprocess
import sys
+ import tempfile
import time

from .common import FileDownloader
@@ -23,6 +24,8 @@ from ..utils import (
check_executable,
is_outdated_version,
process_communicate_or_kill,
+ T,
+ traverse_obj,
)
@@ -30,6 +33,7 @@ class ExternalFD(FileDownloader):
def real_download(self, filename, info_dict):
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
+ self._cookies_tempfile = None

try:
started = time.time()
@@ -42,6 +46,13 @@ class ExternalFD(FileDownloader):
# should take place
retval = 0
self.to_screen('[%s] Interrupted by user' % self.get_basename())
+ finally:
+ if self._cookies_tempfile and os.path.isfile(self._cookies_tempfile):
+ try:
+ os.remove(self._cookies_tempfile)
+ except OSError:
+ self.report_warning(
+ 'Unable to delete temporary cookies file "{0}"'.format(self._cookies_tempfile))

if retval == 0:
status = {
@@ -97,6 +108,16 @@ class ExternalFD(FileDownloader):
def _configuration_args(self, default=[]):
return cli_configuration_args(self.params, 'external_downloader_args', default)
def _write_cookies(self):
if not self.ydl.cookiejar.filename:
tmp_cookies = tempfile.NamedTemporaryFile(suffix='.cookies', delete=False)
tmp_cookies.close()
self._cookies_tempfile = tmp_cookies.name
self.to_screen('[download] Writing temporary cookies file to "{0}"'.format(self._cookies_tempfile))
# real_download resets _cookies_tempfile; if it's None, save() will write to cookiejar.filename
self.ydl.cookiejar.save(self._cookies_tempfile, ignore_discard=True, ignore_expires=True)
return self.ydl.cookiejar.filename or self._cookies_tempfile
def _call_downloader(self, tmpfilename, info_dict):
""" Either overwrite this or implement _make_cmd """
cmd = [encodeArgument(a) for a in self._make_cmd(tmpfilename, info_dict)]
@@ -110,13 +131,21 @@ class ExternalFD(FileDownloader):
self.to_stderr(stderr.decode('utf-8', 'replace'))
return p.returncode
@staticmethod
def _header_items(info_dict):
return traverse_obj(
info_dict, ('http_headers', T(dict.items), Ellipsis))
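The traverse_obj() call above flattens the mapping into (key, value) pairs while tolerating a missing http_headers dict; an equivalent plain spelling, for comparison (a sketch, not from the patch):

def header_items(info_dict):
    # same result: [] when http_headers is absent or None
    return list((info_dict.get('http_headers') or {}).items())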
class CurlFD(ExternalFD):
AVAILABLE_OPT = '-V'

def _make_cmd(self, tmpfilename, info_dict):
- cmd = [self.exe, '--location', '-o', tmpfilename]
- for key, val in info_dict['http_headers'].items():
+ cmd = [self.exe, '--location', '-o', tmpfilename, '--compressed']
+ cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+ if cookie_header:
+ cmd += ['--cookie', cookie_header]
+ for key, val in self._header_items(info_dict):
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._bool_option('--continue-at', 'continuedl', '-', '0')
cmd += self._valueless_option('--silent', 'noprogress')
@@ -151,8 +180,11 @@ class AxelFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = [self.exe, '-o', tmpfilename]
- for key, val in info_dict['http_headers'].items():
+ for key, val in self._header_items(info_dict):
cmd += ['-H', '%s: %s' % (key, val)]
+ cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+ if cookie_header:
+ cmd += ['-H', 'Cookie: {0}'.format(cookie_header), '--max-redirect=0']
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
return cmd
@@ -162,8 +194,10 @@ class WgetFD(ExternalFD):
AVAILABLE_OPT = '--version'

def _make_cmd(self, tmpfilename, info_dict):
- cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
- for key, val in info_dict['http_headers'].items():
+ cmd = [self.exe, '-O', tmpfilename, '-nv', '--compression=auto']
+ if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+ cmd += ['--load-cookies', self._write_cookies()]
+ for key, val in self._header_items(info_dict):
cmd += ['--header', '%s: %s' % (key, val)]
cmd += self._option('--limit-rate', 'ratelimit')
retry = self._option('--tries', 'retries')
@@ -172,7 +206,10 @@ class WgetFD(ExternalFD):
retry[1] = '0'
cmd += retry
cmd += self._option('--bind-address', 'source_address')
- cmd += self._option('--proxy', 'proxy')
+ proxy = self.params.get('proxy')
+ if proxy:
+ for var in ('http_proxy', 'https_proxy'):
+ cmd += ['--execute', '%s=%s' % (var, proxy)]
cmd += self._valueless_option('--no-check-certificate', 'nocheckcertificate')
cmd += self._configuration_args()
cmd += ['--', info_dict['url']]
@@ -182,21 +219,58 @@ class WgetFD(ExternalFD):
class Aria2cFD(ExternalFD):
AVAILABLE_OPT = '-v'

+ @staticmethod
+ def _aria2c_filename(fn):
+ return fn if os.path.isabs(fn) else os.path.join('.', fn)
+
def _make_cmd(self, tmpfilename, info_dict):
- cmd = [self.exe, '-c']
- cmd += self._configuration_args([
- '--min-split-size', '1M', '--max-connection-per-server', '4'])
- dn = os.path.dirname(tmpfilename)
- if dn:
- cmd += ['--dir', dn]
- cmd += ['--out', os.path.basename(tmpfilename)]
- for key, val in info_dict['http_headers'].items():
+ cmd = [self.exe, '-c',
+ '--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
+ '--http-accept-gzip=true', '--file-allocation=none', '-x16', '-j16', '-s16']
+ if 'fragments' in info_dict:
+ cmd += ['--allow-overwrite=true', '--allow-piece-length-change=true']
+ else:
+ cmd += ['--min-split-size', '1M']
+
+ if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+ cmd += ['--load-cookies={0}'.format(self._write_cookies())]
+ for key, val in self._header_items(info_dict):
cmd += ['--header', '%s: %s' % (key, val)]
+ cmd += self._configuration_args(['--max-connection-per-server', '4'])
+ cmd += ['--out', os.path.basename(tmpfilename)]
+ cmd += self._option('--max-overall-download-limit', 'ratelimit')
cmd += self._option('--interface', 'source_address')
cmd += self._option('--all-proxy', 'proxy')
cmd += self._bool_option('--check-certificate', 'nocheckcertificate', 'false', 'true', '=')
cmd += self._bool_option('--remote-time', 'updatetime', 'true', 'false', '=')
- cmd += ['--', info_dict['url']]
+ cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
+ cmd += self._configuration_args()
+
+ # aria2c strips out spaces from the beginning/end of filenames and paths.
+ # We work around this issue by adding a "./" to the beginning of the
+ # filename and relative path, and adding a "/" at the end of the path.
+ # See: https://github.com/yt-dlp/yt-dlp/issues/276
+ # https://github.com/ytdl-org/youtube-dl/issues/20312
+ # https://github.com/aria2/aria2/issues/1373
+ dn = os.path.dirname(tmpfilename)
+ if dn:
+ cmd += ['--dir', self._aria2c_filename(dn) + os.path.sep]
+ if 'fragments' not in info_dict:
+ cmd += ['--out', self._aria2c_filename(os.path.basename(tmpfilename))]
+ cmd += ['--auto-file-renaming=false']
+
+ if 'fragments' in info_dict:
+ cmd += ['--file-allocation=none', '--uri-selector=inorder']
+ url_list_file = '%s.frag.urls' % (tmpfilename, )
+ url_list = []
+ for frag_index, fragment in enumerate(info_dict['fragments']):
+ fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
+ url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
+ stream, _ = self.sanitize_open(url_list_file, 'wb')
+ stream.write('\n'.join(url_list).encode())
+ stream.close()
+ cmd += ['-i', self._aria2c_filename(url_list_file)]
+ else:
+ cmd += ['--', info_dict['url']]
return cmd
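For a fragmented download, the generated .frag.urls file uses aria2c's input-file syntax, one URI followed by a tab-indented out= option per fragment; e.g. (URLs and filenames invented):

https://example.com/frag/seg-1.m4s
	out=./video.mp4.part-Frag0
https://example.com/frag/seg-2.m4s
	out=./video.mp4.part-Frag1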
@@ -235,8 +309,10 @@ class Aria2pFD(ExternalFD):
}
options['dir'] = os.path.dirname(tmpfilename) or os.path.abspath('.')
options['out'] = os.path.basename(tmpfilename)
+ if self.ydl.cookiejar.get_cookie_header(info_dict['url']):
+ options['load-cookies'] = self._write_cookies()
options['header'] = []
- for key, val in info_dict['http_headers'].items():
+ for key, val in self._header_items(info_dict):
options['header'].append('{0}: {1}'.format(key, val))
download = aria2.add_uris([info_dict['url']], options)
status = {
@@ -265,8 +341,16 @@ class HttpieFD(ExternalFD):
def _make_cmd(self, tmpfilename, info_dict):
cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
- for key, val in info_dict['http_headers'].items():
+ for key, val in self._header_items(info_dict):
cmd += ['%s:%s' % (key, val)]
+
+ # httpie 3.1.0+ removes the Cookie header on redirect, so this should be safe for now. [1]
+ # If we ever need cookie handling for redirects, we can export the cookiejar into a session. [2]
+ # 1: https://github.com/httpie/httpie/security/advisories/GHSA-9w4w-cpc8-h2fq
+ # 2: https://httpie.io/docs/cli/sessions
+ cookie_header = self.ydl.cookiejar.get_cookie_header(info_dict['url'])
+ if cookie_header:
+ cmd += ['Cookie:%s' % cookie_header]
return cmd
@@ -312,7 +396,14 @@ class FFmpegFD(ExternalFD):
# if end_time:
#     args += ['-t', compat_str(end_time - start_time)]

- if info_dict['http_headers'] and re.match(r'^https?://', url):
+ cookies = self.ydl.cookiejar.get_cookies_for_url(url)
+ if cookies:
+ args.extend(['-cookies', ''.join(
+ '{0}={1}; path={2}; domain={3};\r\n'.format(
+ cookie.name, cookie.value, cookie.path, cookie.domain)
+ for cookie in cookies)])
+
+ if info_dict.get('http_headers') and re.match(r'^https?://', url):
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
headers = handle_youtubedl_headers(info_dict['http_headers'])
View File
@@ -141,7 +141,8 @@ class HttpFD(FileDownloader):
# Content-Range is either not present or invalid. Assuming remote webserver is
# trying to send the whole file, resume is not possible, so wiping the local file
# and performing entire redownload
- self.report_unable_to_resume()
+ if range_start > 0:
+ self.report_unable_to_resume()
ctx.resume_len = 0
ctx.open_mode = 'wb'
ctx.data_len = int_or_none(ctx.data.info().get('Content-length', None))
@@ -293,7 +294,7 @@ class HttpFD(FileDownloader):
# Progress message
speed = self.calc_speed(start, now, byte_counter - ctx.resume_len)
- eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - ctx.resume_len))
+ eta = self.calc_eta(speed, ctx.data_len and (ctx.data_len - byte_counter))

self._hook_progress({
'status': 'downloading',
View File
@@ -0,0 +1,79 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
merge_dicts,
parse_iso8601,
T,
traverse_obj,
txt_or_none,
urljoin,
)
class CaffeineTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?caffeine\.tv/[^/]+/video/(?P<id>[0-9a-f-]+)'
_TESTS = [{
'url': 'https://www.caffeine.tv/TsuSurf/video/cffc0a00-e73f-11ec-8080-80017d29f26e',
'info_dict': {
'id': 'cffc0a00-e73f-11ec-8080-80017d29f26e',
'ext': 'mp4',
'title': 'GOOOOD MORNINNNNN #highlights',
'timestamp': 1654702180,
'upload_date': '20220608',
'uploader': 'TsuSurf',
'duration': 3145,
'age_limit': 17,
},
'params': {
'format': 'bestvideo',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
json_data = self._download_json(
'https://api.caffeine.tv/social/public/activity/' + video_id,
video_id)
broadcast_info = traverse_obj(json_data, ('broadcast_info', T(dict))) or {}
title = broadcast_info['broadcast_title']
video_url = broadcast_info['video_url']
ext = determine_ext(video_url)
if ext == 'm3u8':
formats = self._extract_m3u8_formats(
video_url, video_id, 'mp4', entry_protocol='m3u8',
fatal=False)
else:
formats = [{'url': video_url}]
self._sort_formats(formats)
return merge_dicts({
'id': video_id,
'title': title,
'formats': formats,
}, traverse_obj(json_data, {
'uploader': ((None, 'user'), 'username'),
}, get_all=False), traverse_obj(json_data, {
'like_count': ('like_count', T(int_or_none)),
'view_count': ('view_count', T(int_or_none)),
'comment_count': ('comment_count', T(int_or_none)),
'tags': ('tags', Ellipsis, T(txt_or_none)),
'is_live': 'is_live',
'uploader': ('user', 'name'),
}), traverse_obj(broadcast_info, {
'duration': ('content_duration', T(int_or_none)),
'timestamp': ('broadcast_start_time', T(parse_iso8601)),
'thumbnail': ('preview_image_path', T(lambda u: urljoin(url, u))),
'age_limit': ('content_rating', T(lambda r: r and {
# assume Apple Store ratings [1]
# 1. https://en.wikipedia.org/wiki/Mobile_software_content_rating_system
'FOUR_PLUS': 0,
'NINE_PLUS': 9,
'TWELVE_PLUS': 12,
'SEVENTEEN_PLUS': 17,
}.get(r, 17))),
}))
View File
@@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
merge_dicts,
T,
traverse_obj,
unified_timestamp,
url_or_none,
)
class ClipchampIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?clipchamp\.com/watch/(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://clipchamp.com/watch/gRXZ4ZhdDaU',
'info_dict': {
'id': 'gRXZ4ZhdDaU',
'ext': 'mp4',
'title': 'Untitled video',
'uploader': 'Alexander Schwartz',
'timestamp': 1680805580,
'upload_date': '20230406',
'thumbnail': r're:^https?://.+\.jpg',
},
'params': {
'skip_download': 'm3u8',
'format': 'bestvideo',
},
}]
_STREAM_URL_TMPL = 'https://%s.cloudflarestream.com/%s/manifest/video.%s'
_STREAM_URL_QUERY = {'parentOrigin': 'https://clipchamp.com'}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['video']
storage_location = data.get('storage_location')
if storage_location != 'cf_stream':
raise ExtractorError('Unsupported clip storage location "%s"' % (storage_location,))
path = data['download_url']
iframe = self._download_webpage(
'https://iframe.cloudflarestream.com/' + path, video_id, 'Downloading player iframe')
subdomain = self._search_regex(
r'''\bcustomer-domain-prefix\s*=\s*("|')(?P<sd>[\w-]+)\1''', iframe,
'subdomain', group='sd', fatal=False) or 'customer-2ut9yn3y6fta1yxe'
formats = self._extract_mpd_formats(
self._STREAM_URL_TMPL % (subdomain, path, 'mpd'), video_id,
query=self._STREAM_URL_QUERY, fatal=False, mpd_id='dash')
formats.extend(self._extract_m3u8_formats(
self._STREAM_URL_TMPL % (subdomain, path, 'm3u8'), video_id, 'mp4',
query=self._STREAM_URL_QUERY, fatal=False, m3u8_id='hls'))
return merge_dicts({
'id': video_id,
'formats': formats,
'uploader': ' '.join(traverse_obj(data, ('creator', ('first_name', 'last_name'), T(compat_str)))) or None,
}, traverse_obj(data, {
'title': ('project', 'project_name', T(compat_str)),
'timestamp': ('created_at', T(unified_timestamp)),
'thumbnail': ('thumbnail_url', T(url_or_none)),
}), rev=True)
View File
@@ -2,7 +2,9 @@
from __future__ import unicode_literals

import base64
+ import collections
import datetime
+ import functools
import hashlib
import json
import netrc
@@ -23,6 +25,8 @@ from ..compat import (
compat_getpass,
compat_integer_types,
compat_http_client,
+ compat_map as map,
+ compat_open as open,
compat_os_name,
compat_str,
compat_urllib_error,
@@ -31,6 +35,7 @@ from ..compat import (
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
+ compat_zip as zip,
)
from ..downloader.f4m import (
get_base_url,
@@ -54,6 +59,7 @@ from ..utils import (
GeoRestrictedError,
GeoUtils,
int_or_none,
+ join_nonempty,
js_to_json,
JSON_LD_RE,
mimetype2ext,
@@ -70,6 +76,8 @@ from ..utils import (
str_or_none,
str_to_int,
strip_or_none,
+ T,
+ traverse_obj,
try_get,
unescapeHTML,
unified_strdate,
@@ -79,6 +87,7 @@ from ..utils import (
urljoin,
url_basename,
url_or_none,
+ variadic,
xpath_element,
xpath_text,
xpath_with_ns,
@@ -174,6 +183,8 @@ class InfoExtractor(object):
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
+ * "range" (optional, str of the form "start-end"
+ to use in HTTP Range header)
* preference  Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
@@ -367,9 +378,22 @@ class InfoExtractor(object):
title, description etc.

- Subclasses of this one should re-define the _real_initialize() and
- _real_extract() methods and define a _VALID_URL regexp.
- Probably, they should also be added to the list of extractors.
+ A subclass of InfoExtractor must be defined to handle each specific site (or
+ several sites). Such a concrete subclass should be added to the list of
+ extractors. It should also:
+ * define its _VALID_URL attribute as a regexp, or a Sequence of alternative
+ regexps (but see below)
+ * re-define the _real_extract() method
+ * optionally re-define the _real_initialize() method.
+
+ An extractor subclass may also override suitable() if necessary, but the
+ function signature must be preserved and the function must import everything
+ it needs (except other extractors), so that lazy_extractors works correctly.
+ If the subclass's suitable() and _real_extract() functions avoid using
+ _VALID_URL, the subclass need not set that class attribute.
+
+ An abstract subclass of InfoExtractor may be used to simplify implementation
+ within an extractor module; it should not be added to the list of extractors.

_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
@@ -404,22 +428,33 @@ class InfoExtractor(object):
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def __match_valid_url(cls, url):
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for cls, whereas getattr would also
# match its superclass
if '_VALID_URL_RE' not in cls.__dict__:
# _VALID_URL can now be a list/tuple of patterns
cls._VALID_URL_RE = tuple(map(re.compile, variadic(cls._VALID_URL)))
# 20% faster than next(filter(None, (p.match(url) for p in cls._VALID_URL_RE)), None) in 2.7
for p in cls._VALID_URL_RE:
p = p.match(url)
if p:
return p
# The public alias can safely be overridden, as in some back-ports
_match_valid_url = __match_valid_url
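With variadic(), a concrete extractor can now declare alternative patterns; a hypothetical example (site and patterns invented):

# _VALID_URL as a tuple: __match_valid_url() compiles each pattern once
# per class and returns the first match.
class ExampleIE(InfoExtractor):
    _VALID_URL = (
        r'https?://(?:www\.)?example\.com/watch/(?P<id>\d+)',
        r'https?://e\.xmpl/(?P<id>\d+)',
    )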
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
- # This does not use has/getattr intentionally - we want to know whether
- # we have cached the regexp for *this* class, whereas getattr would also
- # match the superclass
- if '_VALID_URL_RE' not in cls.__dict__:
- cls._VALID_URL_RE = re.compile(cls._VALID_URL)
- return cls._VALID_URL_RE.match(url) is not None
+ # This function must import everything it needs (except other extractors),
+ # so that lazy_extractors works correctly
+ return cls.__match_valid_url(url) is not None

@classmethod
def _match_id(cls, url):
- if '_VALID_URL_RE' not in cls.__dict__:
- cls._VALID_URL_RE = re.compile(cls._VALID_URL)
- m = cls._VALID_URL_RE.match(url)
+ m = cls.__match_valid_url(url)
assert m
return compat_str(m.group('id'))
@@ -566,6 +601,14 @@ class InfoExtractor(object):
"""Sets the downloader for this IE."""
self._downloader = downloader
@property
def cache(self):
return self._downloader.cache
@property
def cookiejar(self):
return self._downloader.cookiejar
def _real_initialize(self): def _real_initialize(self):
"""Real initialization process. Redefine in subclasses.""" """Real initialization process. Redefine in subclasses."""
pass pass
@@ -912,14 +955,47 @@ class InfoExtractor(object):
        else:
            self.report_warning(errmsg + str(ve))

    def __ie_msg(self, *msg):
        return '[{0}] {1}'.format(self.IE_NAME, ''.join(msg))

    # msg, video_id=None, *args, only_once=False, **kwargs
    def report_warning(self, msg, *args, **kwargs):
        if len(args) > 0:
            video_id = args[0]
            args = args[1:]
        else:
            video_id = kwargs.pop('video_id', None)
        idstr = '' if video_id is None else '%s: ' % video_id
        self._downloader.report_warning(
            self.__ie_msg(idstr, msg), *args, **kwargs)

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(self.__ie_msg(msg))
def write_debug(self, msg, only_once=False, _cache=[]):
'''Log a debug message, or print it to stderr'''
if not self.get_param('verbose', False):
return
message = '[debug] ' + self.__ie_msg(msg)
logger = self.get_param('logger')
if logger:
logger.debug(message)
else:
if only_once and hash(message) in _cache:
return
self._downloader.to_stderr(message)
_cache.append(hash(message))
# name, default=None, *args, **kwargs
def get_param(self, name, *args, **kwargs):
default, args = (args[0], args[1:]) if len(args) > 0 else (kwargs.pop('default', None), args)
if self._downloader:
return self._downloader.params.get(name, default, *args, **kwargs)
return default
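# Illustration (hypothetical call site): the *args/**kwargs handling above
# emulates a Python 3 keyword-only default under Python 2, so both of these
# calls behave identically:
#
#     verbose = self.get_param('verbose', False)
#     verbose = self.get_param('verbose', default=False)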
def report_drm(self, video_id):
self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)
    def report_extraction(self, id_or_name):
        """Report information extraction."""

@@ -947,6 +1023,15 @@ class InfoExtractor(object):
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
        raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
@@ -1005,6 +1090,8 @@ class InfoExtractor(object):
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            elif isinstance(group, (list, tuple)):
                return tuple(mobj.group(g) for g in group)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:

@@ -1020,10 +1107,9 @@ class InfoExtractor(object):
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if isinstance(res, tuple):
            return tuple(map(clean_html, res))
        return clean_html(res)
    def _get_netrc_login_info(self, netrc_machine=None):
        username = None

@@ -1348,6 +1434,44 @@ class InfoExtractor(object):
                break
        return dict((k, v) for k, v in info.items() if v is not None)
def _search_nextjs_data(self, webpage, video_id, **kw):
nkw = dict((k, v) for k, v in kw.items() if k in ('transform_source', 'fatal'))
kw.pop('transform_source', None)
next_data = self._search_regex(
r'''<script[^>]+\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>(?P<nd>[^<]+)</script>''',
webpage, 'next.js data', group='nd', **kw)
if not next_data:
return {}
return self._parse_json(next_data, video_id, **nkw)
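# Illustration (hypothetical JSON path, since Next.js page props vary per
# site): a typical consumer of _search_nextjs_data() might do
#
#     next_data = self._search_nextjs_data(webpage, video_id, fatal=False)
#     video_url = traverse_obj(
#         next_data, ('props', 'pageProps', 'video', 'url'),
#         expected_type=url_or_none)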
def _search_nuxt_data(self, webpage, video_id, *args, **kwargs):
"""Parses Nuxt.js metadata. This works as long as the function __NUXT__ invokes is a pure function"""
# self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)
context_name = args[0] if len(args) > 0 else kwargs.get('context_name', '__NUXT__')
fatal = kwargs.get('fatal', True)
traverse = kwargs.get('traverse', ('data', 0))
re_ctx = re.escape(context_name)
FUNCTION_RE = (r'\(\s*function\s*\((?P<arg_keys>[\s\S]*?)\)\s*\{\s*'
r'return\s+(?P<js>\{[\s\S]*?})\s*;?\s*}\s*\((?P<arg_vals>[\s\S]*?)\)')
js, arg_keys, arg_vals = self._search_regex(
(p.format(re_ctx, FUNCTION_RE) for p in
(r'<script>\s*window\s*\.\s*{0}\s*=\s*{1}\s*\)\s*;?\s*</script>',
r'{0}\s*\([\s\S]*?{1}')),
webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
default=NO_DEFAULT if fatal else (None, None, None))
if js is None:
return {}
args = dict(zip(arg_keys.split(','), map(json.dumps, self._parse_json(
'[{0}]'.format(arg_vals), video_id, transform_source=js_to_json, fatal=fatal) or ())))
ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
return traverse_obj(ret, traverse) or {}
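# Illustration (invented field names, default ('data', 0) traversal):
#
#     nuxt = self._search_nuxt_data(webpage, video_id, fatal=False)
#     title = traverse_obj(nuxt, ('video', 'title'), expected_type=compat_str)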
    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)

@@ -1632,6 +1756,12 @@ class InfoExtractor(object):
            'format_note': 'Quality selection URL',
        }

    def _report_ignoring_subs(self, name):
        self.report_warning(bug_reports_message(
            'Ignoring subtitle tracks found in the {0} manifest; '
            'if any subtitle tracks are missing,'.format(name)
        ), only_once=True)

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,

@@ -2072,23 +2202,46 @@ class InfoExtractor(object):
        })
        return entries
    def _extract_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _extract_mpd_formats_and_subtitles(
            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
            fatal=True, data=None, headers=None, query=None):

        # TODO: or not? param not yet implemented
        if self.get_param('ignore_no_formats_error'):
            fatal = False

        res = self._download_xml_handle(
            mpd_url, video_id,
            note='Downloading MPD manifest' if note is None else note,
            errnote='Failed to download MPD manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers or {}, query=query or {})
        if res is False:
            return [], {}
        mpd_doc, urlh = res
        if mpd_doc is None:
            return [], {}

        # We could have been redirected to a new url when we retrieved our mpd file.
        mpd_url = urlh.geturl()
        mpd_base_url = base_url(mpd_url)

        return self._parse_mpd_formats_and_subtitles(
            mpd_doc, mpd_id, mpd_base_url, mpd_url)
    def _parse_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:

@@ -2096,8 +2249,10 @@ class InfoExtractor(object):
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        # TODO: param not yet implemented: default like previous yt-dl logic
        if not self.get_param('dynamic_mpd', False):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

@@ -2107,8 +2262,24 @@ class InfoExtractor(object):
        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None
from ..utils import YoutubeDLHandler
fix_path = YoutubeDLHandler._fix_path
def resolve_base_url(element, parent_base_url=None):
# TODO: use native XML traversal when ready
b_url = traverse_obj(element, (
T(lambda e: e.find(_add_ns('BaseURL')).text)))
if parent_base_url and b_url:
if not parent_base_url[-1] in ('/', ':'):
parent_base_url += '/'
b_url = compat_urlparse.urljoin(parent_base_url, b_url)
if b_url:
b_url = fix_path(b_url)
return b_url or parent_base_url
        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()
            base_url = ms_info['base_url'] = resolve_base_url(element, ms_info.get('base_url'))

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract relevant

@@ -2142,15 +2313,27 @@ class InfoExtractor(object):
            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.get('sourceURL') or base_url
                    initialization_url_range = initialization.get('range')
                    if initialization_url_range:
                        ms_info['initialization_url_range'] = initialization_url_range

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                segment_urls = traverse_obj(segment_urls_e, (
                    Ellipsis, T(lambda e: e.attrib), 'media'))
                if segment_urls:
                    ms_info['segment_urls'] = segment_urls
                segment_urls_range = traverse_obj(segment_urls_e, (
                    Ellipsis, T(lambda e: e.attrib), 'mediaRange',
                    T(lambda r: re.findall(r'^\d+-\d+$', r)), 0))
                if segment_urls_range:
                    ms_info['segment_urls_range'] = segment_urls_range
                    if not segment_urls:
                        ms_info['segment_urls'] = [base_url for _ in segment_urls_range]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
@@ -2166,17 +2349,20 @@ class InfoExtractor(object):
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        stream_numbers = collections.defaultdict(int)
        mpd_base_url = resolve_base_url(mpd_doc, mpd_base_url or mpd_url)
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
                'base_url': mpd_base_url,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                if is_drm_protected(adaptation_set):
                    continue
                adaptation_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    if is_drm_protected(representation):
                        continue
@ -2184,27 +2370,35 @@ class InfoExtractor(object):
representation_attrib.update(representation.attrib) representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType'] mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0] content_type = representation_attrib.get('contentType') or mime_type.split('/')[0]
if content_type == 'text': codec_str = representation_attrib.get('codecs', '')
# TODO implement WebVTT downloading # Some kind of binary subtitle found in some youtube livestreams
pass if mime_type == 'application/x-rawcc':
elif content_type in ('video', 'audio'): codecs = {'scodec': codec_str}
base_url = '' else:
for element in (representation, adaptation_set, period, mpd_doc): codecs = parse_codecs(codec_str)
base_url_e = element.find(_add_ns('BaseURL')) if content_type not in ('video', 'audio', 'text'):
if base_url_e is not None: if mime_type == 'image/jpeg':
base_url = base_url_e.text + base_url content_type = mime_type
if re.match(r'^https?://', base_url): elif codecs.get('vcodec', 'none') != 'none':
break content_type = 'video'
if mpd_base_url and not re.match(r'^https?://', base_url): elif codecs.get('acodec', 'none') != 'none':
if not mpd_base_url.endswith('/') and not base_url.startswith('/'): content_type = 'audio'
mpd_base_url += '/' elif codecs.get('scodec', 'none') != 'none':
base_url = mpd_base_url + base_url content_type = 'text'
representation_id = representation_attrib.get('id') elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
lang = representation_attrib.get('lang') content_type = 'text'
url_el = representation.find(_add_ns('BaseURL')) else:
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None) self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
bandwidth = int_or_none(representation_attrib.get('bandwidth')) continue
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
format_id = join_nonempty(representation_id or content_type, mpd_id)
if content_type in ('video', 'audio'):
f = { f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id, 'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'manifest_url': mpd_url, 'manifest_url': mpd_url,
@@ -2219,104 +2413,130 @@ class InfoExtractor(object):
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                        }
                        f.update(codecs)
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaptation_set_ms_info)
                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        # First off, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        t = t.replace('$$', '$')  # unescape literal '$$'
                        return t
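                    # Illustration (invented template string): the second
                    # re.sub above turns an MPD identifier carrying its own
                    # printf-style width, e.g. 'seg-$Number%05d$.m4s', into
                    # 'seg-%(Number)05d.m4s', so that
                    # t % {'Number': 3} yields 'seg-00003.m4s'.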
                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }
                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'

                    def calc_segment_duration():
                        return float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(
                                    float_or_none(period_duration, segment_duration, default=0)))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info:
                        fragments = []
                        if 's' in representation_ms_info:
                            # No media template
                            # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                            # or any YouTube dashsegments video
                            segment_index = 0
                            timescale = representation_ms_info['timescale']
                            for s in representation_ms_info['s']:
@@ -2328,48 +2548,78 @@ class InfoExtractor(object):
                                    'duration': duration,
                                })
                                segment_index += 1
                        elif 'segment_urls_range' in representation_ms_info:
                            # Segment URLs with mediaRange
                            # Example: https://kinescope.io/200615537/master.mpd
                            # https://github.com/ytdl-org/youtube-dl/issues/30235
                            # or any mpd generated with Bento4 `mp4dash --no-split --use-segment-list`
                            segment_duration = calc_segment_duration()
                            for segment_url, segment_url_range in zip(
                                    representation_ms_info['segment_urls'], representation_ms_info['segment_urls_range']):
                                fragments.append({
                                    location_key(segment_url): segment_url,
                                    'range': segment_url_range,
                                    'duration': segment_duration,
                                })
                        else:
                            # Segment URLs with no SegmentTimeline
                            # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                            # https://github.com/ytdl-org/youtube-dl/pull/14844
                            segment_duration = calc_segment_duration()
                            for segment_url in representation_ms_info['segment_urls']:
                                fragments.append({
                                    location_key(segment_url): segment_url,
                                    'duration': segment_duration,
                                })
                        representation_ms_info['fragments'] = fragments

                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        base_url = representation_ms_info['base_url']
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info and 'initialization_url_range' in representation_ms_info:
                            # Initialization URL with range (accompanied by Segment URLs with mediaRange above)
                            # https://github.com/ytdl-org/youtube-dl/issues/30235
                            initialization_url = representation_ms_info['initialization_url']
                            f['fragments'].append({
                                location_key(initialization_url): initialization_url,
                                'range': representation_ms_info['initialization_url_range'],
                            })
                        elif 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        elif 'initialization_url_range' in representation_ms_info:
                            # no Initialization URL but range (accompanied by no Segment URLs but mediaRange above)
                            # https://github.com/ytdl-org/youtube-dl/issues/27575
                            f['fragments'].append({
                                location_key(base_url): base_url,
                                'range': representation_ms_info['initialization_url_range'],
                            })
                        f['fragments'].extend(representation_ms_info['fragments'])
                        if not period_duration:
                            period_duration = sum(traverse_obj(representation_ms_info, (
                                'fragments', Ellipsis, 'duration', T(float_or_none))))
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = representation_ms_info['base_url']
                    if content_type in ('video', 'audio', 'image/jpeg'):
                        f['manifest_stream_number'] = stream_numbers[f['url']]
                        stream_numbers[f['url']] += 1
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles
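    # Illustration (identifiers invented): the intended call pattern for the
    # pair-returning API in an extractor's _real_extract() is
    #
    #     formats, subtitles = self._extract_mpd_formats_and_subtitles(
    #         mpd_url, video_id, mpd_id='dash', fatal=False)
    #     self._sort_formats(formats)
    #     return {
    #         'id': video_id,
    #         'title': title,
    #         'formats': formats,
    #         'subtitles': subtitles,
    #     }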
    def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(

@@ -2495,7 +2745,8 @@ class InfoExtractor(object):
                return f
            return {}

        def _media_formats(src, cur_media_type, type_info=None):
            type_info = type_info or {}
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':

@@ -2513,6 +2764,7 @@ class InfoExtractor(object):
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                    'ext': ext,
                }]
            return is_plain_url, formats
@@ -2521,7 +2773,7 @@ class InfoExtractor(object):
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video(?:-js)?|audio)'
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]

@@ -2539,7 +2791,8 @@ class InfoExtractor(object):
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(media_attributes.get('src'))
            if src:
                f = parse_content_type(media_attributes.get('type'))
                _, formats = _media_formats(src, media_type, f)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
        if media_content:

youtube_dl/extractor/dlf.py (new file)

@@ -0,0 +1,204 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
determine_ext,
extract_attributes,
int_or_none,
merge_dicts,
traverse_obj,
url_or_none,
variadic,
)
class DLFBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?deutschlandfunk\.de/'
_BUTTON_REGEX = r'(<button[^>]+alt="Anhören"[^>]+data-audio-diraid[^>]*>)'
def _parse_button_attrs(self, button, audio_id=None):
attrs = extract_attributes(button)
audio_id = audio_id or attrs['data-audio-diraid']
url = traverse_obj(
attrs, 'data-audio-download-src', 'data-audio', 'data-audioreference',
'data-audio-src', expected_type=url_or_none)
ext = determine_ext(url)
formats = (self._extract_m3u8_formats(url, audio_id, fatal=False)
if ext == 'm3u8' else [{'url': url, 'ext': ext, 'vcodec': 'none'}])
self._sort_formats(formats)
def traverse_attrs(path):
path = list(variadic(path))
t = path.pop() if callable(path[-1]) else None
return traverse_obj(attrs, path, expected_type=t, get_all=False)
def txt_or_none(v, default=None):
return default if v is None else (compat_str(v).strip() or default)
return merge_dicts(*reversed([{
'id': audio_id,
# 'extractor_key': DLFIE.ie_key(),
# 'extractor': DLFIE.IE_NAME,
'formats': formats,
}, dict((k, traverse_attrs(v)) for k, v in {
'title': (('data-audiotitle', 'data-audio-title', 'data-audio-download-tracking-title'), txt_or_none),
'duration': (('data-audioduration', 'data-audio-duration'), int_or_none),
'thumbnail': ('data-audioimage', url_or_none),
'uploader': 'data-audio-producer',
'series': 'data-audio-series',
'channel': 'data-audio-origin-site-name',
'webpage_url': ('data-audio-download-tracking-path', url_or_none),
}.items())]))
class DLFIE(DLFBaseIE):
IE_NAME = 'dlf'
_VALID_URL = DLFBaseIE._VALID_URL_BASE + r'[\w-]+-dlf-(?P<id>[\da-f]{8})-100\.html'
_TESTS = [
# Audio as an HLS stream
{
'url': 'https://www.deutschlandfunk.de/tanz-der-saiteninstrumente-das-wild-strings-trio-aus-slowenien-dlf-03a3eb19-100.html',
'info_dict': {
'id': '03a3eb19',
'title': r're:Tanz der Saiteninstrumente [-/] Das Wild Strings Trio aus Slowenien',
'ext': 'm4a',
'duration': 3298,
'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
'uploader': 'Deutschlandfunk',
'series': 'On Stage',
'channel': 'deutschlandfunk'
},
'params': {
'skip_download': 'm3u8'
},
'skip': 'This webpage no longer exists'
}, {
'url': 'https://www.deutschlandfunk.de/russische-athleten-kehren-zurueck-auf-die-sportbuehne-ein-gefaehrlicher-tueroeffner-dlf-d9cc1856-100.html',
'info_dict': {
'id': 'd9cc1856',
'title': 'Russische Athleten kehren zurück auf die Sportbühne: Ein gefährlicher Türöffner',
'ext': 'mp3',
'duration': 291,
'thumbnail': 'https://assets.deutschlandfunk.de/FALLBACK-IMAGE-AUDIO/512x512.png?t=1603714364673',
'uploader': 'Deutschlandfunk',
'series': 'Kommentare und Themen der Woche',
'channel': 'deutschlandfunk'
}
},
]
def _real_extract(self, url):
audio_id = self._match_id(url)
webpage = self._download_webpage(url, audio_id)
return self._parse_button_attrs(
self._search_regex(self._BUTTON_REGEX, webpage, 'button'), audio_id)
class DLFCorpusIE(DLFBaseIE):
IE_NAME = 'dlf:corpus'
IE_DESC = 'DLF Multi-feed Archives'
_VALID_URL = DLFBaseIE._VALID_URL_BASE + r'(?P<id>(?![\w-]+-dlf-[\da-f]{8})[\w-]+-\d+)\.html'
_TESTS = [
# Recorded news broadcast with referrals to related broadcasts
{
'url': 'https://www.deutschlandfunk.de/fechten-russland-belarus-ukraine-protest-100.html',
'info_dict': {
'id': 'fechten-russland-belarus-ukraine-protest-100',
'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
'description': 'md5:91340aab29c71aa7518ad5be13d1e8ad'
},
'playlist_mincount': 5,
'playlist': [{
'info_dict': {
'id': '1fc5d64a',
'title': r're:Wiederzulassung als neutrale Athleten [-/] Was die Rückkehr russischer und belarussischer Sportler beim Fechten bedeutet',
'ext': 'mp3',
'duration': 252,
'thumbnail': 'https://assets.deutschlandfunk.de/aad16241-6b76-4a09-958b-96d0ee1d6f57/512x512.jpg?t=1679480020313',
'uploader': 'Deutschlandfunk',
'series': 'Sport',
'channel': 'deutschlandfunk'
}
}, {
'info_dict': {
'id': '2ada145f',
'title': r're:(?:Sportpolitik / )?Fechtverband votiert für Rückkehr russischer Athleten',
'ext': 'mp3',
'duration': 336,
'thumbnail': 'https://assets.deutschlandfunk.de/FILE_93982766f7317df30409b8a184ac044a/512x512.jpg?t=1678547581005',
'uploader': 'Deutschlandfunk',
'series': 'Deutschlandfunk Nova',
'channel': 'deutschlandfunk-nova'
}
}, {
'info_dict': {
'id': '5e55e8c9',
'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
'ext': 'mp3',
'duration': 187,
'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
'uploader': 'Deutschlandfunk',
'series': 'Sport am Samstag',
'channel': 'deutschlandfunk'
}
}, {
'info_dict': {
'id': '47e1a096',
'title': r're:Rückkehr Russlands im Fechten [-/] "Fassungslos, dass es einfach so passiert ist"',
'ext': 'mp3',
'duration': 602,
'thumbnail': 'https://assets.deutschlandfunk.de/da4c494a-21cc-48b4-9cc7-40e09fd442c2/512x512.jpg?t=1678562155770',
'uploader': 'Deutschlandfunk',
'series': 'Sport am Samstag',
'channel': 'deutschlandfunk'
}
}, {
'info_dict': {
'id': '5e55e8c9',
'title': r're:Wiederzulassung von Russland und Belarus [-/] "Herumlavieren" des Fechter-Bundes sorgt für Unverständnis',
'ext': 'mp3',
'duration': 187,
'thumbnail': 'https://assets.deutschlandfunk.de/a595989d-1ed1-4a2e-8370-b64d7f11d757/512x512.jpg?t=1679173825412',
'uploader': 'Deutschlandfunk',
'series': 'Sport am Samstag',
'channel': 'deutschlandfunk'
}
}]
},
# Podcast feed with tag buttons, playlist count fluctuates
{
'url': 'https://www.deutschlandfunk.de/kommentare-und-themen-der-woche-100.html',
'info_dict': {
'id': 'kommentare-und-themen-der-woche-100',
'title': 'Meinung - Kommentare und Themen der Woche',
'description': 'md5:2901bbd65cd2d45e116d399a099ce5d5',
},
'playlist_mincount': 10,
},
# Podcast feed with no description
{
'url': 'https://www.deutschlandfunk.de/podcast-tolle-idee-100.html',
'info_dict': {
'id': 'podcast-tolle-idee-100',
'title': 'Wissenschaftspodcast - Tolle Idee! - Was wurde daraus?',
},
'playlist_mincount': 11,
},
]
def _real_extract(self, url):
playlist_id = self._match_id(url)
webpage = self._download_webpage(url, playlist_id)
return self.playlist_result(
map(self._parse_button_attrs, re.findall(self._BUTTON_REGEX, webpage)),
playlist_id, self._html_search_meta(['og:title', 'twitter:title'], webpage, default=None),
self._html_search_meta(['description', 'og:description', 'twitter:description'], webpage, default=None))

youtube_dl/extractor/epidemicsound.py (new file)

@@ -0,0 +1,101 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
T,
traverse_obj,
txt_or_none,
unified_timestamp,
url_or_none,
)
class EpidemicSoundIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?epidemicsound\.com/track/(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'https://www.epidemicsound.com/track/yFfQVRpSPz/',
'md5': 'd98ff2ddb49e8acab9716541cbc9dfac',
'info_dict': {
'id': '45014',
'display_id': 'yFfQVRpSPz',
'ext': 'mp3',
'tags': ['foley', 'door', 'knock', 'glass', 'window', 'glass door knock'],
'title': 'Door Knock Door 1',
'duration': 1,
'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/default-sfx/3000x3000.jpg',
'timestamp': 1415320353,
'upload_date': '20141107',
'age_limit': None,
# check that the "best" format was found, since test file MD5 doesn't
# distinguish the formats
'format': 'full',
},
}, {
'url': 'https://www.epidemicsound.com/track/mj8GTTwsZd/',
'md5': 'c82b745890f9baf18dc2f8d568ee3830',
'info_dict': {
'id': '148700',
'display_id': 'mj8GTTwsZd',
'ext': 'mp3',
'tags': ['liquid drum n bass', 'energetic'],
'title': 'Noplace',
'duration': 237,
'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/11138/3000x3000.jpg',
'timestamp': 1694426482,
'release_timestamp': 1700535606,
'upload_date': '20230911',
'age_limit': None,
'format': 'full',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
json_data = self._download_json('https://www.epidemicsound.com/json/track/' + video_id, video_id)
def fmt_or_none(f):
if not f.get('format'):
f['format'] = f.get('format_id')
elif not f.get('format_id'):
f['format_id'] = f['format']
if not (f['url'] and f['format']):
return
if f.get('format_note'):
f['format_note'] = 'track ID ' + f['format_note']
f['preference'] = -1 if f['format'] == 'full' else -2
return f
formats = traverse_obj(json_data, (
'stems', T(dict.items), Ellipsis, {
'format': (0, T(txt_or_none)),
'format_note': (1, 's3TrackId', T(txt_or_none)),
'format_id': (1, 'stemType', T(txt_or_none)),
'url': (1, 'lqMp3Url', T(url_or_none)),
}, T(fmt_or_none)))
self._sort_formats(formats)
info = traverse_obj(json_data, {
'id': ('id', T(txt_or_none)),
'tags': ('metadataTags', Ellipsis, T(txt_or_none)),
'title': ('title', T(txt_or_none)),
'duration': ('length', T(float_or_none)),
'timestamp': ('added', T(unified_timestamp)),
'thumbnail': (('imageUrl', 'cover'), T(url_or_none)),
'age_limit': ('isExplicit', T(lambda b: 18 if b else None)),
'release_timestamp': ('releaseDate', T(unified_timestamp)),
}, get_all=False)
info.update(traverse_obj(json_data, {
'categories': ('genres', Ellipsis, 'tag', T(txt_or_none)),
'tags': ('metadataTags', Ellipsis, T(txt_or_none)),
}))
info.update({
'display_id': video_id,
'formats': formats,
})
return info

youtube_dl/extractor/extractors.py

@@ -159,6 +159,7 @@ from .businessinsider import BusinessInsiderIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .caffeine import CaffeineTVIE
from .callin import CallinIE
from .camdemy import (
    CamdemyIE,

@@ -226,6 +227,7 @@ from .ciscolive import (
    CiscoLiveSearchIE,
)
from .cjsw import CJSWIE
from .clipchamp import ClipchampIE
from .cliphunter import CliphunterIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE

@@ -295,6 +297,10 @@ from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .dlf import (
    DLFCorpusIE,
    DLFIE,
)
from .dfb import DFBIE
from .dhm import DHMIE
from .digg import DiggIE

@@ -352,6 +358,7 @@ from .ellentube import (
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .epidemicsound import EpidemicSoundIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE

@@ -437,6 +444,7 @@ from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gbnews import GBNewsIE
from .gdcvault import GDCVaultIE
from .gedidigital import GediDigitalIE
from .generic import GenericIE

@@ -444,6 +452,13 @@ from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globalplayer import (
    GlobalPlayerLiveIE,
    GlobalPlayerLivePlaylistIE,
    GlobalPlayerAudioIE,
    GlobalPlayerAudioEpisodeIE,
    GlobalPlayerVideoIE
)
from .globo import (
    GloboIE,
    GloboArticleIE,

@@ -975,6 +990,10 @@ from .pornhub import (
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .pr0gramm import (
    Pr0grammIE,
    Pr0grammStaticIE,
)
from .puhutv import (
    PuhuTVIE,
    PuhuTVSerieIE,

@@ -1071,6 +1090,10 @@ from .rutube import (
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .ruv import RuvIE
from .s4c import (
    S4CIE,
    S4CSeriesIE,
)
from .safari import (
    SafariIE,
    SafariApiIE,

@@ -1565,6 +1588,7 @@ from .weibo import (
    WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .whyp import WhypIE
from .wistia import (
    WistiaIE,
    WistiaPlaylistIE,

@@ -1678,7 +1702,3 @@ from .zingmp3 import (
)
from .zoom import ZoomIE
from .zype import ZypeIE

youtube_dl/extractor/gbnews.py (new file)

@@ -0,0 +1,139 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
extract_attributes,
ExtractorError,
T,
traverse_obj,
txt_or_none,
url_or_none,
)
class GBNewsIE(InfoExtractor):
IE_DESC = 'GB News clips, features and live stream'
# \w+ is normally shows or news, but apparently any word redirects to the correct URL
_VALID_URL = r'https?://(?:www\.)?gbnews\.(?:uk|com)/(?:\w+/)?(?P<id>[^#?]+)'
_PLATFORM = 'safari'
_SSMP_URL = 'https://mm-v2.simplestream.com/ssmp/api.php'
_TESTS = [{
'url': 'https://www.gbnews.uk/shows/andrew-neils-message-to-companies-choosing-to-boycott-gb-news/106889',
'info_dict': {
'id': '106889',
'ext': 'mp4',
'title': "Andrew Neil's message to companies choosing to boycott GB News",
'description': 'md5:b281f5d22fd6d5eda64a4e3ba771b351',
},
'skip': '404 not found',
}, {
'url': 'https://www.gbnews.com/news/bbc-claudine-gay-harvard-university-antisemitism-row',
'info_dict': {
'id': '52264136',
'display_id': 'bbc-claudine-gay-harvard-university-antisemitism-row',
'ext': 'mp4',
'title': 'BBC deletes post after furious backlash over headline downplaying antisemitism',
'description': 'The post was criticised by former employers of the broadcaster',
},
}, {
'url': 'https://www.gbnews.uk/watchlive',
'info_dict': {
'id': '1069',
'display_id': 'watchlive',
'ext': 'mp4',
'title': 'GB News Live',
'is_live': True,
},
'params': {
'skip_download': 'm3u8',
},
}]
def _real_extract(self, url):
display_id = self._match_id(url).split('/')[-1]
webpage = self._download_webpage(url, display_id)
# extraction based on https://github.com/ytdl-org/youtube-dl/issues/29341
'''
<div id="video-106908"
class="simplestream"
data-id="GB001"
data-type="vod"
data-key="3Li3Nt2Qs8Ct3Xq9Fi5Uy0Mb2Bj0Qs"
data-token="f9c317c727dc07f515b20036c8ef14a6"
data-expiry="1624300052"
data-uvid="37900558"
data-poster="https://thumbnails.simplestreamcdn.com/gbnews/ondemand/37900558.jpg?width=700&"
data-npaw="false"
data-env="production">
'''
# exception if no match
video_data = self._search_regex(
r'(<div\s[^>]*\bclass\s*=\s*(\'|")(?!.*sidebar\b)simplestream(?:\s[\s\w$-]*)?\2[^>]*>)',
webpage, 'video data')
video_data = extract_attributes(video_data)
ss_id = video_data.get('data-id')
if not ss_id:
raise ExtractorError('Simplestream ID not found')
json_data = self._download_json(
self._SSMP_URL, display_id,
note='Downloading Simplestream JSON metadata',
errnote='Unable to download Simplestream JSON metadata',
query={
'id': ss_id,
'env': video_data.get('data-env', 'production'),
}, fatal=False)
meta_url = traverse_obj(json_data, ('response', 'api_hostname'))
if not meta_url:
raise ExtractorError('No API host found')
uvid = video_data['data-uvid']
dtype = video_data.get('data-type')
stream_data = self._download_json(
'%s/api/%s/stream/%s' % (meta_url, 'show' if dtype == 'vod' else dtype, uvid),
uvid,
query={
'key': video_data.get('data-key'),
'platform': self._PLATFORM,
},
headers={
'Token': video_data.get('data-token'),
'Token-Expiry': video_data.get('data-expiry'),
'Uvid': uvid,
}, fatal=False)
stream_url = traverse_obj(stream_data, (
'response', 'stream', T(url_or_none)))
if not stream_url:
raise ExtractorError('No stream data/URL')
# now known to be a dict
stream_data = stream_data['response']
drm = stream_data.get('drm')
if drm:
self.report_drm(uvid)
formats = self._extract_m3u8_formats(
stream_url, uvid, ext='mp4', entry_protocol='m3u8_native',
fatal=False)
# exception if no formats
self._sort_formats(formats)
return {
'id': uvid,
'display_id': display_id,
'title': (traverse_obj(stream_data, ('title', T(txt_or_none)))
or self._og_search_title(webpage, default=None)
or display_id.replace('-', ' ').capitalize()),
'description': self._og_search_description(webpage, default=None),
'thumbnail': (traverse_obj(video_data, ('data-poster', T(url_or_none)))
or self._og_search_thumbnail(webpage)),
'formats': formats,
'is_live': (dtype == 'live') or None,
}

youtube_dl/extractor/globalplayer.py (new file)

@@ -0,0 +1,273 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
join_nonempty,
merge_dicts,
parse_duration,
str_or_none,
T,
traverse_obj,
unified_strdate,
unified_timestamp,
urlhandle_detect_ext,
)
class GlobalPlayerBaseIE(InfoExtractor):
def _get_page_props(self, url, video_id):
webpage = self._download_webpage(url, video_id)
return self._search_nextjs_data(webpage, video_id)['props']['pageProps']
def _request_ext(self, url, video_id):
return urlhandle_detect_ext(self._request_webpage( # Server rejects HEAD requests
url, video_id, note='Determining source extension'))
@staticmethod
def _clean_desc(x):
x = clean_html(x)
if x:
x = x.replace('\xa0', ' ')
return x
def _extract_audio(self, episode, series):
return merge_dicts({
'vcodec': 'none',
}, traverse_obj(series, {
'series': 'title',
'series_id': 'id',
'thumbnail': 'imageUrl',
'uploader': 'itunesAuthor', # podcasts only
}), traverse_obj(episode, {
'id': 'id',
'description': ('description', T(self._clean_desc)),
'duration': ('duration', T(parse_duration)),
'thumbnail': 'imageUrl',
'url': 'streamUrl',
'timestamp': (('pubDate', 'startDate'), T(unified_timestamp)),
'title': 'title',
}, get_all=False), rev=True)
class GlobalPlayerLiveIE(GlobalPlayerBaseIE):
_VALID_URL = r'https?://www\.globalplayer\.com/live/(?P<id>\w+)/\w+'
_TESTS = [{
'url': 'https://www.globalplayer.com/live/smoothchill/uk/',
'info_dict': {
'id': '2mx1E',
'ext': 'aac',
'display_id': 'smoothchill-uk',
'title': 're:^Smooth Chill.+$',
'thumbnail': 'https://herald.musicradio.com/media/f296ade8-50c9-4f60-911f-924e96873620.png',
'description': 'Music To Chill To',
# 'live_status': 'is_live',
'is_live': True,
},
}, {
# national station
'url': 'https://www.globalplayer.com/live/heart/uk/',
'info_dict': {
'id': '2mwx4',
'ext': 'aac',
'description': 'turn up the feel good!',
'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
# 'live_status': 'is_live',
'is_live': True,
'title': 're:^Heart UK.+$',
'display_id': 'heart-uk',
},
}, {
# regional variation
'url': 'https://www.globalplayer.com/live/heart/london/',
'info_dict': {
'id': 'AMqg',
'ext': 'aac',
'thumbnail': 'https://herald.musicradio.com/media/49b9e8cb-15bf-4bf2-8c28-a4850cc6b0f3.png',
'title': 're:^Heart London.+$',
# 'live_status': 'is_live',
'is_live': True,
'display_id': 'heart-london',
'description': 'turn up the feel good!',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
station = self._get_page_props(url, video_id)['station']
stream_url = station['streamUrl']
return merge_dicts({
'id': station['id'],
'display_id': (
join_nonempty('brandSlug', 'slug', from_dict=station)
or station.get('legacyStationPrefix')),
'url': stream_url,
'ext': self._request_ext(stream_url, video_id),
'vcodec': 'none',
'is_live': True,
}, {
'title': self._live_title(traverse_obj(
station, (('name', 'brandName'), T(str_or_none)),
get_all=False)),
}, traverse_obj(station, {
'description': 'tagline',
'thumbnail': 'brandLogo',
}), rev=True)
class GlobalPlayerLivePlaylistIE(GlobalPlayerBaseIE):
_VALID_URL = r'https?://www\.globalplayer\.com/playlists/(?P<id>\w+)'
_TESTS = [{
# "live playlist"
'url': 'https://www.globalplayer.com/playlists/8bLk/',
'info_dict': {
'id': '8bLk',
'ext': 'aac',
# 'live_status': 'is_live',
'is_live': True,
'description': r're:(?s).+\bclassical\b.+\bClassic FM Hall [oO]f Fame\b',
'thumbnail': 'https://images.globalplayer.com/images/551379?width=450&signature=oMLPZIoi5_dBSHnTMREW0Xg76mA=',
'title': 're:Classic FM Hall of Fame.+$'
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
station = self._get_page_props(url, video_id)['playlistData']
stream_url = station['streamUrl']
return merge_dicts({
'id': video_id,
'url': stream_url,
'ext': self._request_ext(stream_url, video_id),
'vcodec': 'none',
'is_live': True,
}, traverse_obj(station, {
'title': 'title',
'description': ('description', T(self._clean_desc)),
'thumbnail': 'image',
}), rev=True)
class GlobalPlayerAudioIE(GlobalPlayerBaseIE):
_VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)/|catchup/\w+/\w+/)(?P<id>\w+)/?(?:$|[?#])'
_TESTS = [{
# podcast
'url': 'https://www.globalplayer.com/podcasts/42KuaM/',
'playlist_mincount': 5,
'info_dict': {
'id': '42KuaM',
'title': 'Filthy Ritual',
'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
'categories': ['Society & Culture', 'True Crime'],
'uploader': 'Global',
'description': r're:(?s).+\bscam\b.+?\bseries available now\b',
},
}, {
# radio catchup
'url': 'https://www.globalplayer.com/catchup/lbc/uk/46vyD7z/',
'playlist_mincount': 2,
'info_dict': {
'id': '46vyD7z',
'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
'title': 'Nick Ferrari',
'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
},
}]
def _real_extract(self, url):
video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
props = self._get_page_props(url, video_id)
series = props['podcastInfo'] if podcast else props['catchupInfo']
return merge_dicts({
'_type': 'playlist',
'id': video_id,
'entries': [self._extract_audio(ep, series) for ep in traverse_obj(
series, ('episodes', lambda _, v: v['id'] and v['streamUrl']))],
'categories': traverse_obj(series, ('categories', Ellipsis, 'name')) or None,
}, traverse_obj(series, {
'description': ('description', T(self._clean_desc)),
'thumbnail': 'imageUrl',
'title': 'title',
'uploader': 'itunesAuthor', # podcasts only
}), rev=True)
class GlobalPlayerAudioEpisodeIE(GlobalPlayerBaseIE):
_VALID_URL = r'https?://www\.globalplayer\.com/(?:(?P<podcast>podcasts)|catchup/\w+/\w+)/episodes/(?P<id>\w+)/?(?:$|[?#])'
_TESTS = [{
# podcast
'url': 'https://www.globalplayer.com/podcasts/episodes/7DrfNnE/',
'info_dict': {
'id': '7DrfNnE',
'ext': 'mp3',
'title': 'Filthy Ritual - Trailer',
'description': 'md5:1f1562fd0f01b4773b590984f94223e0',
'thumbnail': 'md5:60286e7d12d795bd1bbc9efc6cee643e',
'duration': 225.0,
'timestamp': 1681254900,
'series': 'Filthy Ritual',
'series_id': '42KuaM',
'upload_date': '20230411',
'uploader': 'Global',
},
}, {
# radio catchup
'url': 'https://www.globalplayer.com/catchup/lbc/uk/episodes/2zGq26Vcv1fCWhddC4JAwETXWe/',
'only_matching': True,
# expired: refresh the details with a current show for a full test
'info_dict': {
'id': '2zGq26Vcv1fCWhddC4JAwETXWe',
'ext': 'm4a',
'timestamp': 1682056800,
'series': 'Nick Ferrari',
'thumbnail': 'md5:4df24d8a226f5b2508efbcc6ae874ebf',
'upload_date': '20230421',
'series_id': '46vyD7z',
'description': 'Nick Ferrari At Breakfast is Leading Britain\'s Conversation.',
'title': 'Nick Ferrari',
'duration': 10800.0,
},
}]
def _real_extract(self, url):
video_id, podcast = self._match_valid_url(url).group('id', 'podcast')
props = self._get_page_props(url, video_id)
episode = props['podcastEpisode'] if podcast else props['catchupEpisode']
return self._extract_audio(
episode, traverse_obj(episode, 'podcast', 'show', expected_type=dict) or {})
class GlobalPlayerVideoIE(GlobalPlayerBaseIE):
_VALID_URL = r'https?://www\.globalplayer\.com/videos/(?P<id>\w+)'
_TESTS = [{
'url': 'https://www.globalplayer.com/videos/2JsSZ7Gm2uP/',
'info_dict': {
'id': '2JsSZ7Gm2uP',
'ext': 'mp4',
'description': 'md5:6a9f063c67c42f218e42eee7d0298bfd',
'thumbnail': 'md5:d4498af48e15aae4839ce77b97d39550',
'upload_date': '20230420',
'title': 'Treble Malakai Bayoh sings a sublime Handel aria at Classic FM Live',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._get_page_props(url, video_id)['videoData']
return merge_dicts({
'id': video_id,
}, traverse_obj(meta, {
'url': 'url',
'thumbnail': ('image', 'url'),
'title': 'title',
'upload_date': ('publish_date', T(unified_strdate)),
'description': 'description',
}), rev=True)
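
Aside (not part of the diff): the GlobalPlayer extractors above all funnel metadata through the same merge_dicts()/traverse_obj() pattern. A minimal sketch of that pattern, assuming this branch's youtube_dl.utils behaves as it is used above (merge_dicts accepting rev=, the T() transform wrapper); the station dict is invented:

from youtube_dl.utils import merge_dicts, T, traverse_obj, txt_or_none

station = {  # made-up page data standing in for props['playlistData'] etc.
    'title': 'Example Station',
    'description': '  A tagline  ',
    'image': 'https://images.example.com/logo.jpg',
}

info = merge_dicts({
    'id': 'xyz',
    'url': 'https://media.example.com/stream.aac',
    'is_live': True,
}, traverse_obj(station, {  # dict template: target key -> source path
    'title': 'title',
    'description': ('description', T(txt_or_none)),  # T() wraps a transform
    'thumbnail': 'image',
}), rev=True)  # rev=True merges in reverse order, so later dicts win on clashes

print(info['title'], info['description'])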


@ -1,101 +1,267 @@
# coding: utf-8
 from __future__ import unicode_literals

 import re

 from .common import InfoExtractor
 from ..utils import (
+    determine_ext,
+    ExtractorError,
+    float_or_none,
     int_or_none,
     js_to_json,
+    merge_dicts,
     mimetype2ext,
-    ExtractorError,
+    parse_iso8601,
+    T,
+    traverse_obj,
+    txt_or_none,
+    url_or_none,
 )
-class ImgurIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'
+class ImgurBaseIE(InfoExtractor):
+    # hard-coded value, as also used by ArchiveTeam
_CLIENT_ID = '546c25a59c58ad7'
@classmethod
def _imgur_result(cls, item_id):
return cls.url_result('imgur:%s' % item_id, ImgurIE.ie_key(), item_id)
def _call_api(self, endpoint, video_id, **kwargs):
return self._download_json(
'https://api.imgur.com/post/v1/%s/%s?client_id=%s&include=media,account' % (endpoint, video_id, self._CLIENT_ID),
video_id, **kwargs)
@staticmethod
def get_description(s):
if 'Discover the magic of the internet at Imgur' in s:
return None
return txt_or_none(s)
class ImgurIE(ImgurBaseIE):
_VALID_URL = r'''(?x)
(?:
https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)|
imgur:
)(?P<id>[a-zA-Z0-9]+)
'''
     _TESTS = [{
-        'url': 'https://i.imgur.com/A61SaA1.gifv',
+        'url': 'https://imgur.com/A61SaA1',
         'info_dict': {
             'id': 'A61SaA1',
             'ext': 'mp4',
             'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
+            'timestamp': 1416446068,
+            'upload_date': '20141120',
         },
     }, {
-        'url': 'https://imgur.com/A61SaA1',
+        'url': 'https://i.imgur.com/A61SaA1.gifv',
         'only_matching': True,
     }, {
         'url': 'https://i.imgur.com/crGpqCV.mp4',
         'only_matching': True,
     }, {
-        # no title
+        # previously, no title
         'url': 'https://i.imgur.com/jxBXAMC.gifv',
-        'only_matching': True,
+        'info_dict': {
+            'id': 'jxBXAMC',
+            'ext': 'mp4',
+            'title': 'Fahaka puffer feeding',
+            'timestamp': 1533835503,
+            'upload_date': '20180809',
+        },
     }]
def _extract_twitter_formats(self, html, tw_id='twitter', **kwargs):
fatal = kwargs.pop('fatal', False)
tw_stream = self._html_search_meta('twitter:player:stream', html, fatal=fatal, **kwargs)
if not tw_stream:
return []
ext = mimetype2ext(self._html_search_meta(
'twitter:player:stream:content_type', html, default=None))
width, height = (int_or_none(self._html_search_meta('twitter:player:' + v, html, default=None))
for v in ('width', 'height'))
return [{
'format_id': tw_id,
'url': tw_stream,
'ext': ext or determine_ext(tw_stream),
'width': width,
'height': height,
}]
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        data = self._call_api('media', video_id, fatal=False, expected_status=404)
         webpage = self._download_webpage(
-            'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)
-        width = int_or_none(self._og_search_property(
-            'video:width', webpage, default=None))
-        height = int_or_none(self._og_search_property(
-            'video:height', webpage, default=None))
+            'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id, fatal=not data) or ''
+        if not traverse_obj(data, ('media', 0, (
+                ('type', T(lambda t: t == 'video' or None)),
+                ('metadata', 'is_animated'))), get_all=False):
+            raise ExtractorError(
+                '%s is not a video or animated image' % video_id,
+                expected=True)
media_fmt = traverse_obj(data, ('media', 0, {
'url': ('url', T(url_or_none)),
'ext': 'ext',
'width': ('width', T(int_or_none)),
'height': ('height', T(int_or_none)),
'filesize': ('size', T(int_or_none)),
'acodec': ('metadata', 'has_sound', T(lambda b: None if b else 'none')),
}))
media_url = traverse_obj(media_fmt, 'url')
if media_url:
if not media_fmt.get('ext'):
media_fmt['ext'] = mimetype2ext(traverse_obj(
data, ('media', 0, 'mime_type'))) or determine_ext(media_url)
if traverse_obj(data, ('media', 0, 'type')) == 'image':
media_fmt['acodec'] = 'none'
media_fmt.setdefault('preference', -10)
tw_formats = self._extract_twitter_formats(webpage)
if traverse_obj(tw_formats, (0, 'url')) == media_url:
tw_formats = []
else:
# maybe this isn't an animated image/video?
self._check_formats(tw_formats, video_id)
         video_elements = self._search_regex(
             r'(?s)<div class="video-elements">(.*?)</div>',
             webpage, 'video elements', default=None)
-        if not video_elements:
+        if not (video_elements or tw_formats or media_url):
             raise ExtractorError(
-                'No sources found for video %s. Maybe an image?' % video_id,
+                'No sources found for video %s. Maybe a plain image?' % video_id,
                 expected=True)
-        formats = []
-        for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
-            formats.append({
-                'format_id': m.group('type').partition('/')[2],
-                'url': self._proto_relative_url(m.group('src')),
-                'ext': mimetype2ext(m.group('type')),
-                'width': width,
-                'height': height,
-                'http_headers': {
-                    'User-Agent': 'youtube-dl (like wget)',
-                },
-            })
-
-        gif_json = self._search_regex(
-            r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
-            webpage, 'GIF code', fatal=False)
-        if gif_json:
-            gifd = self._parse_json(
-                gif_json, video_id, transform_source=js_to_json)
-            formats.append({
-                'format_id': 'gif',
-                'preference': -10,
-                'width': width,
-                'height': height,
-                'ext': 'gif',
-                'acodec': 'none',
-                'vcodec': 'gif',
-                'container': 'gif',
-                'url': self._proto_relative_url(gifd['gifUrl']),
-                'filesize': gifd.get('size'),
-                'http_headers': {
-                    'User-Agent': 'youtube-dl (like wget)',
-                },
-            })
+        def mung_format(fmt, *extra):
+            fmt.update({
+                'http_headers': {
+                    'User-Agent': 'youtube-dl (like wget)',
+                },
+            })
+            for d in extra:
+                fmt.update(d)
+            return fmt
+
+        if video_elements:
+            def og_get_size(media_type):
+                return dict((p, int_or_none(self._og_search_property(
+                    ':'.join((media_type, p)), webpage, default=None)))
+                    for p in ('width', 'height'))
+
+            size = og_get_size('video')
+            if all(v is None for v in size.values()):
+                size = og_get_size('image')
+
+            formats = traverse_obj(
+                re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements),
+                (Ellipsis, {
+                    'format_id': ('type', T(lambda s: s.partition('/')[2])),
+                    'url': ('src', T(self._proto_relative_url)),
+                    'ext': ('type', T(mimetype2ext)),
+                }, T(lambda f: mung_format(f, size))))
+
+            gif_json = self._search_regex(
+                r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
+                webpage, 'GIF code', fatal=False)
+            MUST_BRANCH = (None, T(lambda _: None))
+            formats.extend(traverse_obj(gif_json, (
+                T(lambda j: self._parse_json(
+                    j, video_id, transform_source=js_to_json, fatal=False)), {
+                    'url': ('gifUrl', T(self._proto_relative_url)),
+                    'filesize': ('size', T(int_or_none)),
+                }, T(lambda f: mung_format(f, size, {
+                    'format_id': 'gif',
+                    'preference': -10,  # gifs are worse than videos
+                    'ext': 'gif',
+                    'acodec': 'none',
+                    'vcodec': 'gif',
+                    'container': 'gif',
+                })), MUST_BRANCH)))
+        else:
+            formats = []
+
+        # maybe add formats from JSON or page Twitter metadata
+        if not any((u == media_url) for u in traverse_obj(formats, (Ellipsis, 'url'))):
+            formats.append(mung_format(media_fmt))
+        tw_url = traverse_obj(tw_formats, (0, 'url'))
+        if not any((u == tw_url) for u in traverse_obj(formats, (Ellipsis, 'url'))):
+            formats.extend(mung_format(f) for f in tw_formats)

         self._sort_formats(formats)
-        return {
-            'id': video_id,
-            'formats': formats,
-            'title': self._og_search_title(webpage, default=video_id),
-        }
+        return merge_dicts(traverse_obj(data, {
+            'uploader_id': ('account_id', T(txt_or_none),
+                            T(lambda a: a if int_or_none(a) != 0 else None)),
+            'uploader': ('account', 'username', T(txt_or_none)),
+            'uploader_url': ('account', 'avatar_url', T(url_or_none)),
+            'like_count': ('upvote_count', T(int_or_none)),
+            'dislike_count': ('downvote_count', T(int_or_none)),
+            'comment_count': ('comment_count', T(int_or_none)),
+            'age_limit': ('is_mature', T(lambda x: 18 if x else None)),
+            'timestamp': (('updated_at', 'created_at'), T(parse_iso8601)),
+            'release_timestamp': ('created_at', T(parse_iso8601)),
+        }, get_all=False), traverse_obj(data, ('media', 0, 'metadata', {
+            'title': ('title', T(txt_or_none)),
+            'description': ('description', T(self.get_description)),
+            'duration': ('duration', T(float_or_none)),
+            'timestamp': (('updated_at', 'created_at'), T(parse_iso8601)),
+            'release_timestamp': ('created_at', T(parse_iso8601)),
+        })), {
+            'id': video_id,
+            'formats': formats,
+            'title': self._og_search_title(webpage, default='Imgur video ' + video_id),
+            'description': self.get_description(self._og_search_description(webpage)),
+            'thumbnail': url_or_none(self._html_search_meta('thumbnailUrl', webpage, default=None)),
+        })
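
A rough sketch (not part of the diff) of the request the new ImgurBaseIE._call_api() issues, done standalone. The endpoint and client_id are copied from the class above; the payload shape is an assumption inferred from the traverse_obj() paths in _real_extract():

import json
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2, matching the codebase's compat range

CLIENT_ID = '546c25a59c58ad7'  # hard-coded value from ImgurBaseIE above
video_id = 'A61SaA1'  # example ID taken from the _TESTS data

url = ('https://api.imgur.com/post/v1/media/%s?client_id=%s&include=media,account'
       % (video_id, CLIENT_ID))
data = json.loads(urlopen(url).read().decode('utf-8'))

# the extractor then traverses e.g. data['media'][0]['url'] and
# data['media'][0]['metadata']['is_animated'] (assumed shape, per the
# traverse_obj() paths above)
print((data.get('media') or [{}])[0].get('url'))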
-class ImgurGalleryIE(InfoExtractor):
+class ImgurGalleryBaseIE(ImgurBaseIE):
_GALLERY = True
def _real_extract(self, url):
gallery_id = self._match_id(url)
data = self._call_api('albums', gallery_id, fatal=False, expected_status=404)
info = traverse_obj(data, {
'title': ('title', T(txt_or_none)),
'description': ('description', T(self.get_description)),
})
if traverse_obj(data, 'is_album'):
def yield_media_ids():
for m_id in traverse_obj(data, (
'media', lambda _, v: v.get('type') == 'video' or v['metadata']['is_animated'],
'id', T(txt_or_none))):
yield m_id
# if a gallery with exactly one video, apply album metadata to video
media_id = (
self._GALLERY
and traverse_obj(data, ('image_count', T(lambda c: c == 1)))
and next(yield_media_ids(), None))
if not media_id:
result = self.playlist_result(
map(self._imgur_result, yield_media_ids()), gallery_id)
result.update(info)
return result
gallery_id = media_id
result = self._imgur_result(gallery_id)
info['_type'] = 'url_transparent'
result.update(info)
return result
class ImgurGalleryIE(ImgurGalleryBaseIE):
     IE_NAME = 'imgur:gallery'
     _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'
@ -106,49 +272,93 @@ class ImgurGalleryIE(InfoExtractor):
             'title': 'Adding faces make every GIF better',
         },
         'playlist_count': 25,
+        'skip': 'Zoinks! You\'ve taken a wrong turn.',
     }, {
+        # TODO: static images - replace with animated/video gallery
         'url': 'http://imgur.com/topic/Aww/ll5Vk',
         'only_matching': True,
     }, {
         'url': 'https://imgur.com/gallery/YcAQlkx',
+        'add_ies': ['Imgur'],
         'info_dict': {
             'id': 'YcAQlkx',
             'ext': 'mp4',
             'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
-        }
+            'timestamp': 1358554297,
+            'upload_date': '20130119',
+            'uploader_id': '1648642',
+            'uploader': 'wittyusernamehere',
+        },
     }, {
+        # TODO: static image - replace with animated/video gallery
         'url': 'http://imgur.com/topic/Funny/N8rOudd',
         'only_matching': True,
     }, {
         'url': 'http://imgur.com/r/aww/VQcQPhM',
-        'only_matching': True,
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'VQcQPhM',
+            'ext': 'mp4',
+            'title': 'The boss is here',
+            'timestamp': 1476494751,
+            'upload_date': '20161015',
+            'uploader_id': '19138530',
+            'uploader': 'thematrixcam',
+        },
+    },
+    # from PR #16674
+    {
+        'url': 'https://imgur.com/t/unmuted/6lAn9VQ',
+        'info_dict': {
+            'id': '6lAn9VQ',
+            'title': 'Penguins !',
+        },
+        'playlist_count': 3,
+    }, {
+        'url': 'https://imgur.com/t/unmuted/kx2uD3C',
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'ZVMv45i',
+            'ext': 'mp4',
+            'title': 'Intruder',
+            'timestamp': 1528129683,
+            'upload_date': '20180604',
+        },
+    }, {
+        'url': 'https://imgur.com/t/unmuted/wXSK0YH',
+        'add_ies': ['Imgur'],
+        'info_dict': {
+            'id': 'JCAP4io',
+            'ext': 'mp4',
+            'title': 're:I got the blues$',
+            'description': 'Lukas vocal stylings.\n\nFP edit: dont encourage me. Ill never stop posting Luka and friends.',
+            'timestamp': 1527809525,
+            'upload_date': '20180531',
+        },
     }]
-    def _real_extract(self, url):
-        gallery_id = self._match_id(url)
-
-        data = self._download_json(
-            'https://imgur.com/gallery/%s.json' % gallery_id,
-            gallery_id)['data']['image']
-
-        if data.get('is_album'):
-            entries = [
-                self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])
-                for image in data['album_images']['images'] if image.get('hash')]
-            return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))
-
-        return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)
-
-
-class ImgurAlbumIE(ImgurGalleryIE):
+
+class ImgurAlbumIE(ImgurGalleryBaseIE):
     IE_NAME = 'imgur:album'
     _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
+    _GALLERY = False
     _TESTS = [{
+        # TODO: only static images - replace with animated/video gallery
         'url': 'http://imgur.com/a/j6Orj',
-        'info_dict': {
-            'id': 'j6Orj',
-            'title': 'A Literary Analysis of "Star Wars: The Force Awakens"',
-        },
-        'playlist_count': 12,
+        'only_matching': True,
+    },
+    # from PR #21693
+    {
+        'url': 'https://imgur.com/a/iX265HX',
+        'info_dict': {
+            'id': 'iX265HX',
+            'title': 'enen-no-shouboutai'
+        },
+        'playlist_count': 2,
+    }, {
+        'url': 'https://imgur.com/a/8pih2Ed',
+        'info_dict': {
+            'id': '8pih2Ed'
+        },
+        'playlist_mincount': 1,
     }]


@ -59,7 +59,7 @@ class ITVBaseIE(InfoExtractor):
     @staticmethod
     def _vanilla_ua_header():
-        return {'User-agent': 'Mozilla/5.0'}
+        return {'User-Agent': 'Mozilla/5.0'}

     def _download_webpage_handle(self, url, video_id, *args, **kwargs):
         # specialised to (a) use vanilla UA (b) detect geo-block

 @ -69,7 +69,7 @@ class ITVBaseIE(InfoExtractor):

                 'user_agent' not in params
                 and not any(re.match(r'(?i)user-agent\s*:', h)
                             for h in (params.get('headers') or []))
-                and 'User-agent' not in (kwargs.get('headers') or {})):
+                and 'User-Agent' not in (kwargs.get('headers') or {})):
             kwargs.setdefault('headers', {})
             kwargs['headers'] = self._vanilla_ua_header()


@ -7,6 +7,7 @@ import subprocess
 import tempfile

 from ..compat import (
+    compat_open as open,
     compat_urlparse,
     compat_kwargs,
 )

youtube_dl/extractor/s4c.py (new file)

@ -0,0 +1,124 @@
# coding: utf-8
from __future__ import unicode_literals
from functools import partial as partial_f
from .common import InfoExtractor
from ..utils import (
float_or_none,
merge_dicts,
T,
traverse_obj,
txt_or_none,
url_or_none,
)
class S4CIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/programme/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.s4c.cymru/clic/programme/861362209',
'info_dict': {
'id': '861362209',
'ext': 'mp4',
'title': 'Y Swn',
'description': 'md5:f7681a30e4955b250b3224aa9fe70cf0',
'duration': 5340,
'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Y_Swn_2023S4C_099_ii.jpg',
},
}, {
'url': 'https://www.s4c.cymru/clic/programme/856636948',
'info_dict': {
'id': '856636948',
'ext': 'mp4',
'title': 'Am Dro',
'duration': 2880,
'description': 'md5:100d8686fc9a632a0cb2db52a3433ffe',
'thumbnail': 'https://www.s4c.cymru/amg/1920x1080/Am_Dro_2022-23S4C_P6_4005.jpg',
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
details = self._download_json(
'https://www.s4c.cymru/df/full_prog_details',
video_id, query={
'lang': 'e',
'programme_id': video_id,
}, fatal=False)
player_config = self._download_json(
'https://player-api.s4c-cdn.co.uk/player-configuration/prod', video_id, query={
'programme_id': video_id,
'signed': '0',
'lang': 'en',
'mode': 'od',
'appId': 'clic',
'streamName': '',
}, note='Downloading player config JSON')
m3u8_url = self._download_json(
'https://player-api.s4c-cdn.co.uk/streaming-urls/prod', video_id, query={
'mode': 'od',
'application': 'clic',
'region': 'WW',
'extra': 'false',
'thirdParty': 'false',
'filename': player_config['filename'],
}, note='Downloading streaming urls JSON')['hls']
formats = self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', m3u8_id='hls', entry_protocol='m3u8_native')
self._sort_formats(formats)
subtitles = {}
for sub in traverse_obj(player_config, ('subtitles', lambda _, v: url_or_none(v['0']))):
subtitles.setdefault(sub.get('3', 'en'), []).append({
'url': sub['0'],
'name': sub.get('1'),
})
return merge_dicts({
'id': video_id,
'formats': formats,
'subtitles': subtitles,
'thumbnail': url_or_none(player_config.get('poster')),
}, traverse_obj(details, ('full_prog_details', 0, {
'title': (('programme_title', 'series_title'), T(txt_or_none)),
'description': ('full_billing', T(txt_or_none)),
'duration': ('duration', T(partial_f(float_or_none, invscale=60))),
}), get_all=False),
rev=True)
class S4CSeriesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?s4c\.cymru/clic/series/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.s4c.cymru/clic/series/864982911',
'playlist_mincount': 6,
'info_dict': {
'id': '864982911',
'title': 'Iaith ar Daith',
},
}, {
'url': 'https://www.s4c.cymru/clic/series/866852587',
'playlist_mincount': 8,
'info_dict': {
'id': '866852587',
'title': 'FFIT Cymru',
},
}]
def _real_extract(self, url):
series_id = self._match_id(url)
series_details = self._download_json(
'https://www.s4c.cymru/df/series_details', series_id, query={
'lang': 'e',
'series_id': series_id,
'show_prog_in_series': 'Y'
}, note='Downloading series details JSON')
return self.playlist_result(
(self.url_result('https://www.s4c.cymru/clic/programme/' + episode_id, S4CIE, episode_id)
for episode_id in traverse_obj(series_details, ('other_progs_in_series', Ellipsis, 'id'))),
playlist_id=series_id, playlist_title=traverse_obj(
series_details, ('full_prog_details', 0, 'series_title', T(txt_or_none))))
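
Aside (not part of the diff): a standalone sketch of the two-step stream lookup S4CIE performs above, player config first, then the HLS URL. Endpoints and query parameters are copied from the extractor; the plain-urllib plumbing here is illustrative only.

import json
try:
    from urllib.parse import urlencode
    from urllib.request import urlopen
except ImportError:  # Python 2
    from urllib import urlencode, urlopen

programme_id = '861362209'  # example ID from _TESTS

player_config = json.loads(urlopen(
    'https://player-api.s4c-cdn.co.uk/player-configuration/prod?' + urlencode({
        'programme_id': programme_id,
        'signed': '0',
        'lang': 'en',
        'mode': 'od',
        'appId': 'clic',
        'streamName': '',
    })).read().decode('utf-8'))

streaming = json.loads(urlopen(
    'https://player-api.s4c-cdn.co.uk/streaming-urls/prod?' + urlencode({
        'mode': 'od',
        'application': 'clic',
        'region': 'WW',
        'extra': 'false',
        'thirdParty': 'false',
        'filename': player_config['filename'],
    })).read().decode('utf-8'))

print(streaming['hls'])  # the m3u8 URL fed to _extract_m3u8_formats() above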


@ -3,17 +3,23 @@ from __future__ import unicode_literals
 from .common import InfoExtractor
+from ..utils import (
+    float_or_none,
+    int_or_none,
+    url_or_none,
+)


 class TelewebionIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?telewebion\.com/#!/episode/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?telewebion\.com/(episode|clip)/(?P<id>[a-zA-Z0-9]+)'

     _TEST = {
-        'url': 'http://www.telewebion.com/#!/episode/1263668/',
+        'url': 'http://www.telewebion.com/episode/0x1b3139c/',
         'info_dict': {
-            'id': '1263668',
+            'id': '0x1b3139c',
             'ext': 'mp4',
             'title': 'قرعه\u200cکشی لیگ قهرمانان اروپا',
-            'thumbnail': r're:^https?://.*\.jpg',
+            'thumbnail': r're:^https?://static\.telewebion\.com/episodeImages/.*/default',
             'view_count': int,
         },
         'params': {
@ -25,31 +31,24 @@ class TelewebionIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)

-        secure_token = self._download_webpage(
-            'http://m.s2.telewebion.com/op/op?action=getSecurityToken', video_id)
-        episode_details = self._download_json(
-            'http://m.s2.telewebion.com/op/op', video_id,
-            query={'action': 'getEpisodeDetails', 'episode_id': video_id})
-
-        m3u8_url = 'http://m.s1.telewebion.com/smil/%s.m3u8?filepath=%s&m3u8=1&secure_token=%s' % (
-            video_id, episode_details['file_path'], secure_token)
+        episode_details = self._download_json('https://gateway.telewebion.ir/kandoo/episode/getEpisodeDetail/?EpisodeId={0}'.format(video_id), video_id)
+        episode_details = episode_details['body']['queryEpisode'][0]
+
+        channel_id = episode_details['channel']['descriptor']
+        episode_image_id = episode_details.get('image')
+        episode_image = 'https://static.telewebion.com/episodeImages/{0}/default'.format(episode_image_id) if episode_image_id else None
+
+        m3u8_url = 'https://cdna.telewebion.com/{0}/episode/{1}/playlist.m3u8'.format(channel_id, video_id)
         formats = self._extract_m3u8_formats(
-            m3u8_url, video_id, ext='mp4', m3u8_id='hls')
-
-        picture_paths = [
-            episode_details.get('picture_path'),
-            episode_details.get('large_picture_path'),
-        ]
-
-        thumbnails = [{
-            'url': picture_path,
-            'preference': idx,
-        } for idx, picture_path in enumerate(picture_paths) if picture_path is not None]
+            m3u8_url, video_id, ext='mp4', m3u8_id='hls',
+            entry_protocol='m3u8_native')
+        self._sort_formats(formats)

         return {
             'id': video_id,
             'title': episode_details['title'],
             'formats': formats,
-            'thumbnails': thumbnails,
-            'view_count': episode_details.get('view_count'),
+            'thumbnail': url_or_none(episode_image),
+            'view_count': int_or_none(episode_details.get('view_count')),
+            'duration': float_or_none(episode_details.get('duration')),
         }
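
Aside (not part of the diff): resolving a Telewebion episode to its HLS playlist the way the rewritten extractor does. The response shape ('body' -> 'queryEpisode' -> [0]) is taken from the code above; anything beyond that is an assumption.

import json
try:
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen

video_id = '0x1b3139c'  # example ID from the updated _TEST

detail = json.loads(urlopen(
    'https://gateway.telewebion.ir/kandoo/episode/getEpisodeDetail/?EpisodeId='
    + video_id).read().decode('utf-8'))
episode = detail['body']['queryEpisode'][0]

channel_id = episode['channel']['descriptor']
m3u8_url = 'https://cdna.telewebion.com/{0}/episode/{1}/playlist.m3u8'.format(
    channel_id, video_id)
print(m3u8_url)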


@ -2,9 +2,22 @@
 from __future__ import unicode_literals

 import re
+import time

 from .common import InfoExtractor
-from ..utils import ExtractorError
+from ..compat import compat_kwargs
+from ..utils import (
+    base_url,
+    determine_ext,
+    ExtractorError,
+    float_or_none,
+    merge_dicts,
+    T,
+    traverse_obj,
+    txt_or_none,
+    url_basename,
+    url_or_none,
+)
class Vbox7IE(InfoExtractor):

 @ -20,23 +33,27 @@ class Vbox7IE(InfoExtractor):

                         )
                         (?P<id>[\da-fA-F]+)
                     '''
+    _EMBED_REGEX = [r'<iframe[^>]+src=(?P<q>["\'])(?P<url>(?:https?:)?//vbox7\.com/emb/external\.php.+?)(?P=q)']
     _GEO_COUNTRIES = ['BG']
     _TESTS = [{
-        'url': 'http://vbox7.com/play:0946fff23c',
-        'md5': 'a60f9ab3a3a2f013ef9a967d5f7be5bf',
+        # the http: URL just redirects here
+        'url': 'https://vbox7.com/play:0946fff23c',
+        'md5': '50ca1f78345a9c15391af47d8062d074',
         'info_dict': {
             'id': '0946fff23c',
             'ext': 'mp4',
             'title': 'Борисов: Притеснен съм за бъдещето на България',
             'description': 'По думите му е опасно страната ни да бъде обявена за "сигурна"',
-            'thumbnail': r're:^https?://.*\.jpg$',
             'timestamp': 1470982814,
             'upload_date': '20160812',
             'uploader': 'zdraveibulgaria',
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'view_count': int,
+            'duration': 2640,
         },
-        'params': {
-            'proxy': '127.0.0.1:8118',
-        },
+        'expected_warnings': [
+            'Unable to download webpage',
+        ],
     }, {
         'url': 'http://vbox7.com/play:249bb972c2',
         'md5': '99f65c0c9ef9b682b97313e052734c3f',
@ -44,8 +61,15 @@ class Vbox7IE(InfoExtractor):
             'id': '249bb972c2',
             'ext': 'mp4',
             'title': 'Смях! Чудо - чист за секунди - Скрита камера',
+            'description': 'Смях! Чудо - чист за секунди - Скрита камера',
+            'timestamp': 1360215023,
+            'upload_date': '20130207',
+            'uploader': 'svideteliat_ot_varshava',
+            'thumbnail': 'https://i49.vbox7.com/o/249/249bb972c20.jpg',
+            'view_count': int,
+            'duration': 83,
         },
-        'skip': 'georestricted',
+        'expected_warnings': ['Failed to download m3u8 information'],
     }, {
         'url': 'http://vbox7.com/emb/external.php?vid=a240d20f9c&autoplay=1',
         'only_matching': True,

 @ -54,52 +78,127 @@ class Vbox7IE(InfoExtractor):

         'only_matching': True,
     }]
-    @staticmethod
-    def _extract_url(webpage):
-        mobj = re.search(
-            r'<iframe[^>]+src=(?P<q>["\'])(?P<url>(?:https?:)?//vbox7\.com/emb/external\.php.+?)(?P=q)',
-            webpage)
+    @classmethod
+    def _extract_url(cls, webpage):
+        mobj = re.search(cls._EMBED_REGEX[0], webpage)
         if mobj:
             return mobj.group('url')
# specialisation to transform what looks like ld+json that
# may contain invalid character combinations
# transform_source=None, fatal=True
def _parse_json(self, json_string, video_id, *args, **kwargs):
if '"@context"' in json_string[:30]:
# this is ld+json, or that's the way to bet
transform_source = args[0] if len(args) > 0 else kwargs.get('transform_source')
if not transform_source:
def fix_chars(src):
# fix malformed ld+json: replace raw CRLFs with escaped LFs
return re.sub(
r'"[^"]+"', lambda m: re.sub(r'\r?\n', r'\\n', m.group(0)), src)
if len(args) > 0:
args = (fix_chars,) + args[1:]
else:
kwargs['transform_source'] = fix_chars
kwargs = compat_kwargs(kwargs)
return super(Vbox7IE, self)._parse_json(
json_string, video_id, *args, **kwargs)
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        url = 'https://vbox7.com/play:%s' % (video_id,)

+        now = time.time()
         response = self._download_json(
-            'https://www.vbox7.com/ajax/video/nextvideo.php?vid=%s' % video_id,
-            video_id)
+            'https://www.vbox7.com/aj/player/item/options', video_id,
+            query={'vid': video_id}, headers={'Referer': url})
+        # estimate time to which possible `ago` member is relative
+        now = now + 0.5 * (time.time() - now)

-        if 'error' in response:
+        if traverse_obj(response, 'error'):
             raise ExtractorError(
                 '%s said: %s' % (self.IE_NAME, response['error']), expected=True)

-        video = response['options']
-
-        title = video['title']
-        video_url = video['src']
-
-        if '/na.mp4' in video_url:
+        src_url = traverse_obj(response, ('options', 'src', T(url_or_none))) or ''
+
+        fmt_base = url_basename(src_url).rsplit('.', 1)[0].rsplit('_', 1)[0]
+        if fmt_base in ('na', 'vn'):
             self.raise_geo_restricted(countries=self._GEO_COUNTRIES)

-        uploader = video.get('uploader')
+        ext = determine_ext(src_url)
+        if ext == 'mpd':
+            # extract MPD
+            try:
+                formats, subtitles = self._extract_mpd_formats_and_subtitles(
+                    src_url, video_id, 'dash', fatal=False)
+            except KeyError:  # fatal doesn't catch this
+                self.report_warning('Failed to parse MPD manifest')
+                formats, subtitles = [], {}
+        elif ext != 'm3u8':
+            formats = [{
+                'url': src_url,
+            }] if src_url else []
+            subtitles = {}

-        webpage = self._download_webpage(
-            'http://vbox7.com/play:%s' % video_id, video_id, fatal=None)
+        if src_url:
+            # possibly extract HLS, based on https://github.com/yt-dlp/yt-dlp/pull/9100
+            fmt_base = base_url(src_url) + fmt_base
+            # prepare for _extract_m3u8_formats_and_subtitles()
+            # hls_formats, hls_subs = self._extract_m3u8_formats_and_subtitles(
+            hls_formats = self._extract_m3u8_formats(
+                '{0}.m3u8'.format(fmt_base), video_id, m3u8_id='hls', fatal=False)
+            formats.extend(hls_formats)
+            # self._merge_subtitles(hls_subs, target=subtitles)

-        info = {}
+        # In case MPD/HLS cannot be parsed, or anyway, get mp4 combined
+        # formats usually provided to Safari, iOS, and old Windows
+        video = response['options']
+        resolutions = (1080, 720, 480, 240, 144)
+        highest_res = traverse_obj(video, (
+            'highestRes', T(int))) or resolutions[0]
+        resolutions = traverse_obj(video, (
+            'resolutions', lambda _, r: highest_res >= int(r) > 0)) or resolutions
+        mp4_formats = traverse_obj(resolutions, (
+            Ellipsis, T(lambda res: {
+                'url': '{0}_{1}.mp4'.format(fmt_base, res),
+                'format_id': 'http-{0}'.format(res),
+                'height': res,
+            })))
+        # if above formats are flaky, enable the line below
+        # self._check_formats(mp4_formats, video_id)
+        formats.extend(mp4_formats)

-        if webpage:
-            info = self._search_json_ld(
-                webpage.replace('"/*@context"', '"@context"'), video_id,
-                fatal=False)
+        self._sort_formats(formats)

-        info.update({
+        webpage = self._download_webpage(url, video_id, fatal=False) or ''
+
+        info = self._search_json_ld(
+            webpage.replace('"/*@context"', '"@context"'), video_id,
+            fatal=False) if webpage else {}
+
+        if not info.get('title'):
+            info['title'] = traverse_obj(response, (
+                'options', 'title', T(txt_or_none))) or self._og_search_title(webpage)
+
+        def if_missing(k):
+            return lambda x: None if k in info else x
+
+        info = merge_dicts(info, {
             'id': video_id,
-            'title': title,
-            'url': video_url,
-            'uploader': uploader,
-            'thumbnail': self._proto_relative_url(
-                info.get('thumbnail') or self._og_search_thumbnail(webpage),
-                'http:'),
-        })
+            'formats': formats,
+            'subtitles': subtitles or None,
+        }, info, traverse_obj(response, ('options', {
+            'uploader': ('uploader', T(txt_or_none)),
+            'timestamp': ('ago', T(if_missing('timestamp')), T(lambda t: int(round((now - t) / 60.0)) * 60)),
+            'duration': ('duration', T(if_missing('duration')), T(float_or_none)),
+        })))
+        if 'thumbnail' not in info:
+            info['thumbnail'] = self._proto_relative_url(
+                info.get('thumbnail') or self._og_search_thumbnail(webpage),
+                'https:')

         return info
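
Aside (not part of the diff): a tiny demo of the malformed ld+json repair used in Vbox7IE._parse_json() above. Raw CR/LF inside JSON string literals is replaced with an escaped \n so json.loads() accepts the document; the sample input is made up.

import json
import re


def fix_chars(src):
    # fix malformed ld+json: replace raw CRLFs inside quoted strings
    return re.sub(
        r'"[^"]+"', lambda m: re.sub(r'\r?\n', r'\\n', m.group(0)), src)


broken = '{"@context": "https://schema.org", "name": "line one\r\nline two"}'
print(json.loads(fix_chars(broken))['name'])  # -> 'line one\nline two'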


@ -0,0 +1,55 @@
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
merge_dicts,
str_or_none,
T,
traverse_obj,
url_or_none,
)
class WhypIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?whyp\.it/tracks/(?P<id>\d+)'
_TESTS = [{
'url': 'https://www.whyp.it/tracks/18337/home-page-example-track-b4kq7',
'md5': 'c1187b42ebf8605284e3dc92aeb33d16',
'info_dict': {
'url': 'https://cdn.whyp.it/50eb17cc-e9ff-4e18-b89b-dc9206a95cb1.mp3',
'id': '18337',
'title': 'Home Page Example Track',
'description': r're:(?s).+\bexample track\b',
'ext': 'mp3',
'duration': 52.82,
'uploader': 'Brad',
'uploader_id': '1',
'thumbnail': 'https://cdn.whyp.it/a537bb36-3373-4c61-96c8-27fc1b2f427a.jpg',
},
}, {
'url': 'https://www.whyp.it/tracks/18337',
'only_matching': True,
}]
def _real_extract(self, url):
unique_id = self._match_id(url)
webpage = self._download_webpage(url, unique_id)
data = self._search_nuxt_data(webpage, unique_id)['rawTrack']
return merge_dicts({
'url': data['audio_url'],
'id': unique_id,
}, traverse_obj(data, {
'title': 'title',
'description': 'description',
'duration': ('duration', T(float_or_none)),
'uploader': ('user', 'username'),
'uploader_id': ('user', 'id', T(str_or_none)),
'thumbnail': ('artwork_url', T(url_or_none)),
}), {
'ext': 'mp3',
'vcodec': 'none',
'http_headers': {'Referer': 'https://whyp.it/'},
}, rev=True)


@ -2,6 +2,7 @@
 from __future__ import unicode_literals

+import collections
 import itertools
 import json
 import os.path

 @ -19,21 +20,26 @@ from ..compat import (

     compat_urllib_parse_parse_qs as compat_parse_qs,
     compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlparse,
+    compat_zip as zip,
 )
 from ..jsinterp import JSInterpreter
 from ..utils import (
+    ExtractorError,
     clean_html,
     dict_get,
     error_to_compat_str,
-    ExtractorError,
     float_or_none,
     extract_attributes,
     get_element_by_attribute,
     int_or_none,
+    join_nonempty,
     js_to_json,
+    LazyList,
     merge_dicts,
     mimetype2ext,
+    NO_DEFAULT,
     parse_codecs,
+    parse_count,
     parse_duration,
     parse_qs,
     qualities,

 @ -41,8 +47,11 @@ from ..utils import (

     smuggle_url,
     str_or_none,
     str_to_int,
+    T,
     traverse_obj,
+    try_call,
     try_get,
+    txt_or_none,
     unescapeHTML,
     unified_strdate,
     unsmuggle_url,
@ -256,16 +265,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         cookies = self._get_cookies('https://www.youtube.com/')
         if cookies.get('__Secure-3PSID'):
             return
-        consent_id = None
-        consent = cookies.get('CONSENT')
-        if consent:
-            if 'YES' in consent.value:
-                return
-            consent_id = self._search_regex(
-                r'PENDING\+(\d+)', consent.value, 'consent', default=None)
-        if not consent_id:
-            consent_id = random.randint(100, 999)
-        self._set_cookie('.youtube.com', 'CONSENT', 'YES+cb.20210328-17-p0.en+FX+%s' % consent_id)
+        socs = cookies.get('SOCS')
+        if socs and not socs.value.startswith('CAA'):  # not consented
+            return
+        self._set_cookie('.youtube.com', 'SOCS', 'CAI', secure=True)  # accept all (required for mixes)

     def _real_initialize(self):
         self._initialize_consent()
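
Aside (not part of the diff): the new consent logic in isolation. The SOCS cookie replaces the old CONSENT handling; the interpretation of values follows the inline comments above ('CAA...' still needs consent, 'CAI' = accept all), and the cookie plumbing here is illustrative only.

def ensure_consent(cookies, set_cookie):
    if cookies.get('__Secure-3PSID'):  # logged-in session: nothing to do
        return
    socs = cookies.get('SOCS')
    if socs and not socs.startswith('CAA'):  # not consented (per the comment above)
        return
    set_cookie('.youtube.com', 'SOCS', 'CAI')  # accept all (required for mixes)


jar = {}
ensure_consent(jar, lambda domain, key, value: jar.update({key: value}))
print(jar)  # -> {'SOCS': 'CAI'}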
@ -444,7 +447,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             extract_attributes(self._search_regex(
                 r'''(?s)(<link\b[^>]+\bitemprop\s*=\s*("|')%s\2[^>]*>)'''
                 % re.escape(var_name),
-                get_element_by_attribute('itemprop', 'author', webpage) or '',
+                get_element_by_attribute('itemprop', 'author', webpage or '') or '',
                 'author link', default='')),
             paths[var_name][0])
@ -1249,7 +1252,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'title': 'IMG 3456',
                 'description': '',
                 'upload_date': '20170613',
-                'uploader': 'ElevageOrVert',
+                'uploader': "l'Or Vert asbl",
                 'uploader_id': '@ElevageOrVert',
             },
             'params': {
@ -1462,6 +1465,30 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         self._code_cache = {}
         self._player_cache = {}
# *ytcfgs, webpage=None
def _extract_player_url(self, *ytcfgs, **kw_webpage):
if ytcfgs and not isinstance(ytcfgs[0], dict):
webpage = kw_webpage.get('webpage') or ytcfgs[0]
if webpage:
player_url = self._search_regex(
r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
webpage or '', 'player URL', fatal=False)
if player_url:
ytcfgs = ytcfgs + ({'PLAYER_JS_URL': player_url},)
return traverse_obj(
ytcfgs, (Ellipsis, 'PLAYER_JS_URL'), (Ellipsis, 'WEB_PLAYER_CONTEXT_CONFIGS', Ellipsis, 'jsUrl'),
get_all=False, expected_type=lambda u: urljoin('https://www.youtube.com', u))
def _download_player_url(self, video_id, fatal=False):
res = self._download_webpage(
'https://www.youtube.com/iframe_api',
note='Downloading iframe API JS', video_id=video_id, fatal=fatal)
player_version = self._search_regex(
r'player\\?/([0-9a-fA-F]{8})\\?/', res or '', 'player version', fatal=fatal,
default=NO_DEFAULT if res else None)
if player_version:
return 'https://www.youtube.com/s/player/{0}/player_ias.vflset/en_US/base.js'.format(player_version)
     def _signature_cache_id(self, example_sig):
         """ Return a string representation of a signature """
         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
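
Aside (not part of the diff): what _signature_cache_id() produces is a dot-separated list of the lengths of the signature's parts, which is stable across videos using the same player and so works as a cache key.

example_sig = 'AAA.BBBBB.CC'  # made-up signature
print('.'.join(str(len(part)) for part in example_sig.split('.')))  # -> 3.5.2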
@ -1476,46 +1503,49 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             raise ExtractorError('Cannot identify player %r' % player_url)
         return id_m.group('id')

-    def _get_player_code(self, video_id, player_url, player_id=None):
+    def _load_player(self, video_id, player_url, fatal=True, player_id=None):
         if not player_id:
             player_id = self._extract_player_info(player_url)

         if player_id not in self._code_cache:
-            self._code_cache[player_id] = self._download_webpage(
-                player_url, video_id,
+            code = self._download_webpage(
+                player_url, video_id, fatal=fatal,
                 note='Downloading player ' + player_id,
                 errnote='Download of %s failed' % player_url)
-        return self._code_cache[player_id]
+            if code:
+                self._code_cache[player_id] = code
+        return self._code_cache[player_id] if fatal else self._code_cache.get(player_id)

     def _extract_signature_function(self, video_id, player_url, example_sig):
         player_id = self._extract_player_info(player_url)

         # Read from filesystem cache
-        func_id = 'js_%s_%s' % (
+        func_id = 'js_{0}_{1}'.format(
             player_id, self._signature_cache_id(example_sig))
         assert os.path.basename(func_id) == func_id

-        cache_spec = self._downloader.cache.load('youtube-sigfuncs', func_id)
-        if cache_spec is not None:
-            return lambda s: ''.join(s[i] for i in cache_spec)
-
-        code = self._get_player_code(video_id, player_url, player_id)
-        res = self._parse_sig_js(code)
-
-        test_string = ''.join(map(compat_chr, range(len(example_sig))))
-        cache_res = res(test_string)
-        cache_spec = [ord(c) for c in cache_res]
-        self._downloader.cache.store('youtube-sigfuncs', func_id, cache_spec)
-        return res
+        self.write_debug('Extracting signature function {0}'.format(func_id))
+        cache_spec, code = self.cache.load('youtube-sigfuncs', func_id), None
+
+        if not cache_spec:
+            code = self._load_player(video_id, player_url, player_id)
+        if code:
+            res = self._parse_sig_js(code)
+            test_string = ''.join(map(compat_chr, range(len(example_sig))))
+            cache_spec = [ord(c) for c in res(test_string)]
+            self.cache.store('youtube-sigfuncs', func_id, cache_spec)
+
+        return lambda s: ''.join(s[i] for i in cache_spec)
     def _print_sig_code(self, func, example_sig):
+        if not self.get_param('youtube_print_sig_code'):
+            return
+
         def gen_sig_code(idxs):
             def _genslice(start, end, step):
                 starts = '' if start == 0 else str(start)
                 ends = (':%d' % (end + step)) if end + step >= 0 else ':'
                 steps = '' if step == 1 else (':%d' % step)
-                return 's[%s%s%s]' % (starts, ends, steps)
+                return 's[{0}{1}{2}]'.format(starts, ends, steps)

             step = None
             # Quelch pyflakes warnings - start will be set when step is set
@ -1554,17 +1584,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*encodeURIComponent\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'\bm=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(h\.s\)\)',
                 r'\bc&&\(c=(?P<sig>[a-zA-Z0-9$]{2,})\(decodeURIComponent\(c\)\)',
-                r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\);[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\)',
-                r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
+                r'(?:\b|[^a-zA-Z0-9$])(?P<sig>[a-zA-Z0-9$]{2,})\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)(?:;[a-zA-Z0-9$]{2}\.[a-zA-Z0-9$]{2}\(a,\d+\))?',
                 r'(?P<sig>[a-zA-Z0-9$]+)\s*=\s*function\(\s*a\s*\)\s*{\s*a\s*=\s*a\.split\(\s*""\s*\)',
                 # Obsolete patterns
-                r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
+                r'("|\')signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'yt\.akamaized\.net/\)\s*\|\|\s*.*?\s*[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?:encodeURIComponent\s*\()?\s*(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'\b[cs]\s*&&\s*[adf]\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'\b[a-zA-Z0-9]+\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(',
-                r'\bc\s*&&\s*a\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
-                r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\(',
                 r'\bc\s*&&\s*[a-zA-Z0-9]+\.set\([^,]+\s*,\s*\([^)]*\)\s*\(\s*(?P<sig>[a-zA-Z0-9$]+)\('),
             jscode, 'Initial JS player signature function name', group='sig')
@ -1572,131 +1599,134 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         initial_function = jsi.extract_function(funcname)
         return lambda s: initial_function([s])
def _cached(self, func, *cache_id):
def inner(*args, **kwargs):
if cache_id not in self._player_cache:
try:
self._player_cache[cache_id] = func(*args, **kwargs)
except ExtractorError as e:
self._player_cache[cache_id] = e
except Exception as e:
self._player_cache[cache_id] = ExtractorError(traceback.format_exc(), cause=e)
ret = self._player_cache[cache_id]
if isinstance(ret, Exception):
raise ret
return ret
return inner
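
Aside (not part of the diff): a sketch of the _cached() helper's behaviour. Results and failures are both memoised per cache key, so a failing extraction is not retried for the same player/signature. Names below are illustrative.

cache = {}


def cached(func, *cache_id):
    def inner(*args, **kwargs):
        if cache_id not in cache:
            try:
                cache[cache_id] = func(*args, **kwargs)
            except Exception as e:  # the real helper wraps this in ExtractorError
                cache[cache_id] = e
        ret = cache[cache_id]
        if isinstance(ret, Exception):
            raise ret  # re-raise the remembered failure
        return ret
    return inner


calls = []
f = cached(lambda x: calls.append(x) or x * 2, 'demo-key')
print(f(21), f(21), len(calls))  # -> 42 42 1 (second call served from cache)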
     def _decrypt_signature(self, s, video_id, player_url):
         """Turn the encrypted s field into a working signature"""
-
-        if player_url is None:
-            raise ExtractorError('Cannot decrypt signature without player_url')
-
-        try:
-            player_id = (player_url, self._signature_cache_id(s))
-            if player_id not in self._player_cache:
-                func = self._extract_signature_function(
-                    video_id, player_url, s
-                )
-                self._player_cache[player_id] = func
-            func = self._player_cache[player_id]
-            if self._downloader.params.get('youtube_print_sig_code'):
-                self._print_sig_code(func, s)
-            return func(s)
-        except Exception as e:
-            tb = traceback.format_exc()
-            raise ExtractorError(
-                'Signature extraction failed: ' + tb, cause=e)
-
-    def _extract_player_url(self, webpage):
-        player_url = self._search_regex(
-            r'"(?:PLAYER_JS_URL|jsUrl)"\s*:\s*"([^"]+)"',
-            webpage or '', 'player URL', fatal=False)
-        if not player_url:
-            return
-        if player_url.startswith('//'):
-            player_url = 'https:' + player_url
-        elif not re.match(r'https?://', player_url):
-            player_url = compat_urllib_parse.urljoin(
-                'https://www.youtube.com', player_url)
-        return player_url
+        extract_sig = self._cached(
+            self._extract_signature_function, 'sig', player_url, self._signature_cache_id(s))
+        func = extract_sig(video_id, player_url, s)
+        self._print_sig_code(func, s)
+        return func(s)

     # from yt-dlp
     # See also:
     # 1. https://github.com/ytdl-org/youtube-dl/issues/29326#issuecomment-894619419
     # 2. https://code.videolan.org/videolan/vlc/-/blob/4fb284e5af69aa9ac2100ccbdd3b88debec9987f/share/lua/playlist/youtube.lua#L116
     # 3. https://github.com/ytdl-org/youtube-dl/issues/30097#issuecomment-950157377
-    def _extract_n_function_name(self, jscode):
-        target = r'(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?'
-        nfunc_and_idx = self._search_regex(
-            r'\.get\("n"\)\)&&\(b=(%s)\([\w$]+\)' % (target, ),
-            jscode, 'Initial JS player n function name')
-        nfunc, idx = re.match(target, nfunc_and_idx).group('nfunc', 'idx')
-        if not idx:
-            return nfunc
-        if int_or_none(idx) == 0:
-            real_nfunc = self._search_regex(
-                r'var %s\s*=\s*\[([a-zA-Z_$][\w$]*)\];' % (re.escape(nfunc), ), jscode,
-                'Initial JS player n function alias ({nfunc}[{idx}])'.format(**locals()))
-            if real_nfunc:
-                return real_nfunc
-        return self._parse_json(self._search_regex(
-            r'var %s\s*=\s*(\[.+?\]);' % (re.escape(nfunc), ), jscode,
-            'Initial JS player n function name ({nfunc}[{idx}])'.format(**locals())), nfunc, transform_source=js_to_json)[int(idx)]
-
-    def _extract_n_function(self, video_id, player_url):
-        player_id = self._extract_player_info(player_url)
-        func_code = self._downloader.cache.load('youtube-nsig', player_id)
-
-        if func_code:
-            jsi = JSInterpreter(func_code)
-        else:
-            jscode = self._get_player_code(video_id, player_url, player_id)
-            funcname = self._extract_n_function_name(jscode)
-            jsi = JSInterpreter(jscode)
-            func_code = jsi.extract_function_code(funcname)
-            self._downloader.cache.store('youtube-nsig', player_id, func_code)
-
-        if self._downloader.params.get('youtube_print_sig_code'):
-            self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(player_id, func_code[1]))
-
-        return lambda s: jsi.extract_function_from_code(*func_code)([s])
-
-    def _n_descramble(self, n_param, player_url, video_id):
-        """Compute the response to YT's "n" parameter challenge, or None
-
-        Args:
-        n_param -- challenge string that is the value of the
-        URL's "n" query parameter
-        player_url -- URL of YT player JS
-        video_id
-        """
-
-        sig_id = ('nsig_value', n_param)
-        if sig_id in self._player_cache:
-            return self._player_cache[sig_id]
-
-        try:
-            player_id = ('nsig', player_url)
-            if player_id not in self._player_cache:
-                self._player_cache[player_id] = self._extract_n_function(video_id, player_url)
-            func = self._player_cache[player_id]
-            ret = func(n_param)
-            if ret.startswith('enhanced_except_'):
-                raise ExtractorError('Unhandled exception in decode')
-            self._player_cache[sig_id] = ret
-            if self._downloader.params.get('verbose', False):
-                self._downloader.to_screen('[debug] [%s] %s' % (self.IE_NAME, 'Decrypted nsig {0} => {1}'.format(n_param, self._player_cache[sig_id])))
-            return self._player_cache[sig_id]
-        except Exception as e:
-            self._downloader.report_warning(
-                '[%s] %s (%s %s)' % (
-                    self.IE_NAME,
-                    'Unable to decode n-parameter: download likely to be throttled',
-                    error_to_compat_str(e),
-                    traceback.format_exc()))
+    def _decrypt_nsig(self, n, video_id, player_url):
+        """Turn the encrypted n field into a working signature"""
+        if player_url is None:
+            raise ExtractorError('Cannot decrypt nsig without player_url')
+
+        try:
+            jsi, player_id, func_code = self._extract_n_function_code(video_id, player_url)
+        except ExtractorError as e:
+            raise ExtractorError('Unable to extract nsig function code', cause=e)
+        if self.get_param('youtube_print_sig_code'):
+            self.to_screen('Extracted nsig function from {0}:\n{1}\n'.format(
+                player_id, func_code[1]))
+
+        try:
+            extract_nsig = self._cached(self._extract_n_function_from_code, 'nsig func', player_url)
+            ret = extract_nsig(jsi, func_code)(n)
+        except JSInterpreter.Exception as e:
+            self.report_warning(
+                '%s (%s %s)' % (
+                    self.__ie_msg(
+                        'Unable to decode n-parameter: download likely to be throttled'),
+                    error_to_compat_str(e),
+                    traceback.format_exc()))
+            return
+
+        self.write_debug('Decrypted nsig {0} => {1}'.format(n, ret))
+        return ret
def _extract_n_function_name(self, jscode):
func_name, idx = self._search_regex(
r'\.get\("n"\)\)&&\(b=(?P<nfunc>[a-zA-Z_$][\w$]*)(?:\[(?P<idx>\d+)\])?\([\w$]+\)',
jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
if not idx:
return func_name
return self._parse_json(self._search_regex(
r'var {0}\s*=\s*(\[.+?\])\s*[,;]'.format(re.escape(func_name)), jscode,
'Initial JS player n function list ({0}.{1})'.format(func_name, idx)),
func_name, transform_source=js_to_json)[int(idx)]
def _extract_n_function_code(self, video_id, player_url):
player_id = self._extract_player_info(player_url)
func_code = self.cache.load('youtube-nsig', player_id)
jscode = func_code or self._load_player(video_id, player_url)
jsi = JSInterpreter(jscode)
if func_code:
return jsi, player_id, func_code
func_name = self._extract_n_function_name(jscode)
# For redundancy
func_code = self._search_regex(
r'''(?xs)%s\s*=\s*function\s*\((?P<var>[\w$]+)\)\s*
# NB: The end of the regex is intentionally kept strict
{(?P<code>.+?}\s*return\ [\w$]+.join\(""\))};''' % func_name,
jscode, 'nsig function', group=('var', 'code'), default=None)
if func_code:
func_code = ([func_code[0]], func_code[1])
else:
self.write_debug('Extracting nsig function with jsinterp')
func_code = jsi.extract_function_code(func_name)
self.cache.store('youtube-nsig', player_id, func_code)
return jsi, player_id, func_code
def _extract_n_function_from_code(self, jsi, func_code):
func = jsi.extract_function_from_code(*func_code)
def extract_nsig(s):
try:
ret = func([s])
except JSInterpreter.Exception:
raise
except Exception as e:
raise JSInterpreter.Exception(traceback.format_exc(), cause=e)
if ret.startswith('enhanced_except_'):
raise JSInterpreter.Exception('Signature function returned an exception')
return ret
return extract_nsig
-    def _unthrottle_format_urls(self, video_id, player_url, formats):
+    def _unthrottle_format_urls(self, video_id, player_url, *formats):
+
+        def decrypt_nsig(n):
+            return self._cached(self._decrypt_nsig, 'nsig', n, player_url)
+
         for fmt in formats:
             parsed_fmt_url = compat_urllib_parse.urlparse(fmt['url'])
             n_param = compat_parse_qs(parsed_fmt_url.query).get('n')
             if not n_param:
                 continue
             n_param = n_param[-1]
-            n_response = self._n_descramble(n_param, player_url, video_id)
+            n_response = decrypt_nsig(n_param)(n_param, video_id, player_url)
             if n_response is None:
                 # give up if descrambling failed
                 break
-            for fmt_dct in traverse_obj(fmt, (None, (None, ('fragments', Ellipsis))), expected_type=dict):
-                fmt_dct['url'] = update_url(
-                    fmt_dct['url'], query_update={'n': [n_response]})
+            fmt['url'] = update_url_query(fmt['url'], {'n': n_response})
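
Aside (not part of the diff): an illustrative sketch of what the n-parameter dance above achieves: pull 'n' out of a format URL's query string, transform it, and write the transformed value back. The descrambler below is a stand-in; the real one is a function extracted from the player JS.

try:
    from urllib.parse import parse_qs, urlparse, urlencode, urlunparse
except ImportError:  # Python 2, per the codebase's compat layer
    from urlparse import parse_qs, urlparse, urlunparse
    from urllib import urlencode

fmt_url = 'https://example.googlevideo.com/videoplayback?itag=22&n=abcDEF123'

parsed = urlparse(fmt_url)
qs = parse_qs(parsed.query)
n_param = qs['n'][-1]
qs['n'] = ['descrambled-' + n_param]  # stand-in for decrypt_nsig(...)
print(urlunparse(parsed._replace(query=urlencode(qs, doseq=True))))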
     # from yt-dlp, with tweaks
     def _extract_signature_timestamp(self, video_id, player_url, ytcfg=None, fatal=False):

 @ -1704,16 +1734,16 @@ class YoutubeIE(YoutubeBaseInfoExtractor):

         Extract signatureTimestamp (sts)
         Required to tell API what sig/player version is in use.
         """
-        sts = int_or_none(ytcfg.get('STS')) if isinstance(ytcfg, dict) else None
+        sts = traverse_obj(ytcfg, 'STS', expected_type=int)
         if not sts:
             # Attempt to extract from player
             if player_url is None:
                 error_msg = 'Cannot extract signature timestamp without player_url.'
                 if fatal:
                     raise ExtractorError(error_msg)
-                self._downloader.report_warning(error_msg)
+                self.report_warning(error_msg)
                 return
-            code = self._get_player_code(video_id, player_url)
+            code = self._load_player(video_id, player_url, fatal=fatal)
             sts = int_or_none(self._search_regex(
                 r'(?:signatureTimestamp|sts)\s*:\s*(?P<sts>[0-9]{5})', code or '',
                 'JS player signature timestamp', group='sts', fatal=fatal))
@ -1729,12 +1759,18 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # cpn generation algorithm is reverse engineered from base.js.
         # In fact it works even with dummy cpn.
         CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
-        cpn = ''.join((CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16)))
+        cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))

-        playback_url = update_url(
-            playback_url, query_update={
-                'ver': ['2'],
-                'cpn': [cpn],
+        # more consistent results setting it to right before the end
+        qs = parse_qs(playback_url)
+        video_length = '{0}'.format(float((qs.get('len') or ['1.5'])[0]) - 1)
+
+        playback_url = update_url_query(
+            playback_url, {
+                'ver': '2',
+                'cpn': cpn,
+                'cmt': video_length,
+                'el': 'detailpage',  # otherwise defaults to "shorts"
             })

         self._download_webpage(
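
Aside (not part of the diff): the cpn generation shown above, rendered standalone: 16 characters drawn from a 64-symbol base64url-style alphabet. random.randint(0, 256) & 63 keeps only the low 6 bits, i.e. an index 0-63.

import random

CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
print(cpn)  # 16 chars; dummy values are accepted, per the comment above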
@ -1982,115 +2018,182 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         else:
             self.to_screen('Downloading just video %s because of --no-playlist' % video_id)

+        if not player_url:
+            player_url = self._extract_player_url(webpage)
+
         formats = []
-        itags = []
+        itags = collections.defaultdict(set)
         itag_qualities = {}
         q = qualities(['tiny', 'small', 'medium', 'large', 'hd720', 'hd1080', 'hd1440', 'hd2160', 'hd2880', 'highres'])
+        CHUNK_SIZE = 10 << 20

         streaming_data = player_response.get('streamingData') or {}
         streaming_formats = streaming_data.get('formats') or []
         streaming_formats.extend(streaming_data.get('adaptiveFormats') or [])
def build_fragments(f):
return LazyList({
'url': update_url_query(f['url'], {
'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
})
} for range_start in range(0, f['filesize'], CHUNK_SIZE))
lower = lambda s: s.lower()
         for fmt in streaming_formats:
-            if fmt.get('targetDurationSec') or fmt.get('drmFamilies'):
+            if fmt.get('targetDurationSec'):
                 continue

             itag = str_or_none(fmt.get('itag'))
-            quality = fmt.get('quality')
-            if itag and quality:
+            audio_track = traverse_obj(fmt, ('audioTrack', T(dict))) or {}
+
+            quality = traverse_obj(fmt, ((
+                # The 3gp format (17) in android client has a quality of "small",
+                # but is actually worse than other formats
+                T(lambda _: 'tiny' if itag == 17 else None),
+                ('quality', T(lambda q: q if q and q != 'tiny' else None)),
+                ('audioQuality', T(lower)),
+                'quality'), T(txt_or_none)), get_all=False)
+            if quality and itag:
                 itag_qualities[itag] = quality
+
             # FORMAT_STREAM_TYPE_OTF(otf=1) requires downloading the init fragment
             # (adding `&sq=0` to the URL) and parsing emsg box to determine the
-            # number of fragment that would subsequently requested with (`&sq=N`)
+            # number of fragments that would subsequently be requested with (`&sq=N`)
             if fmt.get('type') == 'FORMAT_STREAM_TYPE_OTF':
                 continue

             fmt_url = fmt.get('url')
             if not fmt_url:
                 sc = compat_parse_qs(fmt.get('signatureCipher'))
-                fmt_url = url_or_none(try_get(sc, lambda x: x['url'][0]))
-                encrypted_sig = try_get(sc, lambda x: x['s'][0])
-                if not (sc and fmt_url and encrypted_sig):
+                fmt_url = traverse_obj(sc, ('url', -1, T(url_or_none)))
+                encrypted_sig = traverse_obj(sc, ('s', -1))
+                if not (fmt_url and encrypted_sig):
                     continue
if not player_url: player_url = player_url or self._extract_player_url(webpage)
player_url = self._extract_player_url(webpage)
if not player_url: if not player_url:
continue continue
signature = self._decrypt_signature(sc['s'][0], video_id, player_url) try:
sp = try_get(sc, lambda x: x['sp'][0]) or 'signature' fmt_url = update_url_query(fmt_url, {
fmt_url += '&' + sp + '=' + signature traverse_obj(sc, ('sp', -1)) or 'signature':
[self._decrypt_signature(encrypted_sig, video_id, player_url)],
})
except ExtractorError as e:
self.report_warning('Signature extraction failed: Some formats may be missing',
video_id=video_id, only_once=True)
self.write_debug(error_to_compat_str(e), only_once=True)
continue
if itag: language_preference = (
itags.append(itag) 10 if audio_track.get('audioIsDefault')
tbr = float_or_none( else -10 if 'descriptive' in (traverse_obj(audio_track, ('displayName', T(lower))) or '')
fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) else -1)
name = (
traverse_obj(fmt, ('qualityLabel', T(txt_or_none)))
or quality.replace('audio_quality_', ''))
            dct = {
-               'asr': int_or_none(fmt.get('audioSampleRate')),
-               'filesize': int_or_none(fmt.get('contentLength')),
-               'format_id': itag,
-               'format_note': fmt.get('qualityLabel') or quality,
-               'fps': int_or_none(fmt.get('fps')),
-               'height': int_or_none(fmt.get('height')),
-               'quality': q(quality),
-               'tbr': tbr,
+               'format_id': join_nonempty(itag, fmt.get('isDrc') and 'drc'),
                'url': fmt_url,
-               'width': fmt.get('width'),
+               # Format 22 is likely to be damaged: see https://github.com/yt-dlp/yt-dlp/issues/3372
+               'source_preference': ((-5 if itag == '22' else -1)
+                                     + (100 if 'Premium' in name else 0)),
+               'quality': q(quality),
+               'language': join_nonempty(audio_track.get('id', '').split('.')[0],
+                                         'desc' if language_preference < -1 else '') or None,
+               'language_preference': language_preference,
+               # Strictly de-prioritize 3gp formats
+               'preference': -2 if itag == '17' else None,
            }
-           mimetype = fmt.get('mimeType')
-           if mimetype:
-               mobj = re.match(
-                   r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', mimetype)
-               if mobj:
-                   dct['ext'] = mimetype2ext(mobj.group(1))
-                   dct.update(parse_codecs(mobj.group(2)))
-           no_audio = dct.get('acodec') == 'none'
-           no_video = dct.get('vcodec') == 'none'
-           if no_audio:
-               dct['vbr'] = tbr
-           if no_video:
-               dct['abr'] = tbr
-           if no_audio or no_video:
-               CHUNK_SIZE = 10 << 20
+           if itag:
+               itags[itag].add(('https', dct.get('language')))
+           self._unthrottle_format_urls(video_id, player_url, dct)
+           dct.update(traverse_obj(fmt, {
+               'asr': ('audioSampleRate', T(int_or_none)),
+               'filesize': ('contentLength', T(int_or_none)),
+               'format_note': ('qualityLabel', T(lambda x: x or quality)),
+               # for some formats, fps is wrongly returned as 1
+               'fps': ('fps', T(int_or_none), T(lambda f: f if f > 1 else None)),
+               'audio_channels': ('audioChannels', T(int_or_none)),
+               'height': ('height', T(int_or_none)),
+               'has_drm': ('drmFamilies', T(bool)),
+               'tbr': (('averageBitrate', 'bitrate'), T(lambda t: float_or_none(t, 1000))),
+               'width': ('width', T(int_or_none)),
+               '_duration_ms': ('approxDurationMs', T(int_or_none)),
+           }, get_all=False))
+
+           mime_mobj = re.match(
+               r'((?:[^/]+)/(?:[^;]+))(?:;\s*codecs="([^"]+)")?', fmt.get('mimeType') or '')
+           if mime_mobj:
+               dct['ext'] = mimetype2ext(mime_mobj.group(1))
+               dct.update(parse_codecs(mime_mobj.group(2)))
+
+           single_stream = 'none' in (dct.get(c) for c in ('acodec', 'vcodec'))
+           if single_stream and dct.get('ext'):
+               dct['container'] = dct['ext'] + '_dash'
+           if single_stream or itag == '17':
                # avoid Youtube throttling
                dct.update({
                    'protocol': 'http_dash_segments',
-                   'fragments': [{
-                       'url': update_url_query(dct['url'], {
-                           'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, dct['filesize']))
-                       })
-                   } for range_start in range(0, dct['filesize'], CHUNK_SIZE)]
+                   'fragments': build_fragments(dct),
                } if dct['filesize'] else {
                    'downloader_options': {'http_chunk_size': CHUNK_SIZE}  # No longer useful?
                })
-           if dct.get('ext'):
-               dct['container'] = dct['ext'] + '_dash'

            formats.append(dct)
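
(Aside: the language_preference policy assigned in this loop reduces to a small rule: the default audio track sorts first, 'descriptive' tracks sort last, everything else sits in between. A hedged sketch, with language_preference as a free function purely for illustration:)

    def language_preference(audio_track):
        if audio_track.get('audioIsDefault'):
            return 10
        if 'descriptive' in (audio_track.get('displayName') or '').lower():
            return -10
        return -1

    print(language_preference({'audioIsDefault': True}))                # 10
    print(language_preference({'displayName': 'English descriptive'}))  # -10
    print(language_preference({'displayName': 'French'}))               # -1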
+       def process_manifest_format(f, proto, client_name, itag, all_formats=False):
+           key = (proto, f.get('language'))
+           if not all_formats and key in itags[itag]:
+               return False
+           itags[itag].add(key)
+
+           if itag:
+               f['format_id'] = (
+                   '{0}-{1}'.format(itag, proto)
+                   if all_formats or any(p != proto for p, _ in itags[itag])
+                   else itag)
+
+           if f.get('source_preference') is None:
+               f['source_preference'] = -1
+
+           if itag in ('616', '235'):
+               f['format_note'] = join_nonempty(f.get('format_note'), 'Premium', delim=' ')
+               f['source_preference'] += 100
+
+           f['quality'] = q(traverse_obj(f, (
+               'format_id', T(lambda s: itag_qualities[s.split('-')[0]])), default=-1))
+           if try_call(lambda: f['fps'] <= 1):
+               del f['fps']
+
+           if proto == 'hls' and f.get('has_drm'):
+               f['has_drm'] = 'maybe'
+               f['source_preference'] -= 5
+           return True
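
(Aside: the dedup rule in process_manifest_format keys on (protocol, language) per itag, so the same itag may appear once per protocol/language combination but never twice for the same one. A standalone sketch of just that rule, with seen_before as an illustrative name:)

    import collections

    itags = collections.defaultdict(set)

    def seen_before(itag, proto, language):
        key = (proto, language)
        if key in itags[itag]:
            return True
        itags[itag].add(key)
        return False

    print(seen_before('137', 'https', None))  # False: first sighting
    print(seen_before('137', 'dash', None))   # False: same itag, new protocol
    print(seen_before('137', 'dash', None))   # True: duplicate, would be dropped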
        hls_manifest_url = streaming_data.get('hlsManifestUrl')
        if hls_manifest_url:
            for f in self._extract_m3u8_formats(
                    hls_manifest_url, video_id, 'mp4', fatal=False):
-               itag = self._search_regex(
-                   r'/itag/(\d+)', f['url'], 'itag', default=None)
-               if itag:
-                   f['format_id'] = itag
-               formats.append(f)
+               if process_manifest_format(
+                       f, 'hls', None, self._search_regex(
+                           r'/itag/(\d+)', f['url'], 'itag', default=None)):
+                   formats.append(f)

        if self._downloader.params.get('youtube_include_dash_manifest', True):
            dash_manifest_url = streaming_data.get('dashManifestUrl')
            if dash_manifest_url:
                for f in self._extract_mpd_formats(
                        dash_manifest_url, video_id, fatal=False):
-                   itag = f['format_id']
-                   if itag in itags:
-                       continue
-                   if itag in itag_qualities:
-                       f['quality'] = q(itag_qualities[itag])
-                   filesize = int_or_none(self._search_regex(
-                       r'/clen/(\d+)', f.get('fragment_base_url')
-                       or f['url'], 'file size', default=None))
-                   if filesize:
-                       f['filesize'] = filesize
-                   formats.append(f)
+                   if process_manifest_format(
+                           f, 'dash', None, f['format_id']):
+                       f['filesize'] = traverse_obj(f, (
+                           ('fragment_base_url', 'url'), T(lambda u: self._search_regex(
+                               r'/clen/(\d+)', u, 'file size', default=None)),
+                           T(int_or_none)), get_all=False)
+                       formats.append(f)
+
+       playable_formats = [f for f in formats if not f.get('has_drm')]
+       if formats and not playable_formats:
+           # If there are no formats that definitely don't have DRM, all have DRM
+           self.report_drm(video_id)
+       formats[:] = playable_formats

        if not formats:
            if streaming_data.get('licenseInfos'):
@@ -2162,6 +2265,17 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
            video_details.get('lengthSeconds')
            or microformat.get('lengthSeconds')) \
            or parse_duration(search_meta('duration'))

+       for f in formats:
+           # Some formats may have much smaller duration than others (possibly damaged during encoding)
+           # but avoid false positives with small duration differences.
+           # Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
+           if try_call(lambda x: float(x.pop('_duration_ms')) / duration < 500, args=(f,)):
+               self.report_warning(
+                   '{0}: Some possibly damaged formats will be deprioritized'.format(video_id), only_once=True)
+               # Strictly de-prioritize damaged formats
+               f['preference'] = -10
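
(Aside: the ratio test reads oddly at first: _duration_ms is in milliseconds and duration in seconds, so a healthy format scores about 1000, and anything under 500 covers less than roughly half the video. A sketch, with looks_damaged as an illustrative name:)

    def looks_damaged(duration_ms, video_duration_s):
        # ms / s is ~1000 when the stream spans the whole video
        return duration_ms / video_duration_s < 500

    print(looks_damaged(600000, 600))  # False: 10 min stream for a 10 min video
    print(looks_damaged(90000, 600))   # True: 1.5 min stream for a 10 min video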
        is_live = video_details.get('isLive')

        owner_profile_url = self._yt_urljoin(self._extract_author_var(
@@ -2170,10 +2284,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
        uploader = self._extract_author_var(
            webpage, 'name', videodetails=video_details, metadata=microformat)

-       if not player_url:
-           player_url = self._extract_player_url(webpage)
-       self._unthrottle_format_urls(video_id, player_url, formats)

        info = {
            'id': video_id,
            'title': self._live_title(video_title) if is_live else video_title,
@@ -2366,6 +2476,14 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                    'like_count': str_to_int(like_count),
                    'dislike_count': str_to_int(dislike_count),
                })
+           else:
+               info['like_count'] = traverse_obj(vpir, (
+                   'videoActions', 'menuRenderer', 'topLevelButtons', Ellipsis,
+                   'segmentedLikeDislikeButtonViewModel', 'likeButtonViewModel', 'likeButtonViewModel',
+                   'toggleButtonViewModel', 'toggleButtonViewModel', 'defaultButtonViewModel',
+                   'buttonViewModel', (('title', ('accessibilityText', T(lambda s: s.split()), Ellipsis))), T(parse_count)),
+                   get_all=False)
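
(Aside: traverse_obj walks a nested structure along a path of keys, where Ellipsis fans out over every branch and T(...) applies a transform. A much-simplified stand-in, dig, shows how the like-count path above maps onto plain dict lookups; the sample data is invented for illustration:)

    def dig(obj, *path):
        # minimal stand-in for traverse_obj: follow dict keys, give up on None
        for key in path:
            if not isinstance(obj, dict):
                return None
            obj = obj.get(key)
        return obj

    button = {'buttonViewModel': {'title': '123K'}}
    toggle = {'toggleButtonViewModel': {'toggleButtonViewModel': {'defaultButtonViewModel': button}}}
    like = {'likeButtonViewModel': {'likeButtonViewModel': toggle}}
    vpir = {'videoActions': {'menuRenderer': {'topLevelButtons': [
        {'segmentedLikeDislikeButtonViewModel': like}]}}}

    top = dig(vpir, 'videoActions', 'menuRenderer')['topLevelButtons'][0]
    found = dig(top, 'segmentedLikeDislikeButtonViewModel',
                'likeButtonViewModel', 'likeButtonViewModel',
                'toggleButtonViewModel', 'toggleButtonViewModel',
                'defaultButtonViewModel', 'buttonViewModel')
    print(found['title'])  # '123K'; parse_count('123K') would yield 123000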
        vsir = content.get('videoSecondaryInfoRenderer')
        if vsir:
            rows = try_get(
@@ -2480,7 +2598,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
        'playlist_mincount': 94,
        'info_dict': {
            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
-           'title': 'Igor Kleiner - Playlists',
+           'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
            'uploader': 'Igor Kleiner',
            'uploader_id': '@IgorDataScience',
@@ -2491,7 +2609,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
        'playlist_mincount': 94,
        'info_dict': {
            'id': 'UCqj7Cz7revf5maW9g5pgNcg',
-           'title': 'Igor Kleiner - Playlists',
+           'title': r're:Igor Kleiner(?: Ph\.D\.)? - Playlists',
            'description': 'md5:be97ee0f14ee314f1f002cf187166ee2',
            'uploader': 'Igor Kleiner',
            'uploader_id': '@IgorDataScience',
@@ -2603,12 +2721,23 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
        'url': 'https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w/channels',
        'info_dict': {
            'id': 'UCKfVa3S1e4PHvxWcwyMMg8w',
-           'title': 'lex will - Channels',
+           'title': r're:lex will - (?:Home|Channels)',
            'description': 'md5:2163c5d0ff54ed5f598d6a7e6211e488',
            'uploader': 'lex will',
            'uploader_id': '@lexwill718',
        },
        'playlist_mincount': 75,
+   }, {
+       # Releases tab
+       'url': 'https://www.youtube.com/@daftpunk/releases',
+       'info_dict': {
+           'id': 'UC_kRDKYrUlrbtrSiyu5Tflg',
+           'title': 'Daft Punk - Releases',
+           'description': 'Daft Punk (1993 - 2021) - Official YouTube Channel',
+           'uploader_id': '@daftpunk',
+           'uploader': 'Daft Punk',
+       },
+       'playlist_mincount': 36,
    }, {
        'url': 'https://invidio.us/channel/UCmlqkdCBesrv2Lak1mF_MxA',
        'only_matching': True,
@@ -2823,6 +2952,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                continue
            return renderer

+   @staticmethod
+   def _get_text(r, k):
+       return traverse_obj(
+           r, (k, 'runs', 0, 'text'), (k, 'simpleText'),
+           expected_type=txt_or_none)
+
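
(Aside: YouTube renderers carry text either as {'simpleText': ...} or as {'runs': [{'text': ...}, ...]}; _get_text tries the runs path first and falls back to simpleText. A plain-dict imitation, get_text, for illustration only:)

    def get_text(renderer, key):
        node = renderer.get(key) or {}
        runs = node.get('runs') or []
        if runs:
            return runs[0].get('text')
        return node.get('simpleText')

    print(get_text({'title': {'simpleText': 'My playlist'}}, 'title'))       # 'My playlist'
    print(get_text({'title': {'runs': [{'text': 'My channel'}]}}, 'title'))  # 'My channel'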
    def _grid_entries(self, grid_renderer):
        for item in grid_renderer['items']:
            if not isinstance(item, dict):
@@ -2830,9 +2965,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            renderer = self._extract_grid_item_renderer(item)
            if not isinstance(renderer, dict):
                continue
-           title = try_get(
-               renderer, (lambda x: x['title']['runs'][0]['text'],
-                          lambda x: x['title']['simpleText']), compat_str)
+           title = self._get_text(renderer, 'title')
            # playlist
            playlist_id = renderer.get('playlistId')
            if playlist_id:
@@ -2849,8 +2982,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            # channel
            channel_id = renderer.get('channelId')
            if channel_id:
-               title = try_get(
-                   renderer, lambda x: x['title']['simpleText'], compat_str)
+               title = self._get_text(renderer, 'title')
                yield self.url_result(
                    'https://www.youtube.com/channel/%s' % channel_id,
                    ie=YoutubeTabIE.ie_key(), video_title=title)
@@ -2959,15 +3091,26 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
    def _rich_grid_entries(self, contents):
        for content in contents:
-           video_renderer = try_get(
-               content,
-               (lambda x: x['richItemRenderer']['content']['videoRenderer'],
-                lambda x: x['richItemRenderer']['content']['reelItemRenderer']),
-               dict)
+           content = traverse_obj(
+               content, ('richItemRenderer', 'content'),
+               expected_type=dict) or {}
+           video_renderer = traverse_obj(
+               content, 'videoRenderer', 'reelItemRenderer',
+               expected_type=dict)
            if video_renderer:
                entry = self._video_entry(video_renderer)
                if entry:
                    yield entry
+           # playlist
+           renderer = traverse_obj(
+               content, 'playlistRenderer', expected_type=dict) or {}
+           title = self._get_text(renderer, 'title')
+           playlist_id = renderer.get('playlistId')
+           if playlist_id:
+               yield self.url_result(
+                   'https://www.youtube.com/playlist?list=%s' % playlist_id,
+                   ie=YoutubeTabIE.ie_key(), video_id=playlist_id,
+                   video_title=title)
    @staticmethod
    def _build_continuation_query(continuation, ctp=None):
@@ -3072,6 +3215,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
                    return
                for entry in self._rich_grid_entries(rich_grid_renderer.get('contents') or []):
                    yield entry
                continuation = self._extract_continuation(rich_grid_renderer)

        ytcfg = self._extract_ytcfg(item_id, webpage)
@@ -3214,50 +3358,41 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
            uploader['channel'] = uploader['uploader']
        return uploader

-   @staticmethod
-   def _extract_alert(data):
+   @classmethod
+   def _extract_alert(cls, data):
        alerts = []
-       for alert in try_get(data, lambda x: x['alerts'], list) or []:
-           if not isinstance(alert, dict):
-               continue
-           alert_text = try_get(
-               alert, lambda x: x['alertRenderer']['text'], dict)
+       for alert in traverse_obj(data, ('alerts', Ellipsis), expected_type=dict):
+           alert_text = traverse_obj(
+               alert, (None, lambda x: x['alertRenderer']['text']), get_all=False)
            if not alert_text:
                continue
-           text = try_get(
-               alert_text,
-               (lambda x: x['simpleText'], lambda x: x['runs'][0]['text']),
-               compat_str)
+           text = cls._get_text(alert_text, 'text')
            if text:
                alerts.append(text)
        return '\n'.join(alerts)
    def _extract_from_tabs(self, item_id, webpage, data, tabs):
        selected_tab = self._extract_selected_tab(tabs)
-       renderer = try_get(
-           data, lambda x: x['metadata']['channelMetadataRenderer'], dict)
+       renderer = traverse_obj(data, ('metadata', 'channelMetadataRenderer'),
+                               expected_type=dict) or {}
        playlist_id = item_id
        title = description = None
        if renderer:
-           channel_title = renderer.get('title') or item_id
-           tab_title = selected_tab.get('title')
-           title = channel_title or item_id
-           if tab_title:
-               title += ' - %s' % tab_title
-           if selected_tab.get('expandedText'):
-               title += ' - %s' % selected_tab['expandedText']
-           description = renderer.get('description')
-           playlist_id = renderer.get('externalId')
+           channel_title = txt_or_none(renderer.get('title')) or item_id
+           tab_title = txt_or_none(selected_tab.get('title'))
+           title = join_nonempty(
+               channel_title or item_id, tab_title,
+               txt_or_none(selected_tab.get('expandedText')),
+               delim=' - ')
+           description = txt_or_none(renderer.get('description'))
+           playlist_id = txt_or_none(renderer.get('externalId')) or playlist_id
        else:
-           renderer = try_get(
-               data, lambda x: x['metadata']['playlistMetadataRenderer'], dict)
-           if renderer:
-               title = renderer.get('title')
-           else:
-               renderer = try_get(
-                   data, lambda x: x['header']['hashtagHeaderRenderer'], dict)
-               if renderer:
-                   title = try_get(renderer, lambda x: x['hashtag']['simpleText'])
+           renderer = traverse_obj(data,
+               ('metadata', 'playlistMetadataRenderer'),
+               ('header', 'hashtagHeaderRenderer'),
+               expected_type=dict) or {}
+           title = traverse_obj(renderer, 'title', ('hashtag', 'simpleText'),
+                                expected_type=txt_or_none)
        playlist = self.playlist_result(
            self._entries(selected_tab, item_id, webpage),
            playlist_id=playlist_id, playlist_title=title,
@@ -3265,15 +3400,16 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
        return merge_dicts(playlist, self._extract_uploader(renderer, data))

    def _extract_from_playlist(self, item_id, url, data, playlist):
-       title = playlist.get('title') or try_get(
-           data, lambda x: x['titleText']['simpleText'], compat_str)
-       playlist_id = playlist.get('playlistId') or item_id
+       title = traverse_obj((playlist, data),
+           (0, 'title'), (1, 'titleText', 'simpleText'),
+           expected_type=txt_or_none)
+       playlist_id = txt_or_none(playlist.get('playlistId')) or item_id
        # Inline playlist rendition continuation does not always work
        # at Youtube side, so delegating regular tab-based playlist URL
        # processing whenever possible.
-       playlist_url = urljoin(url, try_get(
-           playlist, lambda x: x['endpoint']['commandMetadata']['webCommandMetadata']['url'],
-           compat_str))
+       playlist_url = urljoin(url, traverse_obj(
+           playlist, ('endpoint', 'commandMetadata', 'webCommandMetadata', 'url'),
+           expected_type=url_or_none))
        if playlist_url and playlist_url != url:
            return self.url_result(
                playlist_url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id,

View File

@@ -2,10 +2,11 @@ from __future__ import unicode_literals

import itertools
import json
-import math
import operator
import re
+from functools import update_wrapper

from .utils import (
    error_to_compat_str,
    ExtractorError,
@@ -23,11 +24,46 @@ from .compat import (
)


+# name JS functions
+class function_with_repr(object):
+    # from yt_dlp/utils.py, but in this module
+    # repr_ is always set
+    def __init__(self, func, repr_):
+        update_wrapper(self, func)
+        self.func, self.__repr = func, repr_
+
+    def __call__(self, *args, **kwargs):
+        return self.func(*args, **kwargs)
+
+    def __repr__(self):
+        return self.__repr
+
+
+# name JS operators
+def wraps_op(op):
+
+    def update_and_rename_wrapper(w):
+        f = update_wrapper(w, op)
+        # fn names are str in both Py 2/3
+        f.__name__ = str('JS_') + f.__name__
+        return f
+
+    return update_and_rename_wrapper
+
+
+# NB In principle NaN cannot be checked by membership.
+# Here all NaN values are actually this one, so _NaN is _NaN,
+# although _NaN != _NaN.
+_NaN = float('nan')
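
(Aside: the membership caveat above is standard Python semantics: `in` against a tuple compares by identity before equality, so one shared NaN object is found by membership even though NaN never equals itself:)

    _NaN = float('nan')

    print(_NaN == _NaN)              # False: IEEE NaN never equals itself
    print(_NaN in (None, _NaN))      # True: membership checks identity first
    print(float('nan') in (_NaN,))   # False: a different NaN object is not found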
def _js_bit_op(op):

    def zeroise(x):
-       return 0 if x in (None, JS_Undefined) else x
+       return 0 if x in (None, JS_Undefined, _NaN) else x

+   @wraps_op(op)
    def wrapped(a, b):
        return op(zeroise(a), zeroise(b)) & 0xffffffff
@@ -36,23 +72,24 @@ def _js_bit_op(op):

def _js_arith_op(op):

+   @wraps_op(op)
    def wrapped(a, b):
        if JS_Undefined in (a, b):
-           return float('nan')
+           return _NaN
        return op(a or 0, b or 0)

    return wrapped


def _js_div(a, b):
-   if JS_Undefined in (a, b) or not (a and b):
-       return float('nan')
+   if JS_Undefined in (a, b) or not (a or b):
+       return _NaN
    return operator.truediv(a or 0, b) if b else float('inf')


def _js_mod(a, b):
    if JS_Undefined in (a, b) or not b:
-       return float('nan')
+       return _NaN
    return (a or 0) % b
@@ -60,12 +97,13 @@ def _js_exp(a, b):
    if not b:
        return 1  # even 0 ** 0 !!
    elif JS_Undefined in (a, b):
-       return float('nan')
+       return _NaN
    return (a or 0) ** b


def _js_eq_op(op):

+   @wraps_op(op)
    def wrapped(a, b):
        if set((a, b)) <= set((None, JS_Undefined)):
            return op(a, a)
@@ -76,6 +114,7 @@ def _js_eq_op(op):

def _js_comp_op(op):

+   @wraps_op(op)
    def wrapped(a, b):
        if JS_Undefined in (a, b):
            return False
@@ -90,13 +129,8 @@ def _js_comp_op(op):

def _js_ternary(cndn, if_true=True, if_false=False):
    """Simulate JS's ternary operator (cndn?if_true:if_false)"""
-   if cndn in (False, None, 0, '', JS_Undefined):
+   if cndn in (False, None, 0, '', JS_Undefined, _NaN):
        return if_false
-   try:
-       if math.isnan(cndn):  # NB: NaN cannot be checked by membership
-           return if_false
-   except TypeError:
-       pass
    return if_true
@@ -243,9 +277,20 @@ class JSInterpreter(object):
        def __getattr__(self, name):
            self.__instantiate()
-           if hasattr(self, name):
-               return getattr(self, name)
-           return super(JSInterpreter.JS_RegExp, self).__getattr__(name)
+           # make Py 2.6 conform to its lying documentation
+           if name == 'flags':
+               self.flags = self.__flags
+               return self.flags
+           elif name == 'pattern':
+               self.pattern = self.__pattern_txt
+               return self.pattern
+           elif hasattr(self.__self, name):
+               v = getattr(self.__self, name)
+               setattr(self, name, v)
+               return v
+           elif name in ('groupindex', 'groups'):
+               return 0 if name == 'groupindex' else {}
+           raise AttributeError('{0} has no attribute named {1}'.format(self, name))

    @classmethod
    def regex_flags(cls, expr):
@@ -269,6 +314,8 @@ class JSInterpreter(object):
    def _named_object(self, namespace, obj):
        self.__named_object_counter += 1
        name = '%s%d' % (self._OBJ_NAME, self.__named_object_counter)
+       if callable(obj) and not isinstance(obj, function_with_repr):
+           obj = function_with_repr(obj, 'F<%s>' % (self.__named_object_counter, ))
        namespace[name] = obj
        return name
@@ -356,6 +403,7 @@ class JSInterpreter(object):
                return right_val

        try:
+           # print('Eval:', opfunc.__name__, left_val, right_val)
            return opfunc(left_val, right_val)
        except Exception as e:
            raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e)
@@ -395,6 +443,7 @@ class JSInterpreter(object):
            raise self.Exception('Recursion limit reached')
        allow_recursion -= 1

+       # print('At: ' + stmt[:60])
        should_return = False
        # fails on (eg) if (...) stmt1; else stmt2;
        sub_statements = list(self._separate(stmt, ';')) or ['']
@@ -462,8 +511,15 @@ class JSInterpreter(object):
            expr = self._dump(inner, local_vars) + outer

        if expr.startswith('('):
-           inner, outer = self._separate_at_paren(expr)
-           inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion)
+           m = re.match(r'\((?P<d>[a-z])%(?P<e>[a-z])\.length\+(?P=e)\.length\)%(?P=e)\.length', expr)
+           if m:
+               # short-cut eval of frequently used `(d%e.length+e.length)%e.length`, worth ~6% on `pytest -k test_nsig`
+               outer = None
+               inner, should_abort = self._offset_e_by_d(m.group('d'), m.group('e'), local_vars)
+           else:
+               inner, outer = self._separate_at_paren(expr)
+               inner, should_abort = self.interpret_statement(inner, local_vars, allow_recursion)
            if not outer or should_abort:
                return inner, should_abort or should_return
        else:
@@ -675,7 +731,7 @@ class JSInterpreter(object):
        elif expr == 'undefined':
            return JS_Undefined, should_return
        elif expr == 'NaN':
-           return float('NaN'), should_return
+           return _NaN, should_return
        elif md.get('return'):
            return local_vars[m.group('name')], should_return
@@ -702,9 +758,24 @@ class JSInterpreter(object):
                    continue

                right_expr = separated.pop()
-               while op == '-' and len(separated) > 1 and not separated[-1].strip():
-                   right_expr = '-' + right_expr
-                   separated.pop()
+               # handle operators that are both unary and binary, minimal BODMAS
+               if op in ('+', '-'):
+                   undone = 0
+                   while len(separated) > 1 and not separated[-1].strip():
+                       undone += 1
+                       separated.pop()
+                   if op == '-' and undone % 2 != 0:
+                       right_expr = op + right_expr
+                   left_val = separated[-1]
+                   for dm_op in ('*', '%', '/', '**'):
+                       bodmas = tuple(self._separate(left_val, dm_op, skip_delims=skip_delim))
+                       if len(bodmas) > 1 and not bodmas[-1].strip():
+                           expr = op.join(separated) + op + right_expr
+                           right_expr = None
+                           break
+                   if right_expr is None:
+                       continue

                left_val = self.interpret_expression(op.join(separated), local_vars, allow_recursion)
                return self._operator(op, left_val, right_expr, expr, local_vars, allow_recursion), should_return
@@ -880,16 +951,19 @@ class JSInterpreter(object):
    def extract_object(self, objname):
        _FUNC_NAME_RE = r'''(?:[a-zA-Z$0-9]+|"[a-zA-Z$0-9]+"|'[a-zA-Z$0-9]+')'''
        obj = {}
-       obj_m = re.search(
-           r'''(?x)
-               (?<!this\.)%s\s*=\s*{\s*
-                   (?P<fields>(%s\s*:\s*function\s*\(.*?\)\s*{.*?}(?:,\s*)?)*)
-               }\s*;
-           ''' % (re.escape(objname), _FUNC_NAME_RE),
-           self.code)
-       if not obj_m:
+       fields = None
+       for obj_m in re.finditer(
+               r'''(?xs)
+                   {0}\s*\.\s*{1}|{1}\s*=\s*\{{\s*
+                       (?P<fields>({2}\s*:\s*function\s*\(.*?\)\s*\{{.*?}}(?:,\s*)?)*)
+                   }}\s*;
+               '''.format(_NAME_RE, re.escape(objname), _FUNC_NAME_RE),
+               self.code):
+           fields = obj_m.group('fields')
+           if fields:
+               break
+       else:
            raise self.Exception('Could not find object ' + objname)
-       fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(
            r'''(?x)
@@ -902,6 +976,17 @@ class JSInterpreter(object):
        return obj
+   @staticmethod
+   def _offset_e_by_d(d, e, local_vars):
+       """ Short-cut eval: (d%e.length+e.length)%e.length """
+       try:
+           d = local_vars[d]
+           e = local_vars[e]
+           e = len(e)
+           return _js_mod(_js_mod(d, e) + e, e), False
+       except Exception:
+           return None, True
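
(Aside: the double-mod idiom exists because JavaScript's % keeps the sign of the dividend, so (d % len + len) % len always lands in [0, len). Python's % already behaves that way, so a faithful sketch first emulates the JS remainder via math.fmod:)

    import math

    def js_mod(a, b):
        # JavaScript's % truncates toward zero, keeping the sign of a
        return math.fmod(a, b)

    def offset(d, e_length):
        # (d % e.length + e.length) % e.length: always in [0, e_length)
        return js_mod(js_mod(d, e_length) + e_length, e_length)

    print(offset(-3, 8))  # 5.0: a negative index wraps to a valid offset
    print(offset(11, 8))  # 3.0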
    def extract_function_code(self, funcname):
        """ @returns argnames, code """
        func_m = re.search(
@@ -914,13 +999,15 @@ class JSInterpreter(object):
                \((?P<args>[^)]*)\)\s*
                (?P<code>{.+})''' % {'name': re.escape(funcname)},
            self.code)
-       code, _ = self._separate_at_paren(func_m.group('code'))  # refine the match
        if func_m is None:
            raise self.Exception('Could not find JS function "{funcname}"'.format(**locals()))
+       code, _ = self._separate_at_paren(func_m.group('code'))  # refine the match
        return self.build_arglist(func_m.group('args')), code

    def extract_function(self, funcname):
-       return self.extract_function_from_code(*self.extract_function_code(funcname))
+       return function_with_repr(
+           self.extract_function_from_code(*self.extract_function_code(funcname)),
+           'F<%s>' % (funcname, ))

    def extract_function_from_code(self, argnames, code, *global_stack):
        local_vars = {}

View File

@@ -11,6 +11,7 @@ from .compat import (
    compat_get_terminal_size,
    compat_getenv,
    compat_kwargs,
+   compat_open as open,
    compat_shlex_split,
)
from .utils import (
@@ -41,14 +42,11 @@ def _hide_login_info(opts):
def parseOpts(overrideArguments=None):
    def _readOptions(filename_bytes, default=[]):
        try:
-           optionf = open(filename_bytes)
+           optionf = open(filename_bytes, encoding=preferredencoding())
        except IOError:
            return default  # silently skip if file is not present
        try:
-           # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
            contents = optionf.read()
-           if sys.version_info < (3,):
-               contents = contents.decode(preferredencoding())
            res = compat_shlex_split(contents, comments=True)
        finally:
            optionf.close()
@@ -546,12 +544,14 @@ def parseOpts(overrideArguments=None):
    workarounds.add_option(
        '--referer',
        metavar='URL', dest='referer', default=None,
-       help='Specify a custom referer, use if the video access is restricted to one domain',
+       help='Specify a custom Referer: use if the video access is restricted to one domain',
    )
    workarounds.add_option(
        '--add-header',
        metavar='FIELD:VALUE', dest='headers', action='append',
-       help='Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times',
+       help=('Specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times. '
+             'NB Use --cookies rather than adding a Cookie header if its contents may be sensitive; '
+             'data from a Cookie header will be sent to all domains, not just the one intended')
    )
    workarounds.add_option(
        '--bidi-workaround',
@@ -733,9 +733,13 @@ def parseOpts(overrideArguments=None):
        '--no-part',
        action='store_true', dest='nopart', default=False,
        help='Do not use .part files - write directly into output file')
+   filesystem.add_option(
+       '--mtime',
+       action='store_true', dest='updatetime', default=True,
+       help='Use the Last-modified header to set the file modification time (default)')
    filesystem.add_option(
        '--no-mtime',
-       action='store_false', dest='updatetime', default=True,
+       action='store_false', dest='updatetime',
        help='Do not use the Last-modified header to set the file modification time')
    filesystem.add_option(
        '--write-description',

View File

@@ -18,6 +18,8 @@ from ..utils import (
    shell_quote,
)

+from ..compat import compat_open as open
+

class EmbedThumbnailPPError(PostProcessingError):
    pass

View File

@@ -1,6 +1,5 @@
from __future__ import unicode_literals

-import io
import os
import subprocess
import time
@@ -9,6 +8,7 @@ import re

from .common import AudioConversionError, PostProcessor
+from ..compat import compat_open as open
from ..utils import (
    encodeArgument,
    encodeFilename,
@@ -493,7 +493,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
        chapters = info.get('chapters', [])
        if chapters:
            metadata_filename = replace_extension(filename, 'meta')
-           with io.open(metadata_filename, 'wt', encoding='utf-8') as f:
+           with open(metadata_filename, 'w', encoding='utf-8') as f:
                def ffmpeg_escape(text):
                    return re.sub(r'(=|;|#|\\|\n)', r'\\\1', text)
@@ -636,7 +636,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
            with open(dfxp_file, 'rb') as f:
                srt_data = dfxp2srt(f.read())
-           with io.open(srt_file, 'wt', encoding='utf-8') as f:
+           with open(srt_file, 'w', encoding='utf-8') as f:
                f.write(srt_data)
            old_file = srt_file
@@ -652,7 +652,7 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
            self.run_ffmpeg(old_file, new_file, ['-f', new_format])
-           with io.open(new_file, 'rt', encoding='utf-8') as f:
+           with open(new_file, 'r', encoding='utf-8') as f:
                subs[lang] = {
                    'ext': new_ext,
                    'data': f.read(),

View File

@@ -727,7 +727,7 @@ class SWFInterpreter(object):
                    stack.append(res)
                    continue
-               assert isinstance(obj, (dict, _ScopeDict)),\
+               assert isinstance(obj, (dict, _ScopeDict)), \
                    'Accessing member %r on %r' % (pname, obj)
                res = obj.get(pname, undefined)
                stack.append(res)

View File

@@ -1,6 +1,5 @@
from __future__ import unicode_literals

-import io
import json
import traceback
import hashlib
@@ -9,7 +8,10 @@ import subprocess
import sys
from zipimport import zipimporter

-from .compat import compat_realpath
+from .compat import (
+   compat_open as open,
+   compat_realpath,
+)
from .utils import encode_compat_str
from .version import __version__
@@ -127,7 +129,7 @@ def update_self(to_screen, verbose, opener):
    try:
        bat = os.path.join(directory, 'youtube-dl-updater.bat')
-       with io.open(bat, 'w') as batfile:
+       with open(bat, 'w') as batfile:
            batfile.write('''
@echo off
echo Waiting for file handle to be closed ...

File diff suppressed because it is too large Load Diff