mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-27 17:51:24 +01:00
Compare commits
No commits in common. "6e7c5260491fbc89ad629804ec234329b270786a" and "f428986c63d61166b64cc56332c1c750bad4c0b4" have entirely different histories.
6e7c526049
...
f428986c63
4
.github/workflows/build.yml
vendored
4
.github/workflows/build.yml
vendored
|
@ -245,8 +245,8 @@ jobs:
|
||||||
gsed -i -E '/^curl_cffi.*/d' requirements.txt
|
gsed -i -E '/^curl_cffi.*/d' requirements.txt
|
||||||
python3 -m pip install -U --user --no-binary :all: Pyinstaller>=6.3 -r requirements.txt
|
python3 -m pip install -U --user --no-binary :all: Pyinstaller>=6.3 -r requirements.txt
|
||||||
mkdir curl_cffi_whls curl_cffi_universal2
|
mkdir curl_cffi_whls curl_cffi_universal2
|
||||||
python3 -m pip download --only-binary=:all: --platform macosx_11_0_arm64 'curl_cffi>=0.5.10,<0.6.0' --pre -d curl_cffi_whls
|
python3 -m pip download --only-binary=:all: --platform macosx_11_0_arm64 curl_cffi>=0.5.10,<0.6.0 --pre -d curl_cffi_whls
|
||||||
python3 -m pip download --only-binary=:all: --platform macosx_11_0_x86_64 'curl_cffi>=0.5.10,<0.6.0' --pre -d curl_cffi_whls
|
python3 -m pip download --only-binary=:all: --platform macosx_11_0_x86_64 curl_cffi>=0.5.10,<0.6.0 --pre -d curl_cffi_whls
|
||||||
python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi* -w curl_cffi_universal2
|
python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/curl_cffi* -w curl_cffi_universal2
|
||||||
python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi-* -w curl_cffi_universal2
|
python3 -m delocate.cmd.delocate_fuse curl_cffi_whls/cffi-* -w curl_cffi_universal2
|
||||||
cd curl_cffi_universal2
|
cd curl_cffi_universal2
|
||||||
|
|
32
.github/workflows/core.yml
vendored
32
.github/workflows/core.yml
vendored
|
@ -1,25 +1,5 @@
|
||||||
name: Core Tests
|
name: Core Tests
|
||||||
on:
|
on: [push, pull_request]
|
||||||
push:
|
|
||||||
paths:
|
|
||||||
- .github/**
|
|
||||||
- devscripts/**
|
|
||||||
- test/**
|
|
||||||
- yt_dlp/**.py
|
|
||||||
- '!yt_dlp/extractor/*.py'
|
|
||||||
- yt_dlp/extractor/__init__.py
|
|
||||||
- yt_dlp/extractor/common.py
|
|
||||||
- yt_dlp/extractor/extractors.py
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- .github/**
|
|
||||||
- devscripts/**
|
|
||||||
- test/**
|
|
||||||
- yt_dlp/**.py
|
|
||||||
- '!yt_dlp/extractor/*.py'
|
|
||||||
- yt_dlp/extractor/__init__.py
|
|
||||||
- yt_dlp/extractor/common.py
|
|
||||||
- yt_dlp/extractor/extractors.py
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
|
||||||
|
@ -36,16 +16,20 @@ jobs:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest]
|
os: [ubuntu-latest]
|
||||||
# CPython 3.8 is in quick-test
|
# CPython 3.11 is in quick-test
|
||||||
python-version: ['3.9', '3.10', '3.11', '3.12', pypy-3.8, pypy-3.10]
|
python-version: ['3.8', '3.9', '3.10', '3.12', pypy-3.8, pypy-3.10]
|
||||||
|
run-tests-ext: [sh]
|
||||||
include:
|
include:
|
||||||
# atleast one of each CPython/PyPy tests must be in windows
|
# atleast one of each CPython/PyPy tests must be in windows
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.8'
|
python-version: '3.8'
|
||||||
|
run-tests-ext: bat
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.12'
|
python-version: '3.12'
|
||||||
|
run-tests-ext: bat
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: pypy-3.9
|
python-version: pypy-3.9
|
||||||
|
run-tests-ext: bat
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
@ -58,4 +42,4 @@ jobs:
|
||||||
continue-on-error: False
|
continue-on-error: False
|
||||||
run: |
|
run: |
|
||||||
python3 -m yt_dlp -v || true # Print debug head
|
python3 -m yt_dlp -v || true # Print debug head
|
||||||
python3 ./devscripts/run_tests.py core
|
./devscripts/run_tests.${{ matrix.run-tests-ext }} core
|
||||||
|
|
7
.github/workflows/download.yml
vendored
7
.github/workflows/download.yml
vendored
|
@ -18,7 +18,7 @@ jobs:
|
||||||
run: pip install pytest -r requirements.txt
|
run: pip install pytest -r requirements.txt
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
run: python3 ./devscripts/run_tests.py download
|
run: ./devscripts/run_tests.sh download
|
||||||
|
|
||||||
full:
|
full:
|
||||||
name: Full Download Tests
|
name: Full Download Tests
|
||||||
|
@ -29,12 +29,15 @@ jobs:
|
||||||
matrix:
|
matrix:
|
||||||
os: [ubuntu-latest]
|
os: [ubuntu-latest]
|
||||||
python-version: ['3.10', '3.11', '3.12', pypy-3.8, pypy-3.10]
|
python-version: ['3.10', '3.11', '3.12', pypy-3.8, pypy-3.10]
|
||||||
|
run-tests-ext: [sh]
|
||||||
include:
|
include:
|
||||||
# atleast one of each CPython/PyPy tests must be in windows
|
# atleast one of each CPython/PyPy tests must be in windows
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: '3.8'
|
python-version: '3.8'
|
||||||
|
run-tests-ext: bat
|
||||||
- os: windows-latest
|
- os: windows-latest
|
||||||
python-version: pypy-3.9
|
python-version: pypy-3.9
|
||||||
|
run-tests-ext: bat
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python ${{ matrix.python-version }}
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
@ -45,4 +48,4 @@ jobs:
|
||||||
run: pip install pytest -r requirements.txt
|
run: pip install pytest -r requirements.txt
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
continue-on-error: true
|
continue-on-error: true
|
||||||
run: python3 ./devscripts/run_tests.py download
|
run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} download
|
||||||
|
|
6
.github/workflows/quick-test.yml
vendored
6
.github/workflows/quick-test.yml
vendored
|
@ -10,16 +10,16 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
- name: Set up Python 3.8
|
- name: Set up Python 3.11
|
||||||
uses: actions/setup-python@v4
|
uses: actions/setup-python@v4
|
||||||
with:
|
with:
|
||||||
python-version: '3.8'
|
python-version: '3.11'
|
||||||
- name: Install test requirements
|
- name: Install test requirements
|
||||||
run: pip install pytest -r requirements.txt
|
run: pip install pytest -r requirements.txt
|
||||||
- name: Run tests
|
- name: Run tests
|
||||||
run: |
|
run: |
|
||||||
python3 -m yt_dlp -v || true
|
python3 -m yt_dlp -v || true
|
||||||
python3 ./devscripts/run_tests.py core
|
./devscripts/run_tests.sh core
|
||||||
flake8:
|
flake8:
|
||||||
name: Linter
|
name: Linter
|
||||||
if: "!contains(github.event.head_commit.message, 'ci skip all')"
|
if: "!contains(github.event.head_commit.message, 'ci skip all')"
|
||||||
|
|
|
@ -140,9 +140,12 @@ To run yt-dlp as a developer, you don't need to build anything either. Simply ex
|
||||||
|
|
||||||
python -m yt_dlp
|
python -m yt_dlp
|
||||||
|
|
||||||
To run all the available core tests, use:
|
To run the test, simply invoke your favorite test runner, or execute a test file directly; any of the following work:
|
||||||
|
|
||||||
python devscripts/run_tests.py
|
python -m unittest discover
|
||||||
|
python test/test_download.py
|
||||||
|
nosetests
|
||||||
|
pytest
|
||||||
|
|
||||||
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
See item 6 of [new extractor tutorial](#adding-support-for-a-new-site) for how to run extractor specific test cases.
|
||||||
|
|
||||||
|
@ -184,21 +187,15 @@ After you have ensured this site is distributing its content legally, you can fo
|
||||||
'url': 'https://yourextractor.com/watch/42',
|
'url': 'https://yourextractor.com/watch/42',
|
||||||
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
# For videos, only the 'id' and 'ext' fields are required to RUN the test:
|
|
||||||
'id': '42',
|
'id': '42',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
# Then if the test run fails, it will output the missing/incorrect fields.
|
'title': 'Video title goes here',
|
||||||
# Properties can be added as:
|
'thumbnail': r're:^https?://.*\.jpg$',
|
||||||
# * A value, e.g.
|
# TODO more properties, either as:
|
||||||
# 'title': 'Video title goes here',
|
# * A value
|
||||||
# * MD5 checksum; start the string with 'md5:', e.g.
|
# * MD5 checksum; start the string with md5:
|
||||||
# 'description': 'md5:098f6bcd4621d373cade4e832627b4f6',
|
# * A regular expression; start the string with re:
|
||||||
# * A regular expression; start the string with 're:', e.g.
|
# * Any Python type, e.g. int or float
|
||||||
# 'thumbnail': r're:^https?://.*\.jpg$',
|
|
||||||
# * A count of elements in a list; start the string with 'count:', e.g.
|
|
||||||
# 'tags': 'count:10',
|
|
||||||
# * Any Python type, e.g.
|
|
||||||
# 'view_count': int,
|
|
||||||
}
|
}
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
@ -218,8 +215,8 @@ After you have ensured this site is distributing its content legally, you can fo
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`.
|
1. Add an import in [`yt_dlp/extractor/_extractors.py`](yt_dlp/extractor/_extractors.py). Note that the class name must end with `IE`.
|
||||||
1. Run `python devscripts/run_tests.py YourExtractor`. This *may fail* at first, but you can continually re-run it until you're done. Upon failure, it will output the missing fields and/or correct values which you can copy. If you decide to add more than one test, the tests will then be named `YourExtractor`, `YourExtractor_1`, `YourExtractor_2`, etc. Note that tests with an `only_matching` key in the test's dict are not included in the count. You can also run all the tests in one go with `YourExtractor_all`
|
1. Run `python test/test_download.py TestDownload.test_YourExtractor` (note that `YourExtractor` doesn't end with `IE`). This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, the tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. Note that tests with `only_matching` key in test's dict are not counted in. You can also run all the tests in one go with `TestDownload.test_YourExtractor_all`
|
||||||
1. Make sure you have at least one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
|
1. Make sure you have atleast one test for your extractor. Even if all videos covered by the extractor are expected to be inaccessible for automated testing, tests should still be added with a `skip` parameter indicating why the particular test is disabled from running.
|
||||||
1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L119-L440). Add tests and code for as many as you want.
|
1. Have a look at [`yt_dlp/extractor/common.py`](yt_dlp/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](yt_dlp/extractor/common.py#L119-L440). Add tests and code for as many as you want.
|
||||||
1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
|
1. Make sure your code follows [yt-dlp coding conventions](#yt-dlp-coding-conventions) and check the code with [flake8](https://flake8.pycqa.org/en/latest/index.html#quickstart):
|
||||||
|
|
||||||
|
|
14
CONTRIBUTORS
14
CONTRIBUTORS
|
@ -528,17 +528,3 @@ almx
|
||||||
elivinsky
|
elivinsky
|
||||||
starius
|
starius
|
||||||
TravisDupes
|
TravisDupes
|
||||||
amir16yp
|
|
||||||
Fymyte
|
|
||||||
Ganesh910
|
|
||||||
hashFactory
|
|
||||||
kclauhk
|
|
||||||
Kyraminol
|
|
||||||
lstrojny
|
|
||||||
middlingphys
|
|
||||||
NickCis
|
|
||||||
nicodato
|
|
||||||
prettykool
|
|
||||||
S-Aarab
|
|
||||||
sonmezberkay
|
|
||||||
TSRBerry
|
|
||||||
|
|
87
Changelog.md
87
Changelog.md
|
@ -4,93 +4,6 @@
|
||||||
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
|
||||||
-->
|
-->
|
||||||
|
|
||||||
### 2023.12.30
|
|
||||||
|
|
||||||
#### Core changes
|
|
||||||
- [Fix format selection parse error for CPython 3.12](https://github.com/yt-dlp/yt-dlp/commit/00cdda4f6fe18712ced13dbc64b7ea10f323e268) ([#8797](https://github.com/yt-dlp/yt-dlp/issues/8797)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- [Let `read_stdin` obey `--quiet`](https://github.com/yt-dlp/yt-dlp/commit/a174c453ee1e853c584ceadeac17eef2bd433dc5) by [pukkandan](https://github.com/pukkandan)
|
|
||||||
- [Merged with youtube-dl be008e6](https://github.com/yt-dlp/yt-dlp/commit/65de7d204ce88c0225df1321060304baab85dbd8) by [bashonly](https://github.com/bashonly), [dirkf](https://github.com/dirkf), [Grub4K](https://github.com/Grub4K)
|
|
||||||
- [Parse `release_year` from `release_date`](https://github.com/yt-dlp/yt-dlp/commit/1732eccc0a40256e076bf0435a29f0f1d8419280) ([#8524](https://github.com/yt-dlp/yt-dlp/issues/8524)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- [Release workflow and Updater cleanup](https://github.com/yt-dlp/yt-dlp/commit/632b8ee54eb2df8ac6e20746a0bd95b7ebb053aa) ([#8640](https://github.com/yt-dlp/yt-dlp/issues/8640)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- [Remove Python 3.7 support](https://github.com/yt-dlp/yt-dlp/commit/f4b95acafcd69a50040730dfdf732e797278fdcc) ([#8361](https://github.com/yt-dlp/yt-dlp/issues/8361)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- [Support `NO_COLOR` environment variable](https://github.com/yt-dlp/yt-dlp/commit/a0b19d319a6ce8b7059318fa17a34b144fde1785) ([#8385](https://github.com/yt-dlp/yt-dlp/issues/8385)) by [Grub4K](https://github.com/Grub4K), [prettykool](https://github.com/prettykool)
|
|
||||||
- **outtmpl**: [Support multiplication](https://github.com/yt-dlp/yt-dlp/commit/993edd3f6e17e966c763bc86dc34125445cec6b6) by [pukkandan](https://github.com/pukkandan)
|
|
||||||
- **utils**: `traverse_obj`: [Move `is_user_input` into output template](https://github.com/yt-dlp/yt-dlp/commit/0b6f829b1dfda15d3c1d7d1fbe4ea6102c26dd24) ([#8673](https://github.com/yt-dlp/yt-dlp/issues/8673)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- **webvtt**: [Allow spaces before newlines for CueBlock](https://github.com/yt-dlp/yt-dlp/commit/15f22b4880b6b3f71f350c64d70976ae65b9f1ca) ([#7681](https://github.com/yt-dlp/yt-dlp/issues/7681)) by [TSRBerry](https://github.com/TSRBerry) (With fixes in [298230e](https://github.com/yt-dlp/yt-dlp/commit/298230e550886b746c266724dd701d842ca2696e) by [pukkandan](https://github.com/pukkandan))
|
|
||||||
|
|
||||||
#### Extractor changes
|
|
||||||
- [Add `media_type` field](https://github.com/yt-dlp/yt-dlp/commit/e370f9ec36972d06100a3db893b397bfc1b07b4d) by [trainman261](https://github.com/trainman261)
|
|
||||||
- [Extract from `media` elements in SMIL manifests](https://github.com/yt-dlp/yt-dlp/commit/ddb2d7588bea48bae965dbfabe6df6550c9d3d43) ([#8504](https://github.com/yt-dlp/yt-dlp/issues/8504)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **abematv**: [Fix season metadata](https://github.com/yt-dlp/yt-dlp/commit/cc07f5cc85d9e2a6cd0bedb9d961665eea0d6047) ([#8607](https://github.com/yt-dlp/yt-dlp/issues/8607)) by [middlingphys](https://github.com/middlingphys)
|
|
||||||
- **allstar**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/3237f8ba29fe13bf95ff42b1e48b5b5109715feb) ([#8274](https://github.com/yt-dlp/yt-dlp/issues/8274)) by [S-Aarab](https://github.com/S-Aarab)
|
|
||||||
- **altcensored**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/3f90813f0617e0d21302398010de7496c9ae36aa) ([#8291](https://github.com/yt-dlp/yt-dlp/issues/8291)) by [drzraf](https://github.com/drzraf)
|
|
||||||
- **ard**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/5f009a094f0e8450792b097c4c8273622778052d) ([#8878](https://github.com/yt-dlp/yt-dlp/issues/8878)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **ardbetamediathek**: [Fix series extraction](https://github.com/yt-dlp/yt-dlp/commit/1f8bd8eba82ba10ddb49ee7cc0be4540dab103d5) ([#8687](https://github.com/yt-dlp/yt-dlp/issues/8687)) by [lstrojny](https://github.com/lstrojny)
|
|
||||||
- **bbc**
|
|
||||||
- [Extract more formats](https://github.com/yt-dlp/yt-dlp/commit/c919b68f7e79ea5010f75f648d3c9e45405a8011) ([#8321](https://github.com/yt-dlp/yt-dlp/issues/8321)) by [barsnick](https://github.com/barsnick), [dirkf](https://github.com/dirkf)
|
|
||||||
- [Fix JSON parsing bug](https://github.com/yt-dlp/yt-dlp/commit/19741ab8a401ec64d5e84fdbfcfb141d105e7bc8) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **bfmtv**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/4903f452b68efb62dadf22e81be8c7934fc743e7) ([#8651](https://github.com/yt-dlp/yt-dlp/issues/8651)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **bilibili**: [Support courses and interactive videos](https://github.com/yt-dlp/yt-dlp/commit/9f09bdcfcb8e2b4b2decdc30d35d34b993bc7a94) ([#8343](https://github.com/yt-dlp/yt-dlp/issues/8343)) by [c-basalt](https://github.com/c-basalt)
|
|
||||||
- **bitchute**: [Fix and improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/b1a1ec1540605d2ea7abdb63336ffb1c56bf6316) ([#8507](https://github.com/yt-dlp/yt-dlp/issues/8507)) by [SirElderling](https://github.com/SirElderling)
|
|
||||||
- **box**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/5a230233d6fce06f4abd1fce0dc92b948e6f780b) ([#8649](https://github.com/yt-dlp/yt-dlp/issues/8649)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **bundestag**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/00a3e47bf5440c96025a76e08337ff2a475ed83e) ([#8783](https://github.com/yt-dlp/yt-dlp/issues/8783)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- **drtv**: [Set default ext for m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/f96ab86cd837b1b5823baa87d144e15322ee9298) ([#8590](https://github.com/yt-dlp/yt-dlp/issues/8590)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **duoplay**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/66a0127d45033c698bdbedf162cddc55d9e7b906) ([#8542](https://github.com/yt-dlp/yt-dlp/issues/8542)) by [glensc](https://github.com/glensc)
|
|
||||||
- **eplus**: [Add login support and DRM detection](https://github.com/yt-dlp/yt-dlp/commit/d5d1517e7d838500800d193ac3234b06e89654cd) ([#8661](https://github.com/yt-dlp/yt-dlp/issues/8661)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
|
||||||
- **facebook**
|
|
||||||
- [Fix Memories extraction](https://github.com/yt-dlp/yt-dlp/commit/c39358a54bc6675ae0c50b81024e5a086e41656a) ([#8681](https://github.com/yt-dlp/yt-dlp/issues/8681)) by [kclauhk](https://github.com/kclauhk)
|
|
||||||
- [Improve subtitles extraction](https://github.com/yt-dlp/yt-dlp/commit/9cafb9ff17e14475a35c9a58b5bb010c86c9db4b) ([#8296](https://github.com/yt-dlp/yt-dlp/issues/8296)) by [kclauhk](https://github.com/kclauhk)
|
|
||||||
- **floatplane**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/628fa244bbce2ad39775a5959e99588f30cac152) ([#8639](https://github.com/yt-dlp/yt-dlp/issues/8639)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **francetv**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/71f28097fec1c9e029f74b68a4eadc8915399840) ([#8409](https://github.com/yt-dlp/yt-dlp/issues/8409)) by [Fymyte](https://github.com/Fymyte)
|
|
||||||
- **instagram**: [Fix stories extraction](https://github.com/yt-dlp/yt-dlp/commit/50eaea9fd7787546b53660e736325fa31c77765d) ([#8843](https://github.com/yt-dlp/yt-dlp/issues/8843)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **joqrag**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/db8b4edc7d0bd27da462f6fe82ff6e13e3d68a04) ([#8384](https://github.com/yt-dlp/yt-dlp/issues/8384)) by [pzhlkj6612](https://github.com/pzhlkj6612)
|
|
||||||
- **litv**: [Fix premium content extraction](https://github.com/yt-dlp/yt-dlp/commit/f45c4efcd928a173e1300a8f1ce4258e70c969b1) ([#8842](https://github.com/yt-dlp/yt-dlp/issues/8842)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **maariv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/c5f01bf7d4b9426c87c3f8248de23934a56579e0) ([#8331](https://github.com/yt-dlp/yt-dlp/issues/8331)) by [amir16yp](https://github.com/amir16yp)
|
|
||||||
- **mediastream**: [Fix authenticated format extraction](https://github.com/yt-dlp/yt-dlp/commit/b03c89309eb141be1a1eceeeb7475dd3b7529ad9) ([#8657](https://github.com/yt-dlp/yt-dlp/issues/8657)) by [NickCis](https://github.com/NickCis)
|
|
||||||
- **nebula**: [Overhaul extractors](https://github.com/yt-dlp/yt-dlp/commit/45d82be65f71bb05506bd55376c6fdb36bc54142) ([#8566](https://github.com/yt-dlp/yt-dlp/issues/8566)) by [elyse0](https://github.com/elyse0), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
|
||||||
- **nintendo**: [Fix Nintendo Direct extraction](https://github.com/yt-dlp/yt-dlp/commit/1d24da6c899ef280d8b0a48a5e280ecd5d39cdf4) ([#8609](https://github.com/yt-dlp/yt-dlp/issues/8609)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- **ondemandkorea**: [Fix upgraded format extraction](https://github.com/yt-dlp/yt-dlp/commit/04a5e06350e3ef7c03f94f2f3f90dd96c6411152) ([#8677](https://github.com/yt-dlp/yt-dlp/issues/8677)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **pr0gramm**: [Support variant formats and subtitles](https://github.com/yt-dlp/yt-dlp/commit/f98a3305eb124a0c375d03209d5c5a64fe1766c8) ([#8674](https://github.com/yt-dlp/yt-dlp/issues/8674)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- **rinsefm**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/c91af948e43570025e4aa887e248fd025abae394) ([#8778](https://github.com/yt-dlp/yt-dlp/issues/8778)) by [hashFactory](https://github.com/hashFactory)
|
|
||||||
- **rudovideo**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/0d531c35eca4c2eb36e160530a7a333edbc727cc) ([#8664](https://github.com/yt-dlp/yt-dlp/issues/8664)) by [nicodato](https://github.com/nicodato)
|
|
||||||
- **theguardian**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/1fa3f24d4b5d22176b11d78420f1f4b64a5af0a8) ([#8535](https://github.com/yt-dlp/yt-dlp/issues/8535)) by [SirElderling](https://github.com/SirElderling)
|
|
||||||
- **theplatform**: [Extract more metadata](https://github.com/yt-dlp/yt-dlp/commit/7e09c147fdccb44806bbf601573adc4b77210a89) ([#8635](https://github.com/yt-dlp/yt-dlp/issues/8635)) by [trainman261](https://github.com/trainman261)
|
|
||||||
- **twitcasting**: [Detect livestreams via API and `show` page](https://github.com/yt-dlp/yt-dlp/commit/585d0ed9abcfcb957f2b2684b8ad43c3af160383) ([#8601](https://github.com/yt-dlp/yt-dlp/issues/8601)) by [bashonly](https://github.com/bashonly), [JC-Chung](https://github.com/JC-Chung)
|
|
||||||
- **twitcastinguser**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/ff2fde1b8f922fd34bae6172602008cd67c07c93) ([#8650](https://github.com/yt-dlp/yt-dlp/issues/8650)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **twitter**
|
|
||||||
- [Extract stale tweets](https://github.com/yt-dlp/yt-dlp/commit/1c54a98e19d047e7c15184237b6ef8ad50af489c) ([#8724](https://github.com/yt-dlp/yt-dlp/issues/8724)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- [Prioritize m3u8 formats](https://github.com/yt-dlp/yt-dlp/commit/e7d22348e77367740da78a3db27167ecf894b7c9) ([#8826](https://github.com/yt-dlp/yt-dlp/issues/8826)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- [Work around API rate-limit](https://github.com/yt-dlp/yt-dlp/commit/116c268438ea4d3738f6fa502c169081ca8f0ee7) ([#8825](https://github.com/yt-dlp/yt-dlp/issues/8825)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- broadcast: [Extract `concurrent_view_count`](https://github.com/yt-dlp/yt-dlp/commit/6fe82491ed622b948c512cf4aab46ac3a234ae0a) ([#8600](https://github.com/yt-dlp/yt-dlp/issues/8600)) by [sonmezberkay](https://github.com/sonmezberkay)
|
|
||||||
- **vidly**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/34df1c1f60fa652c0a6a5c712b06c10e45daf6b7) ([#8612](https://github.com/yt-dlp/yt-dlp/issues/8612)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- **vocaroo**: [Do not use deprecated `getheader`](https://github.com/yt-dlp/yt-dlp/commit/f223b1b0789f65e06619dcc9fc9e74f50d259379) ([#8606](https://github.com/yt-dlp/yt-dlp/issues/8606)) by [qbnu](https://github.com/qbnu)
|
|
||||||
- **vvvvid**: [Set user-agent to fix extraction](https://github.com/yt-dlp/yt-dlp/commit/1725e943b0e8a8b585305660d4611e684374409c) ([#8615](https://github.com/yt-dlp/yt-dlp/issues/8615)) by [Kyraminol](https://github.com/Kyraminol)
|
|
||||||
- **youtube**
|
|
||||||
- [Fix `like_count` extraction](https://github.com/yt-dlp/yt-dlp/commit/6b5d93b0b0240e287389d1d43b2d5293e18aa4cc) ([#8763](https://github.com/yt-dlp/yt-dlp/issues/8763)) by [Ganesh910](https://github.com/Ganesh910)
|
|
||||||
- [Improve detection of faulty HLS formats](https://github.com/yt-dlp/yt-dlp/commit/bb5a54e6db2422bbd155d93a0e105b6616c09467) ([#8646](https://github.com/yt-dlp/yt-dlp/issues/8646)) by [bashonly](https://github.com/bashonly)
|
|
||||||
- [Return empty playlist when channel/tab has no videos](https://github.com/yt-dlp/yt-dlp/commit/044886c220620a7679109e92352890e18b6079e3) by [pukkandan](https://github.com/pukkandan)
|
|
||||||
- [Support cf.piped.video](https://github.com/yt-dlp/yt-dlp/commit/6a9c7a2b52655bacfa7ab2da24fd0d14a6fff495) ([#8514](https://github.com/yt-dlp/yt-dlp/issues/8514)) by [OIRNOIR](https://github.com/OIRNOIR)
|
|
||||||
- **zingmp3**: [Add support for radio and podcasts](https://github.com/yt-dlp/yt-dlp/commit/64de1a4c25bada90374b88d7353754fe8fbfcc51) ([#7189](https://github.com/yt-dlp/yt-dlp/issues/7189)) by [hatienl0i261299](https://github.com/hatienl0i261299)
|
|
||||||
|
|
||||||
#### Postprocessor changes
|
|
||||||
- **ffmpegmetadata**: [Embed stream metadata in single format downloads](https://github.com/yt-dlp/yt-dlp/commit/deeb13eae82e60f82a2c0c5861f460399a997528) ([#8647](https://github.com/yt-dlp/yt-dlp/issues/8647)) by [bashonly](https://github.com/bashonly)
|
|
||||||
|
|
||||||
#### Networking changes
|
|
||||||
- [Strip whitespace around header values](https://github.com/yt-dlp/yt-dlp/commit/196eb0fe77b78e2e5ca02c506c3837c2b1a7964c) ([#8802](https://github.com/yt-dlp/yt-dlp/issues/8802)) by [coletdjnz](https://github.com/coletdjnz)
|
|
||||||
- **Request Handler**: websockets: [Migrate websockets to networking framework](https://github.com/yt-dlp/yt-dlp/commit/ccfd70f4c24b579c72123ca76ab50164f8f122b7) ([#7720](https://github.com/yt-dlp/yt-dlp/issues/7720)) by [coletdjnz](https://github.com/coletdjnz)
|
|
||||||
|
|
||||||
#### Misc. changes
|
|
||||||
- **ci**
|
|
||||||
- [Concurrency optimizations](https://github.com/yt-dlp/yt-dlp/commit/f124fa458826308afc86cf364c509f857686ecfd) ([#8614](https://github.com/yt-dlp/yt-dlp/issues/8614)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- [Run core tests only for core changes](https://github.com/yt-dlp/yt-dlp/commit/13b3cb3c2b7169a1e17d6fc62593bf744170521c) ([#8841](https://github.com/yt-dlp/yt-dlp/issues/8841)) by [Grub4K](https://github.com/Grub4K)
|
|
||||||
- **cleanup**
|
|
||||||
- [Fix spelling of `IE_NAME`](https://github.com/yt-dlp/yt-dlp/commit/bc4ab17b38f01000d99c5c2bedec89721fee65ec) ([#8810](https://github.com/yt-dlp/yt-dlp/issues/8810)) by [barsnick](https://github.com/barsnick)
|
|
||||||
- [Remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/9751a457cfdb18bf99d9ee0d10e4e6a594502bbf) ([#8604](https://github.com/yt-dlp/yt-dlp/issues/8604)) by [seproDev](https://github.com/seproDev)
|
|
||||||
- Miscellaneous: [f9fb3ce](https://github.com/yt-dlp/yt-dlp/commit/f9fb3ce86e3c6a0c3c33b45392b8d7288bceba76) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [pukkandan](https://github.com/pukkandan), [seproDev](https://github.com/seproDev)
|
|
||||||
- **devscripts**: `run_tests`: [Create Python script](https://github.com/yt-dlp/yt-dlp/commit/2d1d683a541d71f3d3bb999dfe8eeb1976fb91ce) ([#8720](https://github.com/yt-dlp/yt-dlp/issues/8720)) by [Grub4K](https://github.com/Grub4K) (With fixes in [225cf2b](https://github.com/yt-dlp/yt-dlp/commit/225cf2b830a1de2c5eacd257edd2a01aed1e1114))
|
|
||||||
- **docs**: [Update youtube-dl merge commit in `README.md`](https://github.com/yt-dlp/yt-dlp/commit/f10589e3453009bb523f55849bba144c9b91cf2a) by [bashonly](https://github.com/bashonly)
|
|
||||||
- **test**: networking: [Update tests for OpenSSL 3.2](https://github.com/yt-dlp/yt-dlp/commit/37755a037e612bfc608c3d4722e8ef2ce6a022ee) ([#8814](https://github.com/yt-dlp/yt-dlp/issues/8814)) by [bashonly](https://github.com/bashonly)
|
|
||||||
|
|
||||||
### 2023.11.16
|
### 2023.11.16
|
||||||
|
|
||||||
#### Extractor changes
|
#### Extractor changes
|
||||||
|
|
|
@ -29,7 +29,6 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
||||||
[![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/coletdjnz)
|
[![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/coletdjnz)
|
||||||
|
|
||||||
* Improved plugin architecture
|
* Improved plugin architecture
|
||||||
* Rewrote the networking infrastructure, implemented support for `requests`
|
|
||||||
* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
|
* YouTube improvements including: age-gate bypass, private playlists, multiple-clients (to avoid throttling) and a lot of under-the-hood improvements
|
||||||
* Added support for new websites YoutubeWebArchive, MainStreaming, PRX, nzherald, Mediaklikk, StarTV etc
|
* Added support for new websites YoutubeWebArchive, MainStreaming, PRX, nzherald, Mediaklikk, StarTV etc
|
||||||
* Improved/fixed support for Patreon, panopto, gfycat, itv, pbs, SouthParkDE etc
|
* Improved/fixed support for Patreon, panopto, gfycat, itv, pbs, SouthParkDE etc
|
||||||
|
@ -47,17 +46,16 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
||||||
|
|
||||||
## [bashonly](https://github.com/bashonly)
|
## [bashonly](https://github.com/bashonly)
|
||||||
|
|
||||||
* `--update-to`, self-updater rewrite, automated/nightly/master releases
|
* `--update-to`, automated release, nightly builds
|
||||||
* `--cookies-from-browser` support for Firefox containers, external downloader cookie handling overhaul
|
* `--cookies-from-browser` support for Firefox containers
|
||||||
* Added support for new websites like Dacast, Kick, NBCStations, Triller, VideoKen, Weverse, WrestleUniverse etc
|
* Added support for new websites Genius, Kick, NBCStations, Triller, VideoKen etc
|
||||||
* Improved/fixed support for Anvato, Brightcove, Reddit, SlidesLive, TikTok, Twitter, Vimeo etc
|
* Improved/fixed support for Anvato, Brightcove, Instagram, ParamountPlus, Reddit, SlidesLive, TikTok, Twitter, Vimeo etc
|
||||||
|
|
||||||
|
|
||||||
## [Grub4K](https://github.com/Grub4K)
|
## [Grub4K](https://github.com/Grub4K)
|
||||||
|
|
||||||
[![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/Grub4K) [![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/Grub4K)
|
[![ko-fi](https://img.shields.io/badge/_-Ko--fi-red.svg?logo=kofi&labelColor=555555&style=for-the-badge)](https://ko-fi.com/Grub4K) [![gh-sponsor](https://img.shields.io/badge/_-Github-white.svg?logo=github&labelColor=555555&style=for-the-badge)](https://github.com/sponsors/Grub4K)
|
||||||
|
|
||||||
* `--update-to`, self-updater rewrite, automated/nightly/master releases
|
* `--update-to`, automated release, nightly builds
|
||||||
* Reworked internals like `traverse_obj`, various core refactors and bugs fixes
|
* Rework internals like `traverse_obj`, various core refactors and bugs fixes
|
||||||
* Implemented proper progress reporting for parallel downloads
|
* Helped fix crunchyroll, Twitter, wrestleuniverse, wistia, slideslive etc
|
||||||
* Improved/fixed/added Bundestag, crunchyroll, pr0gramm, Twitter, WrestleUniverse etc
|
|
||||||
|
|
|
@ -76,7 +76,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
||||||
|
|
||||||
# NEW FEATURES
|
# NEW FEATURES
|
||||||
|
|
||||||
* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@be008e6**](https://github.com/ytdl-org/youtube-dl/commit/be008e657d79832642e2158557c899249c9e31cd) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
|
* Forked from [**yt-dlc@f9401f2**](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee) and merged with [**youtube-dl@66ab08**](https://github.com/ytdl-org/youtube-dl/commit/66ab0814c4baa2dc79c2dd5287bc0ad61a37c5b9) ([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))
|
||||||
|
|
||||||
* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
|
* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
|
||||||
|
|
||||||
|
@ -159,7 +159,6 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
|
||||||
* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
|
* yt-dlp versions between 2021.09.01 and 2023.01.02 applies `--match-filter` to nested playlists. This was an unintentional side-effect of [8f18ac](https://github.com/yt-dlp/yt-dlp/commit/8f18aca8717bb0dd49054555af8d386e5eda3a88) and is fixed in [d7b460](https://github.com/yt-dlp/yt-dlp/commit/d7b460d0e5fc710950582baed2e3fc616ed98a80). Use `--compat-options playlist-match-filter` to revert this
|
||||||
* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
|
* yt-dlp versions between 2021.11.10 and 2023.06.21 estimated `filesize_approx` values for fragmented/manifest formats. This was added for convenience in [f2fe69](https://github.com/yt-dlp/yt-dlp/commit/f2fe69c7b0d208bdb1f6292b4ae92bc1e1a7444a), but was reverted in [0dff8e](https://github.com/yt-dlp/yt-dlp/commit/0dff8e4d1e6e9fb938f4256ea9af7d81f42fd54f) due to the potentially extreme inaccuracy of the estimated values. Use `--compat-options manifest-filesize-approx` to keep extracting the estimated values
|
||||||
* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
|
* yt-dlp uses modern http client backends such as `requests`. Use `--compat-options prefer-legacy-http-handler` to prefer the legacy http handler (`urllib`) to be used for standard http requests.
|
||||||
* The sub-module `swfinterp` is removed.
|
|
||||||
|
|
||||||
For ease of use, a few more compat options are available:
|
For ease of use, a few more compat options are available:
|
||||||
|
|
||||||
|
@ -302,7 +301,7 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
|
||||||
|
|
||||||
* [**pycryptodomex**](https://github.com/Legrandin/pycryptodome)\* - For decrypting AES-128 HLS streams and various other data. Licensed under [BSD-2-Clause](https://github.com/Legrandin/pycryptodome/blob/master/LICENSE.rst)
|
* [**pycryptodomex**](https://github.com/Legrandin/pycryptodome)\* - For decrypting AES-128 HLS streams and various other data. Licensed under [BSD-2-Clause](https://github.com/Legrandin/pycryptodome/blob/master/LICENSE.rst)
|
||||||
* [**phantomjs**](https://github.com/ariya/phantomjs) - Used in extractors where javascript needs to be run. Licensed under [BSD-3-Clause](https://github.com/ariya/phantomjs/blob/master/LICENSE.BSD)
|
* [**phantomjs**](https://github.com/ariya/phantomjs) - Used in extractors where javascript needs to be run. Licensed under [BSD-3-Clause](https://github.com/ariya/phantomjs/blob/master/LICENSE.BSD)
|
||||||
* [**secretstorage**](https://github.com/mitya57/secretstorage)\* - For `--cookies-from-browser` to access the **Gnome** keyring while decrypting cookies of **Chromium**-based browsers on **Linux**. Licensed under [BSD-3-Clause](https://github.com/mitya57/secretstorage/blob/master/LICENSE)
|
* [**secretstorage**](https://github.com/mitya57/secretstorage) - For `--cookies-from-browser` to access the **Gnome** keyring while decrypting cookies of **Chromium**-based browsers on **Linux**. Licensed under [BSD-3-Clause](https://github.com/mitya57/secretstorage/blob/master/LICENSE)
|
||||||
* Any external downloader that you want to use with `--downloader`
|
* Any external downloader that you want to use with `--downloader`
|
||||||
|
|
||||||
### Deprecated
|
### Deprecated
|
||||||
|
|
|
@ -114,11 +114,5 @@
|
||||||
"action": "add",
|
"action": "add",
|
||||||
"when": "f04b5bedad7b281bee9814686bba1762bae092eb",
|
"when": "f04b5bedad7b281bee9814686bba1762bae092eb",
|
||||||
"short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
|
"short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
|
||||||
},
|
|
||||||
{
|
|
||||||
"action": "change",
|
|
||||||
"when": "15f22b4880b6b3f71f350c64d70976ae65b9f1ca",
|
|
||||||
"short": "[webvtt] Allow spaces before newlines for CueBlock (#7681)",
|
|
||||||
"authors": ["TSRBerry"]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -40,6 +40,20 @@ class CommitGroup(enum.Enum):
|
||||||
return {
|
return {
|
||||||
name: group
|
name: group
|
||||||
for group, names in {
|
for group, names in {
|
||||||
|
cls.CORE: {
|
||||||
|
'aes',
|
||||||
|
'cache',
|
||||||
|
'compat_utils',
|
||||||
|
'compat',
|
||||||
|
'cookies',
|
||||||
|
'dependencies',
|
||||||
|
'formats',
|
||||||
|
'jsinterp',
|
||||||
|
'outtmpl',
|
||||||
|
'plugins',
|
||||||
|
'update',
|
||||||
|
'utils',
|
||||||
|
},
|
||||||
cls.MISC: {
|
cls.MISC: {
|
||||||
'build',
|
'build',
|
||||||
'ci',
|
'ci',
|
||||||
|
@ -390,9 +404,9 @@ class CommitRange:
|
||||||
if not group:
|
if not group:
|
||||||
if self.EXTRACTOR_INDICATOR_RE.search(commit.short):
|
if self.EXTRACTOR_INDICATOR_RE.search(commit.short):
|
||||||
group = CommitGroup.EXTRACTOR
|
group = CommitGroup.EXTRACTOR
|
||||||
logger.error(f'Assuming [ie] group for {commit.short!r}')
|
|
||||||
else:
|
else:
|
||||||
group = CommitGroup.CORE
|
group = CommitGroup.POSTPROCESSOR
|
||||||
|
logger.warning(f'Failed to map {commit.short!r}, selected {group.name.lower()}')
|
||||||
|
|
||||||
commit_info = CommitInfo(
|
commit_info = CommitInfo(
|
||||||
details, sub_details, message.strip(),
|
details, sub_details, message.strip(),
|
||||||
|
|
|
@ -9,7 +9,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||||
|
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from devscripts.utils import get_filename_args, read_file, write_file
|
from devscripts.utils import (
|
||||||
|
get_filename_args,
|
||||||
|
read_file,
|
||||||
|
write_file,
|
||||||
|
)
|
||||||
|
|
||||||
VERBOSE_TMPL = '''
|
VERBOSE_TMPL = '''
|
||||||
- type: checkboxes
|
- type: checkboxes
|
||||||
|
|
|
@ -1,4 +1,17 @@
|
||||||
|
@setlocal
|
||||||
@echo off
|
@echo off
|
||||||
|
cd /d %~dp0..
|
||||||
|
|
||||||
>&2 echo run_tests.bat is deprecated. Please use `devscripts/run_tests.py` instead
|
if ["%~1"]==[""] (
|
||||||
python %~dp0run_tests.py %~1
|
set "test_set="test""
|
||||||
|
) else if ["%~1"]==["core"] (
|
||||||
|
set "test_set="-m not download""
|
||||||
|
) else if ["%~1"]==["download"] (
|
||||||
|
set "test_set="-m "download""
|
||||||
|
) else (
|
||||||
|
echo.Invalid test type "%~1". Use "core" ^| "download"
|
||||||
|
exit /b 1
|
||||||
|
)
|
||||||
|
|
||||||
|
set PYTHONWARNINGS=error
|
||||||
|
pytest %test_set%
|
||||||
|
|
|
@ -1,71 +0,0 @@
|
||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import functools
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
|
|
||||||
fix_test_name = functools.partial(re.compile(r'IE(_all|_\d+)?$').sub, r'\1')
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
|
|
||||||
parser = argparse.ArgumentParser(description='Run selected yt-dlp tests')
|
|
||||||
parser.add_argument(
|
|
||||||
'test', help='a extractor tests, or one of "core" or "download"', nargs='*')
|
|
||||||
parser.add_argument(
|
|
||||||
'-k', help='run a test matching EXPRESSION. Same as "pytest -k"', metavar='EXPRESSION')
|
|
||||||
return parser.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
def run_tests(*tests, pattern=None, ci=False):
|
|
||||||
run_core = 'core' in tests or (not pattern and not tests)
|
|
||||||
run_download = 'download' in tests
|
|
||||||
tests = list(map(fix_test_name, tests))
|
|
||||||
|
|
||||||
arguments = ['pytest', '-Werror', '--tb=short']
|
|
||||||
if ci:
|
|
||||||
arguments.append('--color=yes')
|
|
||||||
if run_core:
|
|
||||||
arguments.extend(['-m', 'not download'])
|
|
||||||
elif run_download:
|
|
||||||
arguments.extend(['-m', 'download'])
|
|
||||||
elif pattern:
|
|
||||||
arguments.extend(['-k', pattern])
|
|
||||||
else:
|
|
||||||
arguments.extend(
|
|
||||||
f'test/test_download.py::TestDownload::test_{test}' for test in tests)
|
|
||||||
|
|
||||||
print(f'Running {arguments}', flush=True)
|
|
||||||
try:
|
|
||||||
return subprocess.call(arguments)
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
arguments = [sys.executable, '-Werror', '-m', 'unittest']
|
|
||||||
if run_core:
|
|
||||||
print('"pytest" needs to be installed to run core tests', file=sys.stderr, flush=True)
|
|
||||||
return 1
|
|
||||||
elif run_download:
|
|
||||||
arguments.append('test.test_download')
|
|
||||||
elif pattern:
|
|
||||||
arguments.extend(['-k', pattern])
|
|
||||||
else:
|
|
||||||
arguments.extend(
|
|
||||||
f'test.test_download.TestDownload.test_{test}' for test in tests)
|
|
||||||
|
|
||||||
print(f'Running {arguments}', flush=True)
|
|
||||||
return subprocess.call(arguments)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
args = parse_args()
|
|
||||||
|
|
||||||
os.chdir(Path(__file__).parent.parent)
|
|
||||||
sys.exit(run_tests(*args.test, pattern=args.k, ci=bool(os.getenv('CI'))))
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
pass
|
|
|
@ -1,4 +1,14 @@
|
||||||
#!/usr/bin/env sh
|
#!/usr/bin/env sh
|
||||||
|
|
||||||
>&2 echo 'run_tests.sh is deprecated. Please use `devscripts/run_tests.py` instead'
|
if [ -z "$1" ]; then
|
||||||
python3 devscripts/run_tests.py "$1"
|
test_set='test'
|
||||||
|
elif [ "$1" = 'core' ]; then
|
||||||
|
test_set="-m not download"
|
||||||
|
elif [ "$1" = 'download' ]; then
|
||||||
|
test_set="-m download"
|
||||||
|
else
|
||||||
|
echo 'Invalid test type "'"$1"'". Use "core" | "download"'
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
python3 -bb -Werror -m pytest "$test_set"
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
mutagen
|
mutagen
|
||||||
pycryptodomex
|
pycryptodomex
|
||||||
|
websockets
|
||||||
brotli; implementation_name=='cpython'
|
brotli; implementation_name=='cpython'
|
||||||
brotlicffi; implementation_name!='cpython'
|
brotlicffi; implementation_name!='cpython'
|
||||||
certifi
|
certifi
|
||||||
|
|
|
@ -1,4 +1,6 @@
|
||||||
# Supported sites
|
# Supported sites
|
||||||
|
- **0000studio:archive**
|
||||||
|
- **0000studio:clip**
|
||||||
- **17live**
|
- **17live**
|
||||||
- **17live:clip**
|
- **17live:clip**
|
||||||
- **1News**: 1news.co.nz article videos
|
- **1News**: 1news.co.nz article videos
|
||||||
|
@ -7,6 +9,7 @@
|
||||||
- **23video**
|
- **23video**
|
||||||
- **247sports**
|
- **247sports**
|
||||||
- **24tv.ua**
|
- **24tv.ua**
|
||||||
|
- **24video**
|
||||||
- **3qsdn**: 3Q SDN
|
- **3qsdn**: 3Q SDN
|
||||||
- **3sat**
|
- **3sat**
|
||||||
- **4tube**
|
- **4tube**
|
||||||
|
@ -47,18 +50,15 @@
|
||||||
- **afreecatv**: [*afreecatv*](## "netrc machine") afreecatv.com
|
- **afreecatv**: [*afreecatv*](## "netrc machine") afreecatv.com
|
||||||
- **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com
|
- **afreecatv:live**: [*afreecatv*](## "netrc machine") afreecatv.com
|
||||||
- **afreecatv:user**
|
- **afreecatv:user**
|
||||||
|
- **AirMozilla**
|
||||||
- **AirTV**
|
- **AirTV**
|
||||||
- **AitubeKZVideo**
|
- **AitubeKZVideo**
|
||||||
- **AliExpressLive**
|
- **AliExpressLive**
|
||||||
- **AlJazeera**
|
- **AlJazeera**
|
||||||
- **Allocine**
|
- **Allocine**
|
||||||
- **Allstar**
|
|
||||||
- **AllstarProfile**
|
|
||||||
- **AlphaPorno**
|
- **AlphaPorno**
|
||||||
- **Alsace20TV**
|
- **Alsace20TV**
|
||||||
- **Alsace20TVEmbed**
|
- **Alsace20TVEmbed**
|
||||||
- **altcensored**
|
|
||||||
- **altcensored:channel**
|
|
||||||
- **Alura**: [*alura*](## "netrc machine")
|
- **Alura**: [*alura*](## "netrc machine")
|
||||||
- **AluraCourse**: [*aluracourse*](## "netrc machine")
|
- **AluraCourse**: [*aluracourse*](## "netrc machine")
|
||||||
- **Amara**
|
- **Amara**
|
||||||
|
@ -79,7 +79,7 @@
|
||||||
- **ant1newsgr:embed**: ant1news.gr embedded videos
|
- **ant1newsgr:embed**: ant1news.gr embedded videos
|
||||||
- **antenna:watch**: antenna.gr and ant1news.gr videos
|
- **antenna:watch**: antenna.gr and ant1news.gr videos
|
||||||
- **Anvato**
|
- **Anvato**
|
||||||
- **aol.com**: Yahoo screen and movies (**Currently broken**)
|
- **aol.com**: Yahoo screen and movies
|
||||||
- **APA**
|
- **APA**
|
||||||
- **Aparat**
|
- **Aparat**
|
||||||
- **AppleConnect**
|
- **AppleConnect**
|
||||||
|
@ -90,8 +90,8 @@
|
||||||
- **archive.org**: archive.org video and audio
|
- **archive.org**: archive.org video and audio
|
||||||
- **ArcPublishing**
|
- **ArcPublishing**
|
||||||
- **ARD**
|
- **ARD**
|
||||||
- **ARDMediathek**
|
- **ARD:mediathek**
|
||||||
- **ARDMediathekCollection**
|
- **ARDBetaMediathek**
|
||||||
- **Arkena**
|
- **Arkena**
|
||||||
- **arte.sky.it**
|
- **arte.sky.it**
|
||||||
- **ArteTV**
|
- **ArteTV**
|
||||||
|
@ -100,6 +100,7 @@
|
||||||
- **ArteTVPlaylist**
|
- **ArteTVPlaylist**
|
||||||
- **AtresPlayer**: [*atresplayer*](## "netrc machine")
|
- **AtresPlayer**: [*atresplayer*](## "netrc machine")
|
||||||
- **AtScaleConfEvent**
|
- **AtScaleConfEvent**
|
||||||
|
- **ATTTechChannel**
|
||||||
- **ATVAt**
|
- **ATVAt**
|
||||||
- **AudiMedia**
|
- **AudiMedia**
|
||||||
- **AudioBoom**
|
- **AudioBoom**
|
||||||
|
@ -139,12 +140,12 @@
|
||||||
- **BeatBumpVideo**
|
- **BeatBumpVideo**
|
||||||
- **Beatport**
|
- **Beatport**
|
||||||
- **Beeg**
|
- **Beeg**
|
||||||
- **BehindKink**: (**Currently broken**)
|
- **BehindKink**
|
||||||
- **Bellator**
|
- **Bellator**
|
||||||
- **BellMedia**
|
- **BellMedia**
|
||||||
- **BerufeTV**
|
- **BerufeTV**
|
||||||
- **Bet**: (**Currently broken**)
|
- **Bet**
|
||||||
- **bfi:player**: (**Currently broken**)
|
- **bfi:player**
|
||||||
- **bfmtv**
|
- **bfmtv**
|
||||||
- **bfmtv:article**
|
- **bfmtv:article**
|
||||||
- **bfmtv:live**
|
- **bfmtv:live**
|
||||||
|
@ -161,8 +162,6 @@
|
||||||
- **BiliBiliBangumi**
|
- **BiliBiliBangumi**
|
||||||
- **BiliBiliBangumiMedia**
|
- **BiliBiliBangumiMedia**
|
||||||
- **BiliBiliBangumiSeason**
|
- **BiliBiliBangumiSeason**
|
||||||
- **BilibiliCheese**
|
|
||||||
- **BilibiliCheeseSeason**
|
|
||||||
- **BilibiliCollectionList**
|
- **BilibiliCollectionList**
|
||||||
- **BilibiliFavoritesList**
|
- **BilibiliFavoritesList**
|
||||||
- **BiliBiliPlayer**
|
- **BiliBiliPlayer**
|
||||||
|
@ -177,8 +176,11 @@
|
||||||
- **BiliLive**
|
- **BiliLive**
|
||||||
- **BioBioChileTV**
|
- **BioBioChileTV**
|
||||||
- **Biography**
|
- **Biography**
|
||||||
|
- **BIQLE**
|
||||||
- **BitChute**
|
- **BitChute**
|
||||||
- **BitChuteChannel**
|
- **BitChuteChannel**
|
||||||
|
- **bitwave:replay**
|
||||||
|
- **bitwave:stream**
|
||||||
- **BlackboardCollaborate**
|
- **BlackboardCollaborate**
|
||||||
- **BleacherReport**
|
- **BleacherReport**
|
||||||
- **BleacherReportCMS**
|
- **BleacherReportCMS**
|
||||||
|
@ -191,7 +193,7 @@
|
||||||
- **Box**
|
- **Box**
|
||||||
- **BoxCastVideo**
|
- **BoxCastVideo**
|
||||||
- **Bpb**: Bundeszentrale für politische Bildung
|
- **Bpb**: Bundeszentrale für politische Bildung
|
||||||
- **BR**: Bayerischer Rundfunk (**Currently broken**)
|
- **BR**: Bayerischer Rundfunk
|
||||||
- **BrainPOP**: [*brainpop*](## "netrc machine")
|
- **BrainPOP**: [*brainpop*](## "netrc machine")
|
||||||
- **BrainPOPELL**: [*brainpop*](## "netrc machine")
|
- **BrainPOPELL**: [*brainpop*](## "netrc machine")
|
||||||
- **BrainPOPEsp**: [*brainpop*](## "netrc machine") BrainPOP Español
|
- **BrainPOPEsp**: [*brainpop*](## "netrc machine") BrainPOP Español
|
||||||
|
@ -199,18 +201,19 @@
|
||||||
- **BrainPOPIl**: [*brainpop*](## "netrc machine") BrainPOP Hebrew
|
- **BrainPOPIl**: [*brainpop*](## "netrc machine") BrainPOP Hebrew
|
||||||
- **BrainPOPJr**: [*brainpop*](## "netrc machine")
|
- **BrainPOPJr**: [*brainpop*](## "netrc machine")
|
||||||
- **BravoTV**
|
- **BravoTV**
|
||||||
|
- **Break**
|
||||||
- **BreitBart**
|
- **BreitBart**
|
||||||
- **brightcove:legacy**
|
- **brightcove:legacy**
|
||||||
- **brightcove:new**
|
- **brightcove:new**
|
||||||
- **Brilliantpala:Classes**: [*brilliantpala*](## "netrc machine") VoD on classes.brilliantpala.org
|
- **Brilliantpala:Classes**: [*brilliantpala*](## "netrc machine") VoD on classes.brilliantpala.org
|
||||||
- **Brilliantpala:Elearn**: [*brilliantpala*](## "netrc machine") VoD on elearn.brilliantpala.org
|
- **Brilliantpala:Elearn**: [*brilliantpala*](## "netrc machine") VoD on elearn.brilliantpala.org
|
||||||
|
- **BRMediathek**: Bayerischer Rundfunk Mediathek
|
||||||
- **bt:article**: Bergens Tidende Articles
|
- **bt:article**: Bergens Tidende Articles
|
||||||
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
- **bt:vestlendingen**: Bergens Tidende - Vestlendingen
|
||||||
- **Bundesliga**
|
- **Bundesliga**
|
||||||
- **Bundestag**
|
|
||||||
- **BusinessInsider**
|
- **BusinessInsider**
|
||||||
- **BuzzFeed**
|
- **BuzzFeed**
|
||||||
- **BYUtv**: (**Currently broken**)
|
- **BYUtv**
|
||||||
- **CableAV**
|
- **CableAV**
|
||||||
- **Callin**
|
- **Callin**
|
||||||
- **Caltrans**
|
- **Caltrans**
|
||||||
|
@ -222,11 +225,14 @@
|
||||||
- **CamModels**
|
- **CamModels**
|
||||||
- **Camsoda**
|
- **Camsoda**
|
||||||
- **CamtasiaEmbed**
|
- **CamtasiaEmbed**
|
||||||
+- **CamWithHer**
+- **CarambaTV**
+- **CarambaTVPage**
+- **channel9**: Channel 9
+- **chirbit**
+- **chirbit:profile**
+- **Cinchcast**
+- **Clipsyndicate**
-- **Clubic**: (**Currently broken**)
+- **Cloudy**
+- **Clubic**
-- **cmt.com**: (**Currently broken**)
+- **cmt.com**
+- **Daftsex**
+- **defense.gouv.fr**
-- **DHM**: Filmarchiv - Deutsches Historisches Museum (**Currently broken**)
+- **DHM**: Filmarchiv - Deutsches Historisches Museum
+- **Dotsub**
-- **Duoplay**
+- **EchoMsk**
+- **ehftv**
+- **eHow**
+- **ElevenSports**
+- **EllenTube**
+- **EllenTubePlaylist**
+- **EllenTubeVideo**
+- **Engadget**
-- **EpidemicSound**
-- **eplus**: [*eplus*](## "netrc machine") e+ (イープラス)
+- **eplus:inbound**: e+ (イープラス) overseas
+- **Escapist**
+- **EsriVideo**
+- **ExpoTV**
+- **ExtremeTube**
-- **Floatplane**
-- **FloatplaneChannel**
+- **Foxgay**
+- **Fusion**
+- **Gfycat**
+- **Helsinki**: helsinki.fi
+- **hitbox**
+- **hitbox:live**
+- **Howcast**
+- **HowStuffWorks**
-- **iheartradio**
+- **IHeartRadio**
+- **ITTF**
-- **JoqrAg**: 超!A&G+ 文化放送 (f.k.a. AGQR) Nippon Cultural Broadcasting, Inc. (JOQR)
+- **KeezMovies**
-- **kinja:embed**
+- **KinjaEmbed**
+- **laola1tv**
+- **laola1tv:embed**
+- **LinuxAcademy**: [*linuxacademy*](## "netrc machine")
-- **maariv.co.il**
+- **m6**
+- **META**
+- **metacafe**
+- **Mgoon**
+- **miomio.tv**
+- **Mnet**
+- **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
+- **Mofosex**
+- **MofosexEmbed**
+- **MovieClips**
-- **MSN**: (**Currently broken**)
+- **MSN**
+- **Mwave**
+- **MwaveMeetGreet**
+- **MyChannels**
+- **Myvi**
+- **MyviEmbed**
-- **nba**
+- **NBA**
-- **nba:channel**
-- **nba:embed**
-- **nba:watch:embed**
+- **NBAChannel**
+- **NBAEmbed**
+- **NBAWatchEmbed**
-- **nebula:class**: [*watchnebula*](## "netrc machine")
+- **Newstube**
+- **nicknight**
+- **Normalboots**
+- **NosVideo**
-- **NRLTV**: (**Currently broken**)
+- **NRLTV**
+- **Ooyala**
+- **OoyalaExternal**
+- **pandora.tv**: 판도라TV
+- **People**
+- **play.fm**
+- **PlaysTV**
+- **Playvid**
+- **PornCom**
+- **Pornez**
+- **PornHd**
+- **radiobremen**
+- **Recurbate**
-- **RinseFM**
+- **RICE**
+- **rtl2:you**
+- **rtl2:you:series**
+- **RTVNH**
-- **RudoVideo**
+- **RUHD**
-- **SCTE**: [*scte*](## "netrc machine") (**Currently broken**)
+- **SCTE**: [*scte*](## "netrc machine")
-- **SCTECourse**: [*scte*](## "netrc machine") (**Currently broken**)
+- **SCTECourse**: [*scte*](## "netrc machine")
+- **Shared**: shared.sx
+- **Spankwire**
-- **sr:mediathek**: Saarländischer Rundfunk (**Currently broken**)
+- **sr:mediathek**: Saarländischer Rundfunk
+- **streamcloud.eu**
+- **SWRMediathek**
+- **TechTalks**
-- **TheGuardianPodcast**
-- **TheGuardianPodcastPlaylist**
+- **tinypic**: tinypic.com videos
+- **Tokentube**
+- **Tokentube:channel**
-- **Toypics**: Toypics video (**Currently broken**)
+- **Toypics**: Toypics video
-- **ToypicsUser**: Toypics user profile (**Currently broken**)
+- **ToypicsUser**: Toypics user profile
+- **Trilulilu**
-- **Tube8**: (**Currently broken**)
+- **Tube8**
+- **TunePk**
+- **TVNet**
+- **TVNow**
+- **TVNowAnnual**
+- **TVNowFilm**
+- **TVNowNew**
+- **TVNowSeason**
+- **TVNowShow**
+- **UnscriptedNewsVideo**
+- **VeeHD**
+- **Vidbit**
-- **Vidly**
+- **Vimple**: Vimple - one-click video hosting
+- **Vivo**: vivo.sx
+- **Vodlocker**
+- **VoiceRepublic**
-- **Voot**: [*voot*](## "netrc machine") (**Currently broken**)
+- **Voot**: [*voot*](## "netrc machine")
-- **VootSeries**: [*voot*](## "netrc machine") (**Currently broken**)
+- **VootSeries**: [*voot*](## "netrc machine")
+- **Vrak**
+- **vrv**: [*vrv*](## "netrc machine")
+- **vrv:series**
+- **VShare**
+- **Vupload**
+- **VyboryMos**
+- **Vzaar**
+- **Wakanim**
+- **WatchBox**
+- **WatchIndianPorn**: Watch Indian Porn
+- **Willow**
+- **XBef**
+- **XTube**
+- **XTubeUser**: XTube user profile
+- **Xuite**: 隨意窩Xuite影音
+- **YesJapan**
+- **yinyuetai:video**: 音悦Tai
+- **Ynet**
-- **zingmp3:liveradio**
-- **zingmp3:podcast**
-- **zingmp3:podcast-episode**
@@ -140,8 +140,6 @@ class TestFormatSelection(unittest.TestCase):
 test('example-with-dashes', 'example-with-dashes')
 test('all', '2', '47', '45', 'example-with-dashes', '35')
 test('mergeall', '2+47+45+example-with-dashes+35', multi=True)
-# See: https://github.com/yt-dlp/yt-dlp/pulls/8797
-test('7_a/worst', '35')

 def test_format_selection_audio(self):
 formats = [
@@ -730,7 +728,7 @@ class TestYoutubeDL(unittest.TestCase):
 self.assertEqual(got_dict.get(info_field), expected, info_field)
 return True

-test('%()j', (expect_same_infodict, None))
+test('%()j', (expect_same_infodict, str))

 # NA placeholder
 NA_TEST_OUTTMPL = '%(uploader_date)s-%(width)d-%(x|def)s-%(id)s.%(ext)s'
@@ -9,7 +9,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


 from test.helper import FakeYDL, report_warning
-from yt_dlp.update import UpdateInfo, Updater
+from yt_dlp.update import Updater, UpdateInfo


 # XXX: Keep in sync with yt_dlp.update.UPDATE_SOURCES
@@ -2110,8 +2110,6 @@ Line 1
 self.assertEqual(traverse_obj(_TEST_DATA, (..., {str_or_none})),
 [item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
 msg='Function in set should be a transformation')
-self.assertEqual(traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})), 'const',
-msg='Function in set should always be called')
 if __debug__:
 with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
 traverse_obj(_TEST_DATA, set())
@@ -1 +1 @@
-@py -Werror -Xdev "%~dp0yt_dlp\__main__.py" %*
+@py -bb -Werror -Xdev "%~dp0yt_dlp\__main__.py" %*
@@ -1,2 +1,2 @@
 #!/usr/bin/env sh
-exec "${PYTHON:-python3}" -Werror -Xdev "$(dirname "$(realpath "$0")")/yt_dlp/__main__.py" "$@"
+exec "${PYTHON:-python3}" -bb -Werror -Xdev "$(dirname "$(realpath "$0")")/yt_dlp/__main__.py" "$@"
@@ -61,13 +61,7 @@ from .postprocessor import (
 get_postprocessor,
 )
 from .postprocessor.ffmpeg import resolve_mapping as resolve_recode_mapping
-from .update import (
-REPOSITORY,
-_get_system_deprecation,
-_make_label,
-current_git_head,
-detect_variant,
-)
+from .update import REPOSITORY, _get_system_deprecation, _make_label, current_git_head, detect_variant
 from .utils import (
 DEFAULT_OUTTMPL,
 IDENTITY,
@@ -2481,16 +2475,9 @@ class YoutubeDL:
 return selector_function(ctx_copy)
 return final_selector

-# HACK: Python 3.12 changed the underlying parser, rendering '7_a' invalid
-# Prefix numbers with random letters to avoid it being classified as a number
-# See: https://github.com/yt-dlp/yt-dlp/pulls/8797
-# TODO: Implement parser not reliant on tokenize.tokenize
-prefix = ''.join(random.choices(string.ascii_letters, k=32))
-stream = io.BytesIO(re.sub(r'\d[_\d]*', rf'{prefix}\g<0>', format_spec).encode())
+stream = io.BytesIO(format_spec.encode())
 try:
-tokens = list(_remove_unused_ops(
-token._replace(string=token.string.replace(prefix, ''))
-for token in tokenize.tokenize(stream.readline)))
+tokens = list(_remove_unused_ops(tokenize.tokenize(stream.readline)))
 except tokenize.TokenError:
 raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))

@@ -134,8 +134,8 @@ from .arcpublishing import ArcPublishingIE
 from .arkena import ArkenaIE
 from .ard import (
 ARDBetaMediathekIE,
-ARDMediathekCollectionIE,
 ARDIE,
+ARDMediathekIE,
 )
 from .arte import (
 ArteTVIE,
@@ -548,7 +548,6 @@ from .epicon import (
 EpiconIE,
 EpiconSeriesIE,
 )
-from .epidemicsound import EpidemicSoundIE
 from .eplus import EplusIbIE
 from .epoch import EpochIE
 from .eporner import EpornerIE
@@ -1648,7 +1647,6 @@ from .rumble import (
 RumbleIE,
 RumbleChannelIE,
 )
-from .rudovideo import RudoVideoIE
 from .rutube import (
 RutubeIE,
 RutubeChannelIE,
@@ -1,23 +1,24 @@
+import json
 import re
-from functools import partial

 from .common import InfoExtractor
+from .generic import GenericIE
 from ..utils import (
-OnDemandPagedList,
 determine_ext,
+ExtractorError,
 int_or_none,
-join_nonempty,
-make_archive_id,
 parse_duration,
-parse_iso8601,
+qualities,
-remove_start,
 str_or_none,
+try_get,
 unified_strdate,
+unified_timestamp,
+update_url,
 update_url_query,
 url_or_none,
 xpath_text,
 )
-from ..utils.traversal import traverse_obj
+from ..compat import compat_etree_fromstring


 class ARDMediathekBaseIE(InfoExtractor):
@ -60,6 +61,45 @@ class ARDMediathekBaseIE(InfoExtractor):
|
||||||
'subtitles': subtitles,
|
'subtitles': subtitles,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def _ARD_extract_episode_info(self, title):
|
||||||
|
"""Try to extract season/episode data from the title."""
|
||||||
|
res = {}
|
||||||
|
if not title:
|
||||||
|
return res
|
||||||
|
|
||||||
|
for pattern in [
|
||||||
|
# Pattern for title like "Homo sapiens (S06/E07) - Originalversion"
|
||||||
|
# from: https://www.ardmediathek.de/one/sendung/doctor-who/Y3JpZDovL3dkci5kZS9vbmUvZG9jdG9yIHdobw
|
||||||
|
r'.*(?P<ep_info> \(S(?P<season_number>\d+)/E(?P<episode_number>\d+)\)).*',
|
||||||
|
# E.g.: title="Fritjof aus Norwegen (2) (AD)"
|
||||||
|
# from: https://www.ardmediathek.de/ard/sammlung/der-krieg-und-ich/68cMkqJdllm639Skj4c7sS/
|
||||||
|
r'.*(?P<ep_info> \((?:Folge |Teil )?(?P<episode_number>\d+)(?:/\d+)?\)).*',
|
||||||
|
r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:\:| -|) )\"(?P<episode>.+)\".*',
|
||||||
|
# E.g.: title="Folge 25/42: Symmetrie"
|
||||||
|
# from: https://www.ardmediathek.de/ard/video/grips-mathe/folge-25-42-symmetrie/ard-alpha/Y3JpZDovL2JyLmRlL3ZpZGVvLzMyYzI0ZjczLWQ1N2MtNDAxNC05ZmZhLTFjYzRkZDA5NDU5OQ/
|
||||||
|
# E.g.: title="Folge 1063 - Vertrauen"
|
||||||
|
# from: https://www.ardmediathek.de/ard/sendung/die-fallers/Y3JpZDovL3N3ci5kZS8yMzAyMDQ4/
|
||||||
|
r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:/\d+)?(?:\:| -|) ).*',
|
||||||
|
]:
|
||||||
|
m = re.match(pattern, title)
|
||||||
|
if m:
|
||||||
|
groupdict = m.groupdict()
|
||||||
|
res['season_number'] = int_or_none(groupdict.get('season_number'))
|
||||||
|
res['episode_number'] = int_or_none(groupdict.get('episode_number'))
|
||||||
|
res['episode'] = str_or_none(groupdict.get('episode'))
|
||||||
|
# Build the episode title by removing numeric episode information:
|
||||||
|
if groupdict.get('ep_info') and not res['episode']:
|
||||||
|
res['episode'] = str_or_none(
|
||||||
|
title.replace(groupdict.get('ep_info'), ''))
|
||||||
|
if res['episode']:
|
||||||
|
res['episode'] = res['episode'].strip()
|
||||||
|
break
|
||||||
|
|
||||||
|
# As a fallback use the whole title as the episode name:
|
||||||
|
if not res.get('episode'):
|
||||||
|
res['episode'] = title.strip()
|
||||||
|
return res
|
||||||
|
|
||||||
def _extract_formats(self, media_info, video_id):
|
def _extract_formats(self, media_info, video_id):
|
||||||
type_ = media_info.get('_type')
|
type_ = media_info.get('_type')
|
||||||
media_array = media_info.get('_mediaArray', [])
|
media_array = media_info.get('_mediaArray', [])
|
||||||
|
@ -115,12 +155,144 @@ class ARDMediathekBaseIE(InfoExtractor):
|
||||||
return formats
|
return formats
|
||||||
|
|
||||||
|
|
||||||
|
class ARDMediathekIE(ARDMediathekBaseIE):
|
||||||
|
IE_NAME = 'ARD:mediathek'
|
||||||
|
_VALID_URL = r'^https?://(?:(?:(?:www|classic)\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de|one\.ard\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
|
||||||
|
|
||||||
|
_TESTS = [{
|
||||||
|
# available till 26.07.2022
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/S%C3%9CDLICHT/Was-ist-die-Kunst-der-Zukunft-liebe-Ann/BR-Fernsehen/Video?bcastId=34633636&documentId=44726822',
|
||||||
|
'info_dict': {
|
||||||
|
'id': '44726822',
|
||||||
|
'ext': 'mp4',
|
||||||
|
'title': 'Was ist die Kunst der Zukunft, liebe Anna McCarthy?',
|
||||||
|
'description': 'md5:4ada28b3e3b5df01647310e41f3a62f5',
|
||||||
|
'duration': 1740,
|
||||||
|
},
|
||||||
|
'params': {
|
||||||
|
# m3u8 download
|
||||||
|
'skip_download': True,
|
||||||
|
}
|
||||||
|
}, {
|
||||||
|
'url': 'https://one.ard.de/tv/Mord-mit-Aussicht/Mord-mit-Aussicht-6-39-T%C3%B6dliche-Nach/ONE/Video?bcastId=46384294&documentId=55586872',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# audio
|
||||||
|
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
# audio
|
||||||
|
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
|
||||||
|
'only_matching': True,
|
||||||
|
}, {
|
||||||
|
'url': 'https://classic.ardmediathek.de/tv/Panda-Gorilla-Co/Panda-Gorilla-Co-Folge-274/Das-Erste/Video?bcastId=16355486&documentId=58234698',
|
||||||
|
'only_matching': True,
|
||||||
|
}]
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def suitable(cls, url):
|
||||||
|
return False if ARDBetaMediathekIE.suitable(url) else super(ARDMediathekIE, cls).suitable(url)
|
||||||
|
|
||||||
|
def _real_extract(self, url):
|
||||||
|
# determine video id from url
|
||||||
|
m = self._match_valid_url(url)
|
||||||
|
|
||||||
|
document_id = None
|
||||||
|
|
||||||
|
numid = re.search(r'documentId=([0-9]+)', url)
|
||||||
|
if numid:
|
||||||
|
document_id = video_id = numid.group(1)
|
||||||
|
else:
|
||||||
|
video_id = m.group('video_id')
|
||||||
|
|
||||||
|
webpage = self._download_webpage(url, video_id)
|
||||||
|
|
||||||
|
ERRORS = (
|
||||||
|
('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
|
||||||
|
('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
|
||||||
|
'Video %s is no longer available'),
|
||||||
|
)
|
||||||
|
|
||||||
|
for pattern, message in ERRORS:
|
||||||
|
if pattern in webpage:
|
||||||
|
raise ExtractorError(message % video_id, expected=True)
|
||||||
|
|
||||||
|
if re.search(r'[\?&]rss($|[=&])', url):
|
||||||
|
doc = compat_etree_fromstring(webpage.encode('utf-8'))
|
||||||
|
if doc.tag == 'rss':
|
||||||
|
return GenericIE()._extract_rss(url, video_id, doc)
|
||||||
|
|
||||||
|
title = self._og_search_title(webpage, default=None) or self._html_search_regex(
|
||||||
|
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
|
||||||
|
r'<meta name="dcterms\.title" content="(.*?)"/>',
|
||||||
|
r'<h4 class="headline">(.*?)</h4>',
|
||||||
|
r'<title[^>]*>(.*?)</title>'],
|
||||||
|
webpage, 'title')
|
||||||
|
description = self._og_search_description(webpage, default=None) or self._html_search_meta(
|
||||||
|
'dcterms.abstract', webpage, 'description', default=None)
|
||||||
|
if description is None:
|
||||||
|
description = self._html_search_meta(
|
||||||
|
'description', webpage, 'meta description', default=None)
|
||||||
|
if description is None:
|
||||||
|
description = self._html_search_regex(
|
||||||
|
r'<p\s+class="teasertext">(.+?)</p>',
|
||||||
|
webpage, 'teaser text', default=None)
|
||||||
|
|
||||||
|
# Thumbnail is sometimes not present.
|
||||||
|
# It is in the mobile version, but that seems to use a different URL
|
||||||
|
# structure altogether.
|
||||||
|
thumbnail = self._og_search_thumbnail(webpage, default=None)
|
||||||
|
|
||||||
|
media_streams = re.findall(r'''(?x)
|
||||||
|
mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
|
||||||
|
"([^"]+)"''', webpage)
|
||||||
|
|
||||||
|
if media_streams:
|
||||||
|
QUALITIES = qualities(['lo', 'hi', 'hq'])
|
||||||
|
formats = []
|
||||||
|
for furl in set(media_streams):
|
||||||
|
if furl.endswith('.f4m'):
|
||||||
|
fid = 'f4m'
|
||||||
|
else:
|
||||||
|
fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
|
||||||
|
fid = fid_m.group(1) if fid_m else None
|
||||||
|
formats.append({
|
||||||
|
'quality': QUALITIES(fid),
|
||||||
|
'format_id': fid,
|
||||||
|
'url': furl,
|
||||||
|
})
|
||||||
|
info = {
|
||||||
|
'formats': formats,
|
||||||
|
}
|
||||||
|
else: # request JSON file
|
||||||
|
if not document_id:
|
||||||
|
video_id = self._search_regex(
|
||||||
|
(r'/play/(?:config|media|sola)/(\d+)', r'contentId["\']\s*:\s*(\d+)'),
|
||||||
|
webpage, 'media id', default=None)
|
||||||
|
info = self._extract_media_info(
|
||||||
|
'http://www.ardmediathek.de/play/media/%s' % video_id,
|
||||||
|
webpage, video_id)
|
||||||
|
|
||||||
|
info.update({
|
||||||
|
'id': video_id,
|
||||||
|
'title': title,
|
||||||
|
'description': description,
|
||||||
|
'thumbnail': thumbnail,
|
||||||
|
})
|
||||||
|
info.update(self._ARD_extract_episode_info(info['title']))
|
||||||
|
|
||||||
|
return info
|
||||||
|
|
||||||
|
|
||||||
class ARDIE(InfoExtractor):
|
class ARDIE(InfoExtractor):
|
||||||
_VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html'
|
_VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
# available till 7.12.2023
|
# available till 7.12.2023
|
||||||
'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-video-424.html',
|
'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-video-424.html',
|
||||||
'md5': '94812e6438488fb923c361a44469614b',
|
'md5': 'a438f671e87a7eba04000336a119ccc4',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'maischberger-video-424',
|
'id': 'maischberger-video-424',
|
||||||
'display_id': 'maischberger-video-424',
|
'display_id': 'maischberger-video-424',
|
||||||
|
@ -227,35 +399,31 @@ class ARDIE(InfoExtractor):
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class ARDBetaMediathekIE(InfoExtractor):
|
class ARDBetaMediathekIE(ARDMediathekBaseIE):
|
||||||
IE_NAME = 'ARDMediathek'
|
|
||||||
_VALID_URL = r'''(?x)https://
|
_VALID_URL = r'''(?x)https://
|
||||||
(?:(?:beta|www)\.)?ardmediathek\.de/
|
(?:(?:beta|www)\.)?ardmediathek\.de/
|
||||||
(?:[^/]+/)?
|
(?:(?P<client>[^/]+)/)?
|
||||||
(?:player|live|video)/
|
(?:player|live|video|(?P<playlist>sendung|sammlung))/
|
||||||
(?:(?P<display_id>[^?#]+)/)?
|
(?:(?P<display_id>(?(playlist)[^?#]+?|[^?#]+))/)?
|
||||||
(?P<id>[a-zA-Z0-9]+)
|
(?P<id>(?(playlist)|Y3JpZDovL)[a-zA-Z0-9]+)
|
||||||
/?(?:[?#]|$)'''
|
(?(playlist)/(?P<season>\d+)?/?(?:[?#]|$))'''
|
||||||
_GEO_COUNTRIES = ['DE']
|
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'https://www.ardmediathek.de/video/filme-im-mdr/liebe-auf-vier-pfoten/mdr-fernsehen/Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0',
|
'url': 'https://www.ardmediathek.de/video/filme-im-mdr/wolfsland-die-traurigen-schwestern/mdr-fernsehen/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy8xZGY0ZGJmZS00ZWQwLTRmMGItYjhhYy0wOGQ4ZmYxNjVhZDI',
|
||||||
'md5': 'b6e8ab03f2bcc6e1f9e6cef25fcc03c4',
|
'md5': '3fd5fead7a370a819341129c8d713136',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'display_id': 'filme-im-mdr/liebe-auf-vier-pfoten/mdr-fernsehen',
|
'display_id': 'filme-im-mdr/wolfsland-die-traurigen-schwestern/mdr-fernsehen',
|
||||||
'id': 'Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0',
|
'id': '12172961',
|
||||||
'title': 'Liebe auf vier Pfoten',
|
'title': 'Wolfsland - Die traurigen Schwestern',
|
||||||
'description': r're:^Claudia Schmitt, Anwältin in Salzburg',
|
'description': r're:^Als der Polizeiobermeister Raaben',
|
||||||
'duration': 5222,
|
'duration': 5241,
|
||||||
'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:aee7cbf8f06de976?w=960&ch=ae4d0f2ee47d8b9b',
|
'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:efa186f7b0054957',
|
||||||
'timestamp': 1701343800,
|
'timestamp': 1670710500,
|
||||||
'upload_date': '20231130',
|
'upload_date': '20221210',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'episode': 'Liebe auf vier Pfoten',
|
'age_limit': 12,
|
||||||
'series': 'Filme im MDR',
|
'episode': 'Wolfsland - Die traurigen Schwestern',
|
||||||
'age_limit': 0,
|
'series': 'Filme im MDR'
|
||||||
'channel': 'MDR',
|
|
||||||
'_old_archive_ids': ['ardbetamediathek 12939099'],
|
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
|
'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',
|
||||||
|
@ -276,7 +444,7 @@ class ARDBetaMediathekIE(InfoExtractor):
        'url': 'https://www.ardmediathek.de/video/tagesschau-oder-tagesschau-20-00-uhr/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
        'md5': '1e73ded21cb79bac065117e80c81dc88',
        'info_dict': {
-            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
+            'id': '10049223',
            'ext': 'mp4',
            'title': 'tagesschau, 20:00 Uhr',
            'timestamp': 1636398000,
@ -286,27 +454,7 @@ class ARDBetaMediathekIE(InfoExtractor):
            'duration': 915,
            'episode': 'tagesschau, 20:00 Uhr',
            'series': 'tagesschau',
-            'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:fbb21142783b0a49?w=960&ch=ee69108ae344f678',
+            'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:fbb21142783b0a49',
-            'channel': 'ARD-Aktuell',
-            '_old_archive_ids': ['ardbetamediathek 10049223'],
-        },
-    }, {
-        'url': 'https://www.ardmediathek.de/video/7-tage/7-tage-unter-harten-jungs/hr-fernsehen/N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3',
-        'md5': 'c428b9effff18ff624d4f903bda26315',
-        'info_dict': {
-            'id': 'N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3',
-            'ext': 'mp4',
-            'duration': 2700,
-            'episode': '7 Tage ... unter harten Jungs',
-            'description': 'md5:0f215470dcd2b02f59f4bd10c963f072',
-            'upload_date': '20231005',
-            'timestamp': 1696491171,
-            'display_id': '7-tage/7-tage-unter-harten-jungs/hr-fernsehen',
-            'series': '7 Tage ...',
-            'channel': 'HR',
-            'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:f6e6d5ffac41925c?w=960&ch=fa32ba69bc87989a',
-            'title': '7 Tage ... unter harten Jungs',
-            '_old_archive_ids': ['ardbetamediathek 94834686'],
        },
    }, {
        'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',
@ -323,230 +471,203 @@ class ARDBetaMediathekIE(InfoExtractor):
    }, {
        'url': 'https://www.ardmediathek.de/swr/live/Y3JpZDovL3N3ci5kZS8xMzQ4MTA0Mg',
        'only_matching': True,
-    }, {
-        'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
-        'only_matching': True,
-    }]

-    def _extract_episode_info(self, title):
-        patterns = [
-            # Pattern for title like "Homo sapiens (S06/E07) - Originalversion"
-            # from: https://www.ardmediathek.de/one/sendung/doctor-who/Y3JpZDovL3dkci5kZS9vbmUvZG9jdG9yIHdobw
-            r'.*(?P<ep_info> \(S(?P<season_number>\d+)/E(?P<episode_number>\d+)\)).*',
-            # E.g.: title="Fritjof aus Norwegen (2) (AD)"
-            # from: https://www.ardmediathek.de/ard/sammlung/der-krieg-und-ich/68cMkqJdllm639Skj4c7sS/
-            r'.*(?P<ep_info> \((?:Folge |Teil )?(?P<episode_number>\d+)(?:/\d+)?\)).*',
-            r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:\:| -|) )\"(?P<episode>.+)\".*',
-            # E.g.: title="Folge 25/42: Symmetrie"
-            # from: https://www.ardmediathek.de/ard/video/grips-mathe/folge-25-42-symmetrie/ard-alpha/Y3JpZDovL2JyLmRlL3ZpZGVvLzMyYzI0ZjczLWQ1N2MtNDAxNC05ZmZhLTFjYzRkZDA5NDU5OQ/
-            # E.g.: title="Folge 1063 - Vertrauen"
-            # from: https://www.ardmediathek.de/ard/sendung/die-fallers/Y3JpZDovL3N3ci5kZS8yMzAyMDQ4/
-            r'.*(?P<ep_info>Folge (?P<episode_number>\d+)(?:/\d+)?(?:\:| -|) ).*',
-            # As a fallback use the full title
-            r'(?P<title>.*)',
-        ]

-        return traverse_obj(patterns, (..., {partial(re.match, string=title)}, {
-            'season_number': ('season_number', {int_or_none}),
-            'episode_number': ('episode_number', {int_or_none}),
-            'episode': ((
-                ('episode', {str_or_none}),
-                ('ep_info', {lambda x: title.replace(x, '')}),
-                ('title', {str}),
-            ), {str.strip}),
-        }), get_all=False)
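# A minimal standalone sketch (not part of the diff) of what the first pattern
# above does: pulling season/episode numbers out of an ARD title such as
# "Homo sapiens (S06/E07) - Originalversion". The helper name is hypothetical.
import re

def parse_season_episode(title):
    m = re.match(r'.*(?P<ep_info> \(S(?P<season_number>\d+)/E(?P<episode_number>\d+)\)).*', title)
    if not m:
        return {}
    return {
        'season_number': int(m.group('season_number')),
        'episode_number': int(m.group('episode_number')),
        # the remaining text, with the "(S06/E07)" part removed, becomes the episode name
        'episode': title.replace(m.group('ep_info'), '').strip(),
    }

# parse_season_episode('Homo sapiens (S06/E07) - Originalversion')
# -> {'season_number': 6, 'episode_number': 7, 'episode': 'Homo sapiens - Originalversion'}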
-    def _real_extract(self, url):
-        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')

-        page_data = self._download_json(
-            f'https://api.ardmediathek.de/page-gateway/pages/ard/item/{video_id}', video_id, query={
-                'embedded': 'false',
-                'mcV6': 'true',
-            })

-        player_data = traverse_obj(
-            page_data, ('widgets', lambda _, v: v['type'] in ('player_ondemand', 'player_live'), {dict}), get_all=False)
-        is_live = player_data.get('type') == 'player_live'
-        media_data = traverse_obj(player_data, ('mediaCollection', 'embedded', {dict}))

-        if player_data.get('blockedByFsk'):
-            self.raise_no_formats('This video is only available after 22:00', expected=True)

-        formats = []
-        subtitles = {}
-        for stream in traverse_obj(media_data, ('streams', ..., {dict})):
-            kind = stream.get('kind')
-            # Prioritize main stream over sign language and others
-            preference = 1 if kind == 'main' else None
-            for media in traverse_obj(stream, ('media', lambda _, v: url_or_none(v['url']))):
-                media_url = media['url']

-                audio_kind = traverse_obj(media, (
-                    'audios', 0, 'kind', {str}), default='').replace('standard', '')
-                lang_code = traverse_obj(media, ('audios', 0, 'languageCode', {str})) or 'deu'
-                lang = join_nonempty(lang_code, audio_kind)
-                language_preference = 10 if lang == 'deu' else -10

-                if determine_ext(media_url) == 'm3u8':
-                    fmts, subs = self._extract_m3u8_formats_and_subtitles(
-                        media_url, video_id, m3u8_id=f'hls-{kind}', preference=preference, fatal=False, live=is_live)
-                    for f in fmts:
-                        f['language'] = lang
-                        f['language_preference'] = language_preference
-                    formats.extend(fmts)
-                    self._merge_subtitles(subs, target=subtitles)
-                else:
-                    formats.append({
-                        'url': media_url,
-                        'format_id': f'http-{kind}',
-                        'preference': preference,
-                        'language': lang,
-                        'language_preference': language_preference,
-                        **traverse_obj(media, {
-                            'format_note': ('forcedLabel', {str}),
-                            'width': ('maxHResolutionPx', {int_or_none}),
-                            'height': ('maxVResolutionPx', {int_or_none}),
-                            'vcodec': ('videoCodec', {str}),
-                        }),
-                    })

-        for sub in traverse_obj(media_data, ('subtitles', ..., {dict})):
-            for sources in traverse_obj(sub, ('sources', lambda _, v: url_or_none(v['url']))):
-                subtitles.setdefault(sub.get('languageCode') or 'deu', []).append({
-                    'url': sources['url'],
-                    'ext': {'webvtt': 'vtt', 'ebutt': 'ttml'}.get(sources.get('kind')),
-                })

-        age_limit = traverse_obj(page_data, ('fskRating', {lambda x: remove_start(x, 'FSK')}, {int_or_none}))
-        old_id = traverse_obj(page_data, ('tracking', 'atiCustomVars', 'contentId'))

-        return {
-            'id': video_id,
-            'display_id': display_id,
-            'formats': formats,
-            'subtitles': subtitles,
-            'is_live': is_live,
-            'age_limit': age_limit,
-            **traverse_obj(media_data, ('meta', {
-                'title': 'title',
-                'description': 'synopsis',
-                'timestamp': ('broadcastedOnDateTime', {parse_iso8601}),
-                'series': 'seriesTitle',
-                'thumbnail': ('images', 0, 'url', {url_or_none}),
-                'duration': ('durationSeconds', {int_or_none}),
-                'channel': 'clipSourceName',
-            })),
-            **self._extract_episode_info(page_data.get('title')),
-            '_old_archive_ids': [make_archive_id(ARDBetaMediathekIE, old_id)],
-        }
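# Hedged sketch (not part of the diff): the age-limit line above boils down to
# stripping an optional "FSK" prefix from a rating string and coercing the rest
# to an int, tolerating missing or malformed values. Field names follow the code above.
def fsk_to_age_limit(fsk_rating):
    if not isinstance(fsk_rating, str):
        return None
    digits = fsk_rating[3:] if fsk_rating.startswith('FSK') else fsk_rating
    try:
        return int(digits)
    except ValueError:
        return None

# fsk_to_age_limit('FSK12') -> 12; fsk_to_age_limit(None) -> None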

-class ARDMediathekCollectionIE(InfoExtractor):
-    _VALID_URL = r'''(?x)https://
-        (?:(?:beta|www)\.)?ardmediathek\.de/
-        (?:[^/?#]+/)?
-        (?P<playlist>sendung|serie|sammlung)/
-        (?:(?P<display_id>[^?#]+?)/)?
-        (?P<id>[a-zA-Z0-9]+)
-        (?:/(?P<season>\d+)(?:/(?P<version>OV|AD))?)?/?(?:[?#]|$)'''
-    _GEO_COUNTRIES = ['DE']

-    _TESTS = [{
-        'url': 'https://www.ardmediathek.de/serie/quiz/staffel-1-originalversion/Y3JpZDovL3dkci5kZS9vbmUvcXVpeg/1/OV',
-        'info_dict': {
-            'id': 'Y3JpZDovL3dkci5kZS9vbmUvcXVpeg_1_OV',
-            'display_id': 'quiz/staffel-1-originalversion',
-            'title': 'Staffel 1 Originalversion',
-        },
-        'playlist_count': 3,
-    }, {
-        'url': 'https://www.ardmediathek.de/serie/babylon-berlin/staffel-4-mit-audiodeskription/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu/4/AD',
-        'info_dict': {
-            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu_4_AD',
-            'display_id': 'babylon-berlin/staffel-4-mit-audiodeskription',
-            'title': 'Staffel 4 mit Audiodeskription',
-        },
-        'playlist_count': 12,
-    }, {
-        'url': 'https://www.ardmediathek.de/serie/babylon-berlin/staffel-1/Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu/1/',
-        'info_dict': {
-            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL2JhYnlsb24tYmVybGlu_1',
-            'display_id': 'babylon-berlin/staffel-1',
-            'title': 'Staffel 1',
-        },
-        'playlist_count': 8,
-    }, {
-        'url': 'https://www.ardmediathek.de/sendung/tatort/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydA',
-        'info_dict': {
-            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydA',
-            'display_id': 'tatort',
-            'title': 'Tatort',
-        },
-        'playlist_mincount': 500,
-    }, {
-        'url': 'https://www.ardmediathek.de/sammlung/die-kirche-bleibt-im-dorf/5eOHzt8XB2sqeFXbIoJlg2',
-        'info_dict': {
-            'id': '5eOHzt8XB2sqeFXbIoJlg2',
-            'display_id': 'die-kirche-bleibt-im-dorf',
-            'title': 'Die Kirche bleibt im Dorf',
-            'description': 'Die Kirche bleibt im Dorf',
-        },
-        'playlist_count': 4,
    }, {
        # playlist of type 'sendung'
        'url': 'https://www.ardmediathek.de/ard/sendung/doctor-who/Y3JpZDovL3dkci5kZS9vbmUvZG9jdG9yIHdobw/',
        'only_matching': True,
-    }, {
-        # playlist of type 'serie'
-        'url': 'https://www.ardmediathek.de/serie/nachtstreife/staffel-1/Y3JpZDovL3N3ci5kZS9zZGIvc3RJZC8xMjQy/1',
-        'only_matching': True,
    }, {
        # playlist of type 'sammlung'
        'url': 'https://www.ardmediathek.de/ard/sammlung/team-muenster/5JpTzLSbWUAK8184IOvEir/',
        'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/video/coronavirus-update-ndr-info/astrazeneca-kurz-lockdown-und-pims-syndrom-81/ndr/Y3JpZDovL25kci5kZS84NzE0M2FjNi0wMWEwLTQ5ODEtOTE5NS1mOGZhNzdhOTFmOTI/',
+        'only_matching': True,
+    }, {
+        'url': 'https://www.ardmediathek.de/ard/player/Y3JpZDovL3dkci5kZS9CZWl0cmFnLWQ2NDJjYWEzLTMwZWYtNGI4NS1iMTI2LTU1N2UxYTcxOGIzOQ/tatort-duo-koeln-leipzig-ihr-kinderlein-kommet',
+        'only_matching': True,
    }]
-    _PAGE_SIZE = 100
+    def _ARD_load_playlist_snipped(self, playlist_id, display_id, client, mode, pageNumber):
+        """ Query the ARD server for playlist information
+        and returns the data in "raw" format """
+        if mode == 'sendung':
+            graphQL = json.dumps({
+                'query': '''{
+                    showPage(
+                        client: "%s"
+                        showId: "%s"
+                        pageNumber: %d
+                    ) {
+                        pagination {
+                            pageSize
+                            totalElements
+                        }
+                        teasers { # Array
+                            mediumTitle
+                            links { target { id href title } }
+                            type
+                        }
+                    }}''' % (client, playlist_id, pageNumber),
+            }).encode()
+        else:  # mode == 'sammlung'
+            graphQL = json.dumps({
+                'query': '''{
+                    morePage(
+                        client: "%s"
+                        compilationId: "%s"
+                        pageNumber: %d
+                    ) {
+                        widget {
+                            pagination {
+                                pageSize
+                                totalElements
+                            }
+                            teasers { # Array
+                                mediumTitle
+                                links { target { id href title } }
+                                type
+                            }
+                        }
+                    }}''' % (client, playlist_id, pageNumber),
+            }).encode()
+        # Ressources for ARD graphQL debugging:
+        # https://api-test.ardmediathek.de/public-gateway
+        show_page = self._download_json(
+            'https://api.ardmediathek.de/public-gateway',
+            '[Playlist] %s' % display_id,
+            data=graphQL,
+            headers={'Content-Type': 'application/json'})['data']
+        # align the structure of the returned data:
+        if mode == 'sendung':
+            show_page = show_page['showPage']
+        else:  # mode == 'sammlung'
+            show_page = show_page['morePage']['widget']
+        return show_page

+    def _ARD_extract_playlist(self, url, playlist_id, display_id, client, mode):
+        """ Collects all playlist entries and returns them as info dict.
+        Supports playlists of mode 'sendung' and 'sammlung', and also nested
+        playlists. """
+        entries = []
+        pageNumber = 0
+        while True:  # iterate by pageNumber
+            show_page = self._ARD_load_playlist_snipped(
+                playlist_id, display_id, client, mode, pageNumber)
+            for teaser in show_page['teasers']:  # process playlist items
+                if '/compilation/' in teaser['links']['target']['href']:
+                    # alternativ cond.: teaser['type'] == "compilation"
+                    # => This is an nested compilation, e.g. like:
+                    # https://www.ardmediathek.de/ard/sammlung/die-kirche-bleibt-im-dorf/5eOHzt8XB2sqeFXbIoJlg2/
+                    link_mode = 'sammlung'
+                else:
+                    link_mode = 'video'

+                item_url = 'https://www.ardmediathek.de/%s/%s/%s/%s/%s' % (
+                    client, link_mode, display_id,
+                    # perform HTLM quoting of episode title similar to ARD:
+                    re.sub('^-|-$', '',  # remove '-' from begin/end
+                           re.sub('[^a-zA-Z0-9]+', '-',  # replace special chars by -
+                                  teaser['links']['target']['title'].lower()
+                                  .replace('ä', 'ae').replace('ö', 'oe')
+                                  .replace('ü', 'ue').replace('ß', 'ss'))),
+                    teaser['links']['target']['id'])
+                entries.append(self.url_result(
+                    item_url,
+                    ie=ARDBetaMediathekIE.ie_key()))

+            if (show_page['pagination']['pageSize'] * (pageNumber + 1)
+                    >= show_page['pagination']['totalElements']):
+                # we've processed enough pages to get all playlist entries
+                break
+            pageNumber = pageNumber + 1

+        return self.playlist_result(entries, playlist_id, playlist_title=display_id)
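# Hedged sketch (not part of the diff): the slug building inside the old
# _ARD_extract_playlist above, pulled out as a standalone function. It lowercases
# the teaser title, transliterates German umlauts, collapses every non-alphanumeric
# run into '-', and trims leading/trailing dashes, mimicking ARD's own URL slugs.
import re

def ard_slug(title):
    slug = (title.lower()
            .replace('ä', 'ae').replace('ö', 'oe')
            .replace('ü', 'ue').replace('ß', 'ss'))
    slug = re.sub('[^a-zA-Z0-9]+', '-', slug)
    return re.sub('^-|-$', '', slug)

# ard_slug('Die Kirche bleibt im Dorf') -> 'die-kirche-bleibt-im-dorf'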
    def _real_extract(self, url):
-        playlist_id, display_id, playlist_type, season_number, version = self._match_valid_url(url).group(
-            'id', 'display_id', 'playlist', 'season', 'version')
+        video_id, display_id, playlist_type, client, season_number = self._match_valid_url(url).group(
+            'id', 'display_id', 'playlist', 'client', 'season')
+        display_id, client = display_id or video_id, client or 'ard'

-        def call_api(page_num):
-            api_path = 'compilations/ard' if playlist_type == 'sammlung' else 'widgets/ard/asset'
-            return self._download_json(
-                f'https://api.ardmediathek.de/page-gateway/{api_path}/{playlist_id}', playlist_id,
-                f'Downloading playlist page {page_num}', query={
-                    'pageNumber': page_num,
-                    'pageSize': self._PAGE_SIZE,
-                    **({
-                        'seasoned': 'true',
-                        'seasonNumber': season_number,
-                        'withOriginalversion': 'true' if version == 'OV' else 'false',
-                        'withAudiodescription': 'true' if version == 'AD' else 'false',
-                    } if season_number else {}),
+        if playlist_type:
+            # TODO: Extract only specified season
+            return self._ARD_extract_playlist(url, video_id, display_id, client, playlist_type)

+        player_page = self._download_json(
+            'https://api.ardmediathek.de/public-gateway',
+            display_id, data=json.dumps({
+                'query': '''{
+                    playerPage(client:"%s", clipId: "%s") {
+                        blockedByFsk
+                        broadcastedOn
+                        maturityContentRating
+                        mediaCollection {
+                            _duration
+                            _geoblocked
+                            _isLive
+                            _mediaArray {
+                                _mediaStreamArray {
+                                    _quality
+                                    _server
+                                    _stream
+                                }
+                            }
+                            _previewImage
+                            _subtitleUrl
+                            _type
+                        }
+                        show {
+                            title
+                        }
+                        image {
+                            src
+                        }
+                        synopsis
+                        title
+                        tracking {
+                            atiCustomVars {
+                                contentId
+                            }
+                        }
+                    }
+                }''' % (client, video_id),
+            }).encode(), headers={
+                'Content-Type': 'application/json'
+            })['data']['playerPage']
+        title = player_page['title']
+        content_id = str_or_none(try_get(
+            player_page, lambda x: x['tracking']['atiCustomVars']['contentId']))
+        media_collection = player_page.get('mediaCollection') or {}
+        if not media_collection and content_id:
+            media_collection = self._download_json(
+                'https://www.ardmediathek.de/play/media/' + content_id,
+                content_id, fatal=False) or {}
+        info = self._parse_media_info(
+            media_collection, content_id or video_id,
+            player_page.get('blockedByFsk'))
+        age_limit = None
+        description = player_page.get('synopsis')
+        maturity_content_rating = player_page.get('maturityContentRating')
+        if maturity_content_rating:
+            age_limit = int_or_none(maturity_content_rating.lstrip('FSK'))
+        if not age_limit and description:
+            age_limit = int_or_none(self._search_regex(
+                r'\(FSK\s*(\d+)\)\s*$', description, 'age limit', default=None))
+        info.update({
+            'age_limit': age_limit,
+            'display_id': display_id,
+            'title': title,
+            'description': description,
+            'timestamp': unified_timestamp(player_page.get('broadcastedOn')),
+            'series': try_get(player_page, lambda x: x['show']['title']),
+            'thumbnail': (media_collection.get('_previewImage')
+                          or try_get(player_page, lambda x: update_url(x['image']['src'], query=None, fragment=None))
+                          or self.get_thumbnail_from_html(display_id, url)),
        })
+        info.update(self._ARD_extract_episode_info(info['title']))
+        return info

-        def fetch_page(page_num):
-            for item in traverse_obj(call_api(page_num), ('teasers', ..., {dict})):
-                item_id = traverse_obj(item, ('links', 'target', ('urlId', 'id')), 'id', get_all=False)
-                if not item_id or item_id == playlist_id:
-                    continue
-                item_mode = 'sammlung' if item.get('type') == 'compilation' else 'video'
-                yield self.url_result(
-                    f'https://www.ardmediathek.de/{item_mode}/{item_id}',
-                    ie=(ARDMediathekCollectionIE if item_mode == 'sammlung' else ARDBetaMediathekIE),
-                    **traverse_obj(item, {
-                        'id': ('id', {str}),
-                        'title': ('longTitle', {str}),
-                        'duration': ('duration', {int_or_none}),
-                        'timestamp': ('broadcastedOn', {parse_iso8601}),
-                    }))

-        page_data = call_api(0)
-        full_id = join_nonempty(playlist_id, season_number, version, delim='_')

-        return self.playlist_result(
-            OnDemandPagedList(fetch_page, self._PAGE_SIZE), full_id, display_id=display_id,
-            title=page_data.get('title'), description=page_data.get('synopsis'))
+    def get_thumbnail_from_html(self, display_id, url):
+        webpage = self._download_webpage(url, display_id, fatal=False) or ''
+        return (
+            self._og_search_thumbnail(webpage, default=None)
+            or self._html_search_meta('thumbnailUrl', webpage, default=None))
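A minimal sketch, outside the diff, of the lazy-pagination pattern used in the new ARDMediathekCollectionIE._real_extract above: OnDemandPagedList takes a zero-based page callback plus a page size and only calls the callback for the pages a run actually needs. The data below is invented for illustration.

from yt_dlp.utils import OnDemandPagedList

PAGE_SIZE = 100
FAKE_ITEMS = [f'item {i}' for i in range(250)]  # stand-in for API results

def fetch_page(page_num):
    # in the extractor this would be one call_api(page_num) request
    start = page_num * PAGE_SIZE
    return FAKE_ITEMS[start:start + PAGE_SIZE]

entries = OnDemandPagedList(fetch_page, PAGE_SIZE)
# slicing only triggers the page requests that are actually needed
print(entries.getslice(0, 5))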
@ -152,7 +152,7 @@ class BanByeChannelIE(BanByeBaseIE):
                'sort': 'new',
                'limit': self._PAGE_SIZE,
                'offset': page_num * self._PAGE_SIZE,
-            }, note=f'Downloading page {page_num + 1}')
+            }, note=f'Downloading page {page_num+1}')
        return [
            self.url_result(f"{self._VIDEO_BASE}/{video['_id']}", BanByeIE)
            for video in data['items']
@ -53,6 +53,21 @@ class DuoplayIE(InfoExtractor):
            'episode_id': 14,
            'release_year': 2010,
        },
+    }, {
+        'note': 'Movie',
+        'url': 'https://duoplay.ee/4325/naljamangud',
+        'md5': '2b0bcac4159a08b1844c2bfde06b1199',
+        'info_dict': {
+            'id': '4325',
+            'ext': 'mp4',
+            'title': 'Näljamängud',
+            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
+            'description': 'md5:fb35f5eb2ff46cdb82e4d5fbe7b49a13',
+            'cast': ['Jennifer Lawrence', 'Josh Hutcherson', 'Liam Hemsworth'],
+            'upload_date': '20231109',
+            'timestamp': 1699552800,
+            'release_year': 2012,
+        },
    }, {
        'note': 'Movie without expiry',
        'url': 'https://duoplay.ee/5501/pilvede-all.-neljas-ode',
@ -1,107 +0,0 @@
-from .common import InfoExtractor
-from ..utils import (
-    float_or_none,
-    int_or_none,
-    orderedSet,
-    parse_iso8601,
-    parse_qs,
-    parse_resolution,
-    str_or_none,
-    traverse_obj,
-    url_or_none,
-)


-class EpidemicSoundIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?epidemicsound\.com/track/(?P<id>[0-9a-zA-Z]+)'
-    _TESTS = [{
-        'url': 'https://www.epidemicsound.com/track/yFfQVRpSPz/',
-        'md5': 'd98ff2ddb49e8acab9716541cbc9dfac',
-        'info_dict': {
-            'id': '45014',
-            'display_id': 'yFfQVRpSPz',
-            'ext': 'mp3',
-            'title': 'Door Knock Door 1',
-            'alt_title': 'Door Knock Door 1',
-            'tags': ['foley', 'door', 'knock', 'glass', 'window', 'glass door knock'],
-            'categories': ['Misc. Door'],
-            'duration': 1,
-            'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/default-sfx/3000x3000.jpg',
-            'timestamp': 1415320353,
-            'upload_date': '20141107',
-        },
-    }, {
-        'url': 'https://www.epidemicsound.com/track/mj8GTTwsZd/',
-        'md5': 'c82b745890f9baf18dc2f8d568ee3830',
-        'info_dict': {
-            'id': '148700',
-            'display_id': 'mj8GTTwsZd',
-            'ext': 'mp3',
-            'title': 'Noplace',
-            'tags': ['liquid drum n bass', 'energetic'],
-            'categories': ['drum and bass'],
-            'duration': 237,
-            'timestamp': 1694426482,
-            'thumbnail': 'https://cdn.epidemicsound.com/curation-assets/commercial-release-cover-images/11138/3000x3000.jpg',
-            'upload_date': '20230911',
-            'release_timestamp': 1700535606,
-            'release_date': '20231121',
-        },
-    }]

-    @staticmethod
-    def _epidemic_parse_thumbnail(url: str):
-        if not url_or_none(url):
-            return None

-        return {
-            'url': url,
-            **(traverse_obj(url, ({parse_qs}, {
-                'width': ('width', 0, {int_or_none}),
-                'height': ('height', 0, {int_or_none}),
-            })) or parse_resolution(url)),
-        }

-    @staticmethod
-    def _epidemic_fmt_or_none(f):
-        if not f.get('format'):
-            f['format'] = f.get('format_id')
-        elif not f.get('format_id'):
-            f['format_id'] = f['format']
-        if not f['url'] or not f['format']:
-            return None
-        if f.get('format_note'):
-            f['format_note'] = f'track ID {f["format_note"]}'
-        if f['format'] != 'full':
-            f['preference'] = -2
-        return f

-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        json_data = self._download_json(f'https://www.epidemicsound.com/json/track/{video_id}', video_id)

-        thumbnails = traverse_obj(json_data, [('imageUrl', 'cover')])
-        thumb_base_url = traverse_obj(json_data, ('coverArt', 'baseUrl', {url_or_none}))
-        if thumb_base_url:
-            thumbnails.extend(traverse_obj(json_data, (
-                'coverArt', 'sizes', ..., {thumb_base_url.__add__})))

-        return traverse_obj(json_data, {
-            'id': ('id', {str_or_none}),
-            'display_id': ('publicSlug', {str}),
-            'title': ('title', {str}),
-            'alt_title': ('oldTitle', {str}),
-            'duration': ('length', {float_or_none}),
-            'timestamp': ('added', {parse_iso8601}),
-            'release_timestamp': ('releaseDate', {parse_iso8601}),
-            'categories': ('genres', ..., 'tag', {str}),
-            'tags': ('metadataTags', ..., {str}),
-            'age_limit': ('isExplicit', {lambda b: 18 if b else None}),
-            'thumbnails': ({lambda _: thumbnails}, {orderedSet}, ..., {self._epidemic_parse_thumbnail}),
-            'formats': ('stems', {dict.items}, ..., {
-                'format': (0, {str_or_none}),
-                'format_note': (1, 's3TrackId', {str_or_none}),
-                'format_id': (1, 'stemType', {str}),
-                'url': (1, 'lqMp3Url', {url_or_none}),
-            }, {self._epidemic_fmt_or_none}),
-        })
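A small sketch, not part of the diff, of the thumbnail handling in _epidemic_parse_thumbnail above: prefer explicit width/height query parameters and fall back to guessing the resolution from the URL text. parse_qs and parse_resolution are the yt-dlp helpers imported above; the sample URL is invented.

from yt_dlp.utils import parse_qs, parse_resolution

def thumb_info(url):
    qs = parse_qs(url)
    width = qs.get('width', [None])[0]
    height = qs.get('height', [None])[0]
    if width and height:
        return {'url': url, 'width': int(width), 'height': int(height)}
    # e.g. '.../3000x3000.jpg' -> {'width': 3000, 'height': 3000}
    return {'url': url, **parse_resolution(url)}

# thumb_info('https://cdn.example.com/cover.jpg?width=640&height=480')
# -> {'url': '...', 'width': 640, 'height': 480}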
@ -52,7 +52,7 @@ class FacebookIE(InfoExtractor):
                        )\?(?:.*?)(?:v|video_id|story_fbid)=|
                        [^/]+/videos/(?:[^/]+/)?|
                        [^/]+/posts/|
-                        groups/[^/]+/(?:permalink|posts)/|
+                        groups/[^/]+/permalink/|
                        watchparty/
                    )|
                facebook:
@ -232,21 +232,6 @@ class FacebookIE(InfoExtractor):
            'uploader_id': '100013949973717',
        },
        'skip': 'Requires logging in',
-    }, {
-        # data.node.comet_sections.content.story.attachments[].throwbackStyles.attachment_target_renderer.attachment.target.attachments[].styles.attachment.media
-        'url': 'https://www.facebook.com/groups/1645456212344334/posts/3737828833107051/',
-        'info_dict': {
-            'id': '1569199726448814',
-            'ext': 'mp4',
-            'title': 'Pence MUST GO!',
-            'description': 'Vickie Gentry shared a memory.',
-            'timestamp': 1511548260,
-            'upload_date': '20171124',
-            'uploader': 'Vickie Gentry',
-            'uploader_id': 'pfbid0FuZhHCeWDAxWxEbr3yKPFaRstXvRxgsp9uCPG6GjD4J2AitB35NUAuJ4Q75KcjiDl',
-            'thumbnail': r're:^https?://.*',
-            'duration': 148.435,
-        },
    }, {
        'url': 'https://www.facebook.com/video.php?v=10204634152394104',
        'only_matching': True,
@ -627,11 +612,9 @@ class FacebookIE(InfoExtractor):
            nodes = variadic(traverse_obj(data, 'nodes', 'node') or [])
            attachments = traverse_obj(nodes, (
                ..., 'comet_sections', 'content', 'story', (None, 'attached_story'), 'attachments',
-                ..., ('styles', 'style_type_renderer', ('throwbackStyles', 'attachment_target_renderer')),
+                ..., ('styles', 'style_type_renderer'), 'attachment'), expected_type=dict) or []
-                'attachment', {dict}))
            for attachment in attachments:
-                ns = traverse_obj(attachment, ('all_subattachments', 'nodes', ..., {dict}),
+                ns = try_get(attachment, lambda x: x['all_subattachments']['nodes'], list) or []
-                                  ('target', 'attachments', ..., 'styles', 'attachment', {dict}))
                for n in ns:
                    parse_attachment(n)
                parse_attachment(attachment)
@ -654,7 +637,7 @@ class FacebookIE(InfoExtractor):
        if len(entries) > 1:
            return self.playlist_result(entries, video_id)

-        video_info = entries[0] if entries else {'id': video_id}
+        video_info = entries[0]
        webpage_info = extract_metadata(webpage)
        # honor precise duration in video info
        if video_info.get('duration'):
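A hedged sketch, separate from the diff, of what the widened traverse_obj path above does: it walks every node, looks in both the regular and the "throwback" style renderers, and collects only dict-typed attachment values. The toy data below is invented for illustration.

from yt_dlp.utils import traverse_obj

nodes = [{
    'comet_sections': {'content': {'story': {'attachments': [
        {'styles': {'attachment': {'media': {'id': '123'}}}},
        {'throwbackStyles': {'attachment_target_renderer': {'attachment': {'target': {}}}}},
    ]}}},
}]

attachments = traverse_obj(nodes, (
    ..., 'comet_sections', 'content', 'story', (None, 'attached_story'), 'attachments',
    ..., ('styles', 'style_type_renderer', ('throwbackStyles', 'attachment_target_renderer')),
    'attachment', {dict}))
# -> both attachment dicts are collected, regardless of which renderer wraps them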
@ -173,8 +173,8 @@ class FloatplaneIE(InfoExtractor):
                'formats': formats,
            })

-        uploader_url = format_field(
-            post_data, [('creator', 'urlname')], 'https://www.floatplane.com/channel/%s/home') or None
+        uploader_url = format_field(traverse_obj(
+            post_data, 'creator'), 'urlname', 'https://www.floatplane.com/channel/%s/home', default=None)
        channel_url = urljoin(f'{uploader_url}/', traverse_obj(post_data, ('channel', 'urlname')))

        post_info = {
@ -248,7 +248,7 @@ class FloatplaneChannelIE(InfoExtractor):
        for post in page_data or []:
            yield self.url_result(
                f'https://www.floatplane.com/post/{post["id"]}',
-                FloatplaneIE, id=post['id'], title=post.get('title'),
+                ie=FloatplaneIE, video_id=post['id'], video_title=post.get('title'),
                release_timestamp=parse_iso8601(post.get('releaseDate')))

    def _real_extract(self, url):
@ -264,5 +264,5 @@ class FloatplaneChannelIE(InfoExtractor):

        return self.playlist_result(OnDemandPagedList(functools.partial(
            self._fetch_page, display_id, creator_data['id'], channel_data.get('id')), self._PAGE_SIZE),
-            display_id, title=channel_data.get('title') or creator_data.get('title'),
+            display_id, playlist_title=channel_data.get('title') or creator_data.get('title'),
-            description=channel_data.get('about') or creator_data.get('about'))
+            playlist_description=channel_data.get('about') or creator_data.get('about'))
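A short sketch, not part of the diff, of the format_field pattern used above: look up a nested value and, only if it exists, interpolate it into a URL template; otherwise fall back to a default. The sample dict is invented.

from yt_dlp.utils import format_field

post_data = {'creator': {'urlname': 'linustechtips'}}

uploader_url = format_field(
    post_data, [('creator', 'urlname')], 'https://www.floatplane.com/channel/%s/home') or None
# -> 'https://www.floatplane.com/channel/linustechtips/home'

missing = format_field({}, [('creator', 'urlname')], 'https://www.floatplane.com/channel/%s/home') or None
# -> None (the default is an empty string, which `or None` normalises)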
@ -35,8 +35,8 @@ from ..utils import (
    unified_timestamp,
    unsmuggle_url,
    update_url_query,
-    url_or_none,
    urlhandle_detect_ext,
+    url_or_none,
    urljoin,
    variadic,
    xpath_attr,
@ -23,7 +23,7 @@ class IHeartRadioBaseIE(InfoExtractor):


class IHeartRadioIE(IHeartRadioBaseIE):
-    IE_NAME = 'iheartradio'
+    IENAME = 'iheartradio'
    _VALID_URL = r'(?:https?://(?:www\.)?iheart\.com/podcast/[^/]+/episode/(?P<display_id>[^/?&#]+)-|iheartradio:)(?P<id>\d+)'
    _TEST = {
        'url': 'https://www.iheart.com/podcast/105-behind-the-bastards-29236323/episode/part-one-alexander-lukashenko-the-dictator-70346499/?embed=true',
@ -1,243 +1,99 @@
-import functools
import re

from .common import InfoExtractor
from ..utils import (
-    ExtractorError,
-    determine_ext,
-    float_or_none,
    int_or_none,
    js_to_json,
    mimetype2ext,
-    parse_iso8601,
+    ExtractorError,
-    str_or_none,
-    strip_or_none,
-    traverse_obj,
-    url_or_none,
)


-class ImgurBaseIE(InfoExtractor):
+class ImgurIE(InfoExtractor):
-    _CLIENT_ID = '546c25a59c58ad7'
+    _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|(?:t(?:opic)?|r)/[^/]+)/)(?P<id>[a-zA-Z0-9]+)'

-    @classmethod
-    def _imgur_result(cls, item_id):
-        return cls.url_result(f'https://imgur.com/{item_id}', ImgurIE, item_id)

-    def _call_api(self, endpoint, video_id, **kwargs):
-        return self._download_json(
-            f'https://api.imgur.com/post/v1/{endpoint}/{video_id}?client_id={self._CLIENT_ID}&include=media,account',
-            video_id, **kwargs)

-    @staticmethod
-    def get_description(s):
-        if 'Discover the magic of the internet at Imgur' in s:
-            return None
-        return s or None


-class ImgurIE(ImgurBaseIE):
-    _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?!(?:a|gallery|t|topic|r)/)(?P<id>[a-zA-Z0-9]+)'

    _TESTS = [{
'url': 'https://imgur.com/A61SaA1',
|
'url': 'https://i.imgur.com/A61SaA1.gifv',
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'A61SaA1',
|
'id': 'A61SaA1',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'MRW gifv is up and running without any bugs',
|
'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
|
||||||
'timestamp': 1416446068,
|
|
||||||
'upload_date': '20141120',
|
|
||||||
'dislike_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'release_timestamp': 1416446068,
|
|
||||||
'release_date': '20141120',
|
|
||||||
'like_count': int,
|
|
||||||
'thumbnail': 'https://i.imgur.com/A61SaA1h.jpg',
|
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://i.imgur.com/A61SaA1.gifv',
|
'url': 'https://imgur.com/A61SaA1',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://i.imgur.com/crGpqCV.mp4',
|
'url': 'https://i.imgur.com/crGpqCV.mp4',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
|
# no title
|
||||||
'url': 'https://i.imgur.com/jxBXAMC.gifv',
|
'url': 'https://i.imgur.com/jxBXAMC.gifv',
|
||||||
'info_dict': {
|
'only_matching': True,
|
||||||
'id': 'jxBXAMC',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Fahaka puffer feeding',
|
|
||||||
'timestamp': 1533835503,
|
|
||||||
'upload_date': '20180809',
|
|
||||||
'release_date': '20180809',
|
|
||||||
'like_count': int,
|
|
||||||
'duration': 30.0,
|
|
||||||
'comment_count': int,
|
|
||||||
'release_timestamp': 1533835503,
|
|
||||||
'thumbnail': 'https://i.imgur.com/jxBXAMCh.jpg',
|
|
||||||
'dislike_count': int,
|
|
||||||
},
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
def _real_extract(self, url):
|
def _real_extract(self, url):
|
||||||
video_id = self._match_id(url)
|
video_id = self._match_id(url)
|
||||||
data = self._call_api('media', video_id)
|
|
||||||
if not traverse_obj(data, ('media', 0, (
|
|
||||||
('type', {lambda t: t == 'video' or None}),
|
|
||||||
('metadata', 'is_animated'))), get_all=False):
|
|
||||||
raise ExtractorError(f'{video_id} is not a video or animated image', expected=True)
|
|
||||||
webpage = self._download_webpage(
|
webpage = self._download_webpage(
|
||||||
f'https://i.imgur.com/{video_id}.gifv', video_id, fatal=False) or ''
|
'https://i.imgur.com/{id}.gifv'.format(id=video_id), video_id)
|
||||||
formats = []
|
|
||||||
|
|
||||||
media_fmt = traverse_obj(data, ('media', 0, {
|
width = int_or_none(self._og_search_property(
|
||||||
'url': ('url', {url_or_none}),
|
'video:width', webpage, default=None))
|
||||||
'ext': ('ext', {str}),
|
height = int_or_none(self._og_search_property(
|
||||||
'width': ('width', {int_or_none}),
|
'video:height', webpage, default=None))
|
||||||
'height': ('height', {int_or_none}),
|
|
||||||
'filesize': ('size', {int_or_none}),
|
|
||||||
'acodec': ('metadata', 'has_sound', {lambda b: None if b else 'none'}),
|
|
||||||
}))
|
|
||||||
media_url = media_fmt.get('url')
|
|
||||||
if media_url:
|
|
||||||
if not media_fmt.get('ext'):
|
|
||||||
media_fmt['ext'] = mimetype2ext(traverse_obj(
|
|
||||||
data, ('media', 0, 'mime_type'))) or determine_ext(media_url)
|
|
||||||
if traverse_obj(data, ('media', 0, 'type')) == 'image':
|
|
||||||
media_fmt['acodec'] = 'none'
|
|
||||||
media_fmt.setdefault('preference', -10)
|
|
||||||
formats.append(media_fmt)
|
|
||||||
|
|
||||||
video_elements = self._search_regex(
|
video_elements = self._search_regex(
|
||||||
r'(?s)<div class="video-elements">(.*?)</div>',
|
r'(?s)<div class="video-elements">(.*?)</div>',
|
||||||
webpage, 'video elements', default=None)
|
webpage, 'video elements', default=None)
|
||||||
|
if not video_elements:
|
||||||
|
raise ExtractorError(
|
||||||
|
'No sources found for video %s. Maybe an image?' % video_id,
|
||||||
|
expected=True)
|
||||||
|
|
||||||
if video_elements:
|
formats = []
|
||||||
def og_get_size(media_type):
|
for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
|
||||||
return {
|
formats.append({
|
||||||
p: int_or_none(self._og_search_property(f'{media_type}:{p}', webpage, default=None))
|
'format_id': m.group('type').partition('/')[2],
|
||||||
for p in ('width', 'height')
|
'url': self._proto_relative_url(m.group('src')),
|
||||||
}
|
'ext': mimetype2ext(m.group('type')),
|
||||||
|
'width': width,
|
||||||
size = og_get_size('video')
|
'height': height,
|
||||||
if not any(size.values()):
|
'http_headers': {
|
||||||
size = og_get_size('image')
|
'User-Agent': 'yt-dlp (like wget)',
|
||||||
|
},
|
||||||
formats = traverse_obj(
|
|
||||||
re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements),
|
|
||||||
(..., {
|
|
||||||
'format_id': ('type', {lambda s: s.partition('/')[2]}),
|
|
||||||
'url': ('src', {self._proto_relative_url}),
|
|
||||||
'ext': ('type', {mimetype2ext}),
|
|
||||||
}))
|
|
||||||
for f in formats:
|
|
||||||
f.update(size)
|
|
||||||
|
|
||||||
# We can get the original gif format from the webpage as well
|
|
||||||
gif_json = traverse_obj(self._search_json(
|
|
||||||
r'var\s+videoItem\s*=', webpage, 'GIF info', video_id,
|
|
||||||
transform_source=js_to_json, fatal=False), {
|
|
||||||
'url': ('gifUrl', {self._proto_relative_url}),
|
|
||||||
'filesize': ('size', {int_or_none}),
|
|
||||||
})
|
})
|
||||||
|
|
||||||
|
gif_json = self._search_regex(
|
||||||
|
r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
|
||||||
|
webpage, 'GIF code', fatal=False)
|
||||||
if gif_json:
|
if gif_json:
|
||||||
gif_json.update(size)
|
gifd = self._parse_json(
|
||||||
gif_json.update({
|
gif_json, video_id, transform_source=js_to_json)
|
||||||
|
formats.append({
|
||||||
'format_id': 'gif',
|
'format_id': 'gif',
|
||||||
'preference': -10, # gifs < videos
|
'preference': -10, # gifs are worse than videos
|
||||||
|
'width': width,
|
||||||
|
'height': height,
|
||||||
'ext': 'gif',
|
'ext': 'gif',
|
||||||
'acodec': 'none',
|
'acodec': 'none',
|
||||||
'vcodec': 'gif',
|
'vcodec': 'gif',
|
||||||
'container': 'gif',
|
'container': 'gif',
|
||||||
|
'url': self._proto_relative_url(gifd['gifUrl']),
|
||||||
|
'filesize': gifd.get('size'),
|
||||||
|
'http_headers': {
|
||||||
|
'User-Agent': 'yt-dlp (like wget)',
|
||||||
|
},
|
||||||
})
|
})
|
||||||
formats.append(gif_json)
|
|
||||||
|
|
||||||
search = functools.partial(self._html_search_meta, html=webpage, default=None)
|
|
||||||
|
|
||||||
twitter_fmt = {
|
|
||||||
'format_id': 'twitter',
|
|
||||||
'url': url_or_none(search('twitter:player:stream')),
|
|
||||||
'ext': mimetype2ext(search('twitter:player:stream:content_type')),
|
|
||||||
'width': int_or_none(search('twitter:width')),
|
|
||||||
'height': int_or_none(search('twitter:height')),
|
|
||||||
}
|
|
||||||
if twitter_fmt['url']:
|
|
||||||
formats.append(twitter_fmt)
|
|
||||||
|
|
||||||
if not formats:
|
|
||||||
self.raise_no_formats(
|
|
||||||
f'No sources found for video {video_id}. Maybe a plain image?', expected=True)
|
|
||||||
self._remove_duplicate_formats(formats)
|
|
||||||
|
|
||||||
return {
|
return {
|
||||||
'title': self._og_search_title(webpage, default=None),
|
|
||||||
'description': self.get_description(self._og_search_description(webpage, default='')),
|
|
||||||
**traverse_obj(data, {
|
|
||||||
'uploader_id': ('account_id', {lambda a: str(a) if int_or_none(a) else None}),
|
|
||||||
'uploader': ('account', 'username', {lambda x: strip_or_none(x) or None}),
|
|
||||||
'uploader_url': ('account', 'avatar_url', {url_or_none}),
|
|
||||||
'like_count': ('upvote_count', {int_or_none}),
|
|
||||||
'dislike_count': ('downvote_count', {int_or_none}),
|
|
||||||
'comment_count': ('comment_count', {int_or_none}),
|
|
||||||
'age_limit': ('is_mature', {lambda x: 18 if x else None}),
|
|
||||||
'timestamp': (('updated_at', 'created_at'), {parse_iso8601}),
|
|
||||||
'release_timestamp': ('created_at', {parse_iso8601}),
|
|
||||||
}, get_all=False),
|
|
||||||
**traverse_obj(data, ('media', 0, 'metadata', {
|
|
||||||
'title': ('title', {lambda x: strip_or_none(x) or None}),
|
|
||||||
'description': ('description', {self.get_description}),
|
|
||||||
'duration': ('duration', {float_or_none}),
|
|
||||||
'timestamp': (('updated_at', 'created_at'), {parse_iso8601}),
|
|
||||||
'release_timestamp': ('created_at', {parse_iso8601}),
|
|
||||||
}), get_all=False),
|
|
||||||
'id': video_id,
|
'id': video_id,
|
||||||
'formats': formats,
|
'formats': formats,
|
||||||
'thumbnail': url_or_none(search('thumbnailUrl')),
|
'title': self._og_search_title(webpage, default=video_id),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
class ImgurGalleryBaseIE(ImgurBaseIE):
|
class ImgurGalleryIE(InfoExtractor):
|
||||||
_GALLERY = True
|
|
||||||
|
|
||||||
def _real_extract(self, url):
|
|
||||||
gallery_id = self._match_id(url)
|
|
||||||
|
|
||||||
data = self._call_api('albums', gallery_id, fatal=False, expected_status=404)
|
|
||||||
|
|
||||||
info = traverse_obj(data, {
|
|
||||||
'title': ('title', {lambda x: strip_or_none(x) or None}),
|
|
||||||
'description': ('description', {self.get_description}),
|
|
||||||
})
|
|
||||||
|
|
||||||
if traverse_obj(data, 'is_album'):
|
|
||||||
|
|
||||||
def yield_media_ids():
|
|
||||||
for m_id in traverse_obj(data, (
|
|
||||||
'media', lambda _, v: v.get('type') == 'video' or v['metadata']['is_animated'],
|
|
||||||
'id', {lambda x: str_or_none(x) or None})):
|
|
||||||
yield m_id
|
|
||||||
|
|
||||||
# if a gallery with exactly one video, apply album metadata to video
|
|
||||||
media_id = (
|
|
||||||
self._GALLERY
|
|
||||||
and traverse_obj(data, ('image_count', {lambda c: c == 1}))
|
|
||||||
and next(yield_media_ids(), None))
|
|
||||||
|
|
||||||
if not media_id:
|
|
||||||
result = self.playlist_result(
|
|
||||||
map(self._imgur_result, yield_media_ids()), gallery_id)
|
|
||||||
result.update(info)
|
|
||||||
return result
|
|
||||||
gallery_id = media_id
|
|
||||||
|
|
||||||
result = self._imgur_result(gallery_id)
|
|
||||||
info['_type'] = 'url_transparent'
|
|
||||||
result.update(info)
|
|
||||||
return result
|
|
||||||
|
|
||||||
|
|
||||||
class ImgurGalleryIE(ImgurGalleryBaseIE):
|
|
||||||
IE_NAME = 'imgur:gallery'
|
IE_NAME = 'imgur:gallery'
|
||||||
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/?#]+)/(?P<id>[a-zA-Z0-9]+)'
|
_VALID_URL = r'https?://(?:i\.)?imgur\.com/(?:gallery|(?:t(?:opic)?|r)/[^/]+)/(?P<id>[a-zA-Z0-9]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://imgur.com/gallery/Q95ko',
|
'url': 'http://imgur.com/gallery/Q95ko',
|
||||||
|
@ -246,121 +102,49 @@ class ImgurGalleryIE(ImgurGalleryBaseIE):
|
||||||
'title': 'Adding faces make every GIF better',
|
'title': 'Adding faces make every GIF better',
|
||||||
},
|
},
|
||||||
'playlist_count': 25,
|
'playlist_count': 25,
|
||||||
'skip': 'Zoinks! You\'ve taken a wrong turn.',
|
|
||||||
}, {
|
}, {
|
||||||
# TODO: static images - replace with animated/video gallery
|
|
||||||
'url': 'http://imgur.com/topic/Aww/ll5Vk',
|
'url': 'http://imgur.com/topic/Aww/ll5Vk',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': 'https://imgur.com/gallery/YcAQlkx',
|
'url': 'https://imgur.com/gallery/YcAQlkx',
|
||||||
'add_ies': ['Imgur'],
|
|
||||||
'info_dict': {
|
'info_dict': {
|
||||||
'id': 'YcAQlkx',
|
'id': 'YcAQlkx',
|
||||||
'ext': 'mp4',
|
'ext': 'mp4',
|
||||||
'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
|
'title': 'Classic Steve Carell gif...cracks me up everytime....damn the repost downvotes....',
|
||||||
'timestamp': 1358554297,
|
}
|
||||||
'upload_date': '20130119',
|
|
||||||
'uploader_id': '1648642',
|
|
||||||
'uploader': 'wittyusernamehere',
|
|
||||||
'release_timestamp': 1358554297,
|
|
||||||
'thumbnail': 'https://i.imgur.com/YcAQlkxh.jpg',
|
|
||||||
'release_date': '20130119',
|
|
||||||
'uploader_url': 'https://i.imgur.com/u3R4I2S_d.png?maxwidth=290&fidelity=grand',
|
|
||||||
'comment_count': int,
|
|
||||||
'dislike_count': int,
|
|
||||||
'like_count': int,
|
|
||||||
},
|
|
||||||
}, {
|
}, {
|
||||||
# TODO: static image - replace with animated/video gallery
|
|
||||||
'url': 'http://imgur.com/topic/Funny/N8rOudd',
|
'url': 'http://imgur.com/topic/Funny/N8rOudd',
|
||||||
'only_matching': True,
|
'only_matching': True,
|
||||||
}, {
|
}, {
|
||||||
'url': 'http://imgur.com/r/aww/VQcQPhM',
|
'url': 'http://imgur.com/r/aww/VQcQPhM',
|
||||||
'add_ies': ['Imgur'],
|
'only_matching': True,
|
||||||
'info_dict': {
|
|
||||||
'id': 'VQcQPhM',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'The boss is here',
|
|
||||||
'timestamp': 1476494751,
|
|
||||||
'upload_date': '20161015',
|
|
||||||
'uploader_id': '19138530',
|
|
||||||
'uploader': 'thematrixcam',
|
|
||||||
'comment_count': int,
|
|
||||||
'dislike_count': int,
|
|
||||||
'uploader_url': 'https://i.imgur.com/qCjr5Pi_d.png?maxwidth=290&fidelity=grand',
|
|
||||||
'release_timestamp': 1476494751,
|
|
||||||
'like_count': int,
|
|
||||||
'release_date': '20161015',
|
|
||||||
'thumbnail': 'https://i.imgur.com/VQcQPhMh.jpg',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
# from https://github.com/ytdl-org/youtube-dl/pull/16674
|
|
||||||
{
|
|
||||||
'url': 'https://imgur.com/t/unmuted/6lAn9VQ',
|
|
||||||
'info_dict': {
|
|
||||||
'id': '6lAn9VQ',
|
|
||||||
'title': 'Penguins !',
|
|
||||||
},
|
|
||||||
'playlist_count': 3,
|
|
||||||
}, {
|
|
||||||
'url': 'https://imgur.com/t/unmuted/kx2uD3C',
|
|
||||||
'add_ies': ['Imgur'],
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'ZVMv45i',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 'Intruder',
|
|
||||||
'timestamp': 1528129683,
|
|
||||||
'upload_date': '20180604',
|
|
||||||
'release_timestamp': 1528129683,
|
|
||||||
'release_date': '20180604',
|
|
||||||
'like_count': int,
|
|
||||||
'dislike_count': int,
|
|
||||||
'comment_count': int,
|
|
||||||
'duration': 30.03,
|
|
||||||
'thumbnail': 'https://i.imgur.com/ZVMv45ih.jpg',
|
|
||||||
},
|
|
||||||
}, {
|
|
||||||
'url': 'https://imgur.com/t/unmuted/wXSK0YH',
|
|
||||||
'add_ies': ['Imgur'],
|
|
||||||
'info_dict': {
|
|
||||||
'id': 'JCAP4io',
|
|
||||||
'ext': 'mp4',
|
|
||||||
'title': 're:I got the blues$',
|
|
||||||
'description': 'Luka’s vocal stylings.\n\nFP edit: don’t encourage me. I’ll never stop posting Luka and friends.',
|
|
||||||
'timestamp': 1527809525,
|
|
||||||
'upload_date': '20180531',
|
|
||||||
'like_count': int,
|
|
||||||
'dislike_count': int,
|
|
||||||
'duration': 30.03,
|
|
||||||
'comment_count': int,
|
|
||||||
'release_timestamp': 1527809525,
|
|
||||||
'thumbnail': 'https://i.imgur.com/JCAP4ioh.jpg',
|
|
||||||
'release_date': '20180531',
|
|
||||||
},
|
|
||||||
}]
|
}]
|
||||||
|
|
||||||
|
+    def _real_extract(self, url):
+        gallery_id = self._match_id(url)

-class ImgurAlbumIE(ImgurGalleryBaseIE):
+        data = self._download_json(
+            'https://imgur.com/gallery/%s.json' % gallery_id,
+            gallery_id)['data']['image']

+        if data.get('is_album'):
+            entries = [
+                self.url_result('http://imgur.com/%s' % image['hash'], ImgurIE.ie_key(), image['hash'])
+                for image in data['album_images']['images'] if image.get('hash')]
+            return self.playlist_result(entries, gallery_id, data.get('title'), data.get('description'))

+        return self.url_result('http://imgur.com/%s' % gallery_id, ImgurIE.ie_key(), gallery_id)


+class ImgurAlbumIE(ImgurGalleryIE):  # XXX: Do not subclass from concrete IE
    IE_NAME = 'imgur:album'
    _VALID_URL = r'https?://(?:i\.)?imgur\.com/a/(?P<id>[a-zA-Z0-9]+)'
-    _GALLERY = False
    _TESTS = [{
-        # TODO: only static images - replace with animated/video gallery
        'url': 'http://imgur.com/a/j6Orj',
-        'only_matching': True,
-    },
-    # from https://github.com/ytdl-org/youtube-dl/pull/21693
-    {
-        'url': 'https://imgur.com/a/iX265HX',
        'info_dict': {
-            'id': 'iX265HX',
+            'id': 'j6Orj',
-            'title': 'enen-no-shouboutai'
+            'title': 'A Literary Analysis of "Star Wars: The Force Awakens"',
        },
-        'playlist_count': 2,
+        'playlist_count': 12,
-    }, {
-        'url': 'https://imgur.com/a/8pih2Ed',
-        'info_dict': {
-            'id': '8pih2Ed'
-        },
-        'playlist_mincount': 1,
    }]
@ -10,7 +10,6 @@ from ..utils import (
    ExtractorError,
    decode_base_n,
    encode_base_n,
-    filter_dict,
    float_or_none,
    format_field,
    get_element_by_attribute,
@ -704,31 +703,28 @@ class InstagramStoryIE(InstagramBaseIE):
        user_info = self._search_json(r'"user":', story_info, 'user info', story_id, fatal=False)
        if not user_info:
            self.raise_login_required('This content is unreachable')
+        user_id = user_info.get('id')

-        user_id = traverse_obj(user_info, 'pk', 'id', expected_type=str)
        story_info_url = user_id if username != 'highlights' else f'highlight:{story_id}'
-        if not story_info_url:  # user id is only mandatory for non-highlights
-            raise ExtractorError('Unable to extract user id')

        videos = traverse_obj(self._download_json(
            f'{self._API_BASE_URL}/feed/reels_media/?reel_ids={story_info_url}',
            story_id, errnote=False, fatal=False, headers=self._API_HEADERS), 'reels')
        if not videos:
            self.raise_login_required('You need to log in to access this content')

-        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (user_id, 'user', 'full_name'))
+        full_name = traverse_obj(videos, (f'highlight:{story_id}', 'user', 'full_name'), (str(user_id), 'user', 'full_name'))
        story_title = traverse_obj(videos, (f'highlight:{story_id}', 'title'))
        if not story_title:
            story_title = f'Story by {username}'

-        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (user_id, 'items'))
+        highlights = traverse_obj(videos, (f'highlight:{story_id}', 'items'), (str(user_id), 'items'))
        info_data = []
        for highlight in highlights:
            highlight_data = self._extract_product(highlight)
            if highlight_data.get('formats'):
                info_data.append({
+                    **highlight_data,
                    'uploader': full_name,
                    'uploader_id': user_id,
-                    **filter_dict(highlight_data),
                })
        return self.playlist_result(info_data, playlist_id=story_id, playlist_title=story_title)
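A hedged sketch, not tied to the diff, of the filter_dict usage above: dropping None-valued keys before merging, so a partial per-item dict cannot clobber fields that were already set. The sample data is invented.

from yt_dlp.utils import filter_dict

base = {'uploader': 'Some User', 'uploader_id': '42'}
highlight_data = {'id': '987', 'title': 'Story item', 'uploader': None}

merged = {**base, **filter_dict(highlight_data)}
# -> {'uploader': 'Some User', 'uploader_id': '42', 'id': '987', 'title': 'Story item'}
# without filter_dict, the trailing 'uploader': None would overwrite the real uploader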
@ -12,7 +12,7 @@ from ..utils import (


class KinjaEmbedIE(InfoExtractor):
-    IE_NAME = 'kinja:embed'
+    IENAME = 'kinja:embed'
    _DOMAIN_REGEX = r'''(?:[^.]+\.)?
        (?:
            avclub|
@ -6,7 +6,6 @@ from ..utils import (
    int_or_none,
    smuggle_url,
    traverse_obj,
-    try_call,
    unsmuggle_url,
)

@ -97,22 +96,13 @@ class LiTVIE(InfoExtractor):
            r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);',
            webpage, 'video data', default='{}'), video_id)
        if not video_data:
-            payload = {'assetId': program_info['assetId']}
+            payload = {
-            puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
+                'assetId': program_info['assetId'],
-            if puid:
-                payload.update({
-                    'type': 'auth',
-                    'puid': puid,
-                })
-                endpoint = 'getUrl'
-            else:
-                payload.update({
                'watchDevices': program_info['watchDevices'],
                'contentType': program_info['contentType'],
-                })
+            }
-                endpoint = 'getMainUrlNoAuth'
            video_data = self._download_json(
-                f'https://www.litv.tv/vod/ajax/{endpoint}', video_id,
+                'https://www.litv.tv/vod/ajax/getMainUrlNoAuth', video_id,
                data=json.dumps(payload).encode('utf-8'),
                headers={'Content-Type': 'application/json'})
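A minimal sketch, outside the diff, of the branching above: if a PUID cookie is present the request goes to the authenticated endpoint, otherwise to the no-auth one. Cookie access is shown with a plain dict to keep the example self-contained; in the extractor it comes from self._get_cookies().

import json

def build_litv_request(program_info, cookies):
    payload = {'assetId': program_info['assetId']}
    puid = cookies.get('PUID')
    if puid:
        payload.update({'type': 'auth', 'puid': puid})
        endpoint = 'getUrl'
    else:
        payload.update({
            'watchDevices': program_info['watchDevices'],
            'contentType': program_info['contentType'],
        })
        endpoint = 'getMainUrlNoAuth'
    return f'https://www.litv.tv/vod/ajax/{endpoint}', json.dumps(payload).encode()

# build_litv_request({'assetId': 'a1', 'watchDevices': '...', 'contentType': 'vod'}, {})
# -> ('https://www.litv.tv/vod/ajax/getMainUrlNoAuth', b'{"assetId": "a1", ...}')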
@ -97,7 +97,7 @@ class NBAWatchBaseIE(NBACVPBaseIE):
|
||||||
|
|
||||||
|
|
||||||
class NBAWatchEmbedIE(NBAWatchBaseIE):
|
class NBAWatchEmbedIE(NBAWatchBaseIE):
|
||||||
IE_NAME = 'nba:watch:embed'
|
IENAME = 'nba:watch:embed'
|
||||||
_VALID_URL = NBAWatchBaseIE._VALID_URL_BASE + r'embed\?.*?\bid=(?P<id>\d+)'
|
_VALID_URL = NBAWatchBaseIE._VALID_URL_BASE + r'embed\?.*?\bid=(?P<id>\d+)'
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
'url': 'http://watch.nba.com/embed?id=659395',
|
'url': 'http://watch.nba.com/embed?id=659395',
|
||||||
|
@@ -339,7 +339,7 @@ class NBABaseIE(NBACVPBaseIE):
 
 
 class NBAEmbedIE(NBABaseIE):
-    IE_NAME = 'nba:embed'
+    IENAME = 'nba:embed'
     _VALID_URL = r'https?://secure\.nba\.com/assets/amp/include/video/(?:topI|i)frame\.html\?.*?\bcontentId=(?P<id>[^?#&]+)'
     _TESTS = [{
         'url': 'https://secure.nba.com/assets/amp/include/video/topIframe.html?contentId=teams/bulls/2020/12/04/3478774/1607105587854-20201204_SCHEDULE_RELEASE_FINAL_DRUPAL-3478774&team=bulls&adFree=false&profile=71&videoPlayerName=TAMPCVP&baseUrl=&videoAdsection=nba.com_mobile_web_teamsites_chicagobulls&Env=',
@@ -361,7 +361,7 @@ class NBAEmbedIE(NBABaseIE):
 
 
 class NBAIE(NBABaseIE):
-    IE_NAME = 'nba'
+    IENAME = 'nba'
     _VALID_URL = NBABaseIE._VALID_URL_BASE + '(?!%s)video/(?P<id>(?:[^/]+/)*[^/?#&]+)' % NBABaseIE._CHANNEL_PATH_REGEX
     _TESTS = [{
         'url': 'https://www.nba.com/bulls/video/teams/bulls/2020/12/04/3478774/1607105587854-20201204schedulereleasefinaldrupal-3478774',
@@ -388,7 +388,7 @@ class NBAIE(NBABaseIE):
 
 
 class NBAChannelIE(NBABaseIE):
-    IE_NAME = 'nba:channel'
+    IENAME = 'nba:channel'
     _VALID_URL = NBABaseIE._VALID_URL_BASE + '(?:%s)/(?P<id>[^/?#&]+)' % NBABaseIE._CHANNEL_PATH_REGEX
     _TESTS = [{
         'url': 'https://www.nba.com/blazers/video/channel/summer_league',
@@ -536,7 +536,7 @@ class PanoptoListIE(PanoptoBaseIE):
         }
 
         response = self._call_api(
-            base_url, '/Services/Data.svc/GetSessions', f'{display_id} page {page + 1}',
+            base_url, '/Services/Data.svc/GetSessions', f'{display_id} page {page+1}',
             data={'queryParameters': params}, fatal=False)
 
         for result in get_first(response, 'Results', default=[]):
@@ -264,7 +264,7 @@ class RadioFranceLiveIE(RadioFranceBaseIE):
         }
 
 
-class RadioFrancePlaylistBaseIE(RadioFranceBaseIE):
+class RadioFrancePlaylistBase(RadioFranceBaseIE):
     """Subclasses must set _METADATA_KEY"""
 
     def _call_api(self, content_id, cursor, page_num):
|
||||||
})})
|
})})
|
||||||
|
|
||||||
|
|
||||||
class RadioFrancePodcastIE(RadioFrancePlaylistBaseIE):
|
class RadioFrancePodcastIE(RadioFrancePlaylistBase):
|
||||||
_VALID_URL = rf'''(?x)
|
_VALID_URL = rf'''(?x)
|
||||||
{RadioFranceBaseIE._VALID_URL_BASE}
|
{RadioFranceBaseIE._VALID_URL_BASE}
|
||||||
/(?:{RadioFranceBaseIE._STATIONS_RE})
|
/(?:{RadioFranceBaseIE._STATIONS_RE})
|
||||||
|
@ -369,7 +369,7 @@ class RadioFrancePodcastIE(RadioFrancePlaylistBaseIE):
|
||||||
note=f'Downloading page {page_num}', query={'pageCursor': cursor})
|
note=f'Downloading page {page_num}', query={'pageCursor': cursor})
|
||||||
|
|
||||||
|
|
||||||
class RadioFranceProfileIE(RadioFrancePlaylistBaseIE):
|
class RadioFranceProfileIE(RadioFrancePlaylistBase):
|
||||||
_VALID_URL = rf'{RadioFranceBaseIE._VALID_URL_BASE}/personnes/(?P<id>[\w-]+)'
|
_VALID_URL = rf'{RadioFranceBaseIE._VALID_URL_BASE}/personnes/(?P<id>[\w-]+)'
|
||||||
|
|
||||||
_TESTS = [{
|
_TESTS = [{
|
||||||
|
|
|
@@ -1,135 +0,0 @@
-from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-    determine_ext,
-    js_to_json,
-    traverse_obj,
-    update_url_query,
-    url_or_none,
-)
-
-
-class RudoVideoIE(InfoExtractor):
-    _VALID_URL = r'https?://rudo\.video/(?P<type>vod|podcast|live)/(?P<id>[^/?&#]+)'
-    _EMBED_REGEX = [r'<iframe[^>]+src=[\'"](?P<url>(?:https?:)//rudo\.video/(?:vod|podcast|live)/[^\'"]+)']
-    _TESTS = [{
-        'url': 'https://rudo.video/podcast/cz2wrUy8l0o',
-        'md5': '28ed82b477708dc5e12e072da2449221',
-        'info_dict': {
-            'id': 'cz2wrUy8l0o',
-            'title': 'Diego Cabot',
-            'ext': 'mp4',
-            'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$',
-        },
-    }, {
-        'url': 'https://rudo.video/podcast/bQkt07',
-        'md5': '36b22a9863de0f47f00fc7532a32a898',
-        'info_dict': {
-            'id': 'bQkt07',
-            'title': 'Tubular Bells',
-            'ext': 'mp4',
-            'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$',
-        },
-    }, {
-        'url': 'https://rudo.video/podcast/b42ZUznHX0',
-        'md5': 'b91c70d832938871367f8ad10c895821',
-        'info_dict': {
-            'id': 'b42ZUznHX0',
-            'title': 'Columna Ruperto Concha',
-            'ext': 'mp3',
-            'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$',
-        },
-    }, {
-        'url': 'https://rudo.video/vod/bN5AaJ',
-        'md5': '01324a329227e2591530ecb4f555c881',
-        'info_dict': {
-            'id': 'bN5AaJ',
-            'title': 'Ucrania 19.03',
-            'creator': 'La Tercera',
-            'ext': 'mp4',
-            'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$',
-        },
-    }, {
-        'url': 'https://rudo.video/live/bbtv',
-        'info_dict': {
-            'id': 'bbtv',
-            'ext': 'mp4',
-            'creator': 'BioBioTV',
-            'live_status': 'is_live',
-            'title': r're:^LIVE BBTV\s\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}$',
-            'thumbnail': r're:^(?:https?:)?//.*\.(png|jpg)$',
-        },
-    }, {
-        'url': 'https://rudo.video/live/c13',
-        'info_dict': {
-            'id': 'c13',
-            'title': 'CANAL13',
-            'ext': 'mp4',
-        },
-        'skip': 'Geo-restricted to Chile',
-    }, {
-        'url': 'https://rudo.video/live/t13-13cl',
-        'info_dict': {
-            'id': 't13-13cl',
-            'title': 'T13',
-            'ext': 'mp4',
-        },
-        'skip': 'Geo-restricted to Chile',
-    }]
-
-    def _real_extract(self, url):
-        video_id, type_ = self._match_valid_url(url).group('id', 'type')
-        is_live = type_ == 'live'
-
-        webpage = self._download_webpage(url, video_id)
-        if 'Streaming is not available in your area' in webpage:
-            self.raise_geo_restricted()
-
-        media_url = (
-            self._search_regex(
-                r'var\s+streamURL\s*=\s*[\'"]([^?\'"]+)', webpage, 'stream url', default=None)
-            # Source URL must be used only if streamURL is unavailable
-            or self._search_regex(
-                r'<source[^>]+src=[\'"]([^\'"]+)', webpage, 'source url', default=None))
-        if not media_url:
-            youtube_url = self._search_regex(r'file:\s*[\'"]((?:https?:)//(?:www\.)?youtube\.com[^\'"]+)',
-                                             webpage, 'youtube url', default=None)
-            if youtube_url:
-                return self.url_result(youtube_url, 'Youtube')
-            raise ExtractorError('Unable to extract stream url')
-
-        token_array = self._search_json(
-            r'<script>var\s+_\$_[a-zA-Z0-9]+\s*=', webpage, 'access token array', video_id,
-            contains_pattern=r'\[(?s:.+)\]', default=None, transform_source=js_to_json)
-        if token_array:
-            token_url = traverse_obj(token_array, (..., {url_or_none}), get_all=False)
-            if not token_url:
-                raise ExtractorError('Invalid access token array')
-            access_token = self._download_json(
-                token_url, video_id, note='Downloading access token')['data']['authToken']
-            media_url = update_url_query(media_url, {'auth-token': access_token})
-
-        ext = determine_ext(media_url)
-        if ext == 'm3u8':
-            formats = self._extract_m3u8_formats(media_url, video_id, live=is_live)
-        elif ext == 'mp3':
-            formats = [{
-                'url': media_url,
-                'vcodec': 'none',
-            }]
-        else:
-            formats = [{'url': media_url}]
-
-        return {
-            'id': video_id,
-            'title': (self._search_regex(r'var\s+titleVideo\s*=\s*[\'"]([^\'"]+)',
-                                         webpage, 'title', default=None)
-                      or self._og_search_title(webpage)),
-            'creator': self._search_regex(r'var\s+videoAuthor\s*=\s*[\'"]([^?\'"]+)',
-                                          webpage, 'videoAuthor', default=None),
-            'thumbnail': (self._search_regex(r'var\s+posterIMG\s*=\s*[\'"]([^?\'"]+)',
-                                             webpage, 'thumbnail', default=None)
-                          or self._og_search_thumbnail(webpage)),
-            'formats': formats,
-            'is_live': is_live,
-        }
@@ -1,133 +1,52 @@
-from __future__ import annotations
-
-import json
-from functools import partial
-from textwrap import dedent
-
 from .common import InfoExtractor
-from ..utils import ExtractorError, format_field, int_or_none, parse_iso8601
-from ..utils.traversal import traverse_obj
-
-
-def _fmt_url(url):
-    return partial(format_field, template=url, default=None)
 
 
 class TelewebionIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?telewebion\.com/episode/(?P<id>(?:0x[a-fA-F\d]+|\d+))'
-    _TESTS = [{
-        'url': 'http://www.telewebion.com/episode/0x1b3139c/',
+    _VALID_URL = r'https?://(?:www\.)?telewebion\.com/#!/episode/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://www.telewebion.com/#!/episode/1263668/',
         'info_dict': {
-            'id': '0x1b3139c',
+            'id': '1263668',
             'ext': 'mp4',
-            'title': 'قرعهکشی لیگ قهرمانان اروپا',
-            'series': '+ فوتبال',
-            'series_id': '0x1b2505c',
-            'channel': 'شبکه 3',
-            'channel_id': '0x1b1a761',
-            'channel_url': 'https://telewebion.com/live/tv3',
-            'timestamp': 1425522414,
-            'upload_date': '20150305',
-            'release_timestamp': 1425517020,
-            'release_date': '20150305',
-            'duration': 420,
+            'title': 'قرعه\u200cکشی لیگ قهرمانان اروپا',
+            'thumbnail': r're:^https?://.*\.jpg',
             'view_count': int,
-            'tags': ['ورزشی', 'لیگ اروپا', 'اروپا'],
-            'thumbnail': 'https://static.telewebion.com/episodeImages/YjFhM2MxMDBkMDNiZTU0MjE5YjQ3ZDY0Mjk1ZDE0ZmUwZWU3OTE3OWRmMDAyODNhNzNkNjdmMWMzMWIyM2NmMA/default',
         },
-        'skip_download': 'm3u8',
-    }, {
-        'url': 'https://telewebion.com/episode/162175536',
-        'info_dict': {
-            'id': '0x9aa9a30',
-            'ext': 'mp4',
-            'title': 'کارما یعنی این !',
-            'series': 'پاورقی',
-            'series_id': '0x29a7426',
-            'channel': 'شبکه 2',
-            'channel_id': '0x1b1a719',
-            'channel_url': 'https://telewebion.com/live/tv2',
-            'timestamp': 1699979968,
-            'upload_date': '20231114',
-            'release_timestamp': 1699991638,
-            'release_date': '20231114',
-            'duration': 78,
-            'view_count': int,
-            'tags': ['کلیپ های منتخب', ' کلیپ طنز ', ' کلیپ سیاست ', 'پاورقی', 'ویژه فلسطین'],
-            'thumbnail': 'https://static.telewebion.com/episodeImages/871e9455-7567-49a5-9648-34c22c197f5f/default',
+        'params': {
+            # m3u8 download
+            'skip_download': True,
         },
-        'skip_download': 'm3u8',
-    }]
+    }
 
-    def _call_graphql_api(
-            self, operation, video_id, query,
-            variables: dict[str, tuple[str, str]] | None = None,
-            note='Downloading GraphQL JSON metadata',
-    ):
-        parameters = ''
-        if variables:
-            parameters = ', '.join(f'${name}: {type_}' for name, (type_, _) in variables.items())
-            parameters = f'({parameters})'
-
-        result = self._download_json('https://graph.telewebion.com/graphql', video_id, note, data=json.dumps({
-            'operationName': operation,
-            'query': f'query {operation}{parameters} @cacheControl(maxAge: 60) {{{query}\n}}\n',
-            'variables': {name: value for name, (_, value) in (variables or {}).items()}
-        }, separators=(',', ':')).encode(), headers={
-            'Content-Type': 'application/json',
-            'Accept': 'application/json',
-        })
-        if not result or traverse_obj(result, 'errors'):
-            message = ', '.join(traverse_obj(result, ('errors', ..., 'message', {str})))
-            raise ExtractorError(message or 'Unknown GraphQL API error')
-
-        return result['data']
-
     def _real_extract(self, url):
         video_id = self._match_id(url)
-        if not video_id.startswith('0x'):
-            video_id = hex(int(video_id))
 
-        episode_data = self._call_graphql_api('getEpisodeDetail', video_id, dedent('''
-            queryEpisode(filter: {EpisodeID: $EpisodeId}, first: 1) {
-              title
-              program {
-                ProgramID
-                title
-              }
-              image
-              view_count
-              duration
-              started_at
-              created_at
-              channel {
-                ChannelID
-                name
-                descriptor
-              }
-              tags {
-                name
-              }
-            }
-        '''), {'EpisodeId': ('[ID!]', video_id)})
+        secure_token = self._download_webpage(
+            'http://m.s2.telewebion.com/op/op?action=getSecurityToken', video_id)
+        episode_details = self._download_json(
+            'http://m.s2.telewebion.com/op/op', video_id,
+            query={'action': 'getEpisodeDetails', 'episode_id': video_id})
 
-        info_dict = traverse_obj(episode_data, ('queryEpisode', 0, {
-            'title': ('title', {str}),
-            'view_count': ('view_count', {int_or_none}),
-            'duration': ('duration', {int_or_none}),
-            'tags': ('tags', ..., 'name', {str}),
-            'release_timestamp': ('started_at', {parse_iso8601}),
-            'timestamp': ('created_at', {parse_iso8601}),
-            'series': ('program', 'title', {str}),
-            'series_id': ('program', 'ProgramID', {str}),
-            'channel': ('channel', 'name', {str}),
-            'channel_id': ('channel', 'ChannelID', {str}),
-            'channel_url': ('channel', 'descriptor', {_fmt_url('https://telewebion.com/live/%s')}),
-            'thumbnail': ('image', {_fmt_url('https://static.telewebion.com/episodeImages/%s/default')}),
-            'formats': (
-                'channel', 'descriptor', {str},
-                {_fmt_url(f'https://cdna.telewebion.com/%s/episode/{video_id}/playlist.m3u8')},
-                {partial(self._extract_m3u8_formats, video_id=video_id, ext='mp4', m3u8_id='hls')}),
-        }))
-        info_dict['id'] = video_id
-        return info_dict
+        m3u8_url = 'http://m.s1.telewebion.com/smil/%s.m3u8?filepath=%s&m3u8=1&secure_token=%s' % (
+            video_id, episode_details['file_path'], secure_token)
+        formats = self._extract_m3u8_formats(
+            m3u8_url, video_id, ext='mp4', m3u8_id='hls')
+
+        picture_paths = [
+            episode_details.get('picture_path'),
+            episode_details.get('large_picture_path'),
+        ]
+
+        thumbnails = [{
+            'url': picture_path,
+            'preference': idx,
+        } for idx, picture_path in enumerate(picture_paths) if picture_path is not None]
+
+        return {
+            'id': video_id,
+            'title': episode_details['title'],
+            'formats': formats,
+            'thumbnails': thumbnails,
+            'view_count': episode_details.get('view_count'),
+        }
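
For reference, the JSON body assembled by the newer _call_graphql_api can be reproduced on its own; the episode ID below comes from the test above, and the rest follows the code in the hunk (trimmed to a single field):

import json

operation = 'getEpisodeDetail'
query = 'queryEpisode(filter: {EpisodeID: $EpisodeId}, first: 1) {\n  title\n}'
variables = {'EpisodeId': ('[ID!]', '0x1b3139c')}

parameters = ', '.join(f'${name}: {type_}' for name, (type_, _) in variables.items())
parameters = f'({parameters})'
body = json.dumps({
    'operationName': operation,
    'query': f'query {operation}{parameters} @cacheControl(maxAge: 60) {{{query}\n}}\n',
    'variables': {name: value for name, (_, value) in variables.items()},
}, separators=(',', ':'))
print(body)  # what gets POSTed to https://graph.telewebion.com/graphql
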
@@ -10,7 +10,6 @@ from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlparse,
 )
-from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     dict_get,
@@ -1318,7 +1317,20 @@ class TwitterIE(TwitterBaseIE):
             }
         }
 
-    def _call_syndication_api(self, twid):
+    def _extract_status(self, twid):
+        if self.is_logged_in or self._selected_api == 'graphql':
+            status = self._graphql_to_legacy(self._call_graphql_api(self._GRAPHQL_ENDPOINT, twid), twid)
+
+        elif self._selected_api == 'legacy':
+            status = self._call_api(f'statuses/show/{twid}.json', twid, {
+                'cards_platform': 'Web-12',
+                'include_cards': 1,
+                'include_reply_count': 1,
+                'include_user_entities': 0,
+                'tweet_mode': 'extended',
+            })
+
+        elif self._selected_api == 'syndication':
             self.report_warning(
                 'Not all metadata or media is available via syndication endpoint', twid, only_once=True)
             status = self._download_json(
@@ -1338,31 +1350,8 @@ class TwitterIE(TwitterBaseIE):
             media.append(detail)
         status['extended_entities'] = {'media': media}
 
-        return status
-
-    def _extract_status(self, twid):
-        if self._selected_api not in ('graphql', 'legacy', 'syndication'):
-            raise ExtractorError(f'{self._selected_api!r} is not a valid API selection', expected=True)
-
-        try:
-            if self.is_logged_in or self._selected_api == 'graphql':
-                status = self._graphql_to_legacy(self._call_graphql_api(self._GRAPHQL_ENDPOINT, twid), twid)
-            elif self._selected_api == 'legacy':
-                status = self._call_api(f'statuses/show/{twid}.json', twid, {
-                    'cards_platform': 'Web-12',
-                    'include_cards': 1,
-                    'include_reply_count': 1,
-                    'include_user_entities': 0,
-                    'tweet_mode': 'extended',
-                })
-        except ExtractorError as e:
-            if not isinstance(e.cause, HTTPError) or not e.cause.status == 429:
-                raise
-            self.report_warning('Rate-limit exceeded; falling back to syndication endpoint')
-            status = self._call_syndication_api(twid)
-
-        if self._selected_api == 'syndication':
-            status = self._call_syndication_api(twid)
+        else:
+            raise ExtractorError(f'"{self._selected_api}" is not a valid API selection', expected=True)
 
         return traverse_obj(status, 'retweeted_status', None, expected_type=dict) or {}
 
@@ -1427,8 +1416,8 @@ class TwitterIE(TwitterBaseIE):
                 'thumbnails': thumbnails,
                 'view_count': traverse_obj(media, ('mediaStats', 'viewCount', {int_or_none})),  # No longer available
                 'duration': float_or_none(traverse_obj(media, ('video_info', 'duration_millis')), 1000),
-                # Prioritize m3u8 formats for compat, see https://github.com/yt-dlp/yt-dlp/issues/8117
-                '_format_sort_fields': ('res', 'proto:m3u8', 'br', 'size'),  # http format codec is unknown
+                # The codec of http formats are unknown
+                '_format_sort_fields': ('res', 'br', 'size', 'proto'),
             }
 
         def extract_from_card_info(card):
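
Taken together, the newer side of the Twitter hunks validates the API selection up front, splits the syndication call into _call_syndication_api, and falls back to it when the primary API is rate-limited. A stripped-down sketch of that control flow, using stand-in callables and a stub exception rather than yt-dlp's real ExtractorError/HTTPError types:

class RateLimited(Exception):
    """Stand-in for an ExtractorError whose cause is an HTTP 429 response."""


def extract_status(selected_api, call_primary, call_syndication):
    # Same shape as the newer _extract_status: validate the selection, try the
    # primary API, fall back to the syndication endpoint on a rate limit.
    if selected_api not in ('graphql', 'legacy', 'syndication'):
        raise ValueError(f'{selected_api!r} is not a valid API selection')
    status = None
    try:
        if selected_api in ('graphql', 'legacy'):
            status = call_primary()
    except RateLimited:
        print('Rate-limit exceeded; falling back to syndication endpoint')
        status = call_syndication()
    if selected_api == 'syndication':
        status = call_syndication()
    return status


def flaky_primary():
    raise RateLimited()


print(extract_status('graphql', flaky_primary, lambda: {'source': 'syndication'}))
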
@@ -70,7 +70,7 @@ class WordpressPlaylistEmbedIE(InfoExtractor):
                 'height': int_or_none(traverse_obj(track, ('dimensions', 'original', 'height'))),
                 'width': int_or_none(traverse_obj(track, ('dimensions', 'original', 'width'))),
             } for track in traverse_obj(playlist_json, ('tracks', ...), expected_type=dict)]
-            yield self.playlist_result(entries, self._generic_id(url) + f'-wp-playlist-{i + 1}', 'Wordpress Playlist')
+            yield self.playlist_result(entries, self._generic_id(url) + f'-wp-playlist-{i+1}', 'Wordpress Playlist')
 
 
 class WordpressMiniAudioPlayerEmbedIE(InfoExtractor):
@@ -5297,7 +5297,6 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
             # See: https://github.com/yt-dlp/yt-dlp/issues/116
             if not traverse_obj(data, 'contents', 'currentVideoEndpoint', 'onResponseReceivedActions'):
                 retry.error = ExtractorError('Incomplete yt initial data received')
-                data = None
                 continue
 
         return webpage, data
@@ -29,6 +29,7 @@ except ImportError:
 except Exception as e:
     warnings.warn(f'Failed to import "websockets" request handler: {e}' + bug_reports_message())
 
+
 try:
     from . import _curlcffi  # noqa: F401
 except ImportError:
@@ -219,7 +219,7 @@ def _socket_connect(ip_addr, timeout, source_address):
             sock.bind(source_address)
         sock.connect(sa)
         return sock
-    except OSError:
+    except socket.error:
         sock.close()
         raise
 
@@ -237,7 +237,7 @@ def create_socks_proxy_socket(dest_addr, proxy_args, proxy_ip_addr, timeout, source_address):
             sock.bind(source_address)
         sock.connect(dest_addr)
         return sock
-    except OSError:
+    except socket.error:
         sock.close()
         raise
 
@@ -255,7 +255,7 @@ def create_connection(
     host, port = address
     ip_addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
     if not ip_addrs:
-        raise OSError('getaddrinfo returns an empty list')
+        raise socket.error('getaddrinfo returns an empty list')
     if source_address is not None:
         af = socket.AF_INET if ':' not in source_address[0] else socket.AF_INET6
         ip_addrs = [addr for addr in ip_addrs if addr[0] == af]
@@ -272,7 +272,7 @@ def create_connection(
             # https://bugs.python.org/issue36820
             err = None
             return sock
-        except OSError as e:
+        except socket.error as e:
             err = e
 
     try:
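
The only change across these _helper.py hunks is OSError versus socket.error. Since Python 3.3, socket.error is an alias of OSError, so both spellings catch the same exceptions; a quick self-contained check:

import socket

# socket.error has been an alias of OSError since Python 3.3, so the two
# except clauses in the hunks above are behaviourally identical.
assert socket.error is OSError
print(issubclass(ConnectionRefusedError, socket.error))  # True
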
@@ -188,7 +188,6 @@ class RequestsSession(requests.sessions.Session):
     """
     Ensure unified redirect method handling with our urllib redirect handler.
    """
 
    def rebuild_method(self, prepared_request, response):
        new_method = get_redirect_method(prepared_request.method, response.status_code)
-
@@ -219,7 +218,6 @@ class Urllib3LoggingFilter(logging.Filter):
-
 class Urllib3LoggingHandler(logging.Handler):
     """Redirect urllib3 logs to our logger"""
 
     def __init__(self, logger, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self._logger = logger
@@ -368,7 +366,7 @@ class SocksHTTPConnection(urllib3.connection.HTTPConnection):
                 self, f'Connection to {self.host} timed out. (connect timeout={self.timeout})') from e
         except SocksProxyError as e:
             raise urllib3.exceptions.ProxyError(str(e), e) from e
-        except OSError as e:
+        except (OSError, socket.error) as e:
             raise urllib3.exceptions.NewConnectionError(
                 self, f'Failed to establish a new connection: {e}') from e
 
@@ -5,26 +5,20 @@ import logging
 import ssl
 import sys
 
-from ._helper import (
-    create_connection,
-    create_socks_proxy_socket,
-    make_socks_proxy_opts,
-    select_proxy,
-)
-from .common import Features, Response, register_rh
+from ._helper import create_connection, select_proxy, make_socks_proxy_opts, create_socks_proxy_socket
+from .common import Response, register_rh, Features
 from .exceptions import (
     CertificateVerifyError,
     HTTPError,
-    ProxyError,
     RequestError,
     SSLError,
-    TransportError,
+    TransportError, ProxyError,
 )
 from .websocket import WebSocketRequestHandler, WebSocketResponse
 from ..compat import functools
 from ..dependencies import websockets
-from ..socks import ProxyError as SocksProxyError
 from ..utils import int_or_none
+from ..socks import ProxyError as SocksProxyError
 
 if not websockets:
     raise ImportError('websockets is not installed')
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import abc
 
-from .common import RequestHandler, Response
+from .common import Response, RequestHandler
 
 
 class WebSocketResponse(Response):
@@ -49,7 +49,7 @@ class Socks5AddressType:
     ATYP_IPV6 = 0x04
 
 
-class ProxyError(OSError):
+class ProxyError(socket.error):
     ERR_SUCCESS = 0x00
 
     def __init__(self, code=None, msg=None):
@@ -558,7 +558,7 @@ class LenientJSONDecoder(json.JSONDecoder):
                     s = self._close_object(e)
                     if s is not None:
                         continue
-                raise type(e)(f'{e.msg} in {s[e.pos - 10:e.pos + 10]!r}', s, e.pos)
+                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
         assert False, 'Too many attempts to decode JSON'
 
 
@@ -636,7 +636,7 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
         elif char in '\\/|*<>':
             return '\0_'
         if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
-            return '' if unicodedata.category(char)[0] in 'CM' else '\0_'
+            return '\0_'
         return char
 
     # Replace look-alike Unicode glyphs
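
The behavioural difference in this hunk is limited to restricted mode: the newer side drops control and combining characters (Unicode categories C and M) instead of mapping them to the placeholder that later becomes '_'. A minimal sketch of just that expression, not the full sanitize_filename:

import unicodedata

def replace_restricted_char(char):
    # Newer branch shown above; '\0_' is the internal placeholder that
    # sanitize_filename later turns into '_'.
    return '' if unicodedata.category(char)[0] in 'CM' else '\0_'

print(repr(replace_restricted_char('\u0301')))  # combining accent -> '' (dropped)
print(repr(replace_restricted_char('é')))       # ordinary letter -> '\x00_'
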
@@ -1885,7 +1885,6 @@ def setproctitle(title):
     buf = ctypes.create_string_buffer(len(title_bytes))
     buf.value = title_bytes
     try:
-        # PR_SET_NAME = 15 Ref: /usr/include/linux/prctl.h
         libc.prctl(15, buf, 0, 0, 0)
     except AttributeError:
         return  # Strange libc, just skip this
@@ -2261,9 +2260,6 @@ class PagedList:
             raise self.IndexError()
         return entries[0]
 
-    def __bool__(self):
-        return bool(self.getslice(0, 1))
-
 
 class OnDemandPagedList(PagedList):
     """Download pages until a page with less than maximum results"""
@@ -5074,7 +5070,7 @@ def truncate_string(s, left, right=0):
     assert left > 3 and right >= 0
     if s is None or len(s) <= left + right:
         return s
-    return f'{s[:left - 3]}...{s[-right:] if right else ""}'
+    return f'{s[:left-3]}...{s[-right:] if right else ""}'
 
 
 def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
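
Only the spacing inside the f-string slice differs between the two sides here. For reference, the function as shown behaves like this (a small self-contained copy, not an import from yt-dlp):

def truncate_string(s, left, right=0):
    assert left > 3 and right >= 0
    if s is None or len(s) <= left + right:
        return s
    return f'{s[:left - 3]}...{s[-right:] if right else ""}'

print(truncate_string('abcdefghijklmnop', 10))     # abcdefg...
print(truncate_string('abcdefghijklmnop', 10, 3))  # abcdefg...nop
print(truncate_string('short', 10))                # short (returned unchanged)
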
@@ -23,7 +23,7 @@ def traverse_obj(
 
     >>> obj = [{}, {"key": "value"}]
     >>> traverse_obj(obj, (1, "key"))
-    'value'
+    "value"
 
     Each of the provided `paths` is tested and the first producing a valid result will be returned.
     The next path will also be tested if the path branched but no results could be found.
@@ -1,8 +1,8 @@
 # Autogenerated by devscripts/update-version.py
 
-__version__ = '2023.12.30'
+__version__ = '2023.11.16'
 
-RELEASE_GIT_HEAD = 'f10589e3453009bb523f55849bba144c9b91cf2a'
+RELEASE_GIT_HEAD = '24f827875c6ba513f12ed09a3aef2bbed223760d'
 
 VARIANT = None
 
@@ -12,4 +12,4 @@ CHANNEL = 'stable'
 
 ORIGIN = 'yt-dlp/yt-dlp'
 
-_pkg_version = '2023.12.30'
+_pkg_version = '2023.11.16'