Compare commits

986c80a2a60059c60ad13774f4b860a0d079d607..22ed1a0090e37e3ed7dce434711c751b4d719f71

No commits in common. "986c80a2a60059c60ad13774f4b860a0d079d607" and "22ed1a0090e37e3ed7dce434711c751b4d719f71" have entirely different histories.

47 changed files with 509 additions and 667 deletions


@ -18,7 +18,7 @@ body:
options:
- label: I'm reporting that yt-dlp is broken on a **supported** site
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -61,18 +61,19 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell
validations:


@ -18,7 +18,7 @@ body:
options:
- label: I'm reporting a new site support request
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -73,18 +73,19 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell
validations:


@ -18,7 +18,7 @@ body:
options:
- label: I'm requesting a site-specific feature
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -69,18 +69,19 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell
validations:


@ -18,7 +18,7 @@ body:
options:
- label: I'm reporting a bug unrelated to a specific site
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
required: true
@ -54,18 +54,19 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell
validations:


@ -20,7 +20,7 @@ body:
required: true
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
required: true
@ -50,17 +50,18 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell


@ -26,7 +26,7 @@ body:
required: true
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
required: true
- label: I've verified that I have **updated yt-dlp to nightly or master** ([update instructions](https://github.com/yt-dlp/yt-dlp#update-channels))
- label: I've verified that I'm running yt-dlp version **2023.10.13** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
required: true
- label: I've searched [known issues](https://github.com/yt-dlp/yt-dlp/issues/3766) and the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
required: true
@ -56,17 +56,18 @@ body:
description: |
It should start like this:
placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Command-line config: ['-vU', 'test:youtube']
[debug] Portable config "yt-dlp.conf": ['-i']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] yt-dlp version 2023.10.13 [9d339c4] (win32_exe)
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {}
[debug] Request Handlers: urllib, requests
[debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.10.13, Current version: 2023.10.13
yt-dlp is up to date (2023.10.13)
<more lines>
render: shell


@ -40,4 +40,10 @@ Fixes #
- [ ] Core bug fix/improvement
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
<!-- Do NOT edit/remove anything below this! -->
</details><details><summary>Copilot Summary</summary>
copilot:all
</details>


@ -204,11 +204,11 @@ jobs:
apt -y install zlib1g-dev python3.8 python3.8-dev python3.8-distutils python3-pip
python3.8 -m pip install -U pip setuptools wheel
# Cannot access requirements.txt from the repo directory at this stage
python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi secretstorage
python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi
run: |
cd repo
python3.8 -m pip install -U Pyinstaller secretstorage -r requirements.txt # Cached version may be out of date
python3.8 -m pip install -U Pyinstaller -r requirements.txt # Cached version may be out of date
python3.8 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
python3.8 devscripts/make_lazy_extractors.py
python3.8 pyinst.py


@ -216,8 +216,8 @@ jobs:
if: |
!inputs.prerelease && env.target_repo == github.repository
run: |
git config --global user.name "github-actions[bot]"
git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
git config --global user.name github-actions
git config --global user.email github-actions@github.com
git add -u
git commit -m "Release ${{ env.version }}" \
-m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all :ci run dl"


@ -513,18 +513,3 @@ awalgarg
midnightveil
naginatana
Riteo
1100101
aniolpages
bartbroere
CrendKing
Esokrates
HitomaruKonpaku
LoserFox
peci1
saintliao
shubhexists
SirElderling
almx
elivinsky
starius
TravisDupes


@ -4,91 +4,6 @@
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
-->
### 2023.11.16
#### Extractor changes
- **abc.net.au**: iview, showseries: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15cb3528cbda7b6198f49a6b5953c226d701696b) ([#8586](https://github.com/yt-dlp/yt-dlp/issues/8586)) by [bashonly](https://github.com/bashonly)
- **beatbump**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/21dc069bea2d4d99345dd969e098f4535c751d45) ([#8576](https://github.com/yt-dlp/yt-dlp/issues/8576)) by [seproDev](https://github.com/seproDev)
- **dailymotion**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a489f071508ec5caf5f32052d142afe86c28df7a) ([#7692](https://github.com/yt-dlp/yt-dlp/issues/7692)) by [TravisDupes](https://github.com/TravisDupes)
- **drtv**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0783fd558ed0d3a8bc754beb75a406256f8b97b2) ([#8484](https://github.com/yt-dlp/yt-dlp/issues/8484)) by [almx](https://github.com/almx), [seproDev](https://github.com/seproDev)
- **eltrecetv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/dcfad52812aa8ce007cefbfbe63f58b49f6b1046) ([#8216](https://github.com/yt-dlp/yt-dlp/issues/8216)) by [elivinsky](https://github.com/elivinsky)
- **jiosaavn**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/b530118e7f48232cacf8050d79a6b20bdfcf5468) ([#8307](https://github.com/yt-dlp/yt-dlp/issues/8307)) by [awalgarg](https://github.com/awalgarg)
- **njpwworld**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/e569c2d1f4b665795a2b64f0aaf7f76930664233) ([#8570](https://github.com/yt-dlp/yt-dlp/issues/8570)) by [aarubui](https://github.com/aarubui)
- **tv5mondeplus**: [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/0f634dba3afdc429ece8839b02f6d56c27b7973a) ([#4209](https://github.com/yt-dlp/yt-dlp/issues/4209)) by [FrankZ85](https://github.com/FrankZ85)
- **twitcasting**: [Fix livestream detection](https://github.com/yt-dlp/yt-dlp/commit/2325d03aa7bb80f56ba52cd6992258e44727b424) ([#8574](https://github.com/yt-dlp/yt-dlp/issues/8574)) by [JC-Chung](https://github.com/JC-Chung)
- **zenyandex**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/5efe68b73cbf6e907c2e6a3aa338664385084184) ([#8454](https://github.com/yt-dlp/yt-dlp/issues/8454)) by [starius](https://github.com/starius)
#### Misc. changes
- **build**: [Make `secretstorage` an optional dependency](https://github.com/yt-dlp/yt-dlp/commit/24f827875c6ba513f12ed09a3aef2bbed223760d) ([#8585](https://github.com/yt-dlp/yt-dlp/issues/8585)) by [bashonly](https://github.com/bashonly)
### 2023.11.14
#### Important changes
- **The release channels have been adjusted!**
* [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.
* [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes.
- Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)
- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers
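
For illustration only (not part of this compare): a minimal sketch of the smuggling convention the security fix enforces, using `smuggle_url`/`unsmuggle_url` from `yt_dlp.utils`. The player URL and referer are made-up placeholders; the specific keys each extractor accepts vary (compare the `cybrary`/`duboku` hunks further down, which switch between `http_headers` and `referer`).

```python
from yt_dlp.utils import smuggle_url, unsmuggle_url

# Old pattern: an arbitrary header dict was smuggled and forwarded wholesale.
legacy = smuggle_url('https://player.example.com/video/123',  # placeholder URL
                     {'http_headers': {'Referer': 'https://example.com/'}})

# New pattern: only the specific field the receiving extractor expects.
current = smuggle_url('https://player.example.com/video/123',
                      {'referer': 'https://example.com/'})

url, data = unsmuggle_url(current, {})
print(url)   # -> https://player.example.com/video/123
print(data)  # -> {'referer': 'https://example.com/'}
```
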
#### Core changes
- [Add `--compat-option manifest-filesize-approx`](https://github.com/yt-dlp/yt-dlp/commit/10025b715ea01489557eb2c5a3cc04d361fcdb52) ([#8356](https://github.com/yt-dlp/yt-dlp/issues/8356)) by [bashonly](https://github.com/bashonly)
- [Fix format sorting with `--load-info-json`](https://github.com/yt-dlp/yt-dlp/commit/595ea4a99b726b8fe9463e7853b7053978d0544e) ([#8521](https://github.com/yt-dlp/yt-dlp/issues/8521)) by [bashonly](https://github.com/bashonly)
- [Include build origin in verbose output](https://github.com/yt-dlp/yt-dlp/commit/20314dd46f25e0e0a7e985a7804049aefa8b909f) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- [Only ensure playlist thumbnail dir if writing thumbs](https://github.com/yt-dlp/yt-dlp/commit/a40e0b37dfc8c26916b0e01aa3f29f3bc42250b6) ([#8373](https://github.com/yt-dlp/yt-dlp/issues/8373)) by [bashonly](https://github.com/bashonly)
- **update**: [Overhaul self-updater](https://github.com/yt-dlp/yt-dlp/commit/0b6ad22e6a432006a75df968f0283e6c6b3cfae6) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
#### Extractor changes
- [Do not smuggle `http_headers`](https://github.com/yt-dlp/yt-dlp/commit/f04b5bedad7b281bee9814686bba1762bae092eb) by [coletdjnz](https://github.com/coletdjnz)
- [Do not test truth value of `xml.etree.ElementTree.Element`](https://github.com/yt-dlp/yt-dlp/commit/d4f14a72dc1dd79396e0e80980268aee902b61e4) ([#8582](https://github.com/yt-dlp/yt-dlp/issues/8582)) by [bashonly](https://github.com/bashonly)
- **brilliantpala**: [Fix cookies support](https://github.com/yt-dlp/yt-dlp/commit/9b5bedf13a3323074daceb0ec6ebb3cc6e0b9684) ([#8352](https://github.com/yt-dlp/yt-dlp/issues/8352)) by [pzhlkj6612](https://github.com/pzhlkj6612)
- **generic**: [Improve direct video link ext detection](https://github.com/yt-dlp/yt-dlp/commit/4ce2f29a50fcfb9920e6f2ffe42192945a2bad7e) ([#8340](https://github.com/yt-dlp/yt-dlp/issues/8340)) by [bashonly](https://github.com/bashonly)
- **laxarxames**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/312a2d1e8bc247264f9d85c5ec764e33aa0133b5) ([#8412](https://github.com/yt-dlp/yt-dlp/issues/8412)) by [aniolpages](https://github.com/aniolpages)
- **n-tv.de**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/8afd9468b0c822843bc480d366d1c86698daabfb) ([#8414](https://github.com/yt-dlp/yt-dlp/issues/8414)) by [1100101](https://github.com/1100101)
- **neteasemusic**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/46acc418a53470b7f32581b3309c3cb87aa8488d) ([#8531](https://github.com/yt-dlp/yt-dlp/issues/8531)) by [LoserFox](https://github.com/LoserFox)
- **nhk**: [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/54579be4364e148277c32e20a5c3efc2c3f52f5b) ([#8388](https://github.com/yt-dlp/yt-dlp/issues/8388)) by [garret1317](https://github.com/garret1317)
- **novaembed**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/3ff494f6f41c27549420fa88be27555bd449ffdc) ([#8368](https://github.com/yt-dlp/yt-dlp/issues/8368)) by [peci1](https://github.com/peci1)
- **npo**: [Send `POST` request to streams API endpoint](https://github.com/yt-dlp/yt-dlp/commit/8e02a4dcc800f9444e9d461edc41edd7b662f435) ([#8413](https://github.com/yt-dlp/yt-dlp/issues/8413)) by [bartbroere](https://github.com/bartbroere)
- **ondemandkorea**: [Overhaul extractor](https://github.com/yt-dlp/yt-dlp/commit/05adfd883a4f2ecae0267e670a62a2e45c351aeb) ([#8386](https://github.com/yt-dlp/yt-dlp/issues/8386)) by [seproDev](https://github.com/seproDev)
- **orf**: podcast: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/6ba3085616652cbf05d1858efc321fdbfc4c6119) ([#8486](https://github.com/yt-dlp/yt-dlp/issues/8486)) by [Esokrates](https://github.com/Esokrates)
- **polskieradio**: audition: [Fix playlist extraction](https://github.com/yt-dlp/yt-dlp/commit/464327acdb353ceb91d2115163a5a9621b22fe0d) ([#8459](https://github.com/yt-dlp/yt-dlp/issues/8459)) by [shubhexists](https://github.com/shubhexists)
- **qdance**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/177f0d963e4b9db749805c482e6f288354c8be84) ([#8426](https://github.com/yt-dlp/yt-dlp/issues/8426)) by [bashonly](https://github.com/bashonly)
- **radiocomercial**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/ef12dbdcd3e7264bd3d744c1e3107597bd23ad35) ([#8508](https://github.com/yt-dlp/yt-dlp/issues/8508)) by [SirElderling](https://github.com/SirElderling)
- **sbs.co.kr**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/25a4bd345a0dcfece6fef752d4537eb403da94d9) ([#8326](https://github.com/yt-dlp/yt-dlp/issues/8326)) by [seproDev](https://github.com/seproDev)
- **theatercomplextown**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/2863fcf2b6876d0c7965ff7d6d9242eea653dc6b) ([#8560](https://github.com/yt-dlp/yt-dlp/issues/8560)) by [bashonly](https://github.com/bashonly)
- **thisav**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/cb480e390d85fb3a598c1b6d5eef3438ce729fc9) ([#8346](https://github.com/yt-dlp/yt-dlp/issues/8346)) by [bashonly](https://github.com/bashonly)
- **thisoldhouse**: [Add login support](https://github.com/yt-dlp/yt-dlp/commit/c76c96677ff6a056f5844a568ef05ee22c46d6f4) ([#8561](https://github.com/yt-dlp/yt-dlp/issues/8561)) by [bashonly](https://github.com/bashonly)
- **twitcasting**: [Fix livestream extraction](https://github.com/yt-dlp/yt-dlp/commit/7b8b1cf5eb8bf44ce70bc24e1f56f0dba2737e98) ([#8427](https://github.com/yt-dlp/yt-dlp/issues/8427)) by [JC-Chung](https://github.com/JC-Chung), [saintliao](https://github.com/saintliao)
- **twitter**
- broadcast
- [Improve metadata extraction](https://github.com/yt-dlp/yt-dlp/commit/7d337ca977d73a0a6c07ab481ed8faa8f6ff8726) ([#8383](https://github.com/yt-dlp/yt-dlp/issues/8383)) by [HitomaruKonpaku](https://github.com/HitomaruKonpaku)
- [Support `--wait-for-video`](https://github.com/yt-dlp/yt-dlp/commit/f6e97090d2ed9e05441ab0f4bec3559b816d7a00) ([#8475](https://github.com/yt-dlp/yt-dlp/issues/8475)) by [bashonly](https://github.com/bashonly)
- **weibo**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15b252dfd2c6807fe57afc5a95e59abadb32ccd2) ([#8463](https://github.com/yt-dlp/yt-dlp/issues/8463)) by [c-basalt](https://github.com/c-basalt)
- **weverse**: [Fix login error handling](https://github.com/yt-dlp/yt-dlp/commit/4a601c9eff9fb42e24a4c8da3fa03628e035b35b) ([#8458](https://github.com/yt-dlp/yt-dlp/issues/8458)) by [seproDev](https://github.com/seproDev)
- **youtube**: [Check newly uploaded iOS HLS formats](https://github.com/yt-dlp/yt-dlp/commit/ef79d20dc9d27ac002a7196f073b37f2f2721aed) ([#8336](https://github.com/yt-dlp/yt-dlp/issues/8336)) by [bashonly](https://github.com/bashonly)
- **zoom**: [Extract combined view formats](https://github.com/yt-dlp/yt-dlp/commit/3906de07551fedb00b789345bf24cc27d6ddf128) ([#7847](https://github.com/yt-dlp/yt-dlp/issues/7847)) by [Mipsters](https://github.com/Mipsters)
#### Downloader changes
- **aria2c**: [Remove duplicate `--file-allocation=none`](https://github.com/yt-dlp/yt-dlp/commit/21b25281c51523620706b11bfc1c4a889858e1f2) ([#8332](https://github.com/yt-dlp/yt-dlp/issues/8332)) by [CrendKing](https://github.com/CrendKing)
- **dash**: [Force native downloader for `--live-from-start`](https://github.com/yt-dlp/yt-dlp/commit/2622c804d1a5accc3045db398e0fc52074f4bdb3) ([#8339](https://github.com/yt-dlp/yt-dlp/issues/8339)) by [bashonly](https://github.com/bashonly)
#### Networking changes
- **Request Handler**: requests: [Add handler for `requests` HTTP library (#3668)](https://github.com/yt-dlp/yt-dlp/commit/8a8b54523addf46dfd50ef599761a81bc22362e6) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [Grub4K](https://github.com/Grub4K) (With fixes in [4e38e2a](https://github.com/yt-dlp/yt-dlp/commit/4e38e2ae9d7380015349e6aee59c78bb3938befd))
Adds support for HTTPS proxies and persistent connections (keep-alive)
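
A rough usage sketch, not taken from this diff: once the `requests` package is installed it appears in the `[debug] Request Handlers: urllib, requests` line quoted in the issue templates above, and an HTTPS proxy can then be passed through the regular `proxy` option. The proxy address here is an assumption.

```python
import yt_dlp

ydl_opts = {
    'proxy': 'https://127.0.0.1:3128',  # assumed local HTTPS proxy (usable via the requests handler)
    'verbose': True,                    # prints the "Request Handlers" debug line
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```
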
#### Misc. changes
- **build**
- [Include secretstorage in Linux builds](https://github.com/yt-dlp/yt-dlp/commit/9970d74c8383432c6c8779aa47d3253dcf412b14) by [bashonly](https://github.com/bashonly)
- [Overhaul and unify release workflow](https://github.com/yt-dlp/yt-dlp/commit/1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- **ci**
- [Bump `actions/checkout` to v4](https://github.com/yt-dlp/yt-dlp/commit/5438593a35b7b042fc48fe29cad0b9039f07c9bb) by [bashonly](https://github.com/bashonly)
- [Run core tests with dependencies](https://github.com/yt-dlp/yt-dlp/commit/700444c23ddb65f618c2abd942acdc0c58c650b1) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz)
- **cleanup**
- [Fix changelog typo](https://github.com/yt-dlp/yt-dlp/commit/a9d3f4b20a3533d2a40104c85bc2cc6c2564c800) by [bashonly](https://github.com/bashonly)
- [Update documentation for master and nightly channels](https://github.com/yt-dlp/yt-dlp/commit/a00af29853b8c7350ce086f4cab8c2c9cf2fcf1d) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- Miscellaneous: [b012271](https://github.com/yt-dlp/yt-dlp/commit/b012271d01b59759e4eefeab0308698cd9e7224c) by [bashonly](https://github.com/bashonly), [coletdjnz](https://github.com/coletdjnz), [dirkf](https://github.com/dirkf), [gamer191](https://github.com/gamer191), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **test**: update: [Implement simple updater unit tests](https://github.com/yt-dlp/yt-dlp/commit/87264d4fdadcddd91289b968dd0e4bf58d449267) by [bashonly](https://github.com/bashonly)
### 2023.10.13
#### Core changes


@ -163,10 +163,10 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
For ease of use, a few more compat options are available:
* `--compat-options all`: Use all compat options (Do NOT use)
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter,-manifest-filesize-approx`
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter,-manifest-filesize-approx`
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams,-playlist-match-filter`
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect,-playlist-match-filter`
* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler,manifest-filesize-approx`. Use this to enable all future compat options
* `--compat-options 2022`: Same as `--compat-options playlist-match-filter,no-external-downloader-progress,prefer-legacy-http-handler`. Use this to enable all future compat options
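
A hedged sketch of setting a couple of these compat options through the embedding API rather than the CLI; it assumes `compat_opts` accepts the individual option names (the `2022`/`youtube-dl` bundles above are expanded on the CLI side) and reuses the test video from the issue templates.

```python
import yt_dlp

ydl_opts = {
    # Roughly equivalent to:
    #   yt-dlp --compat-options playlist-match-filter,prefer-legacy-http-handler URL
    'compat_opts': ['playlist-match-filter', 'prefer-legacy-http-handler'],
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    print(info.get('title'))
```
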
# INSTALLATION
@ -380,8 +380,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
CHANNEL can be a repository as well. CHANNEL
and TAG default to "stable" and "latest"
respectively if omitted; See "UPDATE" for
details. Supported channels: stable,
nightly, master
details. Supported channels: stable, nightly
-i, --ignore-errors Ignore download and postprocessing errors.
The download will be considered successful
even if the postprocessing fails


@ -98,21 +98,5 @@
"action": "add",
"when": "61bdf15fc7400601c3da1aa7a43917310a5bf391",
"short": "[priority] Security: [[CVE-2023-40581](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-40581)] [Prevent RCE when using `--exec` with `%q` on Windows](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-42h4-v29r-42qg)\n - The shell escape function is now using `\"\"` instead of `\\\"`.\n - `utils.Popen` has been patched to properly quote commands."
},
{
"action": "change",
"when": "8a8b54523addf46dfd50ef599761a81bc22362e6",
"short": "[rh:requests] Add handler for `requests` HTTP library (#3668)\n\n\tAdds support for HTTPS proxies and persistent connections (keep-alive)",
"authors": ["bashonly", "coletdjnz", "Grub4K"]
},
{
"action": "add",
"when": "1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa",
"short": "[priority] **The release channels have been adjusted!**\n\t* [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.\n\t* [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes."
},
{
"action": "add",
"when": "f04b5bedad7b281bee9814686bba1762bae092eb",
"short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
}
]


@ -6,3 +6,4 @@ brotlicffi; implementation_name!='cpython'
certifi
requests>=2.31.0,<3
urllib3>=1.26.17,<3
secretstorage; sys_platform=='linux' and (implementation_name!='pypy' or implementation_version>='7.3.10')


@ -414,7 +414,6 @@
- **EllenTubeVideo**
- **Elonet**
- **ElPais**: El País
- **ElTreceTV**: El Trece TV (Argentina)
- **Embedly**
- **EMPFlix**
- **Engadget**
@ -655,8 +654,6 @@
- **Jamendo**
- **JamendoAlbum**
- **JeuxVideo**
- **JioSaavnAlbum**
- **JioSaavnSong**
- **Joj**
- **Jove**
- **JStream**
@ -703,7 +700,6 @@
- **LastFM**
- **LastFMPlaylist**
- **LastFMUser**
- **LaXarxaMes**: [*laxarxames*](## "netrc machine")
- **lbry**
- **lbry:channel**
- **lbry:playlist**
@ -979,6 +975,7 @@
- **Nitter**
- **njoy**: N-JOY
- **njoy:embed**
- **NJPWWorld**: [*njpwworld*](## "netrc machine") 新日本プロレスワールド
- **NobelPrize**
- **NoicePodcast**
- **NonkTube**
@ -1029,7 +1026,6 @@
- **on24**: ON24
- **OnDemandChinaEpisode**
- **OnDemandKorea**
- **OnDemandKoreaProgram**
- **OneFootball**
- **OnePlacePodcast**
- **onet.pl**
@ -1047,7 +1043,6 @@
- **OraTV**
- **orf:fm4:story**: fm4.orf.at stories
- **orf:iptv**: iptv.ORF.at
- **orf:podcast**
- **orf:radio**
- **orf:tvthek**: ORF TVthek
- **OsnatelTV**: [*osnateltv*](## "netrc machine")
@ -1185,8 +1180,6 @@
- **radiobremen**
- **radiocanada**
- **radiocanada:audiovideo**
- **RadioComercial**
- **RadioComercialPlaylist**
- **radiofrance**
- **RadioFranceLive**
- **RadioFrancePodcast**
@ -1313,9 +1306,6 @@
- **Sapo**: SAPO Vídeos
- **savefrom.net**
- **SBS**: sbs.com.au
- **sbs.co.kr**
- **sbs.co.kr:allvod_program**
- **sbs.co.kr:programs_vod**
- **schooltv**
- **ScienceChannel**
- **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix
@ -1484,8 +1474,6 @@
- **TenPlaySeason**
- **TF1**
- **TFO**
- **theatercomplextown:ppv**: [*theatercomplextown*](## "netrc machine")
- **theatercomplextown:vod**: [*theatercomplextown*](## "netrc machine")
- **TheHoleTv**
- **TheIntercept**
- **ThePlatform**
@ -1494,7 +1482,8 @@
- **TheSun**
- **TheWeatherChannel**
- **ThisAmericanLife**
- **ThisOldHouse**: [*thisoldhouse*](## "netrc machine")
- **ThisAV**
- **ThisOldHouse**
- **ThisVid**
- **ThisVidMember**
- **ThisVidPlaylist**


@ -1293,10 +1293,6 @@ class TestYoutubeDLNetworking:
assert 'Youtubedl-no-compression' not in rh.headers
assert rh.headers.get('Accept-Encoding') == 'identity'
with FakeYDL({'http_headers': {'Ytdl-socks-proxy': 'socks://localhost:1080'}}) as ydl:
rh = self.build_handler(ydl)
assert 'Ytdl-socks-proxy' not in rh.headers
def test_build_handler_params(self):
with FakeYDL({
'http_headers': {'test': 'testtest'},


@ -565,7 +565,6 @@ from .ellentube import (
)
from .elonet import ElonetIE
from .elpais import ElPaisIE
from .eltrecetv import ElTreceTVIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .epicon import (
@ -894,10 +893,6 @@ from .japandiet import (
SangiinIE,
)
from .jeuxvideo import JeuxVideoIE
from .jiosaavn import (
JioSaavnSongIE,
JioSaavnAlbumIE,
)
from .jove import JoveIE
from .joj import JojIE
from .jstream import JStreamIE
@ -1325,6 +1320,7 @@ from .ninegag import NineGagIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .nitter import NitterIE
from .njpwworld import NJPWWorldIE
from .nobelprize import NobelPrizeIE
from .noice import NoicePodcastIE
from .nonktube import NonkTubeIE
@ -2590,7 +2586,8 @@ from .zingmp3 import (
ZingMp3HubIE,
ZingMp3LiveRadioIE,
ZingMp3PodcastEpisodeIE,
ZingMp3PodcastIE,
ZingMp3PodcastCategoriesIE,
ZingMp3PodcastNewIE,
)
from .zoom import ZoomIE
from .zype import ZypeIE


@ -16,7 +16,6 @@ from ..utils import (
try_get,
unescapeHTML,
update_url_query,
url_or_none,
)
@ -380,18 +379,6 @@ class ABCIViewShowSeriesIE(InfoExtractor):
'noplaylist': True,
'skip_download': 'm3u8',
},
}, {
# 'videoEpisodes' is a dict with `items` key
'url': 'https://iview.abc.net.au/show/7-30-mark-humphries-satire',
'info_dict': {
'id': '178458-0',
'title': 'Episodes',
'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
'series': '7.30 Mark Humphries Satire',
'season': 'Episodes',
'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
},
'playlist_count': 15,
}]
def _real_extract(self, url):
@ -411,14 +398,12 @@ class ABCIViewShowSeriesIE(InfoExtractor):
series = video_data['selectedSeries']
return {
'_type': 'playlist',
'entries': [self.url_result(episode_url, ABCIViewIE)
for episode_url in traverse_obj(series, (
'_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl', {url_or_none}))],
'entries': [self.url_result(episode['shareUrl'])
for episode in series['_embedded']['videoEpisodes']],
'id': series.get('id'),
'title': dict_get(series, ('title', 'displaySubtitle')),
'description': series.get('description'),
'series': dict_get(series, ('showTitle', 'displayTitle')),
'season': dict_get(series, ('title', 'displaySubtitle')),
'thumbnail': traverse_obj(
series, 'thumbnail', ('images', lambda _, v: v['name'] == 'seriesThumbnail', 'url'), get_all=False),
'thumbnail': series.get('thumbnail'),
}


@ -3,13 +3,14 @@ from .youtube import YoutubeIE, YoutubeTabIE
class BeatBumpVideoIE(InfoExtractor):
_VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
_VALID_URL = r'https://beatbump\.ml/listen\?id=(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
'md5': '5ff3fff41d3935b9810a9731e485fe66',
'info_dict': {
'id': 'MgNrAu2pzNs',
'ext': 'mp4',
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
'artist': 'Stephen',
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
@ -21,9 +22,10 @@ class BeatBumpVideoIE(InfoExtractor):
'alt_title': 'Voyeur Girl',
'view_count': int,
'track': 'Voyeur Girl',
'uploader': 'Stephen',
'uploader': 'Stephen - Topic',
'title': 'Voyeur Girl',
'channel_follower_count': int,
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'age_limit': 0,
'availability': 'public',
'live_status': 'not_live',
@ -34,12 +36,7 @@ class BeatBumpVideoIE(InfoExtractor):
'tags': 'count:11',
'creator': 'Stephen',
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
'channel_is_verified': True,
'heatmap': 'count:100',
},
}, {
'url': 'https://beatbump.io/listen?id=LDGZAprNGWo',
'only_matching': True,
}
}]
def _real_extract(self, url):
@ -48,7 +45,7 @@ class BeatBumpVideoIE(InfoExtractor):
class BeatBumpPlaylistIE(InfoExtractor):
_VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
_VALID_URL = r'https://beatbump\.ml/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
_TESTS = [{
'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
'playlist_count': 50,
@ -59,28 +56,25 @@ class BeatBumpPlaylistIE(InfoExtractor):
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
'description': '',
'tags': [],
'modified_date': '20231110',
},
'expected_warnings': ['YouTube Music is not directly supported'],
'modified_date': '20221223',
}
}, {
'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
'playlist_mincount': 1,
'params': {'flatplaylist': True},
'info_dict': {
'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
'uploader_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_id': '@NoCopyrightSounds',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'channel_follower_count': int,
'title': 'NoCopyrightSounds',
'title': 'NoCopyrightSounds - Videos',
'uploader': 'NoCopyrightSounds',
'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
'channel': 'NoCopyrightSounds',
'tags': 'count:65',
'tags': 'count:12',
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'channel_is_verified': True,
},
'expected_warnings': ['YouTube Music is not directly supported'],
}, {
'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
'playlist_mincount': 1,
@ -90,20 +84,16 @@ class BeatBumpPlaylistIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
'view_count': int,
'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
'uploader_id': '@NoCopyrightSounds',
'channel_url': 'https://www.youtube.com/@NoCopyrightSounds',
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
'title': 'NCS : All Releases 💿',
'uploader': 'NoCopyrightSounds',
'availability': 'public',
'channel': 'NoCopyrightSounds',
'tags': [],
'modified_date': '20231112',
'modified_date': '20221225',
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
},
'expected_warnings': ['YouTube Music is not directly supported'],
}, {
'url': 'https://beatbump.io/playlist/VLPLFCHGavqRG-q_2ZhmgU2XB2--ZY6irT1c',
'only_matching': True,
}
}]
def _real_extract(self, url):


@ -1,9 +1,8 @@
import base64
import json
import re
import json
import base64
import time
import urllib.parse
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import (
@ -388,7 +387,7 @@ class CBCGemIE(InfoExtractor):
url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)
secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
if not isinstance(secret_xml, xml.etree.ElementTree.Element):
if not secret_xml:
return
for child in secret_xml:


@ -2225,9 +2225,7 @@ class InfoExtractor:
mpd_url, video_id,
note='Downloading MPD VOD manifest' if note is None else note,
errnote='Failed to download VOD manifest' if errnote is None else errnote,
fatal=False, data=data, headers=headers, query=query)
if not isinstance(mpd_doc, xml.etree.ElementTree.Element):
return None
fatal=False, data=data, headers=headers, query=query) or {}
return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
@staticmethod


@ -105,7 +105,7 @@ class CybraryIE(CybraryBaseIE):
'chapter': module.get('title'),
'chapter_id': str_or_none(module.get('id')),
'title': activity.get('title'),
'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'referer': 'https://api.cybrary.it'})
'url': smuggle_url(f'https://player.vimeo.com/video/{vimeo_id}', {'http_headers': {'Referer': 'https://api.cybrary.it'}})
}


@ -93,7 +93,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'''(?ix)
https?://
(?:
(?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player(?:/\w+)?\.html\?)?video|swf)|
(?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player\.html\?)?video|swf)|
(?:www\.)?lequipe\.fr/video
)
[/=](?P<id>[^/?_&]+)(?:.+?\bplaylist=(?P<playlist_id>x[0-9a-z]+))?
@ -114,10 +114,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'uploader': 'Deadline',
'uploader_id': 'x1xm8ri',
'age_limit': 0,
'view_count': int,
'like_count': int,
'tags': ['hollywood', 'celeb', 'celebrity', 'movies', 'red carpet'],
'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/K456B1aXqIx58LKWQ/x1080',
},
}, {
'url': 'https://geo.dailymotion.com/player.html?video=x89eyek&mute=true',
@ -136,7 +132,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
'view_count': int,
'like_count': int,
'tags': ['en_quete_d_esprit'],
'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/Tncwi1YNg_RUl7ueu/x1080',
'thumbnail': 'https://s2.dmcdn.net/v/Tncwi1YGKdvFbDuDY/x1080',
}
}, {
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
@ -205,12 +201,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
}, {
'url': 'https://www.dailymotion.com/video/x3z49k?playlist=xv4bw',
'only_matching': True,
}, {
'url': 'https://geo.dailymotion.com/player/x86gw.html?video=k46oCapRs4iikoz9DWy',
'only_matching': True,
}, {
'url': 'https://geo.dailymotion.com/player/xakln.html?video=x8mjju4&customConfig%5BcustomParams%5D=%2Ffr-fr%2Ftennis%2Fwimbledon-mens-singles%2Farticles-video',
'only_matching': True,
}]
_GEO_BYPASS = False
_COMMON_MEDIA_FIELDS = '''description


@ -1,17 +1,21 @@
import json
import uuid
import binascii
import hashlib
import re
from .common import InfoExtractor
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
from ..compat import compat_urllib_parse_unquote
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
mimetype2ext,
parse_iso8601,
try_call,
str_or_none,
traverse_obj,
unified_timestamp,
update_url_query,
url_or_none,
)
from ..utils.traversal import traverse_obj
SERIES_API = 'https://production-cdn.dr-massive.com/api/page?device=web_browser&item_detail_expand=all&lang=da&max_list_prefetch=3&path=%s'
@ -20,7 +24,7 @@ class DRTVIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?dr\.dk/tv/se(?:/ondemand)?/(?:[^/?#]+/)*|
(?:www\.)?dr\.dk/(?:tv/se|nyheder|(?P<radio>radio|lyd)(?:/ondemand)?)/(?:[^/]+/)*|
(?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode|program)/
)
(?P<id>[\da-z_-]+)
@ -49,6 +53,22 @@ class DRTVIE(InfoExtractor):
},
'expected_warnings': ['Unable to download f4m manifest'],
'skip': 'this video has been removed',
}, {
# embed
'url': 'https://www.dr.dk/nyheder/indland/live-christianias-rydning-af-pusher-street-er-i-gang',
'info_dict': {
'id': 'urn:dr:mu:programcard:57c926176187a50a9c6e83c6',
'ext': 'mp4',
'title': 'christiania pusher street ryddes drdkrjpo',
'description': 'md5:2a71898b15057e9b97334f61d04e6eb5',
'timestamp': 1472800279,
'upload_date': '20160902',
'duration': 131.4,
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Unable to download f4m manifest'],
}, {
# with SignLanguage formats
'url': 'https://www.dr.dk/tv/se/historien-om-danmark/-/historien-om-danmark-stenalder',
@ -67,53 +87,32 @@ class DRTVIE(InfoExtractor):
'season': 'Historien om Danmark',
'series': 'Historien om Danmark',
},
'skip': 'this video has been removed',
}, {
'url': 'https://www.dr.dk/drtv/se/frank-and-kastaniegaarden_71769',
'info_dict': {
'id': '00951930010',
'ext': 'mp4',
'title': 'Frank & Kastaniegaarden',
'description': 'md5:974e1780934cf3275ef10280204bccb0',
'release_timestamp': 1546545600,
'release_date': '20190103',
'duration': 2576,
'season': 'Frank & Kastaniegaarden',
'season_id': '67125',
'release_year': 2019,
'season_number': 2019,
'series': 'Frank & Kastaniegaarden',
'episode_number': 1,
'episode': 'Frank & Kastaniegaarden',
'thumbnail': r're:https?://.+',
},
'params': {
'skip_download': True,
},
}, {
# Foreign and Regular subtitle track
'url': 'https://www.dr.dk/drtv/se/spise-med-price_-pasta-selv_397445',
'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/p4-nyheder-2019-06-26-17-30-9',
'only_matching': True,
}, {
'url': 'https://www.dr.dk/drtv/se/bonderoeven_71769',
'info_dict': {
'id': '00212301010',
'id': '00951930010',
'ext': 'mp4',
'title': 'Bonderøven 2019 (1:8)',
'description': 'md5:b6dcfe9b6f0bea6703e9a0092739a5bd',
'timestamp': 1654856100,
'upload_date': '20220610',
'duration': 2576.6,
'season': 'Bonderøven 2019',
'season_id': 'urn:dr:mu:bundle:5c201667a11fa01ca4528ce5',
'release_year': 2019,
'season_number': 2019,
'series': 'Frank & Kastaniegaarden',
'episode_number': 1,
'title': 'Spise med Price: Pasta Selv',
'alt_title': '1. Pasta Selv',
'release_date': '20230807',
'description': 'md5:2da9060524fed707810d71080b3d0cd8',
'duration': 1750,
'season': 'Spise med Price',
'release_timestamp': 1691438400,
'season_id': '397440',
'episode': 'Spise med Price: Pasta Selv',
'thumbnail': r're:https?://.+',
'season_number': 15,
'series': 'Spise med Price',
'release_year': 2022,
'subtitles': 'mincount:2',
'episode': 'Episode 1',
},
'params': {
'skip_download': 'm3u8',
'skip_download': True,
},
}, {
'url': 'https://www.dr.dk/drtv/episode/bonderoeven_71769',
@ -124,127 +123,226 @@ class DRTVIE(InfoExtractor):
}, {
'url': 'https://www.dr.dk/drtv/program/jagten_220924',
'only_matching': True,
}, {
'url': 'https://www.dr.dk/lyd/p4aarhus/regionale-nyheder-ar4/regionale-nyheder-2022-05-05-12-30-3',
'info_dict': {
'id': 'urn:dr:mu:programcard:6265cb2571401424d0360113',
'title': "Regionale nyheder",
'ext': 'mp4',
'duration': 120.043,
'series': 'P4 Østjylland regionale nyheder',
'timestamp': 1651746600,
'season': 'Regionale nyheder',
'release_year': 0,
'season_id': 'urn:dr:mu:bundle:61c26889539f0201586b73c5',
'description': '',
'upload_date': '20220505',
},
'params': {
'skip_download': True,
},
'skip': 'this video has been removed',
}, {
'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/regionale-nyheder-2023-03-14-10-30-9',
'info_dict': {
'ext': 'mp4',
'id': '14802310112',
'timestamp': 1678786200,
'duration': 120.043,
'season_id': 'urn:dr:mu:bundle:63a4f7c87140143504b6710f',
'series': 'P4 København regionale nyheder',
'upload_date': '20230314',
'release_year': 0,
'description': 'Hør seneste regionale nyheder fra P4 København.',
'season': 'Regionale nyheder',
'title': 'Regionale nyheder',
},
}]
SUBTITLE_LANGS = {
'DanishLanguageSubtitles': 'da',
'ForeignLanguageSubtitles': 'da_foreign',
'CombinedLanguageSubtitles': 'da_combined',
}
_TOKEN = None
def _real_initialize(self):
if self._TOKEN:
return
token_response = self._download_json(
'https://production.dr-massive.com/api/authorization/anonymous-sso', None,
note='Downloading anonymous token', headers={
'content-type': 'application/json',
}, query={
'device': 'web_browser',
'ff': 'idp,ldp,rpt',
'lang': 'da',
'supportFallbackToken': 'true',
}, data=json.dumps({
'deviceId': str(uuid.uuid4()),
'scopes': ['Catalog'],
'optout': True,
}).encode())
self._TOKEN = traverse_obj(
token_response, (lambda _, x: x['type'] == 'UserAccount', 'value', {str}), get_all=False)
if not self._TOKEN:
raise ExtractorError('Unable to get anonymous token')
def _real_extract(self, url):
url_slug = self._match_id(url)
webpage = self._download_webpage(url, url_slug)
raw_video_id, is_radio_url = self._match_valid_url(url).group('id', 'radio')
json_data = self._search_json(
r'window\.__data\s*=', webpage, 'data', url_slug, fatal=False) or {}
item = traverse_obj(
json_data, ('cache', 'page', ..., (None, ('entries', 0)), 'item', {dict}), get_all=False)
if item:
item_id = item.get('id')
webpage = self._download_webpage(url, raw_video_id)
if '>Programmet er ikke længere tilgængeligt' in webpage:
raise ExtractorError(
'Video %s is not available' % raw_video_id, expected=True)
video_id = self._search_regex(
(r'data-(?:material-identifier|episode-slug)="([^"]+)"',
r'data-resource="[^>"]+mu/programcard/expanded/([^"]+)"'),
webpage, 'video id', default=None)
if not video_id:
video_id = self._search_regex(
r'(urn(?:%3A|:)dr(?:%3A|:)mu(?:%3A|:)programcard(?:%3A|:)[\da-f]+)',
webpage, 'urn', default=None)
if video_id:
video_id = compat_urllib_parse_unquote(video_id)
_PROGRAMCARD_BASE = 'https://www.dr.dk/mu-online/api/1.4/programcard'
query = {'expanded': 'true'}
if video_id:
programcard_url = '%s/%s' % (_PROGRAMCARD_BASE, video_id)
else:
item_id = url_slug.rsplit('_', 1)[-1]
item = self._download_json(
f'https://production-cdn.dr-massive.com/api/items/{item_id}', item_id,
note='Attempting to download backup item data', query={
'device': 'web_browser',
'expand': 'all',
'ff': 'idp,ldp,rpt',
'geoLocation': 'dk',
'isDeviceAbroad': 'false',
'lang': 'da',
'segments': 'drtv,optedout',
'sub': 'Anonymous',
})
programcard_url = _PROGRAMCARD_BASE
if is_radio_url:
video_id = self._search_nextjs_data(
webpage, raw_video_id)['props']['pageProps']['episode']['productionNumber']
else:
json_data = self._search_json(
r'window\.__data\s*=', webpage, 'data', raw_video_id)
video_id = traverse_obj(json_data, (
'cache', 'page', ..., (None, ('entries', 0)), 'item', 'customId',
{lambda x: x.split(':')[-1]}), get_all=False)
if not video_id:
raise ExtractorError('Unable to extract video id')
query['productionnumber'] = video_id
video_id = try_call(lambda: item['customId'].rsplit(':', 1)[-1]) or item_id
stream_data = self._download_json(
f'https://production.dr-massive.com/api/account/items/{item_id}/videos', video_id,
note='Downloading stream data', query={
'delivery': 'stream',
'device': 'web_browser',
'ff': 'idp,ldp,rpt',
'lang': 'da',
'resolution': 'HD-1080',
'sub': 'Anonymous',
}, headers={'authorization': f'Bearer {self._TOKEN}'})
data = self._download_json(
programcard_url, video_id, 'Downloading video JSON', query=query)
supplementary_data = {}
if re.search(r'_\d+$', raw_video_id):
supplementary_data = self._download_json(
SERIES_API % f'/episode/{raw_video_id}', raw_video_id, fatal=False) or {}
title = str_or_none(data.get('Title')) or re.sub(
r'\s*\|\s*(?:TV\s*\|\s*DR|DRTV)$', '',
self._og_search_title(webpage))
description = self._og_search_description(
webpage, default=None) or data.get('Description')
timestamp = unified_timestamp(
data.get('PrimaryBroadcastStartTime') or data.get('SortDateTime'))
thumbnail = None
duration = None
restricted_to_denmark = False
formats = []
subtitles = {}
for stream in traverse_obj(stream_data, (lambda _, x: x['url'])):
format_id = stream.get('format', 'na')
access_service = stream.get('accessService')
preference = None
subtitle_suffix = ''
if access_service in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
assets = []
primary_asset = data.get('PrimaryAsset')
if isinstance(primary_asset, dict):
assets.append(primary_asset)
secondary_assets = data.get('SecondaryAssets')
if isinstance(secondary_assets, list):
for secondary_asset in secondary_assets:
if isinstance(secondary_asset, dict):
assets.append(secondary_asset)
def hex_to_bytes(hex):
return binascii.a2b_hex(hex.encode('ascii'))
def decrypt_uri(e):
n = int(e[2:10], 16)
a = e[10 + n:]
data = hex_to_bytes(e[10:10 + n])
key = hashlib.sha256(('%s:sRBzYNXBzkKgnjj8pGtkACch' % a).encode('utf-8')).digest()
iv = hex_to_bytes(a)
decrypted = unpad_pkcs7(aes_cbc_decrypt_bytes(data, key, iv))
return decrypted.decode('utf-8').split('?')[0]
for asset in assets:
kind = asset.get('Kind')
if kind == 'Image':
thumbnail = url_or_none(asset.get('Uri'))
elif kind in ('VideoResource', 'AudioResource'):
duration = float_or_none(asset.get('DurationInMilliseconds'), 1000)
restricted_to_denmark = asset.get('RestrictedToDenmark')
asset_target = asset.get('Target')
for link in asset.get('Links', []):
uri = link.get('Uri')
if not uri:
encrypted_uri = link.get('EncryptedUri')
if not encrypted_uri:
continue
try:
uri = decrypt_uri(encrypted_uri)
except Exception:
self.report_warning(
'Unable to decrypt EncryptedUri', video_id)
continue
uri = url_or_none(uri)
if not uri:
continue
target = link.get('Target')
format_id = target or ''
if asset_target in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
preference = -1
format_id += f'-{access_service}'
subtitle_suffix = f'-{access_service}'
elif access_service == 'StandardVideo':
format_id += '-%s' % asset_target
elif asset_target == 'Default':
preference = 1
else:
preference = None
if target == 'HDS':
f4m_formats = self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id, fatal=False)
if kind == 'AudioResource':
for f in f4m_formats:
f['vcodec'] = 'none'
formats.extend(f4m_formats)
elif target == 'HLS':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
stream.get('url'), video_id, preference=preference, m3u8_id=format_id, fatal=False)
uri, video_id, 'mp4', entry_protocol='m3u8_native',
quality=preference, m3u8_id=format_id, fatal=False)
formats.extend(fmts)
api_subtitles = traverse_obj(stream, ('subtitles', lambda _, v: url_or_none(v['link']), {dict}))
if not api_subtitles:
self._merge_subtitles(subs, target=subtitles)
for sub_track in api_subtitles:
lang = sub_track.get('language') or 'da'
subtitles.setdefault(self.SUBTITLE_LANGS.get(lang, lang) + subtitle_suffix, []).append({
'url': sub_track['link'],
'ext': mimetype2ext(sub_track.get('format')) or 'vtt'
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': int_or_none(bitrate),
'ext': link.get('FileFormat'),
'vcodec': 'none' if kind == 'AudioResource' else None,
'quality': preference,
})
subtitles_list = asset.get('SubtitlesList') or asset.get('Subtitleslist')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'da',
}
for subs in subtitles_list:
if not isinstance(subs, dict):
continue
sub_uri = url_or_none(subs.get('Uri'))
if not sub_uri:
continue
lang = subs.get('Language') or 'da'
subtitles.setdefault(LANGS.get(lang, lang), []).append({
'url': sub_uri,
'ext': mimetype2ext(subs.get('MimeType')) or 'vtt'
})
if not formats and traverse_obj(item, ('season', 'customFields', 'IsGeoRestricted')):
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
if not formats and restricted_to_denmark:
self.raise_geo_restricted(
'Unfortunately, DR is not allowed to show this program outside Denmark.',
countries=self._GEO_COUNTRIES)
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
**traverse_obj(item, {
'title': 'title',
'alt_title': 'contextualTitle',
'description': 'description',
'thumbnail': ('images', 'wallpaper'),
'release_timestamp': ('customFields', 'BroadcastTimeDK', {parse_iso8601}),
'duration': ('duration', {int_or_none}),
'series': ('season', 'show', 'title'),
'season': ('season', 'title'),
'season_number': ('season', 'seasonNumber', {int_or_none}),
'season_id': 'seasonId',
'episode': 'episodeName',
'episode_number': ('episodeNumber', {int_or_none}),
'release_year': ('releaseYear', {int_or_none}),
}),
'series': str_or_none(data.get('SeriesTitle')),
'season': str_or_none(data.get('SeasonTitle')),
'season_number': int_or_none(data.get('SeasonNumber')),
'season_id': str_or_none(data.get('SeasonUrn')),
'episode': traverse_obj(supplementary_data, ('entries', 0, 'item', 'contextualTitle')) or str_or_none(data.get('EpisodeTitle')),
'episode_number': traverse_obj(supplementary_data, ('entries', 0, 'item', 'episodeNumber')) or int_or_none(data.get('EpisodeNumber')),
'release_year': int_or_none(data.get('ProductionYear')),
}
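For readers unfamiliar with the metadata mapping used above: traverse_obj accepts a dict template whose values are traversal paths; keys that resolve to None are dropped and a callable wrapped in a one-element set is applied as a transform. A minimal standalone sketch with an invented API item (field names taken from the code above, values made up):

from yt_dlp.utils import int_or_none, parse_iso8601, traverse_obj

item = {  # invented sample shaped like the item dict used above
    'title': 'Episode 1',
    'season': {'seasonNumber': '2', 'show': {'title': 'Some Show'}},
    'customFields': {'BroadcastTimeDK': '2023-10-01T20:00:00+02:00'},
}
print(traverse_obj(item, {
    'title': 'title',                                            # plain key lookup
    'series': ('season', 'show', 'title'),                       # nested path
    'season_number': ('season', 'seasonNumber', {int_or_none}),  # {callable} applies a transform
    'release_timestamp': ('customFields', 'BroadcastTimeDK', {parse_iso8601}),
}))
# -> {'title': 'Episode 1', 'series': 'Some Show', 'season_number': 2, 'release_timestamp': <unix timestamp>}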
@ -314,8 +412,6 @@ class DRTVSeasonIE(InfoExtractor):
'display_id': 'frank-and-kastaniegaarden',
'title': 'Frank & Kastaniegaarden',
'series': 'Frank & Kastaniegaarden',
'season_number': 2008,
'alt_title': 'Season 2008',
},
'playlist_mincount': 8
}, {
@ -325,8 +421,6 @@ class DRTVSeasonIE(InfoExtractor):
'display_id': 'frank-and-kastaniegaarden',
'title': 'Frank & Kastaniegaarden',
'series': 'Frank & Kastaniegaarden',
'season_number': 2009,
'alt_title': 'Season 2009',
},
'playlist_mincount': 19
}]
@ -340,7 +434,6 @@ class DRTVSeasonIE(InfoExtractor):
'url': f'https://www.dr.dk/drtv{episode["path"]}',
'ie_key': DRTVIE.ie_key(),
'title': episode.get('title'),
'alt_title': episode.get('contextualTitle'),
'episode': episode.get('episodeName'),
'description': episode.get('shortDescription'),
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
@ -353,7 +446,6 @@ class DRTVSeasonIE(InfoExtractor):
'id': season_id,
'display_id': display_id,
'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
'entries': entries,
'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
@ -371,7 +463,6 @@ class DRTVSeriesIE(InfoExtractor):
'display_id': 'frank-and-kastaniegaarden',
'title': 'Frank & Kastaniegaarden',
'series': 'Frank & Kastaniegaarden',
'alt_title': '',
},
'playlist_mincount': 15
}]
@ -385,7 +476,6 @@ class DRTVSeriesIE(InfoExtractor):
'url': f'https://www.dr.dk/drtv{season.get("path")}',
'ie_key': DRTVSeasonIE.ie_key(),
'title': season.get('title'),
'alt_title': season.get('contextualTitle'),
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
} for season in traverse_obj(data, ('entries', 0, 'item', 'show', 'seasons', 'items'))]
@ -395,7 +485,6 @@ class DRTVSeriesIE(InfoExtractor):
'id': series_id,
'display_id': display_id,
'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
'entries': entries
}

View File

@ -138,7 +138,7 @@ class DubokuIE(InfoExtractor):
# of the video.
return {
'_type': 'url_transparent',
'url': smuggle_url(data_url, {'referer': webpage_url}),
'url': smuggle_url(data_url, {'http_headers': headers}),
'id': video_id,
'title': title,
'series': series_title,

View File

@ -1,62 +0,0 @@
from .common import InfoExtractor
class ElTreceTVIE(InfoExtractor):
IE_DESC = 'El Trece TV (Argentina)'
_VALID_URL = r'https?://(?:www\.)?eltrecetv\.com\.ar/[\w-]+/capitulos/temporada-\d+/(?P<id>[\w-]+)'
_TESTS = [
{
'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-061023/',
'md5': '71a66673dc63f9a5939d97bfe4b311ba',
'info_dict': {
'id': 'AHCA05102023145553329621094',
'ext': 'mp4',
'title': 'AHORA CAIGO - Programa 06/10/23',
'thumbnail': 'https://thumbs.vodgc.net/AHCA05102023145553329621094.JPG?649339',
}
},
{
'url': 'https://www.eltrecetv.com.ar/poco-correctos/capitulos/temporada-2023/programa-del-250923-invitada-dalia-gutmann/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/argentina-tierra-de-amor-y-venganza/capitulos/temporada-2023/atav-2-capitulo-121-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/pasaplatos/capitulos/temporada-2023/pasaplatos-el-restaurante-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/el-galpon/capitulos/temporada-2023/programa-del-160923-invitado-raul-lavie/',
'only_matching': True,
}
]
def _real_extract(self, url):
slug = self._match_id(url)
webpage = self._download_webpage(url, slug)
config = self._search_json(
r'Fusion.globalContent\s*=', webpage, 'content', slug)['promo_items']['basic']['embed']['config']
video_url = config['m3u8']
video_id = self._search_regex(r'/(\w+)\.m3u8', video_url, 'video id', default=slug)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', m3u8_id='hls')
formats.extend([{
'url': f['url'][:-23],
'format_id': f['format_id'].replace('hls', 'http'),
'width': f.get('width'),
'height': f.get('height'),
} for f in formats if f['url'].endswith('/tracks-v1a1/index.m3u8') and f.get('height') != 1080])
return {
'id': video_id,
'title': config.get('title'),
'thumbnail': config.get('thumbnail'),
'formats': formats,
'subtitles': subtitles,
}

View File

@ -106,4 +106,4 @@ class EmbedlyIE(InfoExtractor):
return self.url_result(src, YoutubeTabIE)
return self.url_result(smuggle_url(
urllib.parse.unquote(traverse_obj(qs, ('src', 0), ('url', 0))),
{'referer': url}))
{'http_headers': {'Referer': url}}))

View File

@ -17,7 +17,6 @@ from ..utils import (
determine_protocol,
dict_get,
extract_basic_auth,
filter_dict,
format_field,
int_or_none,
is_html,
@ -2436,10 +2435,10 @@ class GenericIE(InfoExtractor):
# to accept raw bytes and being able to download only a chunk.
# It would probably be better to solve this by checking Content-Type for application/octet-stream
# after a HEAD request, but not sure if we can rely on this.
full_response = self._request_webpage(url, video_id, headers=filter_dict({
full_response = self._request_webpage(url, video_id, headers={
'Accept-Encoding': 'identity',
'Referer': smuggled_data.get('referer'),
}))
**smuggled_data.get('http_headers', {})
})
new_url = full_response.url
url = urllib.parse.urlparse(url)._replace(scheme=urllib.parse.urlparse(new_url).scheme).geturl()
if new_url != extract_basic_auth(url)[0]:
@ -2459,7 +2458,7 @@ class GenericIE(InfoExtractor):
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
self.report_detected('direct video link')
headers = filter_dict({'Referer': smuggled_data.get('referer')})
headers = smuggled_data.get('http_headers', {})
format_id = str(m.group('format_id'))
ext = determine_ext(url, default_ext=None) or urlhandle_detect_ext(full_response)
subtitles = {}
@ -2711,7 +2710,7 @@ class GenericIE(InfoExtractor):
'url': smuggle_url(json_ld['url'], {
'force_videoid': video_id,
'to_generic': True,
'referer': url,
'http_headers': {'Referer': url},
}),
}, json_ld)]
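Regarding the comment above about checking Content-Type after a HEAD request: a minimal standalone sketch of such a probe (plain urllib, not yt-dlp's request layer; the helper name is made up):

import urllib.request

def probe_content_type(url, referer=None):
    # Fetch only the response headers so Content-Type (e.g. application/octet-stream)
    # can be inspected before committing to a full download.
    req = urllib.request.Request(url, method='HEAD')
    if referer:
        req.add_header('Referer', referer)
    with urllib.request.urlopen(req) as resp:
        return resp.headers.get('Content-Type', '')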

View File

@ -1,79 +0,0 @@
from .common import InfoExtractor
from ..utils import (
js_to_json,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..utils.traversal import traverse_obj
class JioSaavnBaseIE(InfoExtractor):
def _extract_initial_data(self, url, audio_id):
webpage = self._download_webpage(url, audio_id)
return self._search_json(
r'window\.__INITIAL_DATA__\s*=', webpage,
'init json', audio_id, transform_source=js_to_json)
class JioSaavnSongIE(JioSaavnBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?:jiosaavn\.com/song/[^/?#]+/|saavn\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',
'md5': '7b1f70de088ede3a152ea34aece4df42',
'info_dict': {
'id': 'OQsEfQFVUXk',
'ext': 'mp3',
'title': 'Leja Re',
'album': 'Leja Re',
'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
},
}, {
'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
song_data = self._extract_initial_data(url, audio_id)['song']['song']
media_data = self._download_json(
'https://www.jiosaavn.com/api.php', audio_id, data=urlencode_postdata({
'__call': 'song.generateAuthToken',
'_format': 'json',
'bitrate': '128',
'url': song_data['encrypted_media_url'],
}))
return {
'id': audio_id,
'url': media_data['auth_url'],
'ext': media_data.get('type'),
'vcodec': 'none',
**traverse_obj(song_data, {
'title': ('title', 'text'),
'album': ('album', 'text'),
'thumbnail': ('image', 0, {url_or_none}),
}),
}
class JioSaavnAlbumIE(JioSaavnBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?:jio)?saavn\.com/album/[^/?#]+/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',
'info_dict': {
'id': 'buIOjYZDrNA_',
'title': '96',
},
'playlist_count': 10,
}]
def _real_extract(self, url):
album_id = self._match_id(url)
album_view = self._extract_initial_data(url, album_id)['albumView']
return self.playlist_from_matches(
traverse_obj(album_view, (
'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),
album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,
getter=lambda x: urljoin('https://www.jiosaavn.com/', x))

View File

@ -208,9 +208,9 @@ class LA7PodcastIE(LA7PodcastEpisodeIE): # XXX: Do not subclass from concrete I
'url': 'https://www.la7.it/propagandalive/podcast',
'info_dict': {
'id': 'propagandalive',
'title': 'Propaganda Live',
'title': "Propaganda Live",
},
'playlist_mincount': 10,
'playlist_count_min': 10,
}]
def _real_extract(self, url):

View File

@ -1,5 +1,4 @@
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from ..compat import compat_str
@ -138,7 +137,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
mediagen_doc = self._download_xml(
mediagen_url, video_id, 'Downloading video urls', fatal=False)
if not isinstance(mediagen_doc, xml.etree.ElementTree.Element):
if mediagen_doc is False:
return None
item = mediagen_doc.find('./video/item')
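The isinstance check changed here (and in the nbc.py and slideslive.py hunks below) guards a known pitfall: _download_xml(..., fatal=False) returns False on failure, while a successfully parsed Element with no children is historically treated as falsy by a bare truthiness test (Element truthiness is deprecated for exactly this reason). A short standalone illustration:

import xml.etree.ElementTree as ET

doc = ET.fromstring('<smil/>')   # valid document, zero child elements
len(doc) == 0                    # -> True, so an `if doc:` style test can wrongly skip it
isinstance(doc, ET.Element)      # -> True: reliably distinguishes a parsed Element from False/None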

View File

@ -1,7 +1,6 @@
import base64
import json
import re
import xml.etree.ElementTree
from .common import InfoExtractor
from .theplatform import ThePlatformIE, default_ns
@ -804,10 +803,8 @@ class NBCStationsIE(InfoExtractor):
smil = self._download_xml(
f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
note='Downloading SMIL data', query=query, fatal=is_live)
if not isinstance(smil, xml.etree.ElementTree.Element):
smil = None
subtitles = self._parse_smil_subtitles(smil, default_ns) if smil is not None else {}
for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil is not None else []:
subtitles = self._parse_smil_subtitles(smil, default_ns) if smil else {}
for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil else []:
info['duration'] = float_or_none(remove_end(video.get('dur'), 'ms'), 1000)
video_src_url = video.get('src')
ext = mimetype2ext(video.get('type'), default=determine_ext(video_src_url))

View File

@ -0,0 +1,82 @@
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
get_element_by_class,
urlencode_postdata,
)
class NJPWWorldIE(InfoExtractor):
_VALID_URL = r'https?://(front\.)?njpwworld\.com/p/(?P<id>[a-z0-9_]+)'
IE_DESC = '新日本プロレスワールド'
_NETRC_MACHINE = 'njpwworld'
_TESTS = [{
'url': 'http://njpwworld.com/p/s_series_00155_1_9/',
'info_dict': {
'id': 's_series_00155_1_9',
'ext': 'mp4',
'title': '闘強導夢2000 2000年1月4日 東京ドーム 第9試合 ランディ・サベージ VS リック・スタイナー',
'tags': list,
},
'params': {
'skip_download': True, # AES-encrypted m3u8
},
'skip': 'Requires login',
}, {
'url': 'https://front.njpwworld.com/p/s_series_00563_16_bs',
'info_dict': {
'id': 's_series_00563_16_bs',
'ext': 'mp4',
'title': 'WORLD TAG LEAGUE 2020 & BEST OF THE SUPER Jr.27 2020年12月6日 福岡・福岡国際センター バックステージコメント(字幕あり)',
'tags': ["福岡・福岡国際センター", "バックステージコメント", "2020", "20年代"],
},
'params': {
'skip_download': True,
},
}]
_LOGIN_URL = 'https://front.njpwworld.com/auth/login'
def _perform_login(self, username, password):
# Setup session (will set necessary cookies)
self._request_webpage(
'https://njpwworld.com/', None, note='Setting up session')
webpage, urlh = self._download_webpage_handle(
self._LOGIN_URL, None,
note='Logging in', errnote='Unable to login',
data=urlencode_postdata({'login_id': username, 'pw': password}),
headers={'Referer': 'https://front.njpwworld.com/auth'})
# /auth/login will return 302 for successful logins
if urlh.url == self._LOGIN_URL:
self.report_warning('unable to login')
return False
return True
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
for kind, vid in re.findall(r'if\s+\(\s*imageQualityType\s*==\s*\'([^\']+)\'\s*\)\s*{\s*video_id\s*=\s*"(\d+)"', webpage):
player_path = '/intent?id=%s&type=url' % vid
player_url = compat_urlparse.urljoin(url, player_path)
formats += self._extract_m3u8_formats(
player_url, video_id, 'mp4', 'm3u8_native', m3u8_id=kind, fatal=False, quality=int(kind == 'high'))
tag_block = get_element_by_class('tag-block', webpage)
tags = re.findall(
r'<a[^>]+class="tag-[^"]+"[^>]*>([^<]+)</a>', tag_block
) if tag_block else None
return {
'id': video_id,
'title': get_element_by_class('article-title', webpage) or self._og_search_title(webpage),
'formats': formats,
'tags': tags,
}
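The login flow above detects failure by checking whether the final URL is still the login endpoint (a 302 redirect away from it signals success). A rough standalone sketch of the same idea using requests (form field names copied from the code above; everything else is illustrative):

import requests

def njpw_login_ok(username, password):
    session = requests.Session()
    resp = session.post(
        'https://front.njpwworld.com/auth/login',
        data={'login_id': username, 'pw': password},
        headers={'Referer': 'https://front.njpwworld.com/auth'},
        allow_redirects=True)
    # Still sitting on the login URL after redirects means the credentials were rejected
    return resp.url != 'https://front.njpwworld.com/auth/login'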

View File

@ -39,7 +39,7 @@ class RedTubeIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
f'https://www.redtube.com/{video_id}', video_id)
'http://www.redtube.com/%s' % video_id, video_id)
ERRORS = (
(('video-deleted-info', '>This video has been removed'), 'has been removed'),

View File

@ -1,6 +1,5 @@
import re
import urllib.parse
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
@ -470,9 +469,8 @@ class SlidesLiveIE(InfoExtractor):
slides = self._download_xml(
player_info['slides_xml_url'], video_id, fatal=False,
note='Downloading slides XML', errnote='Failed to download slides info')
if isinstance(slides, xml.etree.ElementTree.Element):
slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
for slide_id, slide in enumerate(slides.findall('./slide')):
for slide_id, slide in enumerate(slides.findall('./slide') if slides else [], 1):
slides_info.append((
slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
int_or_none(xpath_text(slide, './timeSec', 'time'))))
@ -530,7 +528,7 @@ class SlidesLiveIE(InfoExtractor):
if service_name == 'vimeo':
info['url'] = smuggle_url(
f'https://player.vimeo.com/video/{service_id}',
{'referer': url})
{'http_headers': {'Referer': url}})
video_slides = traverse_obj(slides, ('slides', ..., 'video', 'id'))
if not video_slides:

View File

@ -32,7 +32,9 @@ class StoryFireBaseIE(InfoExtractor):
'description': video.get('description'),
'url': smuggle_url(
'https://player.vimeo.com/video/' + vimeo_id, {
'referer': 'https://storyfire.com/',
'http_headers': {
'Referer': 'https://storyfire.com/',
}
}),
'thumbnail': video.get('storyImage'),
'view_count': int_or_none(video.get('views')),

View File

@ -84,13 +84,6 @@ class TV5MondePlusIE(InfoExtractor):
}]
_GEO_BYPASS = False
@staticmethod
def _extract_subtitles(data_captions):
subtitles = {}
for f in traverse_obj(data_captions, ('files', lambda _, v: url_or_none(v['file']))):
subtitles.setdefault(f.get('label') or 'fra', []).append({'url': f['file']})
return subtitles
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
@ -183,8 +176,6 @@ class TV5MondePlusIE(InfoExtractor):
'duration': duration,
'upload_date': upload_date,
'formats': formats,
'subtitles': self._extract_subtitles(self._parse_json(
traverse_obj(vpl_data, ('data-captions', {str}), default='{}'), display_id, fatal=False)),
'series': series,
'episode': episode,
}

View File

@ -11,7 +11,6 @@ from ..utils import (
float_or_none,
get_element_by_class,
get_element_by_id,
int_or_none,
parse_duration,
qualities,
str_to_int,
@ -242,8 +241,6 @@ class TwitCastingLiveIE(InfoExtractor):
'expected_exception': 'UserNotLive',
}]
_PROTECTED_LIVE_RE = r'(?s)(<span\s*class="tw-movie-thumbnail2-badge"\s*data-status="live">\s*LIVE)'
def _real_extract(self, url):
uploader_id = self._match_id(url)
self.to_screen(
@ -251,27 +248,24 @@ class TwitCastingLiveIE(InfoExtractor):
'Pass "https://twitcasting.tv/{0}/show" to download the history'.format(uploader_id))
webpage = self._download_webpage(url, uploader_id)
is_live = self._search_regex( # first pattern is for public live
(r'(data-is-onlive="true")', self._PROTECTED_LIVE_RE), webpage, 'is live?', default=None)
current_live = int_or_none(self._search_regex(
(r'data-type="movie" data-id="(\d+)">', # not available?
r'tw-sound-flag-open-link" data-id="(\d+)" style=', # not available?
r'data-movie-id="(\d+)"'), # if not currently live, value may be 0
webpage, 'current live ID', default=None))
if is_live and not current_live:
current_live = self._search_regex(
(r'data-type="movie" data-id="(\d+)">',
r'tw-sound-flag-open-link" data-id="(\d+)" style=',),
webpage, 'current live ID', default=None)
if not current_live:
# fetch unfiltered /show to find running livestreams; we can't get the ID of the password-protected livestream above
webpage = self._download_webpage(
f'https://twitcasting.tv/{uploader_id}/show/', uploader_id,
note='Downloading live history')
is_live = self._search_regex(self._PROTECTED_LIVE_RE, webpage, 'is live?', default=None)
is_live = self._search_regex(r'(?s)(<span\s*class="tw-movie-thumbnail-badge"\s*data-status="live">\s*LIVE)', webpage, 'is live?', default=None)
if is_live:
# get the first live; the running live is always listed first
current_live = self._search_regex(
r'(?s)<a\s+class="tw-movie-thumbnail2"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
r'(?s)<a\s+class="tw-movie-thumbnail"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
webpage, 'current live ID 2', default=None, group='video_id')
if not current_live:
raise UserNotLive(video_id=uploader_id)
return self.url_result(f'https://twitcasting.tv/{uploader_id}/movie/{current_live}', TwitCastingIE)
return self.url_result('https://twitcasting.tv/%s/movie/%s' % (uploader_id, current_live))
class TwitCastingUserIE(InfoExtractor):
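As the comments above note, the extractor falls back to scraping /show and taking the first thumbnail link as the running live. A tiny standalone illustration of that heuristic (the sample HTML is invented, shaped like the regexes above):

import re

sample = '<a class="tw-movie-thumbnail2" href="/example_user/movie/123456789" >...</a>'
m = re.search(
    r'(?s)<a\s+class="tw-movie-thumbnail2?"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
    sample)
live_id = m and m.group('video_id')  # -> '123456789'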

View File

@ -48,7 +48,6 @@ class KnownDRMIE(UnsupportedInfoExtractor):
r'joyn\.de',
r'amazon\.(?:\w{2}\.)?\w+/gp/video',
r'music\.amazon\.(?:\w{2}\.)?\w+',
r'(?:watch|front)\.njpwworld\.com',
)
_TESTS = [{
@ -142,13 +141,6 @@ class KnownDRMIE(UnsupportedInfoExtractor):
# https://github.com/yt-dlp/yt-dlp/issues/5767
'url': 'https://www.hulu.com/movie/anthem-6b25fac9-da2b-45a3-8e09-e4156b0471cc',
'only_matching': True,
}, {
# https://github.com/yt-dlp/yt-dlp/pull/8570
'url': 'https://watch.njpwworld.com/player/36447/series?assetType=series',
'only_matching': True,
}, {
'url': 'https://front.njpwworld.com/p/s_series_00563_16_bs',
'only_matching': True,
}]
def _real_extract(self, url):

View File

@ -11,7 +11,6 @@ from ..utils import (
ExtractorError,
InAdvancePagedList,
int_or_none,
remove_start,
traverse_obj,
update_url_query,
url_or_none,
@ -40,11 +39,11 @@ class VideoKenBaseIE(InfoExtractor):
if not video_url and not video_id:
return
elif not video_url or 'embed/sign-in' in video_url:
video_url = f'https://slideslive.com/embed/{remove_start(video_id, "slideslive-")}'
video_url = f'https://slideslive.com/embed/{video_id.lstrip("slideslive-")}'
if url_or_none(referer):
return update_url_query(video_url, {
'embed_parent_url': referer,
'embed_container_origin': f'https://{urllib.parse.urlparse(referer).hostname}',
'embed_container_origin': f'https://{urllib.parse.urlparse(referer).netloc}',
})
return video_url
@ -58,12 +57,12 @@ class VideoKenBaseIE(InfoExtractor):
video_url = video_id
ie_key = 'Youtube'
else:
video_url = traverse_obj(video, 'embed_url', 'embeddableurl', expected_type=url_or_none)
if not video_url:
continue
elif urllib.parse.urlparse(video_url).hostname == 'slideslive.com':
video_url = traverse_obj(video, 'embed_url', 'embeddableurl')
if urllib.parse.urlparse(video_url).netloc == 'slideslive.com':
ie_key = SlidesLiveIE
video_url = self._create_slideslive_url(video_url, video_id, url)
if not video_url:
continue
yield self.url_result(video_url, ie_key, video_id)
@ -179,7 +178,7 @@ class VideoKenIE(VideoKenBaseIE):
return self.url_result(
self._create_slideslive_url(None, video_id, url), SlidesLiveIE, video_id)
elif re.match(r'^[\w-]{11}$', video_id):
return self.url_result(video_id, 'Youtube', video_id)
self.url_result(video_id, 'Youtube', video_id)
else:
raise ExtractorError('Unable to extract without VideoKen API response')
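The remove_start/lstrip difference visible in this hunk is easy to miss: str.lstrip strips any leading characters from a set, not a prefix, so it can eat into the ID itself. A quick illustration with a hypothetical ID:

from yt_dlp.utils import remove_start

video_id = 'slideslive-live1234'           # hypothetical
video_id.lstrip('slideslive-')             # -> '1234' (the characters l/i/v/e are stripped too)
remove_start(video_id, 'slideslive-')      # -> 'live1234' (only the exact prefix is removed)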

View File

@ -37,14 +37,14 @@ class VimeoBaseInfoExtractor(InfoExtractor):
@staticmethod
def _smuggle_referrer(url, referrer_url):
return smuggle_url(url, {'referer': referrer_url})
return smuggle_url(url, {'http_headers': {'Referer': referrer_url}})
def _unsmuggle_headers(self, url):
"""@returns (url, smuggled_data, headers)"""
url, data = unsmuggle_url(url, {})
headers = self.get_param('http_headers').copy()
if 'referer' in data:
headers['Referer'] = data['referer']
if 'http_headers' in data:
headers.update(data['http_headers'])
return url, data, headers
def _perform_login(self, username, password):
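Both referer-smuggling conventions seen in this compare (a plain 'referer' key vs an 'http_headers' dict) ride on the same smuggle_url/unsmuggle_url round trip. A minimal sketch with made-up URLs:

from yt_dlp.utils import smuggle_url, unsmuggle_url

smuggled = smuggle_url('https://player.vimeo.com/video/123456',
                       {'referer': 'https://example.com/embed-page'})
url, data = unsmuggle_url(smuggled, {})
headers = {'Referer': data['referer']} if 'referer' in data else {}
# url is the clean player URL again; headers carries the embedding page as Referer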

View File

@ -194,7 +194,7 @@ class ZenYandexIE(InfoExtractor):
'id': '60c7c443da18892ebfe85ed7',
'ext': 'mp4',
'title': 'ВОТ ЭТО Focus. Деды Морозы на гидроциклах',
'description': 'md5:8684912f6086f298f8078d4af0e8a600',
'description': 'md5:f3db3d995763b9bbb7b56d4ccdedea89',
'thumbnail': 're:^https://avatars.dzeninfra.ru/',
'uploader': 'AcademeG DailyStream'
},
@ -209,7 +209,7 @@ class ZenYandexIE(InfoExtractor):
'id': '60c7c443da18892ebfe85ed7',
'ext': 'mp4',
'title': 'ВОТ ЭТО Focus. Деды Морозы на гидроциклах',
'description': 'md5:8684912f6086f298f8078d4af0e8a600',
'description': 'md5:f3db3d995763b9bbb7b56d4ccdedea89',
'thumbnail': r're:^https://avatars\.dzeninfra\.ru/',
'uploader': 'AcademeG DailyStream',
'upload_date': '20191111',
@ -258,7 +258,7 @@ class ZenYandexIE(InfoExtractor):
video_id = self._match_id(redirect)
webpage = self._download_webpage(redirect, video_id, note='Redirecting')
data_json = self._search_json(
r'("data"\s*:|data\s*=)', webpage, 'metadata', video_id, contains_pattern=r'{["\']_*serverState_*video.+}')
r'data\s*=', webpage, 'metadata', video_id, contains_pattern=r'{["\']_*serverState_*video.+}')
serverstate = self._search_regex(r'(_+serverState_+video-site_[^_]+_+)',
webpage, 'server state').replace('State', 'Settings')
uploader = self._search_regex(r'(<a\s*class=["\']card-channel-link[^"\']+["\'][^>]+>)',
@ -266,25 +266,22 @@ class ZenYandexIE(InfoExtractor):
uploader_name = extract_attributes(uploader).get('aria-label')
video_json = try_get(data_json, lambda x: x[serverstate]['exportData']['video'], dict)
stream_urls = try_get(video_json, lambda x: x['video']['streams'])
formats, subtitles = [], {}
formats = []
for s_url in stream_urls:
ext = determine_ext(s_url)
if ext == 'mpd':
fmts, subs = self._extract_mpd_formats_and_subtitles(s_url, video_id, mpd_id='dash')
formats.extend(self._extract_mpd_formats(s_url, video_id, mpd_id='dash'))
elif ext == 'm3u8':
fmts, subs = self._extract_m3u8_formats_and_subtitles(s_url, video_id, 'mp4')
formats.extend(fmts)
subtitles = self._merge_subtitles(subtitles, subs)
formats.extend(self._extract_m3u8_formats(s_url, video_id, 'mp4'))
return {
'id': video_id,
'title': video_json.get('title') or self._og_search_title(webpage),
'formats': formats,
'subtitles': subtitles,
'duration': int_or_none(video_json.get('duration')),
'view_count': int_or_none(video_json.get('views')),
'timestamp': int_or_none(video_json.get('publicationDate')),
'uploader': uploader_name or data_json.get('authorName') or try_get(data_json, lambda x: x['publisher']['name']),
'description': video_json.get('description') or self._og_search_description(webpage),
'description': self._og_search_description(webpage) or try_get(data_json, lambda x: x['og']['description']),
'thumbnail': self._og_search_thumbnail(webpage) or try_get(data_json, lambda x: x['og']['imageUrl']),
}
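The *_formats_and_subtitles helpers used above return a (formats, subtitles) pair, so each stream's subtitles need to be merged per language rather than overwritten. Roughly what that merge amounts to, as a standalone sketch (the real _merge_subtitles helper is more general):

def merge_subtitles(target, new):
    # subtitle dicts map language codes to lists of track dicts
    for lang, tracks in (new or {}).items():
        target.setdefault(lang, []).extend(tracks)
    return target

subs = {}
merge_subtitles(subs, {'ru': [{'url': 'https://example.com/a.vtt', 'ext': 'vtt'}]})
merge_subtitles(subs, {'ru': [{'url': 'https://example.com/b.vtt', 'ext': 'vtt'}]})
# subs['ru'] now holds both tracks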
@ -299,7 +296,6 @@ class ZenYandexChannelIE(InfoExtractor):
'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56',
},
'playlist_mincount': 169,
'skip': 'The page does not exist',
}, {
'url': 'https://dzen.ru/tok_media',
'info_dict': {
@ -308,7 +304,6 @@ class ZenYandexChannelIE(InfoExtractor):
'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56',
},
'playlist_mincount': 169,
'skip': 'The page does not exist',
}, {
'url': 'https://zen.yandex.ru/id/606fd806cc13cb3c58c05cf5',
'info_dict': {
@ -323,21 +318,21 @@ class ZenYandexChannelIE(InfoExtractor):
'url': 'https://zen.yandex.ru/jony_me',
'info_dict': {
'id': 'jony_me',
'description': 'md5:ce0a5cad2752ab58701b5497835b2cc5',
'description': 'md5:a2c62b4ef5cf3e3efb13d25f61f739e1',
'title': 'JONY ',
},
'playlist_count': 18,
'playlist_count': 20,
}, {
# Test that the playlist extractor finishes extracting when the
# channel has more than one page of entries
'url': 'https://zen.yandex.ru/tatyanareva',
'info_dict': {
'id': 'tatyanareva',
'description': 'md5:40a1e51f174369ec3ba9d657734ac31f',
'description': 'md5:296b588d60841c3756c9105f237b70c6',
'title': 'Татьяна Рева',
'entries': 'maxcount:200',
},
'playlist_mincount': 46,
'playlist_count': 46,
}, {
'url': 'https://dzen.ru/id/606fd806cc13cb3c58c05cf5',
'info_dict': {
@ -380,7 +375,7 @@ class ZenYandexChannelIE(InfoExtractor):
item_id = self._match_id(redirect)
webpage = self._download_webpage(redirect, item_id, note='Redirecting')
data = self._search_json(
r'("data"\s*:|data\s*=)', webpage, 'channel data', item_id, contains_pattern=r'{\"__serverState__.+}')
r'var\s+data\s*=', webpage, 'channel data', item_id, contains_pattern=r'{\"__serverState__.+}')
server_state_json = traverse_obj(data, lambda k, _: k.startswith('__serverState__'), get_all=False)
server_settings_json = traverse_obj(data, lambda k, _: k.startswith('__serverSettings__'), get_all=False)

View File

@ -6687,7 +6687,7 @@ class YoutubePlaylistIE(InfoExtractor):
'uploader_url': 'https://www.youtube.com/@milan5503',
'availability': 'public',
},
'expected_warnings': [r'[Uu]navailable videos? (is|are|will be) hidden', 'Retrying', 'Giving up'],
'expected_warnings': [r'[Uu]navailable videos? (is|are|will be) hidden'],
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 455,

View File

@ -10,7 +10,6 @@ from ..utils import (
int_or_none,
try_call,
urljoin,
url_or_none
)
from ..utils.traversal import traverse_obj
@ -50,7 +49,7 @@ class ZingMp3BaseIE(InfoExtractor):
'hub': '/api/v2/page/get/hub-detail',
'new-release': '/api/v2/chart/get/new-release',
'top100': '/api/v2/page/get/top-100',
'podcast-new': '/api/v2/podcast/program/get/list-by-type',
'podcast-discover': '/api/v2/podcast/program/get/list-by-type',
'top-podcast': '/api/v2/podcast/program/get/top-episode',
}
@ -456,9 +455,10 @@ class ZingMp3UserIE(ZingMp3BaseIE):
# Handle for new-release
if alias == 'new-release' and url_type in ('song', 'album'):
self._IE_NAME = 'zingmp3:NewRelease'
_id = f'{alias}-{url_type}'
return self.playlist_result(self._parse_items(
self._call_api('new-release', params={'type': url_type}, display_id=_id)), _id)
data = self._call_api('new-release', params={'type': url_type}, display_id=_id)
return self.playlist_result(self._parse_items(data), _id)
else:
# Handle for user/artist
if url_type in ('bai-hat', 'video'):
@ -553,7 +553,7 @@ class ZingMp3LiveRadioIE(ZingMp3BaseIE):
'subtitles': subtitles,
**traverse_obj(info, {
'title': 'title',
'thumbnail': (('thumbnail', 'thumbnailM', 'thumbnailV', 'thumbnailH'), {url_or_none}),
'thumbnail': (None, ('thumbnail', 'thumbnailM', 'thumbnailV', 'thumbnailH')),
'view_count': ('activeUsers', {int_or_none}),
'like_count': ('totalReaction', {int_or_none}),
'description': 'description',
@ -562,7 +562,7 @@ class ZingMp3LiveRadioIE(ZingMp3BaseIE):
class ZingMp3PodcastEpisodeIE(ZingMp3BaseIE):
IE_NAME = 'zingmp3:podcast-episode'
IE_NAME = 'zingmp3:PodcastEpisode'
_VALID_URL = ZingMp3BaseIE._VALID_URL_TMPL % 'pgr|cgr'
_TESTS = [{
'url': 'https://zingmp3.vn/pgr/Nhac-Moi-Moi-Ngay/68Z9W66B.html',
@ -596,9 +596,9 @@ class ZingMp3PodcastEpisodeIE(ZingMp3BaseIE):
entries, podcast_id, podcast_info.get('title'), podcast_info.get('description'))
class ZingMp3PodcastIE(ZingMp3BaseIE):
IE_NAME = 'zingmp3:podcast'
_VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<id>(?:cgr|top-podcast|podcast-new))/?(?:[#?]|$)'
class ZingMp3PodcastCategoriesIE(ZingMp3BaseIE):
IE_NAME = 'zingmp3:podcast-categories'
_VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<id>(?:cgr|top-podcast))/?(?:[#?]|$)'
_TESTS = [{
'url': 'https://zingmp3.vn/cgr',
'info_dict': {
@ -611,7 +611,18 @@ class ZingMp3PodcastIE(ZingMp3BaseIE):
'id': 'top-podcast',
},
'playlist_mincount': 7,
}, {
}]
def _real_extract(self, url):
url_type = self._match_id(url)
data = self._call_api('cgrs' if url_type == 'cgr' else url_type, {'id': url_type})
return self.playlist_result(self._parse_items(data.get('items')), url_type)
class ZingMp3PodcastNewIE(ZingMp3BaseIE):
IE_NAME = 'zingmp3:podcast-new'
_VALID_URL = r'https?://(?:mp3\.zing|zingmp3)\.vn/(?P<id>podcast-new)/?(?:[#?]|$)'
_TESTS = [{
'url': 'https://zingmp3.vn/podcast-new',
'info_dict': {
'id': 'podcast-new',
@ -621,8 +632,5 @@ class ZingMp3PodcastIE(ZingMp3BaseIE):
def _real_extract(self, url):
url_type = self._match_id(url)
params = {'id': url_type}
if url_type == 'podcast-new':
params.update({'type': 'new'})
data = self._call_api('cgrs' if url_type == 'cgr' else url_type, params)
data = self._call_api('podcast-discover', {'id': url_type, 'type': 'new'})
return self.playlist_result(self._parse_items(data.get('items')), url_type)

View File

@ -255,8 +255,7 @@ class RequestsRH(RequestHandler, InstanceStoreMixin):
handler.setFormatter(logging.Formatter('requests: %(message)s'))
handler.addFilter(Urllib3LoggingFilter())
logger.addHandler(handler)
# TODO: Use a logger filter to suppress pool reuse warning instead
logger.setLevel(logging.ERROR)
logger.setLevel(logging.WARNING)
if self.verbose:
# Setting this globally is not ideal, but is easier than hacking with urllib3.
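A possible shape for the logger filter mentioned in the TODO above, as a standalone sketch (the exact urllib3 message text is an assumption):

import logging

class PoolReuseWarningFilter(logging.Filter):
    def filter(self, record):
        # Drop only the noisy "Connection pool is full" messages; keep everything else.
        return 'Connection pool is full' not in record.getMessage()

logging.getLogger('urllib3.connectionpool').addFilter(PoolReuseWarningFilter())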

View File

@ -123,7 +123,6 @@ def clean_headers(headers: HTTPHeaderDict):
if 'Youtubedl-No-Compression' in headers: # compat
del headers['Youtubedl-No-Compression']
headers['Accept-Encoding'] = 'identity'
headers.pop('Ytdl-socks-proxy', None)
def remove_dot_segments(path):
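clean_headers operates on yt-dlp's case-insensitive HTTPHeaderDict, which is why popping 'Ytdl-socks-proxy' and checking 'Youtubedl-No-Compression' work regardless of the caller's capitalisation. A small illustration (import path assumed from the current layout):

from yt_dlp.utils.networking import HTTPHeaderDict  # assumed location

headers = HTTPHeaderDict({'youtubedl-no-compression': '1', 'referer': 'https://example.com'})
'Youtubedl-No-Compression' in headers   # -> True: keys match case-insensitively
headers.pop('Ytdl-socks-proxy', None)   # absent keys are ignored, no KeyError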

View File

@ -1,8 +1,8 @@
# Autogenerated by devscripts/update-version.py
__version__ = '2023.11.16'
__version__ = '2023.10.13'
RELEASE_GIT_HEAD = '24f827875c6ba513f12ed09a3aef2bbed223760d'
RELEASE_GIT_HEAD = 'b634ba742d8f38ce9ecfa0546485728b0c6c59d1'
VARIANT = None
@ -12,4 +12,4 @@ CHANNEL = 'stable'
ORIGIN = 'yt-dlp/yt-dlp'
_pkg_version = '2023.11.16'
_pkg_version = '2023.10.13'