Compare commits

..

1 Commits

Author SHA1 Message Date
Chris Caruso
ddfa1aeb86
Merge 7f7356078c into 4613096f2e 2024-11-03 19:19:12 -05:00
86 changed files with 394 additions and 784 deletions

View File

@ -63,15 +63,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -75,15 +75,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -71,15 +71,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -56,15 +56,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -52,15 +52,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -58,15 +58,14 @@ body:
placeholder: | placeholder: |
[debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc'] [debug] Command-line config: ['-vU', 'https://www.youtube.com/watch?v=BaW_jenozKc']
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8 [debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version nightly@... from yt-dlp/yt-dlp-nightly-builds [1a176d874] (win_exe) [debug] yt-dlp version nightly@... from yt-dlp/yt-dlp [b634ba742] (win_exe)
[debug] Python 3.10.11 (CPython AMD64 64bit) - Windows-10-10.0.20348-SP0 (OpenSSL 1.1.1t 7 Feb 2023) [debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
[debug] exe versions: ffmpeg 7.0.2 (setts), ffprobe 7.0.2 [debug] exe versions: ffmpeg N-106550-g072101bd52-20220410 (fdk,setts), ffprobe N-106624-g391ce570c8-20220415, phantomjs 2.1.1
[debug] Optional libraries: Cryptodome-3.21.0, brotli-1.1.0, certifi-2024.08.30, curl_cffi-0.5.10, mutagen-1.47.0, requests-2.32.3, sqlite3-3.40.1, urllib3-2.2.3, websockets-13.1 [debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
[debug] Proxy map: {} [debug] Proxy map: {}
[debug] Request Handlers: urllib, requests, websockets, curl_cffi [debug] Request Handlers: urllib, requests
[debug] Loaded 1838 extractors [debug] Loaded 1893 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest [debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp-nightly-builds/releases/latest
Latest version: nightly@... from yt-dlp/yt-dlp-nightly-builds
yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds) yt-dlp is up to date (nightly@... from yt-dlp/yt-dlp-nightly-builds)
[youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc [youtube] Extracting URL: https://www.youtube.com/watch?v=BaW_jenozKc
<more lines> <more lines>

View File

@ -688,10 +688,3 @@ KarboniteKream
mikkovedru mikkovedru
pktiuk pktiuk
rubyevadestaxes rubyevadestaxes
avagordon01
CounterPillow
JoseAngelB
KBelmin
kesor
MellowKyler
Wesley107772

View File

@ -4,62 +4,6 @@
# To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
--> -->
### 2024.11.04
#### Important changes
- **Beginning with this release, yt-dlp's Python dependencies *must* be installed using the `default` group**
If you're installing yt-dlp with pip/pipx or requiring yt-dlp in your own Python project, you'll need to specify `yt-dlp[default]` if you want to also install yt-dlp's optional dependencies (which were previously included by default). [Read more](https://github.com/yt-dlp/yt-dlp/pull/11255)
- **The minimum *required* Python version has been raised to 3.9**
Python 3.8 reached its end-of-life on 2024.10.07, and yt-dlp has now removed support for it. As an unfortunate side effect, the official `yt-dlp.exe` and `yt-dlp_x86.exe` binaries are no longer supported on Windows 7. [Read more](https://github.com/yt-dlp/yt-dlp/issues/10086)
#### Core changes
- [Allow thumbnails with `.jpe` extension](https://github.com/yt-dlp/yt-dlp/commit/5bc5fb2835ea59bdf326bd12176d74d2c7348a95) ([#11408](https://github.com/yt-dlp/yt-dlp/issues/11408)) by [bashonly](https://github.com/bashonly)
- [Expand paths in `--plugin-dirs`](https://github.com/yt-dlp/yt-dlp/commit/914af9a0cf51c9a3f74aa88d952bee8334c67511) ([#11334](https://github.com/yt-dlp/yt-dlp/issues/11334)) by [bashonly](https://github.com/bashonly)
- [Fix `--netrc` empty string parsing for Python <=3.10](https://github.com/yt-dlp/yt-dlp/commit/88402b714ec124633933737bc156b172a3dec3d6) ([#11414](https://github.com/yt-dlp/yt-dlp/issues/11414)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
- [Populate format sorting fields before dependent fields](https://github.com/yt-dlp/yt-dlp/commit/5c880ef42e9c2b2fc412f6d69dad37d34fb75a62) ([#11353](https://github.com/yt-dlp/yt-dlp/issues/11353)) by [Grub4K](https://github.com/Grub4K)
- [Prioritize AV1](https://github.com/yt-dlp/yt-dlp/commit/3945677a75e94a1fecc085432d791e1c21220cd3) ([#11153](https://github.com/yt-dlp/yt-dlp/issues/11153)) by [seproDev](https://github.com/seproDev)
- [Remove Python 3.8 support](https://github.com/yt-dlp/yt-dlp/commit/d784464399b600ba9516bbcec6286f11d68974dd) ([#11321](https://github.com/yt-dlp/yt-dlp/issues/11321)) by [bashonly](https://github.com/bashonly)
- **aes**: [Fix GCM pad length calculation](https://github.com/yt-dlp/yt-dlp/commit/beae2db127d3b5017cbcf685da9de7a9ef496541) ([#11438](https://github.com/yt-dlp/yt-dlp/issues/11438)) by [seproDev](https://github.com/seproDev)
- **cookies**: [Support chrome table version 24](https://github.com/yt-dlp/yt-dlp/commit/4613096f2e6eab9dcbac0e98b6cec760bbc99375) ([#11425](https://github.com/yt-dlp/yt-dlp/issues/11425)) by [kesor](https://github.com/kesor), [seproDev](https://github.com/seproDev)
- **utils**
- [Allow partial application for more functions](https://github.com/yt-dlp/yt-dlp/commit/b6dc2c49e8793c6dfa21275e61caf49ec1148b81) ([#11391](https://github.com/yt-dlp/yt-dlp/issues/11391)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K) (With fixes in [422195e](https://github.com/yt-dlp/yt-dlp/commit/422195ec70a00b0d2002b238cacbae7790c57fdf) by [Grub4K](https://github.com/Grub4K))
- [Fix `find_element` by class](https://github.com/yt-dlp/yt-dlp/commit/f93c16395cea1fe9ffc3c594d3e019c3b214544c) ([#11402](https://github.com/yt-dlp/yt-dlp/issues/11402)) by [bashonly](https://github.com/bashonly)
- [Fix and improve `find_element` and `find_elements`](https://github.com/yt-dlp/yt-dlp/commit/b103aca24d35b72b405c340357dc01a0ed534281) ([#11443](https://github.com/yt-dlp/yt-dlp/issues/11443)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
#### Extractor changes
- [Resolve `language` to ISO639-2 for ISM formats](https://github.com/yt-dlp/yt-dlp/commit/21cdcf03a237a0c4979c941d5a5385cae44c7906) ([#11359](https://github.com/yt-dlp/yt-dlp/issues/11359)) by [bashonly](https://github.com/bashonly)
- **ardmediathek**: [Extract chapters](https://github.com/yt-dlp/yt-dlp/commit/59f8dd8239c31f00b708da53b39b1e2e9409b6e6) ([#11442](https://github.com/yt-dlp/yt-dlp/issues/11442)) by [iw0nderhow](https://github.com/iw0nderhow)
- **bfmtv**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/754940e9a558565d6bd3c0c529802569b1d0ae4e) ([#11444](https://github.com/yt-dlp/yt-dlp/issues/11444)) by [seproDev](https://github.com/seproDev)
- **bluesky**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/5c7a5aaab27e9c3cb367b663a6136ca58866e547) ([#11055](https://github.com/yt-dlp/yt-dlp/issues/11055)) by [MellowKyler](https://github.com/MellowKyler), [seproDev](https://github.com/seproDev)
- **ccma**: [Support new 3cat.cat domain](https://github.com/yt-dlp/yt-dlp/commit/330335386d4f7603d92d6796798375336005275e) ([#11222](https://github.com/yt-dlp/yt-dlp/issues/11222)) by [JoseAngelB](https://github.com/JoseAngelB)
- **chzzk**: video: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/9c6534da81e485b2325b3489ee4128943e6d3e4b) ([#11228](https://github.com/yt-dlp/yt-dlp/issues/11228)) by [hui1601](https://github.com/hui1601)
- **cnn**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/9acf79c91a8c6c55ca972747c6858e784e2da351) ([#10185](https://github.com/yt-dlp/yt-dlp/issues/10185)) by [kylegustavo](https://github.com/kylegustavo), [seproDev](https://github.com/seproDev)
- **dailymotion**
- [Improve embed extraction](https://github.com/yt-dlp/yt-dlp/commit/a403dcf9be20b49cbb3017328f4aaa352fb6d685) ([#10843](https://github.com/yt-dlp/yt-dlp/issues/10843)) by [bashonly](https://github.com/bashonly), [pzhlkj6612](https://github.com/pzhlkj6612)
- [Support shortened URLs](https://github.com/yt-dlp/yt-dlp/commit/d1358231371f20fa23020fa9176be3b56119873e) ([#11374](https://github.com/yt-dlp/yt-dlp/issues/11374)) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev)
- **facebook**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/ec9b25043f399de6a591d8370d32bf0e66c117f2) ([#11343](https://github.com/yt-dlp/yt-dlp/issues/11343)) by [kclauhk](https://github.com/kclauhk)
- **generic**: [Do not impersonate by default](https://github.com/yt-dlp/yt-dlp/commit/c29f5a7fae93a08f3cfbb6127b2faa75145b06a0) ([#11336](https://github.com/yt-dlp/yt-dlp/issues/11336)) by [bashonly](https://github.com/bashonly)
- **nfl**: [Fix extractors](https://github.com/yt-dlp/yt-dlp/commit/838f4385de8300a4dd4e7ffbbf0e5b7b85fb52c2) ([#11409](https://github.com/yt-dlp/yt-dlp/issues/11409)) by [bashonly](https://github.com/bashonly)
- **niconicouser**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6abef74232c0fc695cd803c18ae446cacb129389) ([#11324](https://github.com/yt-dlp/yt-dlp/issues/11324)) by [Wesley107772](https://github.com/Wesley107772)
- **soundcloud**: [Extract artists](https://github.com/yt-dlp/yt-dlp/commit/f101e5d34c97c608156ad5396714c2a2edca966a) ([#11377](https://github.com/yt-dlp/yt-dlp/issues/11377)) by [seproDev](https://github.com/seproDev)
- **tumblr**: [Support more URLs](https://github.com/yt-dlp/yt-dlp/commit/b03267bf0675eeb8df5baf1daac7cf67840c91a5) ([#6057](https://github.com/yt-dlp/yt-dlp/issues/6057)) by [selfisekai](https://github.com/selfisekai), [seproDev](https://github.com/seproDev)
- **twitter**: [Remove cookies migration workaround](https://github.com/yt-dlp/yt-dlp/commit/76802f461332d444e596437c42374fa237fa5174) ([#11392](https://github.com/yt-dlp/yt-dlp/issues/11392)) by [bashonly](https://github.com/bashonly)
- **vimeo**: [Fix API retries](https://github.com/yt-dlp/yt-dlp/commit/57212a5f97ce367590aaa5c3e9a135eead8f81f7) ([#11351](https://github.com/yt-dlp/yt-dlp/issues/11351)) by [bashonly](https://github.com/bashonly)
- **yle_areena**: [Support live events](https://github.com/yt-dlp/yt-dlp/commit/a6783a3b9905e547f6c1d4df9d7c7999feda8afa) ([#11358](https://github.com/yt-dlp/yt-dlp/issues/11358)) by [bashonly](https://github.com/bashonly), [CounterPillow](https://github.com/CounterPillow)
- **youtube**: [Adjust OAuth refresh token handling](https://github.com/yt-dlp/yt-dlp/commit/d569a8845254d90ce13ad74ae76695e8d6441068) ([#11414](https://github.com/yt-dlp/yt-dlp/issues/11414)) by [bashonly](https://github.com/bashonly)
#### Misc. changes
- **build**
- [Disable attestations for trusted publishing](https://github.com/yt-dlp/yt-dlp/commit/428ffb75aa3534b275cf54de42693a4d261519da) ([#11418](https://github.com/yt-dlp/yt-dlp/issues/11418)) by [bashonly](https://github.com/bashonly)
- [Move optional dependencies to the `default` group](https://github.com/yt-dlp/yt-dlp/commit/87884f15580910e4e0fe0e1db73508debc657471) ([#11255](https://github.com/yt-dlp/yt-dlp/issues/11255)) by [bashonly](https://github.com/bashonly)
- [Use Ubuntu 20.04 and Python 3.9 for Linux ARM builds](https://github.com/yt-dlp/yt-dlp/commit/dd2e24446954246a2ec4d4a7e95531f52a14b351) ([#8638](https://github.com/yt-dlp/yt-dlp/issues/8638)) by [bashonly](https://github.com/bashonly)
- **cleanup**
- Miscellaneous
- [ea9e35d](https://github.com/yt-dlp/yt-dlp/commit/ea9e35d85fba5eab341cdcaf1eaed69b57f7e465) by [bashonly](https://github.com/bashonly)
- [c998238](https://github.com/yt-dlp/yt-dlp/commit/c998238c2e76c62d1d29962c6e8ebe916cc7913b) by [bashonly](https://github.com/bashonly), [KBelmin](https://github.com/KBelmin)
- [197d0b0](https://github.com/yt-dlp/yt-dlp/commit/197d0b03b6a3c8fe4fa5ace630eeffec629bf72c) by [avagordon01](https://github.com/avagordon01), [bashonly](https://github.com/bashonly), [grqz](https://github.com/grqz), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev)
- **devscripts**: `make_changelog`: [Parse full commit message for fixes](https://github.com/yt-dlp/yt-dlp/commit/0a3991edae0e10f2ea41ece9fdea5e48f789f1de) ([#11366](https://github.com/yt-dlp/yt-dlp/issues/11366)) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K)
### 2024.10.22 ### 2024.10.22
#### Important changes #### Important changes

View File

@ -479,8 +479,7 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
--no-download-archive Do not use archive file (default) --no-download-archive Do not use archive file (default)
--max-downloads NUMBER Abort after downloading NUMBER files --max-downloads NUMBER Abort after downloading NUMBER files
--break-on-existing Stop the download process when encountering --break-on-existing Stop the download process when encountering
a file that is in the archive supplied with a file that is in the archive
the --download-archive option
--no-break-on-existing Do not stop the download process when --no-break-on-existing Do not stop the download process when
encountering a file that is in the archive encountering a file that is in the archive
(default) (default)

View File

@ -190,7 +190,6 @@
- **blerp** - **blerp**
- **blogger.com** - **blogger.com**
- **Bloomberg** - **Bloomberg**
- **Bluesky**
- **BokeCC** - **BokeCC**
- **BongaCams** - **BongaCams**
- **Boosty** - **Boosty**
@ -248,7 +247,7 @@
- **cbsnews:livevideo**: CBS News Live Videos - **cbsnews:livevideo**: CBS News Live Videos
- **cbssports**: (**Currently broken**) - **cbssports**: (**Currently broken**)
- **cbssports:embed**: (**Currently broken**) - **cbssports:embed**: (**Currently broken**)
- **CCMA**: 3Cat, TV3 and Catalunya Ràdio - **CCMA**
- **CCTV**: 央视网 - **CCTV**: 央视网
- **CDA**: [*cdapl*](## "netrc machine") - **CDA**: [*cdapl*](## "netrc machine")
- **CDAFolder** - **CDAFolder**
@ -281,6 +280,8 @@
- **cmt.com**: (**Currently broken**) - **cmt.com**: (**Currently broken**)
- **CNBCVideo** - **CNBCVideo**
- **CNN** - **CNN**
- **CNNArticle**
- **CNNBlogs**
- **CNNIndonesia** - **CNNIndonesia**
- **ComedyCentral** - **ComedyCentral**
- **ComedyCentralTV** - **ComedyCentralTV**
@ -684,9 +685,9 @@
- **LastFMPlaylist** - **LastFMPlaylist**
- **LastFMUser** - **LastFMUser**
- **LaXarxaMes**: [*laxarxames*](## "netrc machine") - **LaXarxaMes**: [*laxarxames*](## "netrc machine")
- **lbry**: odysee.com - **lbry**
- **lbry:channel**: odysee.com channels - **lbry:channel**
- **lbry:playlist**: odysee.com playlists - **lbry:playlist**
- **LCI** - **LCI**
- **Lcp** - **Lcp**
- **LcpPlay** - **LcpPlay**
@ -1445,7 +1446,7 @@
- **TeleQuebecSquat** - **TeleQuebecSquat**
- **TeleQuebecVideo** - **TeleQuebecVideo**
- **TeleTask**: (**Currently broken**) - **TeleTask**: (**Currently broken**)
- **Telewebion**: (**Currently broken**) - **Telewebion**
- **Tempo** - **Tempo**
- **TennisTV**: [*tennistv*](## "netrc machine") - **TennisTV**: [*tennistv*](## "netrc machine")
- **TenPlay**: [*10play*](## "netrc machine") - **TenPlay**: [*10play*](## "netrc machine")

View File

@ -490,7 +490,7 @@ class TestTraversalHelpers:
{'url': 'https://example.com/subs/en', 'name': 'en'}, {'url': 'https://example.com/subs/en', 'name': 'en'},
], [..., { ], [..., {
'id': 'name', 'id': 'name',
'ext': ['url', {determine_ext(default_ext=None)}], 'ext': ['url', {lambda x: determine_ext(x, default_ext=None)}],
'url': 'url', 'url': 'url',
}, all, {subs_list_to_dict(ext='ext')}]) == { }, all, {subs_list_to_dict(ext='ext')}]) == {
'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}], 'de': [{'url': 'https://example.com/subs/de.ass', 'ext': 'ass'}],

View File

@ -2156,7 +2156,7 @@ Line 1
assert callable(int_or_none(scale=10)), 'missing positional parameter should apply partially' assert callable(int_or_none(scale=10)), 'missing positional parameter should apply partially'
assert int_or_none(10, scale=0.1) == 100, 'positionally passed argument should call function' assert int_or_none(10, scale=0.1) == 100, 'positionally passed argument should call function'
assert int_or_none(v=10) == 10, 'keyword passed positional should call function' assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function' assert int_or_none(scale=0.1)(10) == 100, 'call after partial applicatino should call the function'
assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially' assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
assert callable(join_nonempty()), 'varargs positional should apply partially' assert callable(join_nonempty()), 'varargs positional should apply partially'

View File

@ -708,7 +708,6 @@ from .gab import (
GabTVIE, GabTVIE,
) )
from .gaia import GaiaIE from .gaia import GaiaIE
from .gamedevtv import GameDevTVDashboardIE
from .gamejolt import ( from .gamejolt import (
GameJoltCommunityIE, GameJoltCommunityIE,
GameJoltGameIE, GameJoltGameIE,

View File

@ -1362,7 +1362,7 @@ class AdobePassIE(InfoExtractor): # XXX: Conventionally, base classes should en
def _download_webpage_handle(self, *args, **kwargs): def _download_webpage_handle(self, *args, **kwargs):
headers = self.geo_verification_headers() headers = self.geo_verification_headers()
headers.update(kwargs.get('headers') or {}) headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers kwargs['headers'] = headers
return super()._download_webpage_handle( return super()._download_webpage_handle(
*args, **kwargs) *args, **kwargs)

View File

@ -154,7 +154,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'title': ('title', {str}), 'title': ('title', {str}),
'uploader': ('writer_nick', {str}), 'uploader': ('writer_nick', {str}),
'uploader_id': ('bj_id', {str}), 'uploader_id': ('bj_id', {str}),
'duration': ('total_file_duration', {int_or_none(scale=1000)}), 'duration': ('total_file_duration', {functools.partial(int_or_none, scale=1000)}),
'thumbnail': ('thumb', {url_or_none}), 'thumbnail': ('thumb', {url_or_none}),
}) })
@ -178,7 +178,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
'title': f'{common_info.get("title") or "Untitled"} (part {file_num})', 'title': f'{common_info.get("title") or "Untitled"} (part {file_num})',
'formats': formats, 'formats': formats,
**traverse_obj(file_element, { **traverse_obj(file_element, {
'duration': ('duration', {int_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
'timestamp': ('file_start', {unified_timestamp}), 'timestamp': ('file_start', {unified_timestamp}),
}), }),
}) })
@ -234,7 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
'catch_list', lambda _, v: v['files'][0]['file'], { 'catch_list', lambda _, v: v['files'][0]['file'], {
'id': ('files', 0, 'file_info_key', {str}), 'id': ('files', 0, 'file_info_key', {str}),
'url': ('files', 0, 'file', {url_or_none}), 'url': ('files', 0, 'file', {url_or_none}),
'duration': ('files', 0, 'duration', {int_or_none(scale=1000)}), 'duration': ('files', 0, 'duration', {functools.partial(int_or_none, scale=1000)}),
'title': ('title', {str}), 'title': ('title', {str}),
'uploader': ('writer_nick', {str}), 'uploader': ('writer_nick', {str}),
'uploader_id': ('writer_id', {str}), 'uploader_id': ('writer_id', {str}),

View File

@ -71,7 +71,7 @@ class AllstarBaseIE(InfoExtractor):
'thumbnails': (('clipImageThumb', 'clipImageSource'), {'url': {media_url_or_none}}), 'thumbnails': (('clipImageThumb', 'clipImageSource'), {'url': {media_url_or_none}}),
'duration': ('clipLength', {int_or_none}), 'duration': ('clipLength', {int_or_none}),
'filesize': ('clipSizeBytes', {int_or_none}), 'filesize': ('clipSizeBytes', {int_or_none}),
'timestamp': ('createdDate', {int_or_none(scale=1000)}), 'timestamp': ('createdDate', {functools.partial(int_or_none, scale=1000)}),
'uploader': ('username', {str}), 'uploader': ('username', {str}),
'uploader_id': ('user', '_id', {str}), 'uploader_id': ('user', '_id', {str}),
'view_count': ('views', {int_or_none}), 'view_count': ('views', {int_or_none}),

View File

@ -1,3 +1,4 @@
import functools
import json import json
import random import random
import re import re
@ -9,6 +10,7 @@ from ..utils import (
ExtractorError, ExtractorError,
extract_attributes, extract_attributes,
float_or_none, float_or_none,
get_element_html_by_id,
int_or_none, int_or_none,
parse_filesize, parse_filesize,
str_or_none, str_or_none,
@ -19,7 +21,7 @@ from ..utils import (
url_or_none, url_or_none,
urljoin, urljoin,
) )
from ..utils.traversal import find_element, traverse_obj from ..utils.traversal import traverse_obj
class BandcampIE(InfoExtractor): class BandcampIE(InfoExtractor):
@ -43,8 +45,6 @@ class BandcampIE(InfoExtractor):
'uploader_url': 'https://youtube-dl.bandcamp.com', 'uploader_url': 'https://youtube-dl.bandcamp.com',
'uploader_id': 'youtube-dl', 'uploader_id': 'youtube-dl',
'thumbnail': 'https://f4.bcbits.com/img/a3216802731_5.jpg', 'thumbnail': 'https://f4.bcbits.com/img/a3216802731_5.jpg',
'artists': ['youtube-dl "\'/\\ä↭'],
'album_artists': ['youtube-dl "\'/\\ä↭'],
}, },
'skip': 'There is a limit of 200 free downloads / month for the test song', 'skip': 'There is a limit of 200 free downloads / month for the test song',
}, { }, {
@ -271,18 +271,6 @@ class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE
'timestamp': 1311756226, 'timestamp': 1311756226,
'upload_date': '20110727', 'upload_date': '20110727',
'uploader': 'Blazo', 'uploader': 'Blazo',
'thumbnail': 'https://f4.bcbits.com/img/a1721150828_5.jpg',
'album_artists': ['Blazo'],
'uploader_url': 'https://blazo.bandcamp.com',
'release_date': '20110727',
'release_timestamp': 1311724800.0,
'track': 'Intro',
'uploader_id': 'blazo',
'track_number': 1,
'album': 'Jazz Format Mixtape vol.1',
'artists': ['Blazo'],
'duration': 19.335,
'track_id': '1353101989',
}, },
}, },
{ {
@ -294,18 +282,6 @@ class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE
'timestamp': 1311757238, 'timestamp': 1311757238,
'upload_date': '20110727', 'upload_date': '20110727',
'uploader': 'Blazo', 'uploader': 'Blazo',
'track': 'Kero One - Keep It Alive (Blazo remix)',
'release_date': '20110727',
'track_id': '38097443',
'track_number': 2,
'duration': 181.467,
'uploader_url': 'https://blazo.bandcamp.com',
'album': 'Jazz Format Mixtape vol.1',
'uploader_id': 'blazo',
'album_artists': ['Blazo'],
'artists': ['Blazo'],
'thumbnail': 'https://f4.bcbits.com/img/a1721150828_5.jpg',
'release_timestamp': 1311724800.0,
}, },
}, },
], ],
@ -313,7 +289,6 @@ class BandcampAlbumIE(BandcampIE): # XXX: Do not subclass from concrete IE
'title': 'Jazz Format Mixtape vol.1', 'title': 'Jazz Format Mixtape vol.1',
'id': 'jazz-format-mixtape-vol-1', 'id': 'jazz-format-mixtape-vol-1',
'uploader_id': 'blazo', 'uploader_id': 'blazo',
'description': 'md5:38052a93217f3ffdc033cd5dbbce2989',
}, },
'params': { 'params': {
'playlistend': 2, 'playlistend': 2,
@ -388,10 +363,10 @@ class BandcampWeeklyIE(BandcampIE): # XXX: Do not subclass from concrete IE
_VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)' _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
_TESTS = [{ _TESTS = [{
'url': 'https://bandcamp.com/?show=224', 'url': 'https://bandcamp.com/?show=224',
'md5': '61acc9a002bed93986b91168aa3ab433', 'md5': 'b00df799c733cf7e0c567ed187dea0fd',
'info_dict': { 'info_dict': {
'id': '224', 'id': '224',
'ext': 'mp3', 'ext': 'opus',
'title': 'BC Weekly April 4th 2017 - Magic Moments', 'title': 'BC Weekly April 4th 2017 - Magic Moments',
'description': 'md5:5d48150916e8e02d030623a48512c874', 'description': 'md5:5d48150916e8e02d030623a48512c874',
'duration': 5829.77, 'duration': 5829.77,
@ -401,7 +376,7 @@ class BandcampWeeklyIE(BandcampIE): # XXX: Do not subclass from concrete IE
'episode_id': '224', 'episode_id': '224',
}, },
'params': { 'params': {
'format': 'mp3-128', 'format': 'opus-lo',
}, },
}, { }, {
'url': 'https://bandcamp.com/?blah/blah@&show=228', 'url': 'https://bandcamp.com/?blah/blah@&show=228',
@ -509,7 +484,7 @@ class BandcampUserIE(InfoExtractor):
or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage)) or re.findall(r'<div[^>]+trackTitle["\'][^"\']+["\']([^"\']+)', webpage))
yield from traverse_obj(webpage, ( yield from traverse_obj(webpage, (
{find_element(id='music-grid', html=True)}, {extract_attributes}, {functools.partial(get_element_html_by_id, 'music-grid')}, {extract_attributes},
'data-client-items', {json.loads}, ..., 'page_url', {str})) 'data-client-items', {json.loads}, ..., 'page_url', {str}))
def _real_extract(self, url): def _real_extract(self, url):
@ -518,4 +493,4 @@ class BandcampUserIE(InfoExtractor):
return self.playlist_from_matches( return self.playlist_from_matches(
self._yield_items(webpage), uploader, f'Discography of {uploader}', self._yield_items(webpage), uploader, f'Discography of {uploader}',
getter=urljoin(url)) getter=functools.partial(urljoin, url))

View File

@ -1284,9 +1284,9 @@ class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE
**traverse_obj(model, { **traverse_obj(model, {
'title': ('title', {str}), 'title': ('title', {str}),
'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}), 'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
'description': ('synopses', ('long', 'medium', 'short'), {str}, filter, any), 'description': ('synopses', ('long', 'medium', 'short'), {str}, {lambda x: x or None}, any),
'duration': ('versions', 0, 'duration', {int}), 'duration': ('versions', 0, 'duration', {int}),
'timestamp': ('versions', 0, 'availableFrom', {int_or_none(scale=1000)}), 'timestamp': ('versions', 0, 'availableFrom', {functools.partial(int_or_none, scale=1000)}),
}), }),
} }
@ -1386,7 +1386,7 @@ class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE
formats = traverse_obj(media_data, ('playlist', lambda _, v: url_or_none(v['url']), { formats = traverse_obj(media_data, ('playlist', lambda _, v: url_or_none(v['url']), {
'url': ('url', {url_or_none}), 'url': ('url', {url_or_none}),
'ext': ('format', {str}), 'ext': ('format', {str}),
'tbr': ('bitrate', {int_or_none(scale=1000)}), 'tbr': ('bitrate', {functools.partial(int_or_none, scale=1000)}),
})) }))
if formats: if formats:
entry = { entry = {
@ -1398,7 +1398,7 @@ class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE
'title': ('title', {str}), 'title': ('title', {str}),
'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}), 'thumbnail': ('imageUrl', {lambda u: urljoin(url, u.replace('$recipe', 'raw'))}),
'description': ('synopses', ('long', 'medium', 'short'), {str}, any), 'description': ('synopses', ('long', 'medium', 'short'), {str}, any),
'timestamp': ('firstPublished', {int_or_none(scale=1000)}), 'timestamp': ('firstPublished', {functools.partial(int_or_none, scale=1000)}),
}), }),
} }
done = True done = True
@ -1428,7 +1428,7 @@ class BBCIE(BBCCoUkIE): # XXX: Do not subclass from concrete IE
if not entry.get('timestamp'): if not entry.get('timestamp'):
entry['timestamp'] = traverse_obj(next_data, ( entry['timestamp'] = traverse_obj(next_data, (
..., 'contents', is_type('timestamp'), 'model', ..., 'contents', is_type('timestamp'), 'model',
'timestamp', {int_or_none(scale=1000)}, any)) 'timestamp', {functools.partial(int_or_none, scale=1000)}, any))
entries.append(entry) entries.append(entry)
return self.playlist_result( return self.playlist_result(
entries, playlist_id, playlist_title, playlist_description) entries, playlist_id, playlist_title, playlist_description)

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
@ -49,7 +50,7 @@ class BibelTVBaseIE(InfoExtractor):
**traverse_obj(data, { **traverse_obj(data, {
'title': 'title', 'title': 'title',
'description': 'description', 'description': 'description',
'duration': ('duration', {int_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(int_or_none, scale=1000)}),
'timestamp': ('schedulingStart', {parse_iso8601}), 'timestamp': ('schedulingStart', {parse_iso8601}),
'season_number': 'seasonNumber', 'season_number': 'seasonNumber',
'episode_number': 'episodeNumber', 'episode_number': 'episodeNumber',

View File

@ -109,7 +109,7 @@ class BilibiliBaseIE(InfoExtractor):
fragments = traverse_obj(play_info, ('durl', lambda _, v: url_or_none(v['url']), { fragments = traverse_obj(play_info, ('durl', lambda _, v: url_or_none(v['url']), {
'url': ('url', {url_or_none}), 'url': ('url', {url_or_none}),
'duration': ('length', {float_or_none(scale=1000)}), 'duration': ('length', {functools.partial(float_or_none, scale=1000)}),
'filesize': ('size', {int_or_none}), 'filesize': ('size', {int_or_none}),
})) }))
if fragments: if fragments:
@ -124,7 +124,7 @@ class BilibiliBaseIE(InfoExtractor):
'quality': ('quality', {int_or_none}), 'quality': ('quality', {int_or_none}),
'format_id': ('quality', {str_or_none}), 'format_id': ('quality', {str_or_none}),
'format_note': ('quality', {lambda x: format_names.get(x)}), 'format_note': ('quality', {lambda x: format_names.get(x)}),
'duration': ('timelength', {float_or_none(scale=1000)}), 'duration': ('timelength', {functools.partial(float_or_none, scale=1000)}),
}), }),
**parse_resolution(format_names.get(play_info.get('quality'))), **parse_resolution(format_names.get(play_info.get('quality'))),
}) })
@ -1585,7 +1585,7 @@ class BilibiliPlaylistIE(BilibiliSpaceListBaseIE):
'title': ('title', {str}), 'title': ('title', {str}),
'uploader': ('upper', 'name', {str}), 'uploader': ('upper', 'name', {str}),
'uploader_id': ('upper', 'mid', {str_or_none}), 'uploader_id': ('upper', 'mid', {str_or_none}),
'timestamp': ('ctime', {int_or_none}, filter), 'timestamp': ('ctime', {int_or_none}, {lambda x: x or None}),
'thumbnail': ('cover', {url_or_none}), 'thumbnail': ('cover', {url_or_none}),
})), })),
} }

View File

@ -382,7 +382,7 @@ class BlueskyIE(InfoExtractor):
'age_limit': ( 'age_limit': (
'labels', ..., 'val', {lambda x: 18 if x in ('sexual', 'porn', 'graphic-media') else None}, any), 'labels', ..., 'val', {lambda x: 18 if x in ('sexual', 'porn', 'graphic-media') else None}, any),
'description': (*record_path, 'text', {str}, filter), 'description': (*record_path, 'text', {str}, filter),
'title': (*record_path, 'text', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}), 'title': (*record_path, 'text', {lambda x: x.replace('\n', '')}, {truncate_string(left=50)}),
}), }),
}) })
return entries return entries

View File

@ -1,20 +1,35 @@
import functools
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
clean_html, clean_html,
extract_attributes, extract_attributes,
get_element_text_and_html_by_tag,
get_elements_by_class,
join_nonempty, join_nonempty,
js_to_json, js_to_json,
mimetype2ext, mimetype2ext,
unified_strdate, unified_strdate,
url_or_none, url_or_none,
urljoin, urljoin,
variadic,
) )
from ..utils.traversal import ( from ..utils.traversal import traverse_obj
find_element,
traverse_obj,
) def html_get_element(tag=None, cls=None):
assert tag or cls, 'One of tag or class is required'
if cls:
func = functools.partial(get_elements_by_class, cls, tag=tag)
else:
func = functools.partial(get_element_text_and_html_by_tag, tag)
def html_get_element_wrapper(html):
return variadic(func(html))[0]
return html_get_element_wrapper
class BpbIE(InfoExtractor): class BpbIE(InfoExtractor):
@ -26,12 +41,12 @@ class BpbIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '297', 'id': '297',
'ext': 'mp4', 'ext': 'mp4',
'creators': ['Kooperative Berlin'], 'creator': 'Kooperative Berlin',
'description': r're:Joachim Gauck, .*\n\nKamera: .*', 'description': 'md5:f4f75885ba009d3e2b156247a8941ce6',
'release_date': '20150716', 'release_date': '20160115',
'series': 'Interview auf dem Geschichtsforum 1989 | 2009', 'series': 'Interview auf dem Geschichtsforum 1989 | 2009',
'tags': [], 'tags': ['Friedliche Revolution', 'Erinnerungskultur', 'Vergangenheitspolitik', 'DDR 1949 - 1990', 'Freiheitsrecht', 'BStU', 'Deutschland'],
'thumbnail': r're:https?://www\.bpb\.de/cache/images/7/297_teaser_16x9_1240\.jpg.*', 'thumbnail': 'https://www.bpb.de/cache/images/7/297_teaser_16x9_1240.jpg?8839D',
'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR', 'title': 'Joachim Gauck zu 1989 und die Erinnerung an die DDR',
'uploader': 'Bundeszentrale für politische Bildung', 'uploader': 'Bundeszentrale für politische Bildung',
}, },
@ -40,12 +55,11 @@ class BpbIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '522184', 'id': '522184',
'ext': 'mp4', 'ext': 'mp4',
'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'], 'creator': 'Institute for Strategic Dialogue Germany gGmbH (ISD)',
'description': 'md5:f83c795ff8f825a69456a9e51fc15903', 'description': 'md5:f83c795ff8f825a69456a9e51fc15903',
'release_date': '20230621', 'release_date': '20230621',
'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)', 'tags': ['Desinformation', 'Ukraine', 'Russland', 'Geflüchtete'],
'tags': [], 'thumbnail': 'https://www.bpb.de/cache/images/4/522184_teaser_16x9_1240.png?EABFB',
'thumbnail': r're:https://www\.bpb\.de/cache/images/4/522184_teaser_16x9_1240\.png.*',
'title': 'md5:9b01ccdbf58dbf9e5c9f6e771a803b1c', 'title': 'md5:9b01ccdbf58dbf9e5c9f6e771a803b1c',
'uploader': 'Bundeszentrale für politische Bildung', 'uploader': 'Bundeszentrale für politische Bildung',
}, },
@ -54,12 +68,11 @@ class BpbIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '518789', 'id': '518789',
'ext': 'mp4', 'ext': 'mp4',
'creators': ['Institute for Strategic Dialogue Germany gGmbH (ISD)'], 'creator': 'Institute for Strategic Dialogue Germany gGmbH (ISD)',
'description': 'md5:85228aed433e84ff0ff9bc582abd4ea8', 'description': 'md5:85228aed433e84ff0ff9bc582abd4ea8',
'release_date': '20230302', 'release_date': '20230302',
'series': 'Narrative über den Krieg Russlands gegen die Ukraine (NUK)', 'tags': ['Desinformation', 'Ukraine', 'Russland', 'Geflüchtete'],
'tags': [], 'thumbnail': 'https://www.bpb.de/cache/images/9/518789_teaser_16x9_1240.jpeg?56D0D',
'thumbnail': r're:https://www\.bpb\.de/cache/images/9/518789_teaser_16x9_1240\.jpeg.*',
'title': 'md5:3e956f264bb501f6383f10495a401da4', 'title': 'md5:3e956f264bb501f6383f10495a401da4',
'uploader': 'Bundeszentrale für politische Bildung', 'uploader': 'Bundeszentrale für politische Bildung',
}, },
@ -71,12 +84,12 @@ class BpbIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '315813', 'id': '315813',
'ext': 'mp3', 'ext': 'mp3',
'creators': ['Axel Schröder'], 'creator': 'Axel Schröder',
'description': 'md5:eda9d1af34e5912efef5baf54fba4427', 'description': 'md5:eda9d1af34e5912efef5baf54fba4427',
'release_date': '20200921', 'release_date': '20200921',
'series': 'Auf Endlagersuche. Der deutsche Weg zu einem sicheren Atommülllager', 'series': 'Auf Endlagersuche. Der deutsche Weg zu einem sicheren Atommülllager',
'tags': ['Atomenergie', 'Endlager', 'hoch-radioaktiver Abfall', 'Endlagersuche', 'Atommüll', 'Atomendlager', 'Gorleben', 'Deutschland'], 'tags': ['Atomenergie', 'Endlager', 'hoch-radioaktiver Abfall', 'Endlagersuche', 'Atommüll', 'Atomendlager', 'Gorleben', 'Deutschland'],
'thumbnail': r're:https://www\.bpb\.de/cache/images/3/315813_teaser_16x9_1240\.png.*', 'thumbnail': 'https://www.bpb.de/cache/images/3/315813_teaser_16x9_1240.png?92A94',
'title': 'Folge 1: Eine Einführung', 'title': 'Folge 1: Eine Einführung',
'uploader': 'Bundeszentrale für politische Bildung', 'uploader': 'Bundeszentrale für politische Bildung',
}, },
@ -85,12 +98,12 @@ class BpbIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': '517806', 'id': '517806',
'ext': 'mp3', 'ext': 'mp3',
'creators': ['Bundeszentrale für politische Bildung'], 'creator': 'Bundeszentrale für politische Bildung',
'description': 'md5:594689600e919912aade0b2871cc3fed', 'description': 'md5:594689600e919912aade0b2871cc3fed',
'release_date': '20230127', 'release_date': '20230127',
'series': 'Vorträge des Fachtags "Modernisierer. Grenzgänger. Anstifter. Sechs Jahrzehnte \'Neue Rechte\'"', 'series': 'Vorträge des Fachtags "Modernisierer. Grenzgänger. Anstifter. Sechs Jahrzehnte \'Neue Rechte\'"',
'tags': ['Rechtsextremismus', 'Konservatismus', 'Konservativismus', 'neue Rechte', 'Rechtspopulismus', 'Schnellroda', 'Deutschland'], 'tags': ['Rechtsextremismus', 'Konservatismus', 'Konservativismus', 'neue Rechte', 'Rechtspopulismus', 'Schnellroda', 'Deutschland'],
'thumbnail': r're:https://www\.bpb\.de/cache/images/6/517806_teaser_16x9_1240\.png.*', 'thumbnail': 'https://www.bpb.de/cache/images/6/517806_teaser_16x9_1240.png?7A7A0',
'title': 'Die Weltanschauung der "Neuen Rechten"', 'title': 'Die Weltanschauung der "Neuen Rechten"',
'uploader': 'Bundeszentrale für politische Bildung', 'uploader': 'Bundeszentrale für politische Bildung',
}, },
@ -134,7 +147,7 @@ class BpbIE(InfoExtractor):
video_id = self._match_id(url) video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
title_result = traverse_obj(webpage, ({find_element(cls='opening-header__title')}, {self._TITLE_RE.match})) title_result = traverse_obj(webpage, ({html_get_element(cls='opening-header__title')}, {self._TITLE_RE.match}))
json_lds = list(self._yield_json_ld(webpage, video_id, fatal=False)) json_lds = list(self._yield_json_ld(webpage, video_id, fatal=False))
return { return {
@ -143,15 +156,15 @@ class BpbIE(InfoExtractor):
# This metadata could be interpreted otherwise, but it fits "series" the most # This metadata could be interpreted otherwise, but it fits "series" the most
'series': traverse_obj(title_result, ('series', {str.strip})) or None, 'series': traverse_obj(title_result, ('series', {str.strip})) or None,
'description': join_nonempty(*traverse_obj(webpage, [( 'description': join_nonempty(*traverse_obj(webpage, [(
{find_element(cls='opening-intro')}, {html_get_element(cls='opening-intro')},
[{find_element(tag='bpb-accordion-item')}, {find_element(cls='text-content')}], [{html_get_element(tag='bpb-accordion-item')}, {html_get_element(cls='text-content')}],
), {clean_html}]), delim='\n\n') or None, ), {clean_html}]), delim='\n\n') or None,
'creators': traverse_obj(self._html_search_meta('author', webpage), all), 'creator': self._html_search_meta('author', webpage),
'uploader': self._html_search_meta('publisher', webpage), 'uploader': self._html_search_meta('publisher', webpage),
'release_date': unified_strdate(self._html_search_meta('date', webpage)), 'release_date': unified_strdate(self._html_search_meta('date', webpage)),
'tags': traverse_obj(json_lds, (..., 'keywords', {lambda x: x.split(',')}, ...)), 'tags': traverse_obj(json_lds, (..., 'keywords', {lambda x: x.split(',')}, ...)),
**traverse_obj(self._parse_vue_attributes('bpb-player', webpage, video_id), { **traverse_obj(self._parse_vue_attributes('bpb-player', webpage, video_id), {
'formats': (':sources', ..., {self._process_source}), 'formats': (':sources', ..., {self._process_source}),
'thumbnail': ('poster', {urljoin(url)}), 'thumbnail': ('poster', {lambda x: urljoin(url, x)}),
}), }),
} }

View File

@ -145,9 +145,10 @@ class BravoTVIE(AdobePassIE):
tp_metadata = self._download_json( tp_metadata = self._download_json(
update_url_query(tp_url, {'format': 'preview'}), video_id, fatal=False) update_url_query(tp_url, {'format': 'preview'}), video_id, fatal=False)
seconds_or_none = lambda x: float_or_none(x, 1000)
chapters = traverse_obj(tp_metadata, ('chapters', ..., { chapters = traverse_obj(tp_metadata, ('chapters', ..., {
'start_time': ('startTime', {float_or_none(scale=1000)}), 'start_time': ('startTime', {seconds_or_none}),
'end_time': ('endTime', {float_or_none(scale=1000)}), 'end_time': ('endTime', {seconds_or_none}),
})) }))
# prune pointless single chapters that span the entire duration from short videos # prune pointless single chapters that span the entire duration from short videos
if len(chapters) == 1 and not traverse_obj(chapters, (0, 'end_time')): if len(chapters) == 1 and not traverse_obj(chapters, (0, 'end_time')):
@ -167,8 +168,8 @@ class BravoTVIE(AdobePassIE):
**merge_dicts(traverse_obj(tp_metadata, { **merge_dicts(traverse_obj(tp_metadata, {
'title': 'title', 'title': 'title',
'description': 'description', 'description': 'description',
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {seconds_or_none}),
'timestamp': ('pubDate', {float_or_none(scale=1000)}), 'timestamp': ('pubDate', {seconds_or_none}),
'season_number': (('pl1$seasonNumber', 'nbcu$seasonNumber'), {int_or_none}), 'season_number': (('pl1$seasonNumber', 'nbcu$seasonNumber'), {int_or_none}),
'episode_number': (('pl1$episodeNumber', 'nbcu$episodeNumber'), {int_or_none}), 'episode_number': (('pl1$episodeNumber', 'nbcu$episodeNumber'), {int_or_none}),
'series': (('pl1$show', 'nbcu$show'), (None, ...), {str}), 'series': (('pl1$show', 'nbcu$show'), (None, ...), {str}),

View File

@ -8,13 +8,11 @@ from ..utils import (
bug_reports_message, bug_reports_message,
clean_html, clean_html,
format_field, format_field,
get_element_text_and_html_by_tag,
int_or_none, int_or_none,
url_or_none, url_or_none,
) )
from ..utils.traversal import ( from ..utils.traversal import traverse_obj
find_element,
traverse_obj,
)
class BundestagIE(InfoExtractor): class BundestagIE(InfoExtractor):
@ -117,8 +115,9 @@ class BundestagIE(InfoExtractor):
note='Downloading metadata overlay', fatal=False, note='Downloading metadata overlay', fatal=False,
), { ), {
'title': ( 'title': (
{find_element(tag='h3')}, {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}), {functools.partial(get_element_text_and_html_by_tag, 'h3')}, 0,
'description': ({find_element(tag='p')}, {clean_html}), {functools.partial(re.sub, r'<span[^>]*>[^<]+</span>', '')}, {clean_html}),
'description': ({functools.partial(get_element_text_and_html_by_tag, 'p')}, 0, {clean_html}),
})) }))
return result return result

View File

@ -53,7 +53,7 @@ class CaffeineTVIE(InfoExtractor):
'like_count': ('like_count', {int_or_none}), 'like_count': ('like_count', {int_or_none}),
'view_count': ('view_count', {int_or_none}), 'view_count': ('view_count', {int_or_none}),
'comment_count': ('comment_count', {int_or_none}), 'comment_count': ('comment_count', {int_or_none}),
'tags': ('tags', ..., {str}, filter), 'tags': ('tags', ..., {str}, {lambda x: x or None}),
'uploader': ('user', 'name', {str}), 'uploader': ('user', 'name', {str}),
'uploader_id': (((None, 'user'), 'username'), {str}, any), 'uploader_id': (((None, 'user'), 'username'), {str}, any),
'is_live': ('is_live', {bool}), 'is_live': ('is_live', {bool}),
@ -62,7 +62,7 @@ class CaffeineTVIE(InfoExtractor):
'title': ('broadcast_title', {str}), 'title': ('broadcast_title', {str}),
'duration': ('content_duration', {int_or_none}), 'duration': ('content_duration', {int_or_none}),
'timestamp': ('broadcast_start_time', {parse_iso8601}), 'timestamp': ('broadcast_start_time', {parse_iso8601}),
'thumbnail': ('preview_image_path', {urljoin(url)}), 'thumbnail': ('preview_image_path', {lambda x: urljoin(url, x)}),
}), }),
'age_limit': { 'age_limit': {
# assume Apple Store ratings: https://en.wikipedia.org/wiki/Mobile_software_content_rating_system # assume Apple Store ratings: https://en.wikipedia.org/wiki/Mobile_software_content_rating_system

View File

@ -453,8 +453,8 @@ class CBCPlayerIE(InfoExtractor):
chapters = traverse_obj(data, ( chapters = traverse_obj(data, (
'media', 'chapters', lambda _, v: float(v['startTime']) is not None, { 'media', 'chapters', lambda _, v: float(v['startTime']) is not None, {
'start_time': ('startTime', {float_or_none(scale=1000)}), 'start_time': ('startTime', {functools.partial(float_or_none, scale=1000)}),
'end_time': ('endTime', {float_or_none(scale=1000)}), 'end_time': ('endTime', {functools.partial(float_or_none, scale=1000)}),
'title': ('name', {str}), 'title': ('name', {str}),
})) }))
# Filter out pointless single chapters with start_time==0 and no end_time # Filter out pointless single chapters with start_time==0 and no end_time
@ -465,8 +465,8 @@ class CBCPlayerIE(InfoExtractor):
**traverse_obj(data, { **traverse_obj(data, {
'title': ('title', {str}), 'title': ('title', {str}),
'description': ('description', {str.strip}), 'description': ('description', {str.strip}),
'thumbnail': ('image', 'url', {url_or_none}, {update_url(query=None)}), 'thumbnail': ('image', 'url', {url_or_none}, {functools.partial(update_url, query=None)}),
'timestamp': ('publishedAt', {float_or_none(scale=1000)}), 'timestamp': ('publishedAt', {functools.partial(float_or_none, scale=1000)}),
'media_type': ('media', 'clipType', {str}), 'media_type': ('media', 'clipType', {str}),
'series': ('showName', {str}), 'series': ('showName', {str}),
'season_number': ('media', 'season', {int_or_none}), 'season_number': ('media', 'season', {int_or_none}),

View File

@ -96,7 +96,7 @@ class CBSNewsBaseIE(InfoExtractor):
**traverse_obj(item, { **traverse_obj(item, {
'title': (None, ('fulltitle', 'title')), 'title': (None, ('fulltitle', 'title')),
'description': 'dek', 'description': 'dek',
'timestamp': ('timestamp', {float_or_none(scale=1000)}), 'timestamp': ('timestamp', {lambda x: float_or_none(x, 1000)}),
'duration': ('duration', {float_or_none}), 'duration': ('duration', {float_or_none}),
'subtitles': ('captions', {get_subtitles}), 'subtitles': ('captions', {get_subtitles}),
'thumbnail': ('images', ('hd', 'sd'), {url_or_none}), 'thumbnail': ('images', ('hd', 'sd'), {url_or_none}),

View File

@ -1,3 +1,5 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
UserNotLive, UserNotLive,
@ -75,7 +77,7 @@ class CHZZKLiveIE(InfoExtractor):
'thumbnails': thumbnails, 'thumbnails': thumbnails,
**traverse_obj(live_detail, { **traverse_obj(live_detail, {
'title': ('liveTitle', {str}), 'title': ('liveTitle', {str}),
'timestamp': ('openDate', {parse_iso8601(delimiter=' ')}), 'timestamp': ('openDate', {functools.partial(parse_iso8601, delimiter=' ')}),
'concurrent_view_count': ('concurrentUserCount', {int_or_none}), 'concurrent_view_count': ('concurrentUserCount', {int_or_none}),
'view_count': ('accumulateCount', {int_or_none}), 'view_count': ('accumulateCount', {int_or_none}),
'channel': ('channel', 'channelName', {str}), 'channel': ('channel', 'channelName', {str}),
@ -174,7 +176,7 @@ class CHZZKVideoIE(InfoExtractor):
**traverse_obj(video_meta, { **traverse_obj(video_meta, {
'title': ('videoTitle', {str}), 'title': ('videoTitle', {str}),
'thumbnail': ('thumbnailImageUrl', {url_or_none}), 'thumbnail': ('thumbnailImageUrl', {url_or_none}),
'timestamp': ('publishDateAt', {float_or_none(scale=1000)}), 'timestamp': ('publishDateAt', {functools.partial(float_or_none, scale=1000)}),
'view_count': ('readCount', {int_or_none}), 'view_count': ('readCount', {int_or_none}),
'duration': ('duration', {int_or_none}), 'duration': ('duration', {int_or_none}),
'channel': ('channel', 'channelName', {str}), 'channel': ('channel', 'channelName', {str}),

View File

@ -3,7 +3,6 @@ import re
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
filter_dict, filter_dict,
float_or_none,
int_or_none, int_or_none,
parse_age_limit, parse_age_limit,
smuggle_url, smuggle_url,
@ -86,7 +85,7 @@ class CineverseIE(CineverseBaseIE):
'title': 'title', 'title': 'title',
'id': ('details', 'item_id'), 'id': ('details', 'item_id'),
'description': ('details', 'description'), 'description': ('details', 'description'),
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {lambda x: x / 1000}),
'cast': ('details', 'cast', {lambda x: x.split(', ')}), 'cast': ('details', 'cast', {lambda x: x.split(', ')}),
'modified_timestamp': ('details', 'updated_by', 0, 'update_time', 'time', {int_or_none}), 'modified_timestamp': ('details', 'updated_by', 0, 'update_time', 'time', {int_or_none}),
'season_number': ('details', 'season', {int_or_none}), 'season_number': ('details', 'season', {int_or_none}),

View File

@ -1,3 +1,4 @@
import functools
import json import json
import re import re
@ -198,7 +199,7 @@ class CNNIE(InfoExtractor):
'timestamp': ('data-publish-date', {parse_iso8601}), 'timestamp': ('data-publish-date', {parse_iso8601}),
'thumbnail': ( 'thumbnail': (
'data-poster-image-override', {json.loads}, 'big', 'uri', {url_or_none}, 'data-poster-image-override', {json.loads}, 'big', 'uri', {url_or_none},
{update_url(query='c=original')}), {functools.partial(update_url, query='c=original')}),
'display_id': 'data-video-slug', 'display_id': 'data-video-slug',
}), }),
**traverse_obj(video_data, { **traverse_obj(video_data, {

View File

@ -1578,9 +1578,7 @@ class InfoExtractor:
if default is not NO_DEFAULT: if default is not NO_DEFAULT:
fatal = False fatal = False
for mobj in re.finditer(JSON_LD_RE, html): for mobj in re.finditer(JSON_LD_RE, html):
json_ld_item = self._parse_json( json_ld_item = self._parse_json(mobj.group('json_ld'), video_id, fatal=fatal)
mobj.group('json_ld'), video_id, fatal=fatal,
errnote=False if default is not NO_DEFAULT else None)
for json_ld in variadic(json_ld_item): for json_ld in variadic(json_ld_item):
if isinstance(json_ld, dict): if isinstance(json_ld, dict):
yield json_ld yield json_ld

View File

@ -12,7 +12,6 @@ from ..utils import (
parse_iso8601, parse_iso8601,
strip_or_none, strip_or_none,
try_get, try_get,
urljoin,
) )
@ -113,7 +112,8 @@ class CondeNastIE(InfoExtractor):
m_paths = re.finditer( m_paths = re.finditer(
r'(?s)<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', webpage) r'(?s)<p class="cne-thumb-title">.*?<a href="(/watch/.+?)["\?]', webpage)
paths = orderedSet(m.group(1) for m in m_paths) paths = orderedSet(m.group(1) for m in m_paths)
entries = [self.url_result(urljoin(base_url, path), 'CondeNast') for path in paths] build_url = lambda path: urllib.parse.urljoin(base_url, path)
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
return self.playlist_result(entries, playlist_title=title) return self.playlist_result(entries, playlist_title=title)
def _extract_video_params(self, webpage, display_id): def _extract_video_params(self, webpage, display_id):

View File

@ -456,7 +456,7 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
}), }),
}), }),
**traverse_obj(metadata, { **traverse_obj(metadata, {
'duration': ('duration_ms', {float_or_none(scale=1000)}), 'duration': ('duration_ms', {lambda x: float_or_none(x, 1000)}),
'timestamp': ('upload_date', {parse_iso8601}), 'timestamp': ('upload_date', {parse_iso8601}),
'series': ('series_title', {str}), 'series': ('series_title', {str}),
'series_id': ('series_id', {str}), 'series_id': ('series_id', {str}),
@ -484,7 +484,7 @@ class CrunchyrollBetaIE(CrunchyrollCmsBaseIE):
}), }),
}), }),
**traverse_obj(metadata, { **traverse_obj(metadata, {
'duration': ('duration_ms', {float_or_none(scale=1000)}), 'duration': ('duration_ms', {lambda x: float_or_none(x, 1000)}),
'age_limit': ('maturity_ratings', -1, {parse_age_limit}), 'age_limit': ('maturity_ratings', -1, {parse_age_limit}),
}), }),
} }

View File

@ -40,7 +40,7 @@ class DangalPlayBaseIE(InfoExtractor):
'id': ('content_id', {str}), 'id': ('content_id', {str}),
'title': ('display_title', {str}), 'title': ('display_title', {str}),
'episode': ('title', {str}), 'episode': ('title', {str}),
'series': ('show_name', {str}, filter), 'series': ('show_name', {str}, {lambda x: x or None}),
'series_id': ('catalog_id', {str}), 'series_id': ('catalog_id', {str}),
'duration': ('duration', {int_or_none}), 'duration': ('duration', {int_or_none}),
'release_timestamp': ('release_date_uts', {int_or_none}), 'release_timestamp': ('release_date_uts', {int_or_none}),

View File

@ -207,7 +207,7 @@ class ERRJupiterIE(InfoExtractor):
**traverse_obj(data, { **traverse_obj(data, {
'title': ('heading', {str}), 'title': ('heading', {str}),
'alt_title': ('subHeading', {str}), 'alt_title': ('subHeading', {str}),
'description': (('lead', 'body'), {clean_html}, filter), 'description': (('lead', 'body'), {clean_html}, {lambda x: x or None}),
'timestamp': ('created', {int_or_none}), 'timestamp': ('created', {int_or_none}),
'modified_timestamp': ('updated', {int_or_none}), 'modified_timestamp': ('updated', {int_or_none}),
'release_timestamp': (('scheduleStart', 'publicStart'), {int_or_none}), 'release_timestamp': (('scheduleStart', 'publicStart'), {int_or_none}),

View File

@ -1,141 +0,0 @@
import json
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
clean_html,
int_or_none,
join_nonempty,
parse_iso8601,
str_or_none,
url_or_none,
)
from ..utils.traversal import traverse_obj
class GameDevTVDashboardIE(InfoExtractor):
    """Extractor for purchased GameDev.tv course content.

    Two URL shapes are supported:
      - ``/dashboard/courses/<course_id>``              -> playlist of every lecture
      - ``/dashboard/courses/<course_id>/<lecture_id>`` -> a single lecture

    All content is paywalled: extraction requires account credentials
    (``--username``/``--password`` or a .netrc entry for ``gamedevtv``), and
    every API request carries the bearer token obtained at login.
    """
    _VALID_URL = r'https?://(?:www\.)?gamedev\.tv/dashboard/courses/(?P<course_id>\d+)(?:/(?P<lecture_id>\d+))?'
    _NETRC_MACHINE = 'gamedevtv'
    _TESTS = [{
        'url': 'https://www.gamedev.tv/dashboard/courses/25',
        'info_dict': {
            'id': '25',
            'title': 'Complete Blender Creator 3: Learn 3D Modelling for Beginners',
            'tags': ['blender', 'course', 'all', 'box modelling', 'sculpting'],
            'categories': ['Blender', '3D Art'],
            'thumbnail': 'https://gamedev-files.b-cdn.net/courses/qisc9pmu1jdc.jpg',
            'upload_date': '20220516',
            'timestamp': 1652694420,
            'modified_date': '20241027',
            'modified_timestamp': 1730049658,
        },
        'playlist_count': 100,
    }, {
        'url': 'https://www.gamedev.tv/dashboard/courses/63/2279',
        'info_dict': {
            'id': 'df04f4d8-68a4-4756-a71b-9ca9446c3a01',
            'ext': 'mp4',
            'modified_timestamp': 1701695752,
            'upload_date': '20230504',
            'episode': 'MagicaVoxel Community Course Introduction',
            'series_id': '63',
            'title': 'MagicaVoxel Community Course Introduction',
            'timestamp': 1683195397,
            'modified_date': '20231204',
            'categories': ['3D Art', 'MagicaVoxel'],
            'season': 'MagicaVoxel Community Course',
            'tags': ['MagicaVoxel', 'all', 'course'],
            'series': 'MagicaVoxel 3D Art Mini Course',
            'duration': 1405,
            'episode_number': 1,
            'season_number': 1,
            'season_id': '219',
            'description': 'md5:a378738c5bbec1c785d76c067652d650',
            'display_id': '63-219-2279',
            'alt_title': '1_CC_MVX MagicaVoxel Community Course Introduction.mp4',
            'thumbnail': 'https://vz-23691c65-6fa.b-cdn.net/df04f4d8-68a4-4756-a71b-9ca9446c3a01/thumbnail.jpg',
        },
    }]
    # Class-level (shared across instances) header store; _perform_login fills
    # in 'Authorization' and every subsequent API call sends these headers.
    _API_HEADERS = {}

    def _perform_login(self, username, password):
        """Exchange credentials for a bearer token and cache it in _API_HEADERS.

        A 401 response is reported as invalid credentials (expected error);
        any other HTTP failure propagates unchanged.
        """
        try:
            response = self._download_json(
                'https://api.gamedev.tv/api/students/login', None, 'Logging in',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({
                    'email': username,
                    'password': password,
                    # The login endpoint expects a cart payload; send an empty one.
                    'cart_items': [],
                }).encode())
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                raise ExtractorError('Invalid username/password', expected=True)
            raise
        # e.g. "Bearer <access_token>"
        self._API_HEADERS['Authorization'] = f'{response["token_type"]} {response["access_token"]}'

    def _real_initialize(self):
        # No anonymous access: bail out early unless _perform_login succeeded.
        if not self._API_HEADERS.get('Authorization'):
            self.raise_login_required(
                'This content is only available with purchase', method='password')

    def _entries(self, data, course_id, course_info, selected_lecture):
        """Yield one info dict per lecture of the course.

        Sections map to seasons; lectures without a valid ``playListUrl`` are
        skipped by the traversal match. When ``selected_lecture`` is set, only
        the lecture with that id is yielded.
        """
        for section in traverse_obj(data, ('sections', ..., {dict})):
            section_info = traverse_obj(section, {
                'season_id': ('id', {str_or_none}),
                'season': ('title', {str}),
                'season_number': ('order', {int_or_none}),
            })
            for lecture in traverse_obj(section, ('lectures', lambda _, v: url_or_none(v['video']['playListUrl']))):
                if selected_lecture and str(lecture.get('id')) != selected_lecture:
                    continue
                # e.g. "63-219-2279" (course-season-lecture); join_nonempty drops missing parts
                display_id = join_nonempty(course_id, section_info.get('season_id'), lecture.get('id'))
                formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                    lecture['video']['playListUrl'], display_id, 'mp4', m3u8_id='hls')
                yield {
                    **course_info,
                    **section_info,
                    'id': display_id,  # fallback
                    'display_id': display_id,
                    'formats': formats,
                    'subtitles': subtitles,
                    'series': course_info.get('title'),
                    'series_id': course_id,
                    # Lecture-level fields win over the course/section fallbacks above
                    **traverse_obj(lecture, {
                        'id': ('video', 'guid', {str}),
                        'title': ('title', {str}),
                        'alt_title': ('video', 'title', {str}),
                        'description': ('description', {clean_html}),
                        'episode': ('title', {str}),
                        'episode_number': ('order', {int_or_none}),
                        'duration': ('video', 'duration_in_sec', {int_or_none}),
                        'timestamp': ('video', 'created_at', {parse_iso8601}),
                        'modified_timestamp': ('video', 'updated_at', {parse_iso8601}),
                        'thumbnail': ('video', 'thumbnailUrl', {url_or_none}),
                    }),
                }

    def _real_extract(self, url):
        course_id, lecture_id = self._match_valid_url(url).group('course_id', 'lecture_id')
        # Authenticated course dump; contains sections -> lectures -> video info
        data = self._download_json(
            f'https://api.gamedev.tv/api/courses/my/{course_id}', course_id,
            headers=self._API_HEADERS)['data']
        course_info = traverse_obj(data, {
            'title': ('title', {str}),
            'tags': ('tags', ..., 'name', {str}),
            'categories': ('categories', ..., 'title', {str}),
            'timestamp': ('created_at', {parse_iso8601}),
            'modified_timestamp': ('updated_at', {parse_iso8601}),
            'thumbnail': ('image', {url_or_none}),
        })
        entries = self._entries(data, course_id, course_info, lecture_id)
        if lecture_id:
            # Single-lecture URL: _entries yields at most that one lecture
            lecture = next(entries, None)
            if not lecture:
                raise ExtractorError('Lecture not found')
            return lecture
        return self.playlist_result(entries, course_id, **course_info)

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
@ -62,7 +63,7 @@ class IlPostIE(InfoExtractor):
'url': ('podcast_raw_url', {url_or_none}), 'url': ('podcast_raw_url', {url_or_none}),
'thumbnail': ('image', {url_or_none}), 'thumbnail': ('image', {url_or_none}),
'timestamp': ('timestamp', {int_or_none}), 'timestamp': ('timestamp', {int_or_none}),
'duration': ('milliseconds', {float_or_none(scale=1000)}), 'duration': ('milliseconds', {functools.partial(float_or_none, scale=1000)}),
'availability': ('free', {lambda v: 'public' if v else 'subscriber_only'}), 'availability': ('free', {lambda v: 'public' if v else 'subscriber_only'}),
}), }),
} }

View File

@ -326,11 +326,11 @@ class JioCinemaIE(JioCinemaBaseIE):
# fallback metadata # fallback metadata
'title': ('name', {str}), 'title': ('name', {str}),
'description': ('fullSynopsis', {str}), 'description': ('fullSynopsis', {str}),
'series': ('show', 'name', {str}, filter), 'series': ('show', 'name', {str}, {lambda x: x or None}),
'season': ('tournamentName', {str}, {lambda x: x if x != 'Season 0' else None}), 'season': ('tournamentName', {str}, {lambda x: x if x != 'Season 0' else None}),
'season_number': ('episode', 'season', {int_or_none}, filter), 'season_number': ('episode', 'season', {int_or_none}, {lambda x: x or None}),
'episode': ('fullTitle', {str}), 'episode': ('fullTitle', {str}),
'episode_number': ('episode', 'episodeNo', {int_or_none}, filter), 'episode_number': ('episode', 'episodeNo', {int_or_none}, {lambda x: x or None}),
'age_limit': ('ageNemonic', {parse_age_limit}), 'age_limit': ('ageNemonic', {parse_age_limit}),
'duration': ('totalDuration', {float_or_none}), 'duration': ('totalDuration', {float_or_none}),
'thumbnail': ('images', {url_or_none}), 'thumbnail': ('images', {url_or_none}),
@ -338,10 +338,10 @@ class JioCinemaIE(JioCinemaBaseIE):
**traverse_obj(metadata, ('result', 0, { **traverse_obj(metadata, ('result', 0, {
'title': ('fullTitle', {str}), 'title': ('fullTitle', {str}),
'description': ('fullSynopsis', {str}), 'description': ('fullSynopsis', {str}),
'series': ('showName', {str}, filter), 'series': ('showName', {str}, {lambda x: x or None}),
'season': ('seasonName', {str}, filter), 'season': ('seasonName', {str}, {lambda x: x or None}),
'season_number': ('season', {int_or_none}), 'season_number': ('season', {int_or_none}),
'season_id': ('seasonId', {str}, filter), 'season_id': ('seasonId', {str}, {lambda x: x or None}),
'episode': ('fullTitle', {str}), 'episode': ('fullTitle', {str}),
'episode_number': ('episode', {int_or_none}), 'episode_number': ('episode', {int_or_none}),
'timestamp': ('uploadTime', {int_or_none}), 'timestamp': ('uploadTime', {int_or_none}),

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..networking import HEADRequest from ..networking import HEADRequest
@ -136,7 +137,7 @@ class KickVODIE(KickBaseIE):
'uploader': ('livestream', 'channel', 'user', 'username', {str}), 'uploader': ('livestream', 'channel', 'user', 'username', {str}),
'uploader_id': ('livestream', 'channel', 'user_id', {int}, {str_or_none}), 'uploader_id': ('livestream', 'channel', 'user_id', {int}, {str_or_none}),
'timestamp': ('created_at', {parse_iso8601}), 'timestamp': ('created_at', {parse_iso8601}),
'duration': ('livestream', 'duration', {float_or_none(scale=1000)}), 'duration': ('livestream', 'duration', {functools.partial(float_or_none, scale=1000)}),
'thumbnail': ('livestream', 'thumbnail', {url_or_none}), 'thumbnail': ('livestream', 'thumbnail', {url_or_none}),
'categories': ('livestream', 'categories', ..., 'name', {str}), 'categories': ('livestream', 'categories', ..., 'name', {str}),
'view_count': ('views', {int_or_none}), 'view_count': ('views', {int_or_none}),

View File

@ -119,7 +119,7 @@ class KikaIE(InfoExtractor):
'width': ('frameWidth', {int_or_none}), 'width': ('frameWidth', {int_or_none}),
'height': ('frameHeight', {int_or_none}), 'height': ('frameHeight', {int_or_none}),
# NB: filesize is 0 if unknown, bitrate is -1 if unknown # NB: filesize is 0 if unknown, bitrate is -1 if unknown
'filesize': ('fileSize', {int_or_none}, filter), 'filesize': ('fileSize', {int_or_none}, {lambda x: x or None}),
'abr': ('bitrateAudio', {int_or_none}, {lambda x: None if x == -1 else x}), 'abr': ('bitrateAudio', {int_or_none}, {lambda x: None if x == -1 else x}),
'vbr': ('bitrateVideo', {int_or_none}, {lambda x: None if x == -1 else x}), 'vbr': ('bitrateVideo', {int_or_none}, {lambda x: None if x == -1 else x}),
}), }),

View File

@ -32,7 +32,7 @@ class LaracastsBaseIE(InfoExtractor):
VimeoIE, url_transparent=True, VimeoIE, url_transparent=True,
**traverse_obj(episode, { **traverse_obj(episode, {
'id': ('id', {int}, {str_or_none}), 'id': ('id', {int}, {str_or_none}),
'webpage_url': ('path', {urljoin('https://laracasts.com')}), 'webpage_url': ('path', {lambda x: urljoin('https://laracasts.com', x)}),
'title': ('title', {clean_html}), 'title': ('title', {clean_html}),
'season_number': ('chapter', {int_or_none}), 'season_number': ('chapter', {int_or_none}),
'episode_number': ('position', {int_or_none}), 'episode_number': ('position', {int_or_none}),
@ -104,7 +104,7 @@ class LaracastsPlaylistIE(LaracastsBaseIE):
'description': ('body', {clean_html}), 'description': ('body', {clean_html}),
'thumbnail': (('large_thumbnail', 'thumbnail'), {url_or_none}, any), 'thumbnail': (('large_thumbnail', 'thumbnail'), {url_or_none}, any),
'duration': ('runTime', {parse_duration}), 'duration': ('runTime', {parse_duration}),
'categories': ('taxonomy', 'name', {str}, all, filter), 'categories': ('taxonomy', 'name', {str}, {lambda x: x and [x]}),
'tags': ('topics', ..., 'name', {str}), 'tags': ('topics', ..., 'name', {str}),
'modified_date': ('lastUpdated', {unified_strdate}), 'modified_date': ('lastUpdated', {unified_strdate}),
}), }),

View File

@ -66,7 +66,7 @@ class LBRYBaseIE(InfoExtractor):
'license': ('value', 'license', {str}), 'license': ('value', 'license', {str}),
'timestamp': ('timestamp', {int_or_none}), 'timestamp': ('timestamp', {int_or_none}),
'release_timestamp': ('value', 'release_time', {int_or_none}), 'release_timestamp': ('value', 'release_time', {int_or_none}),
'tags': ('value', 'tags', ..., filter), 'tags': ('value', 'tags', ..., {lambda x: x or None}),
'duration': ('value', stream_type, 'duration', {int_or_none}), 'duration': ('value', stream_type, 'duration', {int_or_none}),
'channel': ('signing_channel', 'value', 'title', {str}), 'channel': ('signing_channel', 'value', 'title', {str}),
'channel_id': ('signing_channel', 'claim_id', {str}), 'channel_id': ('signing_channel', 'claim_id', {str}),

View File

@ -6,11 +6,13 @@ from ..utils import (
ExtractorError, ExtractorError,
clean_html, clean_html,
extract_attributes, extract_attributes,
get_element_by_class,
get_element_html_by_id,
join_nonempty, join_nonempty,
parse_duration, parse_duration,
unified_timestamp, unified_timestamp,
) )
from ..utils.traversal import find_element, traverse_obj from ..utils.traversal import traverse_obj
class LearningOnScreenIE(InfoExtractor): class LearningOnScreenIE(InfoExtractor):
@ -30,24 +32,28 @@ class LearningOnScreenIE(InfoExtractor):
def _real_initialize(self): def _real_initialize(self):
if not self._get_cookies('https://learningonscreen.ac.uk/').get('PHPSESSID-BOB-LIVE'): if not self._get_cookies('https://learningonscreen.ac.uk/').get('PHPSESSID-BOB-LIVE'):
self.raise_login_required(method='session_cookies') self.raise_login_required(
'Use --cookies for authentication. See '
' https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp '
'for how to manually pass cookies', method=None)
def _real_extract(self, url): def _real_extract(self, url):
video_id = self._match_id(url) video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
details = traverse_obj(webpage, ( details = traverse_obj(webpage, (
{find_element(id='programme-details', html=True)}, { {functools.partial(get_element_html_by_id, 'programme-details')}, {
'title': ({find_element(tag='h2')}, {clean_html}), 'title': ({functools.partial(re.search, r'<h2>([^<]+)</h2>')}, 1, {clean_html}),
'timestamp': ( 'timestamp': (
{find_element(cls='broadcast-date')}, {functools.partial(get_element_by_class, 'broadcast-date')},
{functools.partial(re.match, r'([^<]+)')}, 1, {unified_timestamp}), {functools.partial(re.match, r'([^<]+)')}, 1, {unified_timestamp}),
'duration': ( 'duration': (
{find_element(cls='prog-running-time')}, {clean_html}, {parse_duration}), {functools.partial(get_element_by_class, 'prog-running-time')},
{clean_html}, {parse_duration}),
})) }))
title = details.pop('title', None) or traverse_obj(webpage, ( title = details.pop('title', None) or traverse_obj(webpage, (
{find_element(id='add-to-existing-playlist', html=True)}, {functools.partial(get_element_html_by_id, 'add-to-existing-playlist')},
{extract_attributes}, 'data-record-title', {clean_html})) {extract_attributes}, 'data-record-title', {clean_html}))
entries = self._parse_html5_media_entries( entries = self._parse_html5_media_entries(

View File

@ -6,10 +6,12 @@ from ..utils import (
extract_attributes, extract_attributes,
get_element_by_class, get_element_by_class,
get_element_html_by_id, get_element_html_by_id,
get_element_text_and_html_by_tag,
parse_duration, parse_duration,
strip_or_none, strip_or_none,
traverse_obj,
try_call,
) )
from ..utils.traversal import find_element, traverse_obj
class ListenNotesIE(InfoExtractor): class ListenNotesIE(InfoExtractor):
@ -20,14 +22,14 @@ class ListenNotesIE(InfoExtractor):
'info_dict': { 'info_dict': {
'id': 'KrDgvNb_u1n', 'id': 'KrDgvNb_u1n',
'ext': 'mp3', 'ext': 'mp3',
'title': r're:Tim OReilly on noticing things other people .{113}', 'title': 'md5:32236591a921adf17bbdbf0441b6c0e9',
'description': r're:(?s)We shape reality by what we notice and .{27459}', 'description': 'md5:c581ed197eeddcee55a67cdb547c8cbd',
'duration': 2215.0, 'duration': 2148.0,
'channel': 'Amplifying Cognition', 'channel': 'Thriving on Overload',
'channel_id': 'ed84wITivxF', 'channel_id': 'ed84wITivxF',
'episode_id': 'e1312583fa7b4e24acfbb5131050be00', 'episode_id': 'e1312583fa7b4e24acfbb5131050be00',
'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/amplifying-cognition-ross-dawson-Iemft4Gdr0k-ed84wITivxF.300x300.jpg', 'thumbnail': 'https://production.listennotes.com/podcasts/thriving-on-overload-ross-dawson-1wb_KospA3P-ed84wITivxF.300x300.jpg',
'channel_url': 'https://www.listennotes.com/podcasts/amplifying-cognition-ross-dawson-ed84wITivxF/', 'channel_url': 'https://www.listennotes.com/podcasts/thriving-on-overload-ross-dawson-ed84wITivxF/',
'cast': ['Tim OReilly', 'Cookie Monster', 'Lao Tzu', 'Wallace Steven', 'Eric Raymond', 'Christine Peterson', 'John Maynard Keyne', 'Ross Dawson'], 'cast': ['Tim OReilly', 'Cookie Monster', 'Lao Tzu', 'Wallace Steven', 'Eric Raymond', 'Christine Peterson', 'John Maynard Keyne', 'Ross Dawson'],
}, },
}, { }, {
@ -37,13 +39,13 @@ class ListenNotesIE(InfoExtractor):
'id': 'lwEA3154JzG', 'id': 'lwEA3154JzG',
'ext': 'mp3', 'ext': 'mp3',
'title': 'Episode 177: WireGuard with Jason Donenfeld', 'title': 'Episode 177: WireGuard with Jason Donenfeld',
'description': r're:(?s)Jason Donenfeld lead developer joins us this hour to discuss WireGuard, .{3169}', 'description': 'md5:24744f36456a3e95f83c1193a3458594',
'duration': 3861.0, 'duration': 3861.0,
'channel': 'Ask Noah Show', 'channel': 'Ask Noah Show',
'channel_id': '4DQTzdS5-j7', 'channel_id': '4DQTzdS5-j7',
'episode_id': '8c8954b95e0b4859ad1eecec8bf6d3a4', 'episode_id': '8c8954b95e0b4859ad1eecec8bf6d3a4',
'channel_url': 'https://www.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-4DQTzdS5-j7/', 'channel_url': 'https://www.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-4DQTzdS5-j7/',
'thumbnail': 'https://cdn-images-3.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-gD7vG150cxf-4DQTzdS5-j7.300x300.jpg', 'thumbnail': 'https://production.listennotes.com/podcasts/ask-noah-show-noah-j-chelliah-cfbRUw9Gs3F-4DQTzdS5-j7.300x300.jpg',
'cast': ['noah showlink', 'noah show', 'noah dashboard', 'jason donenfeld'], 'cast': ['noah showlink', 'noah show', 'noah dashboard', 'jason donenfeld'],
}, },
}] }]
@ -68,7 +70,7 @@ class ListenNotesIE(InfoExtractor):
'id': audio_id, 'id': audio_id,
'url': data['audio'], 'url': data['audio'],
'title': (data.get('data-title') 'title': (data.get('data-title')
or traverse_obj(webpage, ({find_element(tag='h1')}, {clean_html})) or try_call(lambda: get_element_text_and_html_by_tag('h1', webpage)[0])
or self._html_search_meta(('og:title', 'title', 'twitter:title'), webpage, 'title')), or self._html_search_meta(('og:title', 'title', 'twitter:title'), webpage, 'title')),
'description': (self._clean_description(get_element_by_class('ln-text-p', webpage)) 'description': (self._clean_description(get_element_by_class('ln-text-p', webpage))
or strip_or_none(description)), or strip_or_none(description)),

View File

@ -114,7 +114,7 @@ class LSMLREmbedIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
query = parse_qs(url) query = parse_qs(url)
video_id = traverse_obj(query, ( video_id = traverse_obj(query, (
('show', 'id'), 0, {int_or_none}, filter, {str_or_none}), get_all=False) ('show', 'id'), 0, {int_or_none}, {lambda x: x or None}, {str_or_none}), get_all=False)
webpage = self._download_webpage(url, video_id) webpage = self._download_webpage(url, video_id)
player_data, media_data = self._search_regex( player_data, media_data = self._search_regex(

View File

@ -57,6 +57,6 @@ class MagentaMusikIE(InfoExtractor):
'duration': ('runtimeInSeconds', {int_or_none}), 'duration': ('runtimeInSeconds', {int_or_none}),
'location': ('countriesOfProduction', {list}, {lambda x: join_nonempty(*x, delim=', ')}), 'location': ('countriesOfProduction', {list}, {lambda x: join_nonempty(*x, delim=', ')}),
'release_year': ('yearOfProduction', {int_or_none}), 'release_year': ('yearOfProduction', {int_or_none}),
'categories': ('mainGenre', {str}, all, filter), 'categories': ('mainGenre', {str}, {lambda x: x and [x]}),
})), })),
} }

View File

@ -17,7 +17,7 @@ class MediaStreamBaseIE(InfoExtractor):
_BASE_URL_RE = r'https?://mdstrm\.com/(?:embed|live-stream)' _BASE_URL_RE = r'https?://mdstrm\.com/(?:embed|live-stream)'
def _extract_mediastream_urls(self, webpage): def _extract_mediastream_urls(self, webpage):
yield from traverse_obj(list(self._yield_json_ld(webpage, None, default={})), ( yield from traverse_obj(list(self._yield_json_ld(webpage, None, fatal=False)), (
lambda _, v: v['@type'] == 'VideoObject', ('embedUrl', 'contentUrl'), lambda _, v: v['@type'] == 'VideoObject', ('embedUrl', 'contentUrl'),
{lambda x: x if re.match(rf'{self._BASE_URL_RE}/\w+', x) else None})) {lambda x: x if re.match(rf'{self._BASE_URL_RE}/\w+', x) else None}))

View File

@ -66,7 +66,7 @@ class MixchIE(InfoExtractor):
note='Downloading comments', errnote='Failed to download comments'), (..., { note='Downloading comments', errnote='Failed to download comments'), (..., {
'author': ('name', {str}), 'author': ('name', {str}),
'author_id': ('user_id', {str_or_none}), 'author_id': ('user_id', {str_or_none}),
'id': ('message_id', {str}, filter), 'id': ('message_id', {str}, {lambda x: x or None}),
'text': ('body', {str}), 'text': ('body', {str}),
'timestamp': ('created', {int}), 'timestamp': ('created', {int}),
})) }))

View File

@ -4,11 +4,15 @@ from .common import InfoExtractor
from ..utils import ( from ..utils import (
clean_html, clean_html,
extract_attributes, extract_attributes,
get_element_by_class,
get_element_html_by_class,
get_element_text_and_html_by_tag,
int_or_none, int_or_none,
strip_or_none, strip_or_none,
traverse_obj,
try_call,
unified_strdate, unified_strdate,
) )
from ..utils.traversal import find_element, traverse_obj
class MonstercatIE(InfoExtractor): class MonstercatIE(InfoExtractor):
@ -22,21 +26,19 @@ class MonstercatIE(InfoExtractor):
'thumbnail': 'https://www.monstercat.com/release/742779548009/cover', 'thumbnail': 'https://www.monstercat.com/release/742779548009/cover',
'release_date': '20230711', 'release_date': '20230711',
'album': 'The Secret Language of Trees', 'album': 'The Secret Language of Trees',
'album_artists': ['BT'], 'album_artist': 'BT',
}, },
}] }]
def _extract_tracks(self, table, album_meta): def _extract_tracks(self, table, album_meta):
for td in re.findall(r'<tr[^<]*>((?:(?!</tr>)[\w\W])+)', table): # regex by chatgpt due to lack of get_elements_by_tag for td in re.findall(r'<tr[^<]*>((?:(?!</tr>)[\w\W])+)', table): # regex by chatgpt due to lack of get_elements_by_tag
title = traverse_obj(td, ( title = clean_html(try_call(
{find_element(cls='d-inline-flex flex-column')}, lambda: get_element_by_class('d-inline-flex flex-column', td).partition(' <span')[0]))
{lambda x: x.partition(' <span')}, 0, {clean_html})) ids = extract_attributes(try_call(lambda: get_element_html_by_class('btn-play cursor-pointer mr-small', td)) or '')
ids = traverse_obj(td, (
{find_element(cls='btn-play cursor-pointer mr-small', html=True)}, {extract_attributes})) or {}
track_id = ids.get('data-track-id') track_id = ids.get('data-track-id')
release_id = ids.get('data-release-id') release_id = ids.get('data-release-id')
track_number = traverse_obj(td, ({find_element(cls='py-xsmall')}, {int_or_none})) track_number = int_or_none(try_call(lambda: get_element_by_class('py-xsmall', td)))
if not track_id or not release_id: if not track_id or not release_id:
self.report_warning(f'Skipping track {track_number}, ID(s) not found') self.report_warning(f'Skipping track {track_number}, ID(s) not found')
self.write_debug(f'release_id={release_id!r} track_id={track_id!r}') self.write_debug(f'release_id={release_id!r} track_id={track_id!r}')
@ -46,7 +48,7 @@ class MonstercatIE(InfoExtractor):
'title': title, 'title': title,
'track': title, 'track': title,
'track_number': track_number, 'track_number': track_number,
'artists': traverse_obj(td, ({find_element(cls='d-block fs-xxsmall')}, {clean_html}, all)), 'artist': clean_html(try_call(lambda: get_element_by_class('d-block fs-xxsmall', td))),
'url': f'https://www.monstercat.com/api/release/{release_id}/track-stream/{track_id}', 'url': f'https://www.monstercat.com/api/release/{release_id}/track-stream/{track_id}',
'id': track_id, 'id': track_id,
'ext': 'mp3', 'ext': 'mp3',
@ -55,19 +57,20 @@ class MonstercatIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
url_id = self._match_id(url) url_id = self._match_id(url)
html = self._download_webpage(url, url_id) html = self._download_webpage(url, url_id)
# NB: HTMLParser may choke on this html; use {find_element} or try_call(lambda: get_element...) # wrap all `get_elements` in `try_call`, HTMLParser has problems with site's html
tracklist_table = traverse_obj(html, {find_element(cls='table table-small')}) or '' tracklist_table = try_call(lambda: get_element_by_class('table table-small', html)) or ''
title = traverse_obj(html, ({find_element(tag='h1')}, {clean_html}))
title = try_call(lambda: get_element_text_and_html_by_tag('h1', html)[0])
date = traverse_obj(html, ({lambda html: get_element_by_class('font-italic mb-medium d-tablet-none d-phone-block',
html).partition('Released ')}, 2, {strip_or_none}, {unified_strdate}))
album_meta = { album_meta = {
'title': title, 'title': title,
'album': title, 'album': title,
'thumbnail': f'https://www.monstercat.com/release/{url_id}/cover', 'thumbnail': f'https://www.monstercat.com/release/{url_id}/cover',
'album_artists': traverse_obj(html, ( 'album_artist': try_call(
{find_element(cls='h-normal text-uppercase mb-desktop-medium mb-smallish')}, {clean_html}, all)), lambda: get_element_by_class('h-normal text-uppercase mb-desktop-medium mb-smallish', html)),
'release_date': traverse_obj(html, ( 'release_date': date,
{find_element(cls='font-italic mb-medium d-tablet-none d-phone-block')},
{lambda x: x.partition('Released ')}, 2, {strip_or_none}, {unified_strdate})),
} }
return self.playlist_result( return self.playlist_result(

View File

@ -86,7 +86,7 @@ class NebulaBaseIE(InfoExtractor):
def _extract_video_metadata(self, episode): def _extract_video_metadata(self, episode):
channel_url = traverse_obj( channel_url = traverse_obj(
episode, (('channel_slug', 'class_slug'), {urljoin('https://nebula.tv/')}), get_all=False) episode, (('channel_slug', 'class_slug'), {lambda x: urljoin('https://nebula.tv/', x)}), get_all=False)
return { return {
'id': episode['id'].partition(':')[2], 'id': episode['id'].partition(':')[2],
**traverse_obj(episode, { **traverse_obj(episode, {

View File

@ -6,10 +6,12 @@ from ..utils import (
determine_ext, determine_ext,
extract_attributes, extract_attributes,
get_element_by_class, get_element_by_class,
get_element_text_and_html_by_tag,
parse_duration, parse_duration,
traverse_obj,
try_call,
url_or_none, url_or_none,
) )
from ..utils.traversal import find_element, traverse_obj
class NekoHackerIE(InfoExtractor): class NekoHackerIE(InfoExtractor):
@ -33,7 +35,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20221101', 'release_date': '20221101',
'album': 'Nekoverse', 'album': 'Nekoverse',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'Spaceship', 'track': 'Spaceship',
'track_number': 1, 'track_number': 1,
'duration': 195.0, 'duration': 195.0,
@ -51,7 +53,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20221101', 'release_date': '20221101',
'album': 'Nekoverse', 'album': 'Nekoverse',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'City Runner', 'track': 'City Runner',
'track_number': 2, 'track_number': 2,
'duration': 148.0, 'duration': 148.0,
@ -69,7 +71,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20221101', 'release_date': '20221101',
'album': 'Nekoverse', 'album': 'Nekoverse',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'Nature Talk', 'track': 'Nature Talk',
'track_number': 3, 'track_number': 3,
'duration': 174.0, 'duration': 174.0,
@ -87,7 +89,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20221101', 'release_date': '20221101',
'album': 'Nekoverse', 'album': 'Nekoverse',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'Crystal World', 'track': 'Crystal World',
'track_number': 4, 'track_number': 4,
'duration': 199.0, 'duration': 199.0,
@ -113,7 +115,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20210115', 'release_date': '20210115',
'album': '進め!むじなカンパニー', 'album': '進め!むじなカンパニー',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'md5:1a5fcbc96ca3c3265b1c6f9f79f30fd0', 'track': 'md5:1a5fcbc96ca3c3265b1c6f9f79f30fd0',
'track_number': 1, 'track_number': 1,
}, },
@ -130,7 +132,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20210115', 'release_date': '20210115',
'album': '進め!むじなカンパニー', 'album': '進め!むじなカンパニー',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'むじな de なじむ feat. 六科なじむ (CV: 日高里菜 )', 'track': 'むじな de なじむ feat. 六科なじむ (CV: 日高里菜 )',
'track_number': 2, 'track_number': 2,
}, },
@ -147,7 +149,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20210115', 'release_date': '20210115',
'album': '進め!むじなカンパニー', 'album': '進め!むじなカンパニー',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': '進め!むじなカンパニー (instrumental)', 'track': '進め!むじなカンパニー (instrumental)',
'track_number': 3, 'track_number': 3,
}, },
@ -164,7 +166,7 @@ class NekoHackerIE(InfoExtractor):
'acodec': 'mp3', 'acodec': 'mp3',
'release_date': '20210115', 'release_date': '20210115',
'album': '進め!むじなカンパニー', 'album': '進め!むじなカンパニー',
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'track': 'むじな de なじむ (instrumental)', 'track': 'むじな de なじむ (instrumental)',
'track_number': 4, 'track_number': 4,
}, },
@ -179,17 +181,14 @@ class NekoHackerIE(InfoExtractor):
playlist = get_element_by_class('playlist', webpage) playlist = get_element_by_class('playlist', webpage)
if not playlist: if not playlist:
iframe_src = traverse_obj(webpage, ( iframe = try_call(lambda: get_element_text_and_html_by_tag('iframe', webpage)[1]) or ''
{find_element(tag='iframe', html=True)}, {extract_attributes}, 'src', {url_or_none})) iframe_src = url_or_none(extract_attributes(iframe).get('src'))
if not iframe_src: if not iframe_src:
raise ExtractorError('No playlist or embed found in webpage') raise ExtractorError('No playlist or embed found in webpage')
elif re.match(r'https?://(?:\w+\.)?spotify\.com/', iframe_src): elif re.match(r'https?://(?:\w+\.)?spotify\.com/', iframe_src):
raise ExtractorError('Spotify embeds are not supported', expected=True) raise ExtractorError('Spotify embeds are not supported', expected=True)
return self.url_result(url, 'Generic') return self.url_result(url, 'Generic')
player_params = self._search_json(
r'var srp_player_params_[\da-f]+\s*=', webpage, 'player params', playlist_id, default={})
entries = [] entries = []
for track_number, track in enumerate(re.findall(r'(<li[^>]+data-audiopath[^>]+>)', playlist), 1): for track_number, track in enumerate(re.findall(r'(<li[^>]+data-audiopath[^>]+>)', playlist), 1):
entry = traverse_obj(extract_attributes(track), { entry = traverse_obj(extract_attributes(track), {
@ -201,12 +200,12 @@ class NekoHackerIE(InfoExtractor):
'album': 'data-albumtitle', 'album': 'data-albumtitle',
'duration': ('data-tracktime', {parse_duration}), 'duration': ('data-tracktime', {parse_duration}),
'release_date': ('data-releasedate', {lambda x: re.match(r'\d{8}', x.replace('.', ''))}, 0), 'release_date': ('data-releasedate', {lambda x: re.match(r'\d{8}', x.replace('.', ''))}, 0),
'thumbnail': ('data-albumart', {url_or_none}),
}) })
entries.append({ entries.append({
**entry, **entry,
'thumbnail': url_or_none(player_params.get('artwork')),
'track_number': track_number, 'track_number': track_number,
'artists': ['Neko Hacker'], 'artist': 'Neko Hacker',
'vcodec': 'none', 'vcodec': 'none',
'acodec': 'mp3' if entry['ext'] == 'mp3' else None, 'acodec': 'mp3' if entry['ext'] == 'mp3' else None,
}) })

View File

@ -36,6 +36,10 @@ class NetEaseMusicBaseIE(InfoExtractor):
_API_BASE = 'http://music.163.com/api/' _API_BASE = 'http://music.163.com/api/'
_GEO_BYPASS = False _GEO_BYPASS = False
@staticmethod
def _kilo_or_none(value):
return int_or_none(value, scale=1000)
def _create_eapi_cipher(self, api_path, query_body, cookies): def _create_eapi_cipher(self, api_path, query_body, cookies):
request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':')) request_text = json.dumps({**query_body, 'header': cookies}, separators=(',', ':'))
@ -97,7 +101,7 @@ class NetEaseMusicBaseIE(InfoExtractor):
'vcodec': 'none', 'vcodec': 'none',
**traverse_obj(song, { **traverse_obj(song, {
'ext': ('type', {str}), 'ext': ('type', {str}),
'abr': ('br', {int_or_none(scale=1000)}), 'abr': ('br', {self._kilo_or_none}),
'filesize': ('size', {int_or_none}), 'filesize': ('size', {int_or_none}),
}), }),
}) })
@ -278,9 +282,9 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
**lyric_data, **lyric_data,
**traverse_obj(info, { **traverse_obj(info, {
'title': ('name', {str}), 'title': ('name', {str}),
'timestamp': ('album', 'publishTime', {int_or_none(scale=1000)}), 'timestamp': ('album', 'publishTime', {self._kilo_or_none}),
'thumbnail': ('album', 'picUrl', {url_or_none}), 'thumbnail': ('album', 'picUrl', {url_or_none}),
'duration': ('duration', {int_or_none(scale=1000)}), 'duration': ('duration', {self._kilo_or_none}),
'album': ('album', 'name', {str}), 'album': ('album', 'name', {str}),
'average_rating': ('score', {int_or_none}), 'average_rating': ('score', {int_or_none}),
}), }),
@ -436,7 +440,7 @@ class NetEaseMusicListIE(NetEaseMusicBaseIE):
'tags': ('tags', ..., {str}), 'tags': ('tags', ..., {str}),
'uploader': ('creator', 'nickname', {str}), 'uploader': ('creator', 'nickname', {str}),
'uploader_id': ('creator', 'userId', {str_or_none}), 'uploader_id': ('creator', 'userId', {str_or_none}),
'timestamp': ('updateTime', {int_or_none(scale=1000)}), 'timestamp': ('updateTime', {self._kilo_or_none}),
})) }))
if traverse_obj(info, ('playlist', 'specialType')) == 10: if traverse_obj(info, ('playlist', 'specialType')) == 10:
metainfo['title'] = f'{metainfo.get("title")} {strftime_or_none(metainfo.get("timestamp"), "%Y-%m-%d")}' metainfo['title'] = f'{metainfo.get("title")} {strftime_or_none(metainfo.get("timestamp"), "%Y-%m-%d")}'
@ -513,10 +517,10 @@ class NetEaseMusicMvIE(NetEaseMusicBaseIE):
'creators': traverse_obj(info, ('artists', ..., 'name')) or [info.get('artistName')], 'creators': traverse_obj(info, ('artists', ..., 'name')) or [info.get('artistName')],
**traverse_obj(info, { **traverse_obj(info, {
'title': ('name', {str}), 'title': ('name', {str}),
'description': (('desc', 'briefDesc'), {str}, filter), 'description': (('desc', 'briefDesc'), {str}, {lambda x: x or None}),
'upload_date': ('publishTime', {unified_strdate}), 'upload_date': ('publishTime', {unified_strdate}),
'thumbnail': ('cover', {url_or_none}), 'thumbnail': ('cover', {url_or_none}),
'duration': ('duration', {int_or_none(scale=1000)}), 'duration': ('duration', {self._kilo_or_none}),
'view_count': ('playCount', {int_or_none}), 'view_count': ('playCount', {int_or_none}),
'like_count': ('likeCount', {int_or_none}), 'like_count': ('likeCount', {int_or_none}),
'comment_count': ('commentCount', {int_or_none}), 'comment_count': ('commentCount', {int_or_none}),
@ -584,7 +588,7 @@ class NetEaseMusicProgramIE(NetEaseMusicBaseIE):
'description': ('description', {str}), 'description': ('description', {str}),
'creator': ('dj', 'brand', {str}), 'creator': ('dj', 'brand', {str}),
'thumbnail': ('coverUrl', {url_or_none}), 'thumbnail': ('coverUrl', {url_or_none}),
'timestamp': ('createTime', {int_or_none(scale=1000)}), 'timestamp': ('createTime', {self._kilo_or_none}),
}) })
if not self._yes_playlist( if not self._yes_playlist(
@ -594,7 +598,7 @@ class NetEaseMusicProgramIE(NetEaseMusicBaseIE):
return { return {
'id': str(info['mainSong']['id']), 'id': str(info['mainSong']['id']),
'formats': formats, 'formats': formats,
'duration': traverse_obj(info, ('mainSong', 'duration', {int_or_none(scale=1000)})), 'duration': traverse_obj(info, ('mainSong', 'duration', {self._kilo_or_none})),
**metainfo, **metainfo,
} }

View File

@ -371,11 +371,11 @@ class NiconicoIE(InfoExtractor):
'acodec': 'aac', 'acodec': 'aac',
'vcodec': 'h264', 'vcodec': 'h264',
**traverse_obj(audio_quality, ('metadata', { **traverse_obj(audio_quality, ('metadata', {
'abr': ('bitrate', {float_or_none(scale=1000)}), 'abr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
'asr': ('samplingRate', {int_or_none}), 'asr': ('samplingRate', {int_or_none}),
})), })),
**traverse_obj(video_quality, ('metadata', { **traverse_obj(video_quality, ('metadata', {
'vbr': ('bitrate', {float_or_none(scale=1000)}), 'vbr': ('bitrate', {functools.partial(float_or_none, scale=1000)}),
'height': ('resolution', 'height', {int_or_none}), 'height': ('resolution', 'height', {int_or_none}),
'width': ('resolution', 'width', {int_or_none}), 'width': ('resolution', 'width', {int_or_none}),
})), })),
@ -428,7 +428,7 @@ class NiconicoIE(InfoExtractor):
**audio_fmt, **audio_fmt,
**traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), { **traverse_obj(audios, (lambda _, v: audio_fmt['format_id'].startswith(v['id']), {
'format_id': ('id', {str}), 'format_id': ('id', {str}),
'abr': ('bitRate', {float_or_none(scale=1000)}), 'abr': ('bitRate', {functools.partial(float_or_none, scale=1000)}),
'asr': ('samplingRate', {int_or_none}), 'asr': ('samplingRate', {int_or_none}),
}), get_all=False), }), get_all=False),
'acodec': 'aac', 'acodec': 'aac',

View File

@ -10,10 +10,10 @@ from ..utils import (
get_element_html_by_class, get_element_html_by_class,
get_elements_by_class, get_elements_by_class,
int_or_none, int_or_none,
try_call,
unified_timestamp, unified_timestamp,
urlencode_postdata, urlencode_postdata,
) )
from ..utils.traversal import find_element, find_elements, traverse_obj
class NubilesPornIE(InfoExtractor): class NubilesPornIE(InfoExtractor):
@ -70,8 +70,9 @@ class NubilesPornIE(InfoExtractor):
url, get_element_by_class('watch-page-video-wrapper', page), video_id)[0] url, get_element_by_class('watch-page-video-wrapper', page), video_id)[0]
channel_id, channel_name = self._search_regex( channel_id, channel_name = self._search_regex(
r'/video/website/(?P<id>\d+).+>(?P<name>\w+).com', get_element_html_by_class('site-link', page) or '', r'/video/website/(?P<id>\d+).+>(?P<name>\w+).com', get_element_html_by_class('site-link', page),
'channel', fatal=False, group=('id', 'name')) or (None, None) 'channel', fatal=False, group=('id', 'name')) or (None, None)
channel_name = re.sub(r'([^A-Z]+)([A-Z]+)', r'\1 \2', channel_name)
return { return {
'id': video_id, 'id': video_id,
@ -81,14 +82,14 @@ class NubilesPornIE(InfoExtractor):
'thumbnail': media_entries.get('thumbnail'), 'thumbnail': media_entries.get('thumbnail'),
'description': clean_html(get_element_html_by_class('content-pane-description', page)), 'description': clean_html(get_element_html_by_class('content-pane-description', page)),
'timestamp': unified_timestamp(get_element_by_class('date', page)), 'timestamp': unified_timestamp(get_element_by_class('date', page)),
'channel': re.sub(r'([^A-Z]+)([A-Z]+)', r'\1 \2', channel_name) if channel_name else None, 'channel': channel_name,
'channel_id': channel_id, 'channel_id': channel_id,
'channel_url': format_field(channel_id, None, 'https://members.nubiles-porn.com/video/website/%s'), 'channel_url': format_field(channel_id, None, 'https://members.nubiles-porn.com/video/website/%s'),
'like_count': int_or_none(get_element_by_id('likecount', page)), 'like_count': int_or_none(get_element_by_id('likecount', page)),
'average_rating': float_or_none(get_element_by_class('score', page)), 'average_rating': float_or_none(get_element_by_class('score', page)),
'age_limit': 18, 'age_limit': 18,
'categories': traverse_obj(page, ({find_element(cls='categories')}, {find_elements(cls='btn')}, ..., {clean_html})), 'categories': try_call(lambda: list(map(clean_html, get_elements_by_class('btn', get_element_by_class('categories', page))))),
'tags': traverse_obj(page, ({find_elements(cls='tags')}, 1, {find_elements(cls='btn')}, ..., {clean_html})), 'tags': try_call(lambda: list(map(clean_html, get_elements_by_class('btn', get_elements_by_class('tags', page)[1])))),
'cast': get_elements_by_class('content-pane-performer', page), 'cast': get_elements_by_class('content-pane-performer', page),
'availability': 'needs_auth', 'availability': 'needs_auth',
'series': channel_name, 'series': channel_name,

View File

@ -235,7 +235,7 @@ class NYTimesArticleIE(NYTimesBaseIE):
details = traverse_obj(block, { details = traverse_obj(block, {
'id': ('sourceId', {str}), 'id': ('sourceId', {str}),
'uploader': ('bylines', ..., 'renderedRepresentation', {str}), 'uploader': ('bylines', ..., 'renderedRepresentation', {str}),
'duration': (None, (('duration', {float_or_none(scale=1000)}), ('length', {int_or_none}))), 'duration': (None, (('duration', {lambda x: float_or_none(x, scale=1000)}), ('length', {int_or_none}))),
'timestamp': ('firstPublished', {parse_iso8601}), 'timestamp': ('firstPublished', {parse_iso8601}),
'series': ('podcastSeries', {str}), 'series': ('podcastSeries', {str}),
}, get_all=False) }, get_all=False)

View File

@ -115,7 +115,7 @@ class OnDemandKoreaIE(InfoExtractor):
**traverse_obj(data, { **traverse_obj(data, {
'thumbnail': ('episode', 'images', 'thumbnail', {url_or_none}), 'thumbnail': ('episode', 'images', 'thumbnail', {url_or_none}),
'release_date': ('episode', 'release_date', {lambda x: x.replace('-', '')}, {unified_strdate}), 'release_date': ('episode', 'release_date', {lambda x: x.replace('-', '')}, {unified_strdate}),
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
'age_limit': ('age_rating', 'name', {lambda x: x.replace('R', '')}, {parse_age_limit}), 'age_limit': ('age_rating', 'name', {lambda x: x.replace('R', '')}, {parse_age_limit}),
'series': ('episode', {if_series(key='program')}, 'title'), 'series': ('episode', {if_series(key='program')}, 'title'),
'series_id': ('episode', {if_series(key='program')}, 'id', {str_or_none}), 'series_id': ('episode', {if_series(key='program')}, 'id', {str_or_none}),

View File

@ -1,4 +1,5 @@
import base64 import base64
import functools
import re import re
from .common import InfoExtractor from .common import InfoExtractor
@ -191,7 +192,7 @@ class ORFPodcastIE(InfoExtractor):
'ext': ('enclosures', 0, 'type', {mimetype2ext}), 'ext': ('enclosures', 0, 'type', {mimetype2ext}),
'title': 'title', 'title': 'title',
'description': ('description', {clean_html}), 'description': ('description', {clean_html}),
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
'series': ('podcast', 'title'), 'series': ('podcast', 'title'),
})), })),
} }
@ -493,7 +494,7 @@ class ORFONIE(InfoExtractor):
return traverse_obj(api_json, { return traverse_obj(api_json, {
'id': ('id', {int}, {str_or_none}), 'id': ('id', {int}, {str_or_none}),
'age_limit': ('age_classification', {parse_age_limit}), 'age_limit': ('age_classification', {parse_age_limit}),
'duration': ('exact_duration', {float_or_none(scale=1000)}), 'duration': ('exact_duration', {functools.partial(float_or_none, scale=1000)}),
'title': (('title', 'headline'), {str}), 'title': (('title', 'headline'), {str}),
'description': (('description', 'teaser_text'), {str}), 'description': (('description', 'teaser_text'), {str}),
'media_type': ('video_type', {str}), 'media_type': ('video_type', {str}),

View File

@ -1,3 +1,5 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from .youtube import YoutubeIE from .youtube import YoutubeIE
from ..utils import ( from ..utils import (
@ -81,7 +83,7 @@ class ParlerIE(InfoExtractor):
'timestamp': ('date_created', {unified_timestamp}), 'timestamp': ('date_created', {unified_timestamp}),
'uploader': ('user', 'name', {strip_or_none}), 'uploader': ('user', 'name', {strip_or_none}),
'uploader_id': ('user', 'username', {str}), 'uploader_id': ('user', 'username', {str}),
'uploader_url': ('user', 'username', {urljoin('https://parler.com/')}), 'uploader_url': ('user', 'username', {functools.partial(urljoin, 'https://parler.com/')}),
'view_count': ('views', {int_or_none}), 'view_count': ('views', {int_or_none}),
'comment_count': ('total_comments', {int_or_none}), 'comment_count': ('total_comments', {int_or_none}),
'repost_count': ('echos', {int_or_none}), 'repost_count': ('echos', {int_or_none}),

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
@ -104,7 +105,7 @@ class PornboxIE(InfoExtractor):
get_quality = qualities(['web', 'vga', 'hd', '1080p', '4k', '8k']) get_quality = qualities(['web', 'vga', 'hd', '1080p', '4k', '8k'])
metadata['formats'] = traverse_obj(stream_data, ('qualities', lambda _, v: v['src'], { metadata['formats'] = traverse_obj(stream_data, ('qualities', lambda _, v: v['src'], {
'url': 'src', 'url': 'src',
'vbr': ('bitrate', {int_or_none(scale=1000)}), 'vbr': ('bitrate', {functools.partial(int_or_none, scale=1000)}),
'format_id': ('quality', {str_or_none}), 'format_id': ('quality', {str_or_none}),
'quality': ('quality', {get_quality}), 'quality': ('quality', {get_quality}),
'width': ('size', {lambda x: int(x[:-1])}), 'width': ('size', {lambda x: int(x[:-1])}),

View File

@ -198,6 +198,6 @@ class Pr0grammIE(InfoExtractor):
'dislike_count': ('down', {int}), 'dislike_count': ('down', {int}),
'timestamp': ('created', {int}), 'timestamp': ('created', {int}),
'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}), 'upload_date': ('created', {int}, {dt.date.fromtimestamp}, {lambda x: x.strftime('%Y%m%d')}),
'thumbnail': ('thumb', {urljoin('https://thumb.pr0gramm.com')}), 'thumbnail': ('thumb', {lambda x: urljoin('https://thumb.pr0gramm.com', x)}),
}), }),
} }

View File

@ -140,7 +140,7 @@ class QDanceIE(InfoExtractor):
'description': ('description', {str.strip}), 'description': ('description', {str.strip}),
'display_id': ('slug', {str}), 'display_id': ('slug', {str}),
'thumbnail': ('thumbnail', {url_or_none}), 'thumbnail': ('thumbnail', {url_or_none}),
'duration': ('durationInSeconds', {int_or_none}, filter), 'duration': ('durationInSeconds', {int_or_none}, {lambda x: x or None}),
'availability': ('subscription', 'level', {extract_availability}), 'availability': ('subscription', 'level', {extract_availability}),
'is_live': ('type', {lambda x: x.lower() == 'live'}), 'is_live': ('type', {lambda x: x.lower() == 'live'}),
'artist': ('acts', ..., {str}), 'artist': ('acts', ..., {str}),

View File

@ -211,10 +211,10 @@ class QQMusicIE(QQMusicBaseIE):
'formats': formats, 'formats': formats,
**traverse_obj(info_data, { **traverse_obj(info_data, {
'title': ('title', {str}), 'title': ('title', {str}),
'album': ('album', 'title', {str}, filter), 'album': ('album', 'title', {str}, {lambda x: x or None}),
'release_date': ('time_public', {lambda x: x.replace('-', '') or None}), 'release_date': ('time_public', {lambda x: x.replace('-', '') or None}),
'creators': ('singer', ..., 'name', {str}), 'creators': ('singer', ..., 'name', {str}),
'alt_title': ('subtitle', {str}, filter), 'alt_title': ('subtitle', {str}, {lambda x: x or None}),
'duration': ('interval', {int_or_none}), 'duration': ('interval', {int_or_none}),
}), }),
**traverse_obj(init_data, ('detail', { **traverse_obj(init_data, ('detail', {

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..networking import HEADRequest from ..networking import HEADRequest
@ -117,7 +118,7 @@ class RedCDNLivxIE(InfoExtractor):
time_scale = traverse_obj(ism_doc, ('@TimeScale', {int_or_none})) or 10000000 time_scale = traverse_obj(ism_doc, ('@TimeScale', {int_or_none})) or 10000000
duration = traverse_obj( duration = traverse_obj(
ism_doc, ('@Duration', {float_or_none(scale=time_scale)})) or None ism_doc, ('@Duration', {functools.partial(float_or_none, scale=time_scale)})) or None
live_status = None live_status = None
if traverse_obj(ism_doc, '@IsLive') == 'TRUE': if traverse_obj(ism_doc, '@IsLive') == 'TRUE':

View File

@ -187,4 +187,4 @@ class RTVSLOShowIE(InfoExtractor):
return self.playlist_from_matches( return self.playlist_from_matches(
re.findall(r'<a [^>]*\bhref="(/arhiv/[^"]+)"', webpage), re.findall(r'<a [^>]*\bhref="(/arhiv/[^"]+)"', webpage),
playlist_id, self._html_extract_title(webpage), playlist_id, self._html_extract_title(webpage),
getter=urljoin('https://365.rtvslo.si'), ie=RTVSLOIE) getter=lambda x: urljoin('https://365.rtvslo.si', x), ie=RTVSLOIE)

View File

@ -56,13 +56,13 @@ class SnapchatSpotlightIE(InfoExtractor):
**traverse_obj(video_data, ('videoMetadata', { **traverse_obj(video_data, ('videoMetadata', {
'title': ('name', {str}), 'title': ('name', {str}),
'description': ('description', {str}), 'description': ('description', {str}),
'timestamp': ('uploadDateMs', {float_or_none(scale=1000)}), 'timestamp': ('uploadDateMs', {lambda x: float_or_none(x, 1000)}),
'view_count': ('viewCount', {int_or_none}, {lambda x: None if x == -1 else x}), 'view_count': ('viewCount', {int_or_none}, {lambda x: None if x == -1 else x}),
'repost_count': ('shareCount', {int_or_none}), 'repost_count': ('shareCount', {int_or_none}),
'url': ('contentUrl', {url_or_none}), 'url': ('contentUrl', {url_or_none}),
'width': ('width', {int_or_none}), 'width': ('width', {int_or_none}),
'height': ('height', {int_or_none}), 'height': ('height', {int_or_none}),
'duration': ('durationMs', {float_or_none(scale=1000)}), 'duration': ('durationMs', {lambda x: float_or_none(x, 1000)}),
'thumbnail': ('thumbnailUrl', {url_or_none}), 'thumbnail': ('thumbnailUrl', {url_or_none}),
'uploader': ('creator', 'personCreator', 'username', {str}), 'uploader': ('creator', 'personCreator', 'username', {str}),
'uploader_url': ('creator', 'personCreator', 'url', {url_or_none}), 'uploader_url': ('creator', 'personCreator', 'url', {url_or_none}),

View File

@ -3,12 +3,14 @@ from ..networking.exceptions import HTTPError
from ..utils import ( from ..utils import (
ExtractorError, ExtractorError,
clean_html, clean_html,
get_element_text_and_html_by_tag,
int_or_none, int_or_none,
str_or_none, str_or_none,
traverse_obj,
try_call,
unified_timestamp, unified_timestamp,
urljoin, urljoin,
) )
from ..utils.traversal import find_element, traverse_obj
class TBSJPEpisodeIE(InfoExtractor): class TBSJPEpisodeIE(InfoExtractor):
@ -62,7 +64,7 @@ class TBSJPEpisodeIE(InfoExtractor):
self._merge_subtitles(subs, target=subtitles) self._merge_subtitles(subs, target=subtitles)
return { return {
'title': traverse_obj(webpage, ({find_element(tag='h3')}, {clean_html})), 'title': try_call(lambda: clean_html(get_element_text_and_html_by_tag('h3', webpage)[0])),
'id': video_id, 'id': video_id,
**traverse_obj(episode, { **traverse_obj(episode, {
'categories': ('keywords', {list}), 'categories': ('keywords', {list}),

View File

@ -136,7 +136,7 @@ class TeamcocoIE(TeamcocoBaseIE):
'blocks', lambda _, v: v['name'] in ('meta-tags', 'video-player', 'video-info'), 'props', {dict}))) 'blocks', lambda _, v: v['name'] in ('meta-tags', 'video-player', 'video-info'), 'props', {dict})))
thumbnail = traverse_obj( thumbnail = traverse_obj(
info, (('image', 'poster'), {urljoin('https://teamcoco.com/')}), get_all=False) info, (('image', 'poster'), {lambda x: urljoin('https://teamcoco.com/', x)}), get_all=False)
video_id = traverse_obj(parse_qs(thumbnail), ('id', 0)) or display_id video_id = traverse_obj(parse_qs(thumbnail), ('id', 0)) or display_id
formats, subtitles = self._get_formats_and_subtitles(info, video_id) formats, subtitles = self._get_formats_and_subtitles(info, video_id)

View File

@ -10,11 +10,10 @@ from ..utils.traversal import traverse_obj
def _fmt_url(url): def _fmt_url(url):
return format_field(template=url, default=None) return functools.partial(format_field, template=url, default=None)
class TelewebionIE(InfoExtractor): class TelewebionIE(InfoExtractor):
_WORKING = False
_VALID_URL = r'https?://(?:www\.)?telewebion\.com/episode/(?P<id>(?:0x[a-fA-F\d]+|\d+))' _VALID_URL = r'https?://(?:www\.)?telewebion\.com/episode/(?P<id>(?:0x[a-fA-F\d]+|\d+))'
_TESTS = [{ _TESTS = [{
'url': 'http://www.telewebion.com/episode/0x1b3139c/', 'url': 'http://www.telewebion.com/episode/0x1b3139c/',

View File

@ -1,3 +1,4 @@
import functools
import random import random
import re import re
import string import string
@ -277,7 +278,7 @@ class VQQSeriesIE(VQQBaseIE):
webpage)] webpage)]
return self.playlist_from_matches( return self.playlist_from_matches(
episode_paths, series_id, ie=VQQVideoIE, getter=urljoin(url), episode_paths, series_id, ie=VQQVideoIE, getter=functools.partial(urljoin, url),
title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title')) title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
or self._og_search_title(webpage)), or self._og_search_title(webpage)),
description=(traverse_obj(webpage_metadata, ('coverInfo', 'description')) description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))
@ -327,7 +328,7 @@ class WeTvBaseIE(TencentBaseIE):
or re.findall(r'<a[^>]+class="play-video__link"[^>]+href="(?P<path>[^"]+)', webpage)) or re.findall(r'<a[^>]+class="play-video__link"[^>]+href="(?P<path>[^"]+)', webpage))
return self.playlist_from_matches( return self.playlist_from_matches(
episode_paths, series_id, ie=ie, getter=urljoin(url), episode_paths, series_id, ie=ie, getter=functools.partial(urljoin, url),
title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title')) title=self._get_clean_title(traverse_obj(webpage_metadata, ('coverInfo', 'title'))
or self._og_search_title(webpage)), or self._og_search_title(webpage)),
description=(traverse_obj(webpage_metadata, ('coverInfo', 'description')) description=(traverse_obj(webpage_metadata, ('coverInfo', 'description'))

View File

@ -1,3 +1,4 @@
import functools
import itertools import itertools
from .common import InfoExtractor from .common import InfoExtractor
@ -160,4 +161,4 @@ class TenPlaySeasonIE(InfoExtractor):
return self.playlist_from_matches( return self.playlist_from_matches(
self._entries(urljoin(url, episodes_carousel['loadMoreUrl']), playlist_id), self._entries(urljoin(url, episodes_carousel['loadMoreUrl']), playlist_id),
playlist_id, traverse_obj(season_info, ('content', 0, 'title', {str})), playlist_id, traverse_obj(season_info, ('content', 0, 'title', {str})),
getter=urljoin(url)) getter=functools.partial(urljoin, url))

View File

@ -131,4 +131,4 @@ class TheGuardianPodcastPlaylistIE(InfoExtractor):
return self.playlist_from_matches( return self.playlist_from_matches(
self._entries(url, podcast_id), podcast_id, title, description=description, self._entries(url, podcast_id), podcast_id, title, description=description,
ie=TheGuardianPodcastIE, getter=urljoin('https://www.theguardian.com')) ie=TheGuardianPodcastIE, getter=lambda x: urljoin('https://www.theguardian.com', x))

View File

@ -469,7 +469,7 @@ class TikTokBaseIE(InfoExtractor):
aweme_detail, aweme_id, traverse_obj(author_info, 'uploader', 'uploader_id', 'channel_id')), aweme_detail, aweme_id, traverse_obj(author_info, 'uploader', 'uploader_id', 'channel_id')),
'thumbnails': thumbnails, 'thumbnails': thumbnails,
'duration': (traverse_obj(video_info, ( 'duration': (traverse_obj(video_info, (
(None, 'download_addr'), 'duration', {int_or_none(scale=1000)}, any)) (None, 'download_addr'), 'duration', {functools.partial(int_or_none, scale=1000)}, any))
or traverse_obj(music_info, ('duration', {int_or_none}))), or traverse_obj(music_info, ('duration', {int_or_none}))),
'availability': self._availability( 'availability': self._availability(
is_private='Private' in labels, is_private='Private' in labels,
@ -583,7 +583,7 @@ class TikTokBaseIE(InfoExtractor):
author_info, ['uploader', 'uploader_id'], self._UPLOADER_URL_FORMAT, default=None), author_info, ['uploader', 'uploader_id'], self._UPLOADER_URL_FORMAT, default=None),
**traverse_obj(aweme_detail, ('music', { **traverse_obj(aweme_detail, ('music', {
'track': ('title', {str}), 'track': ('title', {str}),
'album': ('album', {str}, filter), 'album': ('album', {str}, {lambda x: x or None}),
'artists': ('authorName', {str}, {lambda x: re.split(r'(?:, | & )', x) if x else None}), 'artists': ('authorName', {str}, {lambda x: re.split(r'(?:, | & )', x) if x else None}),
'duration': ('duration', {int_or_none}), 'duration': ('duration', {int_or_none}),
})), })),
@ -591,7 +591,7 @@ class TikTokBaseIE(InfoExtractor):
'title': ('desc', {str}), 'title': ('desc', {str}),
'description': ('desc', {str}), 'description': ('desc', {str}),
# audio-only slideshows have a video duration of 0 and an actual audio duration # audio-only slideshows have a video duration of 0 and an actual audio duration
'duration': ('video', 'duration', {int_or_none}, filter), 'duration': ('video', 'duration', {int_or_none}, {lambda x: x or None}),
'timestamp': ('createTime', {int_or_none}), 'timestamp': ('createTime', {int_or_none}),
}), }),
**traverse_obj(aweme_detail, ('stats', { **traverse_obj(aweme_detail, ('stats', {
@ -1493,7 +1493,7 @@ class TikTokLiveIE(TikTokBaseIE):
sdk_params = traverse_obj(stream, ('main', 'sdk_params', {parse_inner}, { sdk_params = traverse_obj(stream, ('main', 'sdk_params', {parse_inner}, {
'vcodec': ('VCodec', {str}), 'vcodec': ('VCodec', {str}),
'tbr': ('vbitrate', {int_or_none(scale=1000)}), 'tbr': ('vbitrate', {lambda x: int_or_none(x, 1000)}),
'resolution': ('resolution', {lambda x: re.match(r'(?i)\d+x\d+|\d+p', x).group().lower()}), 'resolution': ('resolution', {lambda x: re.match(r'(?i)\d+x\d+|\d+p', x).group().lower()}),
})) }))

View File

@ -3,13 +3,12 @@ from ..utils import (
ExtractorError, ExtractorError,
int_or_none, int_or_none,
traverse_obj, traverse_obj,
url_or_none,
urlencode_postdata, urlencode_postdata,
) )
class TumblrIE(InfoExtractor): class TumblrIE(InfoExtractor):
_VALID_URL = r'https?://(?P<blog_name_1>[^/?#&]+)\.tumblr\.com/(?:post|video|(?P<blog_name_2>[a-zA-Z\d-]+))/(?P<id>[0-9]+)(?:$|[/?#])' _VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])'
_NETRC_MACHINE = 'tumblr' _NETRC_MACHINE = 'tumblr'
_LOGIN_URL = 'https://www.tumblr.com/login' _LOGIN_URL = 'https://www.tumblr.com/login'
_OAUTH_URL = 'https://www.tumblr.com/api/v2/oauth2/token' _OAUTH_URL = 'https://www.tumblr.com/api/v2/oauth2/token'
@ -67,7 +66,6 @@ class TumblrIE(InfoExtractor):
'age_limit': 0, 'age_limit': 0,
'tags': [], 'tags': [],
}, },
'skip': '404',
}, { }, {
'note': 'dashboard only (original post)', 'note': 'dashboard only (original post)',
'url': 'https://jujanon.tumblr.com/post/159704441298/my-baby-eating', 'url': 'https://jujanon.tumblr.com/post/159704441298/my-baby-eating',
@ -100,6 +98,7 @@ class TumblrIE(InfoExtractor):
'like_count': int, 'like_count': int,
'repost_count': int, 'repost_count': int,
'age_limit': 0, 'age_limit': 0,
'tags': [],
}, },
}, { }, {
'note': 'dashboard only (external)', 'note': 'dashboard only (external)',
@ -110,13 +109,14 @@ class TumblrIE(InfoExtractor):
'title': 'The Blues Remembers Everything the Country Forgot', 'title': 'The Blues Remembers Everything the Country Forgot',
'alt_title': 'The Blues Remembers Everything the Country Forgot', 'alt_title': 'The Blues Remembers Everything the Country Forgot',
'description': 'md5:1a6b4097e451216835a24c1023707c79', 'description': 'md5:1a6b4097e451216835a24c1023707c79',
'release_date': '20201224',
'creator': 'md5:c2239ba15430e87c3b971ba450773272', 'creator': 'md5:c2239ba15430e87c3b971ba450773272',
'uploader': 'Moor Mother - Topic', 'uploader': 'Moor Mother - Topic',
'upload_date': '20201223', 'upload_date': '20201223',
'uploader_id': 'UCxrMtFBRkFvQJ_vVM4il08w', 'uploader_id': 'UCxrMtFBRkFvQJ_vVM4il08w',
'uploader_url': 'http://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w', 'uploader_url': 'http://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w',
'thumbnail': r're:^https?://i.ytimg.com/.*', 'thumbnail': r're:^https?://i.ytimg.com/.*',
'channel': 'Moor Mother', 'channel': 'Moor Mother - Topic',
'channel_id': 'UCxrMtFBRkFvQJ_vVM4il08w', 'channel_id': 'UCxrMtFBRkFvQJ_vVM4il08w',
'channel_url': 'https://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w', 'channel_url': 'https://www.youtube.com/channel/UCxrMtFBRkFvQJ_vVM4il08w',
'channel_follower_count': int, 'channel_follower_count': int,
@ -135,10 +135,24 @@ class TumblrIE(InfoExtractor):
'release_year': 2020, 'release_year': 2020,
}, },
'add_ie': ['Youtube'], 'add_ie': ['Youtube'],
'skip': 'Video Unavailable', }, {
'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching',
'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab',
'info_dict': {
'id': 'Wmur',
'ext': 'mp4',
'title': 'naked smoking & stretching',
'upload_date': '20150506',
'timestamp': 1430931613,
'age_limit': 18,
'uploader_id': '1638622',
'uploader': 'naked-yogi',
},
# 'add_ie': ['Vidme'],
'skip': 'dead embedded video host',
}, { }, {
'url': 'https://prozdvoices.tumblr.com/post/673201091169681408/what-recording-voice-acting-sounds-like', 'url': 'https://prozdvoices.tumblr.com/post/673201091169681408/what-recording-voice-acting-sounds-like',
'md5': 'cb8328a6723c30556cef59e370202918', 'md5': 'a0063fc8110e6c9afe44065b4ea68177',
'info_dict': { 'info_dict': {
'id': 'eomhW5MLGWA', 'id': 'eomhW5MLGWA',
'ext': 'mp4', 'ext': 'mp4',
@ -146,8 +160,8 @@ class TumblrIE(InfoExtractor):
'description': 'md5:1da3faa22d0e0b1d8b50216c284ee798', 'description': 'md5:1da3faa22d0e0b1d8b50216c284ee798',
'uploader': 'ProZD', 'uploader': 'ProZD',
'upload_date': '20220112', 'upload_date': '20220112',
'uploader_id': '@ProZD', 'uploader_id': 'ProZD',
'uploader_url': 'https://www.youtube.com/@ProZD', 'uploader_url': 'http://www.youtube.com/user/ProZD',
'thumbnail': r're:^https?://i.ytimg.com/.*', 'thumbnail': r're:^https?://i.ytimg.com/.*',
'channel': 'ProZD', 'channel': 'ProZD',
'channel_id': 'UC6MFZAOHXlKK1FI7V0XQVeA', 'channel_id': 'UC6MFZAOHXlKK1FI7V0XQVeA',
@ -162,10 +176,6 @@ class TumblrIE(InfoExtractor):
'live_status': 'not_live', 'live_status': 'not_live',
'playable_in_embed': True, 'playable_in_embed': True,
'availability': 'public', 'availability': 'public',
'heatmap': 'count:100',
'channel_is_verified': True,
'timestamp': 1642014562,
'comment_count': int,
}, },
'add_ie': ['Youtube'], 'add_ie': ['Youtube'],
}, { }, {
@ -173,20 +183,16 @@ class TumblrIE(InfoExtractor):
'md5': '203e9eb8077e3f45bfaeb4c86c1467b8', 'md5': '203e9eb8077e3f45bfaeb4c86c1467b8',
'info_dict': { 'info_dict': {
'id': '87816359', 'id': '87816359',
'ext': 'mp4', 'ext': 'mov',
'title': 'Harold Ramis', 'title': 'Harold Ramis',
'description': 'md5:c99882405fcca0b1d348ad093f8f1672', 'description': 'md5:be8e68cbf56ce0785c77f0c6c6dfaf2c',
'uploader': 'Resolution Productions Group', 'uploader': 'Resolution Productions Group',
'uploader_id': 'resolutionproductions', 'uploader_id': 'resolutionproductions',
'uploader_url': 'https://vimeo.com/resolutionproductions', 'uploader_url': 'https://vimeo.com/resolutionproductions',
'upload_date': '20140227', 'upload_date': '20140227',
'thumbnail': r're:^https?://i.vimeocdn.com/video/.*', 'thumbnail': r're:^https?://i.vimeocdn.com/video/.*',
'timestamp': 1393541719, 'timestamp': 1393523719,
'duration': 291, 'duration': 291,
'comment_count': int,
'like_count': int,
'release_timestamp': 1393541719,
'release_date': '20140227',
}, },
'add_ie': ['Vimeo'], 'add_ie': ['Vimeo'],
}, { }, {
@ -208,7 +214,6 @@ class TumblrIE(InfoExtractor):
'view_count': int, 'view_count': int,
}, },
'add_ie': ['Vine'], 'add_ie': ['Vine'],
'skip': 'Vine is unavailable',
}, { }, {
'url': 'https://silami.tumblr.com/post/84250043974/my-bad-river-flows-in-you-impression-on-maschine', 'url': 'https://silami.tumblr.com/post/84250043974/my-bad-river-flows-in-you-impression-on-maschine',
'md5': '3c92d7c3d867f14ccbeefa2119022277', 'md5': '3c92d7c3d867f14ccbeefa2119022277',
@ -227,140 +232,6 @@ class TumblrIE(InfoExtractor):
'upload_date': '20140429', 'upload_date': '20140429',
}, },
'add_ie': ['Instagram'], 'add_ie': ['Instagram'],
}, {
'note': 'new url scheme',
'url': 'https://www.tumblr.com/autumnsister/765162750456578048?source=share',
'info_dict': {
'id': '765162750456578048',
'ext': 'mp4',
'uploader_url': 'https://autumnsister.tumblr.com/',
'tags': ['autumn', 'food', 'curators on tumblr'],
'like_count': int,
'thumbnail': 'https://64.media.tumblr.com/tumblr_sklad89N3x1ygquow_frame1.jpg',
'title': '🪹',
'uploader_id': 'autumnsister',
'repost_count': int,
'age_limit': 0,
},
}, {
'note': 'bandcamp album embed',
'url': 'https://patricia-taxxon.tumblr.com/post/704473755725004800/patricia-taxxon-agnes-hilda-patricia-taxxon',
'info_dict': {
'id': 'agnes-hilda',
'title': 'Agnes & Hilda',
'description': 'The inexplicable joy of an artist. Wash paws after listening.',
'uploader_id': 'patriciataxxon',
},
'playlist_count': 8,
}, {
'note': 'bandcamp track embeds (many)',
'url': 'https://www.tumblr.com/felixcosm/730460905855467520/if-youre-looking-for-new-music-to-write-or',
'info_dict': {
'id': '730460905855467520',
'uploader_id': 'felixcosm',
'repost_count': int,
'tags': 'count:15',
'description': 'md5:2eb3482a3c6987280cbefb6839068f32',
'like_count': int,
'age_limit': 0,
'title': 'If you\'re looking for new music to write or imagine scenerios to: STOP. This is for you.',
'uploader_url': 'https://felixcosm.tumblr.com/',
},
'playlist_count': 10,
}, {
'note': 'soundcloud track embed',
'url': 'https://silverfoxstole.tumblr.com/post/765305403763556352/jamie-robertson-doctor-who-8th-doctor',
'info_dict': {
'id': '1218136399',
'ext': 'opus',
'comment_count': int,
'genres': [],
'repost_count': int,
'uploader': 'Jamie Robertson',
'title': 'Doctor Who - 8th doctor - Stranded Theme never released and used.',
'duration': 46.106,
'uploader_id': '2731064',
'thumbnail': 'https://i1.sndcdn.com/artworks-MVgcPm5jN42isC5M-6Dz22w-original.jpg',
'timestamp': 1645181261,
'uploader_url': 'https://soundcloud.com/jamierobertson',
'view_count': int,
'upload_date': '20220218',
'description': 'md5:ab924dd9994d0a7d64d6d31bf2af4625',
'license': 'all-rights-reserved',
'like_count': int,
},
}, {
'note': 'soundcloud set embed',
'url': 'https://www.tumblr.com/beyourselfchulanmaria/703505323122638848/chu-lan-maria-the-playlist-%E5%BF%83%E7%9A%84%E5%91%BC%E5%96%9A-call-of-the',
'info_dict': {
'id': '691222680',
'title': '心的呼喚 Call of the heart I',
'description': 'md5:25952a8d178a3aa55e40fcbb646a38c3',
},
'playlist_mincount': 19,
}, {
'note': 'dailymotion video embed',
'url': 'https://www.tumblr.com/funvibecentral/759390024460632064',
'info_dict': {
'id': 'x94cnnk',
'ext': 'mp4',
'description': 'Funny dailymotion shorts.\n#funny #fun#comedy #romantic #exciting',
'uploader': 'FunVibe Central',
'like_count': int,
'view_count': int,
'timestamp': 1724210553,
'title': 'Woman watching other Woman',
'tags': [],
'upload_date': '20240821',
'age_limit': 0,
'uploader_id': 'x32m6ye',
'duration': 20,
'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/Wtqh01cnxKNXLG1N8/x1080',
},
}, {
'note': 'tiktok video embed',
'url': 'https://fansofcolor.tumblr.com/post/660637918605475840/blockquote-class-tiktok-embed',
'info_dict': {
'id': '7000937272010935558',
'ext': 'mp4',
'artists': ['Alicia Dreaming'],
'like_count': int,
'repost_count': int,
'thumbnail': r're:^https?://[\w\/\.\-]+(~[\w\-]+\.image)?',
'channel_id': 'MS4wLjABAAAAsJohwz_dU4KfAOc61cbGDAZ46-5hg2ANTXVQlRe1ipDhpX08PywR3PPiple1NTAo',
'uploader': 'aliciadreaming',
'description': 'huge casting news Greyworm will be #louisdulac #racebending #interviewwiththevampire',
'title': 'huge casting news Greyworm will be #louisdulac #racebending #interviewwiththevampire',
'channel_url': 'https://www.tiktok.com/@MS4wLjABAAAAsJohwz_dU4KfAOc61cbGDAZ46-5hg2ANTXVQlRe1ipDhpX08PywR3PPiple1NTAo',
'uploader_id': '7000478462196990982',
'uploader_url': 'https://www.tiktok.com/@aliciadreaming',
'timestamp': 1630032733,
'channel': 'Alicia Dreaming',
'track': 'original sound',
'upload_date': '20210827',
'view_count': int,
'comment_count': int,
'duration': 59,
},
}, {
'note': 'tumblr video AND youtube embed',
'url': 'https://www.tumblr.com/anyaboz/765332564457209856/my-music-video-for-selkie-by-nobodys-wolf-child',
'info_dict': {
'id': '765332564457209856',
'uploader_id': 'anyaboz',
'repost_count': int,
'age_limit': 0,
'uploader_url': 'https://anyaboz.tumblr.com/',
'description': 'md5:9a129cf6ce9d87a80ffd3c6dedd4d1e6',
'like_count': int,
'title': 'md5:b18a2ac9387681d20303e485db85c1b5',
'tags': ['music video', 'nobodys wolf child', 'selkie', 'Stop Motion Animation', 'stop Motion', 'room guardians', 'Youtube'],
},
'playlist_count': 2,
}, {
# twitch_live provider - error when linked account is not live
'url': 'https://www.tumblr.com/anarcho-skamunist/722224493650722816/hollow-knight-stream-right-now-going-to-fight',
'only_matching': True,
}] }]
_providers = { _providers = {
@ -368,16 +239,6 @@ class TumblrIE(InfoExtractor):
'vimeo': 'Vimeo', 'vimeo': 'Vimeo',
'vine': 'Vine', 'vine': 'Vine',
'youtube': 'Youtube', 'youtube': 'Youtube',
'dailymotion': 'Dailymotion',
'tiktok': 'TikTok',
'twitch_live': 'TwitchStream',
'bandcamp': None,
'soundcloud': None,
}
# known not to be supported
_unsupported_providers = {
# seems like podcasts can't be embedded
'spotify',
} }
_ACCESS_TOKEN = None _ACCESS_TOKEN = None
@ -395,40 +256,23 @@ class TumblrIE(InfoExtractor):
if not self._ACCESS_TOKEN: if not self._ACCESS_TOKEN:
return return
data = { self._download_json(
self._OAUTH_URL, None, 'Logging in',
data=urlencode_postdata({
'password': password, 'password': password,
'grant_type': 'password', 'grant_type': 'password',
'username': username, 'username': username,
} }), headers={
if self.get_param('twofactor'):
data['tfa_token'] = self.get_param('twofactor')
def _call_login():
return self._download_json(
self._OAUTH_URL, None, 'Logging in',
data=urlencode_postdata(data),
headers={
'Content-Type': 'application/x-www-form-urlencoded', 'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': f'Bearer {self._ACCESS_TOKEN}', 'Authorization': f'Bearer {self._ACCESS_TOKEN}',
}, },
errnote='Login failed', fatal=False, errnote='Login failed', fatal=False)
expected_status=lambda s: 400 <= s < 500)
response = _call_login()
if traverse_obj(response, 'error') == 'tfa_required':
data['tfa_token'] = self._get_tfa_info()
response = _call_login()
if traverse_obj(response, 'error'):
raise ExtractorError(
f'API returned error {": ".join(traverse_obj(response, (("error", "error_description"), {str})))}')
def _real_extract(self, url): def _real_extract(self, url):
blog_1, blog_2, video_id = self._match_valid_url(url).groups() blog, video_id = self._match_valid_url(url).groups()
blog = blog_2 or blog_1
url = f'http://{blog}.tumblr.com/post/{video_id}' url = f'http://{blog}.tumblr.com/post/{video_id}/'
webpage, urlh = self._download_webpage_handle( webpage, urlh = self._download_webpage_handle(url, video_id)
url, video_id, headers={'User-Agent': 'WhatsApp/2.0'}) # whatsapp ua bypasses problems
redirect_url = urlh.url redirect_url = urlh.url
@ -445,69 +289,23 @@ class TumblrIE(InfoExtractor):
self._download_json( self._download_json(
f'https://www.tumblr.com/api/v2/blog/{blog}/posts/{video_id}/permalink', f'https://www.tumblr.com/api/v2/blog/{blog}/posts/{video_id}/permalink',
video_id, headers={'Authorization': f'Bearer {self._ACCESS_TOKEN}'}, fatal=False), video_id, headers={'Authorization': f'Bearer {self._ACCESS_TOKEN}'}, fatal=False),
('response', 'timeline', 'elements', 0, {dict})) or {} ('response', 'timeline', 'elements', 0)) or {}
content_json = traverse_obj(post_json, ((('trail', 0), None), 'content', ..., {dict})) content_json = traverse_obj(post_json, ('trail', 0, 'content'), ('content')) or []
video_json = next(
# the url we're extracting from might be an original post or it might be a reblog. (item for item in content_json if item.get('type') == 'video'), {})
# if it's a reblog, og:description will be the reblogger's comment, not the uploader's.
# content_json is always the op, so if it exists but has no text, there's no description
if content_json:
description = '\n\n'.join(
item.get('text') for item in content_json if item.get('type') == 'text') or None
else:
description = self._og_search_description(webpage, default=None)
uploader_id = traverse_obj(post_json, 'reblogged_root_name', 'blog_name')
info_dict = {
'id': video_id,
'title': post_json.get('summary') or (blog if api_only else self._html_search_regex(
r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>', webpage, 'title', default=blog)),
'description': description,
'uploader_id': uploader_id,
'uploader_url': f'https://{uploader_id}.tumblr.com/' if uploader_id else None,
**traverse_obj(post_json, {
'like_count': ('like_count', {int_or_none}),
'repost_count': ('reblog_count', {int_or_none}),
'tags': ('tags', ..., {str}),
}),
'age_limit': {True: 18, False: 0}.get(post_json.get('is_nsfw')),
}
# for tumblr's own video hosting
fallback_format = None
formats = []
video_url = self._og_search_video_url(webpage, default=None)
# for external video hosts
entries = []
ignored_providers = set()
unknown_providers = set()
for video_json in traverse_obj(content_json, lambda _, v: v['type'] in ('video', 'audio')):
media_json = video_json.get('media') or {} media_json = video_json.get('media') or {}
if api_only and not media_json.get('url') and not video_json.get('url'): if api_only and not media_json.get('url') and not video_json.get('url'):
raise ExtractorError('Failed to find video data for dashboard-only post') raise ExtractorError('Failed to find video data for dashboard-only post')
provider = video_json.get('provider')
if provider in ('tumblr', None): if not media_json.get('url') and video_json.get('url'):
fallback_format = {
'url': media_json.get('url') or video_url,
'width': int_or_none(
media_json.get('width') or self._og_search_property('video:width', webpage, default=None)),
'height': int_or_none(
media_json.get('height') or self._og_search_property('video:height', webpage, default=None)),
}
continue
elif provider in self._unsupported_providers:
ignored_providers.add(provider)
continue
elif provider and provider not in self._providers:
unknown_providers.add(provider)
if video_json.get('url'):
# external video host # external video host
entries.append(self.url_result( return self.url_result(
video_json['url'], self._providers.get(provider))) video_json['url'],
self._providers.get(video_json.get('provider'), 'Generic'))
video_url = self._og_search_video_url(webpage, default=None)
duration = None duration = None
formats = []
# iframes can supply duration and sometimes additional formats, so check for one # iframes can supply duration and sometimes additional formats, so check for one
iframe_url = self._search_regex( iframe_url = self._search_regex(
@ -546,36 +344,44 @@ class TumblrIE(InfoExtractor):
'quality': quality, 'quality': quality,
} for quality, (video_url, format_id) in enumerate(sources)] } for quality, (video_url, format_id) in enumerate(sources)]
if not formats and fallback_format: if not media_json.get('url') and not video_url and not iframe_url:
formats.append(fallback_format) # external video host (but we weren't able to figure it out from the api)
iframe_url = self._search_regex(
r'src=["\'](https?://safe\.txmblr\.com/svc/embed/inline/[^"\']+)["\']',
webpage, 'embed iframe url', default=None)
return self.url_result(iframe_url or redirect_url, 'Generic')
if formats: formats = formats or [{
# tumblr's own video is always above embeds 'url': media_json.get('url') or video_url,
entries.insert(0, { 'width': int_or_none(
**info_dict, media_json.get('width') or self._og_search_property('video:width', webpage, default=None)),
'formats': formats, 'height': int_or_none(
'duration': duration, media_json.get('height') or self._og_search_property('video:height', webpage, default=None)),
'thumbnail': (traverse_obj(video_json, ('poster', 0, 'url', {url_or_none})) }]
or self._og_search_thumbnail(webpage, default=None)),
})
if ignored_providers: # the url we're extracting from might be an original post or it might be a reblog.
if not entries: # if it's a reblog, og:description will be the reblogger's comment, not the uploader's.
raise ExtractorError(f'None of embed providers are supported: {", ".join(ignored_providers)!s}', video_id=video_id, expected=True) # content_json is always the op, so if it exists but has no text, there's no description
if content_json:
description = '\n\n'.join(
item.get('text') for item in content_json if item.get('type') == 'text') or None
else: else:
self.report_warning(f'Skipped embeds from unsupported providers: {", ".join(ignored_providers)!s}', video_id) description = self._og_search_description(webpage, default=None)
if unknown_providers: uploader_id = traverse_obj(post_json, 'reblogged_root_name', 'blog_name')
self.report_warning(f'Unrecognized providers, please report: {", ".join(unknown_providers)!s}', video_id)
if not entries:
self.raise_no_formats('No video could be found in this post', expected=True, video_id=video_id)
if len(entries) == 1:
return { return {
**info_dict, 'id': video_id,
**entries[0], 'title': post_json.get('summary') or (blog if api_only else self._html_search_regex(
} r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>', webpage, 'title')),
return { 'description': description,
**info_dict, 'thumbnail': (traverse_obj(video_json, ('poster', 0, 'url'))
'_type': 'playlist', or self._og_search_thumbnail(webpage, default=None)),
'entries': entries, 'uploader_id': uploader_id,
'uploader_url': f'https://{uploader_id}.tumblr.com/' if uploader_id else None,
'duration': duration,
'like_count': post_json.get('like_count'),
'repost_count': post_json.get('reblog_count'),
'age_limit': {True: 18, False: 0}.get(post_json.get('is_nsfw')),
'tags': post_json.get('tags'),
'formats': formats,
} }

View File

@ -1,3 +1,4 @@
import functools
import re import re
from .brightcove import BrightcoveNewIE from .brightcove import BrightcoveNewIE
@ -67,7 +68,7 @@ class TVAIE(InfoExtractor):
'episode': episode, 'episode': episode,
**traverse_obj(entity, { **traverse_obj(entity, {
'description': ('longDescription', {str}), 'description': ('longDescription', {str}),
'duration': ('durationMillis', {float_or_none(scale=1000)}), 'duration': ('durationMillis', {functools.partial(float_or_none, scale=1000)}),
'channel': ('knownEntities', 'channel', 'name', {str}), 'channel': ('knownEntities', 'channel', 'name', {str}),
'series': ('knownEntities', 'videoShow', 'name', {str}), 'series': ('knownEntities', 'videoShow', 'name', {str}),
'season_number': ('slug', {lambda x: re.search(r'/s(?:ai|ea)son-(\d+)/', x)}, 1, {int_or_none}), 'season_number': ('slug', {lambda x: re.search(r'/s(?:ai|ea)son-(\d+)/', x)}, 1, {int_or_none}),

View File

@ -1,3 +1,4 @@
import functools
import re import re
from .common import InfoExtractor from .common import InfoExtractor
@ -71,9 +72,9 @@ class VidyardBaseIE(InfoExtractor):
'id': ('facadeUuid', {str}), 'id': ('facadeUuid', {str}),
'display_id': ('videoId', {int}, {str_or_none}), 'display_id': ('videoId', {int}, {str_or_none}),
'title': ('name', {str}), 'title': ('name', {str}),
'description': ('description', {str}, {unescapeHTML}, filter), 'description': ('description', {str}, {unescapeHTML}, {lambda x: x or None}),
'duration': (( 'duration': ((
('milliseconds', {float_or_none(scale=1000)}), ('milliseconds', {functools.partial(float_or_none, scale=1000)}),
('seconds', {int_or_none})), any), ('seconds', {int_or_none})), any),
'thumbnails': ('thumbnailUrls', ('small', 'normal'), {'url': {url_or_none}}), 'thumbnails': ('thumbnailUrls', ('small', 'normal'), {'url': {url_or_none}}),
'tags': ('tags', ..., 'name', {str}), 'tags': ('tags', ..., 'name', {str}),

View File

@ -1,3 +1,4 @@
import functools
import json import json
import time import time
import urllib.parse import urllib.parse
@ -170,7 +171,7 @@ class VRTIE(VRTBaseIE):
**traverse_obj(data, { **traverse_obj(data, {
'title': ('title', {str}), 'title': ('title', {str}),
'description': ('shortDescription', {str}), 'description': ('shortDescription', {str}),
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
'thumbnail': ('posterImageUrl', {url_or_none}), 'thumbnail': ('posterImageUrl', {url_or_none}),
}), }),
} }

View File

@ -67,7 +67,7 @@ class WeiboBaseIE(InfoExtractor):
'format': ('quality_desc', {str}), 'format': ('quality_desc', {str}),
'format_id': ('label', {str}), 'format_id': ('label', {str}),
'ext': ('mime', {mimetype2ext}), 'ext': ('mime', {mimetype2ext}),
'tbr': ('bitrate', {int_or_none}, filter), 'tbr': ('bitrate', {int_or_none}, {lambda x: x or None}),
'vcodec': ('video_codecs', {str}), 'vcodec': ('video_codecs', {str}),
'fps': ('fps', {int_or_none}), 'fps': ('fps', {int_or_none}),
'width': ('width', {int_or_none}), 'width': ('width', {int_or_none}),
@ -107,14 +107,14 @@ class WeiboBaseIE(InfoExtractor):
**traverse_obj(video_info, { **traverse_obj(video_info, {
'id': (('id', 'id_str', 'mid'), {str_or_none}), 'id': (('id', 'id_str', 'mid'), {str_or_none}),
'display_id': ('mblogid', {str_or_none}), 'display_id': ('mblogid', {str_or_none}),
'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, filter), 'title': ('page_info', 'media_info', ('video_title', 'kol_title', 'name'), {str}, {lambda x: x or None}),
'description': ('text_raw', {str}), 'description': ('text_raw', {str}),
'duration': ('page_info', 'media_info', 'duration', {int_or_none}), 'duration': ('page_info', 'media_info', 'duration', {int_or_none}),
'timestamp': ('page_info', 'media_info', 'video_publish_time', {int_or_none}), 'timestamp': ('page_info', 'media_info', 'video_publish_time', {int_or_none}),
'thumbnail': ('page_info', 'page_pic', {url_or_none}), 'thumbnail': ('page_info', 'page_pic', {url_or_none}),
'uploader': ('user', 'screen_name', {str}), 'uploader': ('user', 'screen_name', {str}),
'uploader_id': ('user', ('id', 'id_str'), {str_or_none}), 'uploader_id': ('user', ('id', 'id_str'), {str_or_none}),
'uploader_url': ('user', 'profile_url', {urljoin('https://weibo.com/')}), 'uploader_url': ('user', 'profile_url', {lambda x: urljoin('https://weibo.com/', x)}),
'view_count': ('page_info', 'media_info', 'online_users_number', {int_or_none}), 'view_count': ('page_info', 'media_info', 'online_users_number', {int_or_none}),
'like_count': ('attitudes_count', {int_or_none}), 'like_count': ('attitudes_count', {int_or_none}),
'repost_count': ('reposts_count', {int_or_none}), 'repost_count': ('reposts_count', {int_or_none}),

View File

@ -159,8 +159,8 @@ class WeverseBaseIE(InfoExtractor):
'creators': ('community', 'communityName', {str}, all), 'creators': ('community', 'communityName', {str}, all),
'channel_id': (('community', 'author'), 'communityId', {str_or_none}), 'channel_id': (('community', 'author'), 'communityId', {str_or_none}),
'duration': ('extension', 'video', 'playTime', {float_or_none}), 'duration': ('extension', 'video', 'playTime', {float_or_none}),
'timestamp': ('publishedAt', {int_or_none(scale=1000)}), 'timestamp': ('publishedAt', {lambda x: int_or_none(x, 1000)}),
'release_timestamp': ('extension', 'video', 'onAirStartAt', {int_or_none(scale=1000)}), 'release_timestamp': ('extension', 'video', 'onAirStartAt', {lambda x: int_or_none(x, 1000)}),
'thumbnail': ('extension', (('mediaInfo', 'thumbnail', 'url'), ('video', 'thumb')), {url_or_none}), 'thumbnail': ('extension', (('mediaInfo', 'thumbnail', 'url'), ('video', 'thumb')), {url_or_none}),
'view_count': ('extension', 'video', 'playCount', {int_or_none}), 'view_count': ('extension', 'video', 'playCount', {int_or_none}),
'like_count': ('extension', 'video', 'likeCount', {int_or_none}), 'like_count': ('extension', 'video', 'likeCount', {int_or_none}),
@ -469,7 +469,7 @@ class WeverseMomentIE(WeverseBaseIE):
'creator': (('community', 'author'), 'communityName', {str}), 'creator': (('community', 'author'), 'communityName', {str}),
'channel_id': (('community', 'author'), 'communityId', {str_or_none}), 'channel_id': (('community', 'author'), 'communityId', {str_or_none}),
'duration': ('extension', 'moment', 'video', 'uploadInfo', 'playTime', {float_or_none}), 'duration': ('extension', 'moment', 'video', 'uploadInfo', 'playTime', {float_or_none}),
'timestamp': ('publishedAt', {int_or_none(scale=1000)}), 'timestamp': ('publishedAt', {lambda x: int_or_none(x, 1000)}),
'thumbnail': ('extension', 'moment', 'video', 'uploadInfo', 'imageUrl', {url_or_none}), 'thumbnail': ('extension', 'moment', 'video', 'uploadInfo', 'imageUrl', {url_or_none}),
'like_count': ('emotionCount', {int_or_none}), 'like_count': ('emotionCount', {int_or_none}),
'comment_count': ('commentCount', {int_or_none}), 'comment_count': ('commentCount', {int_or_none}),

View File

@ -78,7 +78,7 @@ class WeVidiIE(InfoExtractor):
} }
src_path = f'{wvplayer_props["srcVID"]}/{wvplayer_props["srcUID"]}/{wvplayer_props["srcNAME"]}' src_path = f'{wvplayer_props["srcVID"]}/{wvplayer_props["srcUID"]}/{wvplayer_props["srcNAME"]}'
for res in traverse_obj(wvplayer_props, ('resolutions', ..., {int}, filter)): for res in traverse_obj(wvplayer_props, ('resolutions', ..., {int}, {lambda x: x or None})):
format_id = str(-(res // -2) - 1) format_id = str(-(res // -2) - 1)
yield { yield {
'acodec': 'mp4a.40.2', 'acodec': 'mp4a.40.2',

View File

@ -1,3 +1,4 @@
import functools
from .common import InfoExtractor from .common import InfoExtractor
from ..utils import ( from ..utils import (
@ -50,7 +51,7 @@ class XiaoHongShuIE(InfoExtractor):
'tbr': ('avgBitrate', {int_or_none}), 'tbr': ('avgBitrate', {int_or_none}),
'format': ('qualityType', {str}), 'format': ('qualityType', {str}),
'filesize': ('size', {int_or_none}), 'filesize': ('size', {int_or_none}),
'duration': ('duration', {float_or_none(scale=1000)}), 'duration': ('duration', {functools.partial(float_or_none, scale=1000)}),
}) })
formats.extend(traverse_obj(info, (('mediaUrl', ('backupUrls', ...)), { formats.extend(traverse_obj(info, (('mediaUrl', ('backupUrls', ...)), {

View File

@ -247,7 +247,7 @@ class YouPornListBase(InfoExtractor):
if not html: if not html:
return return
for element in get_elements_html_by_class('video-title', html): for element in get_elements_html_by_class('video-title', html):
if video_url := traverse_obj(element, ({extract_attributes}, 'href', {urljoin(url)})): if video_url := traverse_obj(element, ({extract_attributes}, 'href', {lambda x: urljoin(url, x)})):
yield self.url_result(video_url) yield self.url_result(video_url)
if page_num is not None: if page_num is not None:

View File

@ -3611,7 +3611,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'frameworkUpdates', 'entityBatchUpdate', 'mutations', 'frameworkUpdates', 'entityBatchUpdate', 'mutations',
lambda _, v: v['payload']['macroMarkersListEntity']['markersList']['markerType'] == 'MARKER_TYPE_HEATMAP', lambda _, v: v['payload']['macroMarkersListEntity']['markersList']['markerType'] == 'MARKER_TYPE_HEATMAP',
'payload', 'macroMarkersListEntity', 'markersList', 'markers', ..., { 'payload', 'macroMarkersListEntity', 'markersList', 'markers', ..., {
'start_time': ('startMillis', {float_or_none(scale=1000)}), 'start_time': ('startMillis', {functools.partial(float_or_none, scale=1000)}),
'end_time': {lambda x: (int(x['startMillis']) + int(x['durationMillis'])) / 1000}, 'end_time': {lambda x: (int(x['startMillis']) + int(x['durationMillis'])) / 1000},
'value': ('intensityScoreNormalized', {float_or_none}), 'value': ('intensityScoreNormalized', {float_or_none}),
})) or None })) or None
@ -3637,7 +3637,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'author_is_verified': ('author', 'isVerified', {bool}), 'author_is_verified': ('author', 'isVerified', {bool}),
'author_url': ('author', 'channelCommand', 'innertubeCommand', ( 'author_url': ('author', 'channelCommand', 'innertubeCommand', (
('browseEndpoint', 'canonicalBaseUrl'), ('commandMetadata', 'webCommandMetadata', 'url'), ('browseEndpoint', 'canonicalBaseUrl'), ('commandMetadata', 'webCommandMetadata', 'url'),
), {urljoin('https://www.youtube.com')}), ), {lambda x: urljoin('https://www.youtube.com', x)}),
}, get_all=False), }, get_all=False),
'is_favorited': (None if toolbar_entity_payload is None else 'is_favorited': (None if toolbar_entity_payload is None else
toolbar_entity_payload.get('heartState') == 'TOOLBAR_HEART_STATE_HEARTED'), toolbar_entity_payload.get('heartState') == 'TOOLBAR_HEART_STATE_HEARTED'),
@ -4304,7 +4304,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
continue continue
tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000) tbr = float_or_none(fmt.get('averageBitrate') or fmt.get('bitrate'), 1000)
format_duration = traverse_obj(fmt, ('approxDurationMs', {float_or_none(scale=1000)})) format_duration = traverse_obj(fmt, ('approxDurationMs', {lambda x: float_or_none(x, 1000)}))
# Some formats may have much smaller duration than others (possibly damaged during encoding) # Some formats may have much smaller duration than others (possibly damaged during encoding)
# E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823 # E.g. 2-nOtRESiUc Ref: https://github.com/yt-dlp/yt-dlp/issues/2823
# Make sure to avoid false positives with small duration differences. # Make sure to avoid false positives with small duration differences.

View File

@ -109,7 +109,7 @@ class ZaikoIE(ZaikoBaseIE):
'uploader': ('profile', 'name', {str}), 'uploader': ('profile', 'name', {str}),
'uploader_id': ('profile', 'id', {str_or_none}), 'uploader_id': ('profile', 'id', {str_or_none}),
'release_timestamp': ('stream', 'start', 'timestamp', {int_or_none}), 'release_timestamp': ('stream', 'start', 'timestamp', {int_or_none}),
'categories': ('event', 'genres', ..., filter), 'categories': ('event', 'genres', ..., {lambda x: x or None}),
}), }),
'alt_title': traverse_obj(initial_event_info, ('title', {str})), 'alt_title': traverse_obj(initial_event_info, ('title', {str})),
'thumbnails': [{'url': url, 'id': url_basename(url)} for url in thumbnail_urls if url_or_none(url)], 'thumbnails': [{'url': url, 'id': url_basename(url)} for url in thumbnail_urls if url_or_none(url)],

View File

@ -700,8 +700,7 @@ def create_parser():
selection.add_option( selection.add_option(
'--break-on-existing', '--break-on-existing',
action='store_true', dest='break_on_existing', default=False, action='store_true', dest='break_on_existing', default=False,
help='Stop the download process when encountering a file that is in the archive ' help='Stop the download process when encountering a file that is in the archive')
'supplied with the --download-archive option')
selection.add_option( selection.add_option(
'--no-break-on-existing', '--no-break-on-existing',
action='store_false', dest='break_on_existing', action='store_false', dest='break_on_existing',

View File

@ -5142,7 +5142,6 @@ class _UnsafeExtensionError(Exception):
'rm', 'rm',
'swf', 'swf',
'ts', 'ts',
'vid',
'vob', 'vob',
'vp9', 'vp9',
@ -5175,7 +5174,6 @@ class _UnsafeExtensionError(Exception):
'heic', 'heic',
'ico', 'ico',
'image', 'image',
'jfif',
'jng', 'jng',
'jpe', 'jpe',
'jpeg', 'jpeg',

View File

@ -1,8 +1,8 @@
# Autogenerated by devscripts/update-version.py # Autogenerated by devscripts/update-version.py
__version__ = '2024.11.04' __version__ = '2024.10.22'
RELEASE_GIT_HEAD = '197d0b03b6a3c8fe4fa5ace630eeffec629bf72c' RELEASE_GIT_HEAD = '67adeb7bab00662ba55d473e405b301abb42fe61'
VARIANT = None VARIANT = None
@ -12,4 +12,4 @@ CHANNEL = 'stable'
ORIGIN = 'yt-dlp/yt-dlp' ORIGIN = 'yt-dlp/yt-dlp'
_pkg_version = '2024.11.04' _pkg_version = '2024.10.22'