Compare commits

...

16 Commits

Author SHA1 Message Date
bashonly
6b1e9ec11f
Update setup.cfg 2023-11-16 12:20:56 -06:00
bashonly
7da42ed686
[test:update] Test update spec with Py3.7 locks 2023-11-16 11:38:38 -06:00
bashonly
0b473bf07d
[build] Actually finalize _update_spec 2023-11-16 07:10:10 -06:00
bashonly
930491e203
Merge branch 'yt-dlp:master' into cleanup/remove-3.7 2023-11-16 07:04:46 -06:00
github-actions[bot]
fe6c82ccff
Release 2023.11.16
Created by: bashonly

:ci skip all :ci run dl
2023-11-16 00:01:38 +00:00
bashonly
24f827875c
[build] Make secretstorage an optional dependency (#8585)
Authored by: bashonly
2023-11-15 23:31:32 +00:00
bashonly
15cb3528cb
[ie/abc.net.au:iview:showseries] Fix extraction (#8586)
Closes #8554, Closes #8572
Authored by: bashonly
2023-11-15 23:24:55 +00:00
JC-Chung
2325d03aa7
[ie/twitcasting] Fix livestream detection (#8574)
Authored by: JC-Chung
2023-11-15 23:23:18 +00:00
aarubui
e569c2d1f4
[ie/njpwworld] Remove (#8570)
Authored by: aarubui
2023-11-15 23:21:33 +00:00
TravisDupes
a489f07150
[ie/dailymotion] Improve _VALID_URL (#7692)
Closes #7601
Authored by: TravisDupes
2023-11-15 23:19:34 +00:00
Boris Nagaev
5efe68b73c
[ie/ZenYandex] Fix extraction (#8454)
Closes #8275
Authored by: starius
2023-11-15 23:16:54 +00:00
Awal Garg
b530118e7f
[ie/JioSaavn] Add extractors (#8307)
Authored by: awalgarg
2023-11-15 23:15:06 +00:00
Eze Livinsky
dcfad52812
[ie/eltrecetv] Add extractor (#8216)
Authored by: elivinsky
2023-11-15 23:13:05 +00:00
almx
0783fd558e
[ie/DRTV] Fix extractor (#8484)
Closes #8298
Authored by: almx, seproDev

Co-authored-by: sepro <4618135+seproDev@users.noreply.github.com>
2023-11-15 22:42:18 +00:00
FrankZ85
0f634dba3a
[ie/tv5mondeplus] Extract subtitles (#4209)
Closes #4205
Authored by: FrankZ85
2023-11-15 22:38:52 +00:00
sepro
21dc069bea
[ie/beatbump] Update _VALID_URL (#8576)
Authored by: seproDev
2023-11-15 14:34:39 +00:00
21 changed files with 511 additions and 432 deletions

View File

@@ -204,11 +204,11 @@ jobs:
 apt -y install zlib1g-dev python3.8 python3.8-dev python3.8-distutils python3-pip
 python3.8 -m pip install -U pip setuptools wheel
 # Cannot access requirements.txt from the repo directory at this stage
-python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi
+python3.8 -m pip install -U Pyinstaller mutagen pycryptodomex websockets brotli certifi secretstorage
 run: |
 cd repo
-python3.8 -m pip install -U Pyinstaller -r requirements.txt  # Cached version may be out of date
+python3.8 -m pip install -U Pyinstaller secretstorage -r requirements.txt  # Cached version may be out of date
 python3.8 devscripts/update-version.py -c "${{ inputs.channel }}" -r "${{ needs.process.outputs.origin }}" "${{ inputs.version }}"
 python3.8 devscripts/make_lazy_extractors.py
 python3.8 pyinst.py
@@ -437,15 +437,15 @@ jobs:
 cat >> _update_spec << EOF
 # This file is used for regulating self-update
 lock 2022.08.18.36 .+ Python 3\.6
-lock 2023.11.15 (?!win_x86_exe).+ Python 3\.7
-lock 2023.11.15 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
-lockV2 yt-dlp/yt-dlp 2023.11.15 (?!win_x86_exe).+ Python 3\.7
-lockV2 yt-dlp/yt-dlp 2023.11.15 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
-lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.14.232721 (?!win_x86_exe).+ Python 3\.7
-lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.14.232721 win_x86_exe .+ Windows-(?:Vista|2008Server)
-lockV2 yt-dlp/yt-dlp-master-builds 2023.11.14.214307 (?!win_x86_exe).+ Python 3\.7
-lockV2 yt-dlp/yt-dlp-master-builds 2023.11.14.214307 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
 EOF
 - name: Sign checksum files
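The `lock`/`lockV2` lines written to `_update_spec` pair a release tag with a regular expression that is matched against a build's variant/identifier string; a matching build must not self-update past the locked tag (the behaviour exercised by `test/test_update.py` further down in this diff). As a rough illustration only — the function below is a hypothetical sketch, not yt-dlp's actual updater API, and version comparison is simplified to plain string comparison:

    import re

    # Illustrative sketch: a build whose identifier matches a "lock" pattern
    # may not update past the locked tag. Names here are hypothetical.
    def resolve_update_target(spec_lines, identifier, requested_tag):
        for line in spec_lines:
            parts = line.split(maxsplit=2)
            if len(parts) != 3 or parts[0] != 'lock':
                continue  # the sketch ignores comments, lockV2 entries and malformed lines
            locked_tag, pattern = parts[1], parts[2]
            if re.match(pattern, identifier) and requested_tag > locked_tag:
                return locked_tag
        return requested_tag

    spec = [
        '# This file is used for regulating self-update',
        r'lock 2022.08.18.36 .+ Python 3\.6',
        r'lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7',
    ]
    print(resolve_update_target(spec, 'zip stable Python 3.7.1', '2023.12.31'))   # -> 2023.11.16
    print(resolve_update_target(spec, 'zip stable Python 3.12.0', '2023.12.31'))  # -> 2023.12.31

The real updater additionally treats exact `--update-to` requests differently (a request pinned past the lock yields nothing rather than a downgrade), which is what the new `exact=True` test cases in test/test_update.py below check.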

View File

@@ -216,8 +216,8 @@ jobs:
 if: |
 !inputs.prerelease && env.target_repo == github.repository
 run: |
-git config --global user.name github-actions
-git config --global user.email github-actions@github.com
+git config --global user.name "github-actions[bot]"
+git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
 git add -u
 git commit -m "Release ${{ env.version }}" \
 -m "Created by: ${{ github.event.sender.login }}" -m ":ci skip all :ci run dl"

View File

@@ -524,3 +524,7 @@ peci1
 saintliao
 shubhexists
 SirElderling
+almx
+elivinsky
+starius
+TravisDupes

View File

@@ -4,6 +4,23 @@
 # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master
 -->
+### 2023.11.16
+#### Extractor changes
+- **abc.net.au**: iview, showseries: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/15cb3528cbda7b6198f49a6b5953c226d701696b) ([#8586](https://github.com/yt-dlp/yt-dlp/issues/8586)) by [bashonly](https://github.com/bashonly)
+- **beatbump**: [Update `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/21dc069bea2d4d99345dd969e098f4535c751d45) ([#8576](https://github.com/yt-dlp/yt-dlp/issues/8576)) by [seproDev](https://github.com/seproDev)
+- **dailymotion**: [Improve `_VALID_URL`](https://github.com/yt-dlp/yt-dlp/commit/a489f071508ec5caf5f32052d142afe86c28df7a) ([#7692](https://github.com/yt-dlp/yt-dlp/issues/7692)) by [TravisDupes](https://github.com/TravisDupes)
+- **drtv**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/0783fd558ed0d3a8bc754beb75a406256f8b97b2) ([#8484](https://github.com/yt-dlp/yt-dlp/issues/8484)) by [almx](https://github.com/almx), [seproDev](https://github.com/seproDev)
+- **eltrecetv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/dcfad52812aa8ce007cefbfbe63f58b49f6b1046) ([#8216](https://github.com/yt-dlp/yt-dlp/issues/8216)) by [elivinsky](https://github.com/elivinsky)
+- **jiosaavn**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/b530118e7f48232cacf8050d79a6b20bdfcf5468) ([#8307](https://github.com/yt-dlp/yt-dlp/issues/8307)) by [awalgarg](https://github.com/awalgarg)
+- **njpwworld**: [Remove](https://github.com/yt-dlp/yt-dlp/commit/e569c2d1f4b665795a2b64f0aaf7f76930664233) ([#8570](https://github.com/yt-dlp/yt-dlp/issues/8570)) by [aarubui](https://github.com/aarubui)
+- **tv5mondeplus**: [Extract subtitles](https://github.com/yt-dlp/yt-dlp/commit/0f634dba3afdc429ece8839b02f6d56c27b7973a) ([#4209](https://github.com/yt-dlp/yt-dlp/issues/4209)) by [FrankZ85](https://github.com/FrankZ85)
+- **twitcasting**: [Fix livestream detection](https://github.com/yt-dlp/yt-dlp/commit/2325d03aa7bb80f56ba52cd6992258e44727b424) ([#8574](https://github.com/yt-dlp/yt-dlp/issues/8574)) by [JC-Chung](https://github.com/JC-Chung)
+- **zenyandex**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/5efe68b73cbf6e907c2e6a3aa338664385084184) ([#8454](https://github.com/yt-dlp/yt-dlp/issues/8454)) by [starius](https://github.com/starius)
+#### Misc. changes
+- **build**: [Make `secretstorage` an optional dependency](https://github.com/yt-dlp/yt-dlp/commit/24f827875c6ba513f12ed09a3aef2bbed223760d) ([#8585](https://github.com/yt-dlp/yt-dlp/issues/8585)) by [bashonly](https://github.com/bashonly)
 ### 2023.11.14
 #### Important changes

View File

@@ -6,4 +6,3 @@ brotlicffi; implementation_name!='cpython'
 certifi
 requests>=2.31.0,<3
 urllib3>=1.26.17,<3
-secretstorage; sys_platform=='linux' and (implementation_name!='pypy' or implementation_version>='7.3.10')

View File

@@ -26,7 +26,7 @@ markers =
 [tox:tox]
 skipsdist = true
-envlist = py{36,37,38,39,310,311},pypy{36,37,38,39}
+envlist = py{38,39,310,311,312},pypy{38,39,310}
 skip_missing_interpreters = true
 [testenv] # tox
@@ -39,7 +39,7 @@ setenv =
 [isort]
-py_version = 37
+py_version = 38
 multi_line_output = VERTICAL_HANGING_INDENT
 line_length = 80
 reverse_relative = true

View File

@@ -414,6 +414,7 @@
 - **EllenTubeVideo**
 - **Elonet**
 - **ElPais**: El País
+- **ElTreceTV**: El Trece TV (Argentina)
 - **Embedly**
 - **EMPFlix**
 - **Engadget**
@@ -654,6 +655,8 @@
 - **Jamendo**
 - **JamendoAlbum**
 - **JeuxVideo**
+- **JioSaavnAlbum**
+- **JioSaavnSong**
 - **Joj**
 - **Jove**
 - **JStream**
@@ -976,7 +979,6 @@
 - **Nitter**
 - **njoy**: N-JOY
 - **njoy:embed**
-- **NJPWWorld**: [*njpwworld*](## "netrc machine") 新日本プロレスワールド
 - **NobelPrize**
 - **NoicePodcast**
 - **NonkTube**

View File

@@ -68,25 +68,34 @@ TEST_API_DATA = {
 },
 }
-TEST_LOCKFILE_V1 = '''# This file is used for regulating self-update
-lock 2022.08.18.36 .+ Python 3.6
-lock 2023.11.13 .+ Python 3.7
-'''
-TEST_LOCKFILE_V2 = '''# This file is used for regulating self-update
-lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3.6
-lockV2 yt-dlp/yt-dlp 2023.11.13 .+ Python 3.7
-'''
-TEST_LOCKFILE_V1_V2 = '''# This file is used for regulating self-update
-lock 2022.08.18.36 .+ Python 3.6
-lock 2023.11.13 .+ Python 3.7
-lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3.6
-lockV2 yt-dlp/yt-dlp 2023.11.13 .+ Python 3.7
+TEST_LOCKFILE_COMMENT = '# This file is used for regulating self-update'
+TEST_LOCKFILE_V1 = r'''%s
+lock 2022.08.18.36 .+ Python 3\.6
+lock 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lock 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
+''' % TEST_LOCKFILE_COMMENT
+TEST_LOCKFILE_V2_TMPL = r'''%s
+lockV2 yt-dlp/yt-dlp 2022.08.18.36 .+ Python 3\.6
+lockV2 yt-dlp/yt-dlp 2023.11.16 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp 2023.11.16 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-nightly-builds 2023.11.15.232826 win_x86_exe .+ Windows-(?:Vista|2008Server)
+lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 (?!win_x86_exe).+ Python 3\.7
+lockV2 yt-dlp/yt-dlp-master-builds 2023.11.15.232812 win_x86_exe .+ Windows-(?:Vista|2008Server)
+'''
+TEST_LOCKFILE_V2 = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_COMMENT
+TEST_LOCKFILE_ACTUAL = TEST_LOCKFILE_V2_TMPL % TEST_LOCKFILE_V1.rstrip('\n')
+TEST_LOCKFILE_FORK = r'''%s# Test if a fork blocks updates to non-numeric tags
 lockV2 fork/yt-dlp pr0000 .+ Python 3.6
-lockV2 fork/yt-dlp pr1234 .+ Python 3.7
+lockV2 fork/yt-dlp pr1234 (?!win_x86_exe).+ Python 3\.7
+lockV2 fork/yt-dlp pr1234 win_x86_exe .+ Windows-(?:Vista|2008Server)
 lockV2 fork/yt-dlp pr9999 .+ Python 3.11
-'''
+''' % TEST_LOCKFILE_ACTUAL
 class FakeUpdater(Updater):
@@ -97,7 +106,7 @@ class FakeUpdater(Updater):
 _origin = 'yt-dlp/yt-dlp'
 def _download_update_spec(self, *args, **kwargs):
-return TEST_LOCKFILE_V1_V2
+return TEST_LOCKFILE_ACTUAL
 def _call_api(self, tag):
 tag = f'tags/{tag}' if tag != 'latest' else tag
@@ -112,7 +121,7 @@ class TestUpdate(unittest.TestCase):
 def test_update_spec(self):
 ydl = FakeYDL()
-updater = FakeUpdater(ydl, 'stable@latest')
+updater = FakeUpdater(ydl, 'stable')
 def test(lockfile, identifier, input_tag, expect_tag, exact=False, repo='yt-dlp/yt-dlp'):
 updater._identifier = identifier
@@ -124,35 +133,46 @@ class TestUpdate(unittest.TestCase):
 f'{identifier!r} requesting {repo}@{input_tag} (exact={exact}) '
 f'returned {result!r} instead of {expect_tag!r}')
-test(TEST_LOCKFILE_V1, 'zip Python 3.11.0', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V1, 'zip stable Python 3.11.0', '2023.11.13', '2023.11.13', exact=True)
-test(TEST_LOCKFILE_V1, 'zip Python 3.6.0', '2023.11.13', '2022.08.18.36')
-test(TEST_LOCKFILE_V1, 'zip stable Python 3.6.0', '2023.11.13', None, exact=True)
-test(TEST_LOCKFILE_V1, 'zip Python 3.7.0', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V1, 'zip stable Python 3.7.1', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V1, 'zip Python 3.7.1', '2023.12.31', '2023.11.13')
-test(TEST_LOCKFILE_V1, 'zip stable Python 3.7.1', '2023.12.31', '2023.11.13')
-test(TEST_LOCKFILE_V2, 'zip Python 3.11.1', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V2, 'zip stable Python 3.11.1', '2023.12.31', '2023.12.31')
-test(TEST_LOCKFILE_V2, 'zip Python 3.6.1', '2023.11.13', '2022.08.18.36')
-test(TEST_LOCKFILE_V2, 'zip stable Python 3.7.2', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V2, 'zip Python 3.7.2', '2023.12.31', '2023.11.13')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.2', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.11.2', '2023.12.31', '2023.12.31')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.2', '2023.11.13', '2022.08.18.36')
-test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.7.3', '2023.11.13', '2023.11.13')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.3', '2023.12.31', '2023.11.13')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.7.4', 'pr0000', 'pr0000', repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.6.4', 'pr0000', None, repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.4', 'pr1234', None, repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.8.1', 'pr1234', 'pr1234', repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.7.5', 'pr1234', None, repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.3', 'pr9999', None, repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip stable Python 3.12.0', 'pr9999', 'pr9999', repo='fork/yt-dlp')
-test(TEST_LOCKFILE_V1_V2, 'zip Python 3.11.4', 'pr9999', None, repo='fork/yt-dlp')
+for lockfile in (TEST_LOCKFILE_V1, TEST_LOCKFILE_V2, TEST_LOCKFILE_ACTUAL, TEST_LOCKFILE_FORK):
+# Normal operation
+test(lockfile, 'zip Python 3.12.0', '2023.12.31', '2023.12.31')
+test(lockfile, 'zip stable Python 3.12.0', '2023.12.31', '2023.12.31', exact=True)
+# Python 3.6 --update should update only to its lock
+test(lockfile, 'zip Python 3.6.0', '2023.11.16', '2022.08.18.36')
+# --update-to an exact version later than the lock should return None
+test(lockfile, 'zip stable Python 3.6.0', '2023.11.16', None, exact=True)
+# Python 3.7 should be able to update to its lock
+test(lockfile, 'zip Python 3.7.0', '2023.11.16', '2023.11.16')
+test(lockfile, 'zip stable Python 3.7.1', '2023.11.16', '2023.11.16', exact=True)
+# Non-win_x86_exe builds on py3.7 must be locked
+test(lockfile, 'zip Python 3.7.1', '2023.12.31', '2023.11.16')
+test(lockfile, 'zip stable Python 3.7.1', '2023.12.31', None, exact=True)
+test(  # Windows Vista w/ win_x86_exe must be locked
+lockfile, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2',
+'2023.12.31', '2023.11.16')
+test(  # Windows 2008Server w/ win_x86_exe must be locked
+lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-2008Server',
+'2023.12.31', None, exact=True)
+test(  # Windows 7 w/ win_x86_exe py3.7 build should be able to update beyond lock
+lockfile, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1',
+'2023.12.31', '2023.12.31')
+test(  # Windows 8.1 w/ '2008Server' in platform string should be able to update beyond lock
+lockfile, 'win_x86_exe Python 3.7.9 (CPython x86 32bit) - Windows-post2008Server-6.2.9200',
+'2023.12.31', '2023.12.31', exact=True)
+# Forks can block updates to non-numeric tags rather than lock
+test(TEST_LOCKFILE_FORK, 'zip Python 3.6.3', 'pr0000', None, repo='fork/yt-dlp')
+test(TEST_LOCKFILE_FORK, 'zip stable Python 3.7.4', 'pr0000', 'pr0000', repo='fork/yt-dlp')
+test(TEST_LOCKFILE_FORK, 'zip stable Python 3.7.4', 'pr1234', None, repo='fork/yt-dlp')
+test(TEST_LOCKFILE_FORK, 'zip Python 3.8.1', 'pr1234', 'pr1234', repo='fork/yt-dlp', exact=True)
+test(
+TEST_LOCKFILE_FORK, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-Vista-6.0.6003-SP2',
+'pr1234', None, repo='fork/yt-dlp')
+test(
+TEST_LOCKFILE_FORK, 'win_x86_exe stable Python 3.7.9 (CPython x86 32bit) - Windows-7-6.1.7601-SP1',
+'2023.12.31', '2023.12.31', repo='fork/yt-dlp')
+test(TEST_LOCKFILE_FORK, 'zip Python 3.11.2', 'pr9999', None, repo='fork/yt-dlp', exact=True)
+test(TEST_LOCKFILE_FORK, 'zip stable Python 3.12.0', 'pr9999', 'pr9999', repo='fork/yt-dlp')
 def test_query_update(self):
 ydl = FakeYDL()

View File

@@ -565,6 +565,7 @@ from .ellentube import (
 )
 from .elonet import ElonetIE
 from .elpais import ElPaisIE
+from .eltrecetv import ElTreceTVIE
 from .embedly import EmbedlyIE
 from .engadget import EngadgetIE
 from .epicon import (
@@ -893,6 +894,10 @@ from .japandiet import (
 SangiinIE,
 )
 from .jeuxvideo import JeuxVideoIE
+from .jiosaavn import (
+JioSaavnSongIE,
+JioSaavnAlbumIE,
+)
 from .jove import JoveIE
 from .joj import JojIE
 from .jstream import JStreamIE
@@ -1320,7 +1325,6 @@ from .ninegag import NineGagIE
 from .ninenow import NineNowIE
 from .nintendo import NintendoIE
 from .nitter import NitterIE
-from .njpwworld import NJPWWorldIE
 from .nobelprize import NobelPrizeIE
 from .noice import NoicePodcastIE
 from .nonktube import NonkTubeIE

View File

@@ -16,6 +16,7 @@ from ..utils import (
 try_get,
 unescapeHTML,
 update_url_query,
+url_or_none,
 )
@@ -379,6 +380,18 @@ class ABCIViewShowSeriesIE(InfoExtractor):
 'noplaylist': True,
 'skip_download': 'm3u8',
 },
+}, {
+# 'videoEpisodes' is a dict with `items` key
+'url': 'https://iview.abc.net.au/show/7-30-mark-humphries-satire',
+'info_dict': {
+'id': '178458-0',
+'title': 'Episodes',
+'description': 'Satirist Mark Humphries brings his unique perspective on current political events for 7.30.',
+'series': '7.30 Mark Humphries Satire',
+'season': 'Episodes',
+'thumbnail': r're:^https?://cdn\.iview\.abc\.net\.au/thumbs/.*\.jpg$'
+},
+'playlist_count': 15,
 }]
 def _real_extract(self, url):
@@ -398,12 +411,14 @@ class ABCIViewShowSeriesIE(InfoExtractor):
 series = video_data['selectedSeries']
 return {
 '_type': 'playlist',
-'entries': [self.url_result(episode['shareUrl'])
-for episode in series['_embedded']['videoEpisodes']],
+'entries': [self.url_result(episode_url, ABCIViewIE)
+for episode_url in traverse_obj(series, (
+'_embedded', 'videoEpisodes', (None, 'items'), ..., 'shareUrl', {url_or_none}))],
 'id': series.get('id'),
 'title': dict_get(series, ('title', 'displaySubtitle')),
 'description': series.get('description'),
 'series': dict_get(series, ('showTitle', 'displayTitle')),
 'season': dict_get(series, ('title', 'displaySubtitle')),
-'thumbnail': series.get('thumbnail'),
+'thumbnail': traverse_obj(
+series, 'thumbnail', ('images', lambda _, v: v['name'] == 'seriesThumbnail', 'url'), get_all=False),
 }

View File

@@ -3,14 +3,13 @@ from .youtube import YoutubeIE, YoutubeTabIE
 class BeatBumpVideoIE(InfoExtractor):
-_VALID_URL = r'https://beatbump\.ml/listen\?id=(?P<id>[\w-]+)'
+_VALID_URL = r'https://beatbump\.(?:ml|io)/listen\?id=(?P<id>[\w-]+)'
 _TESTS = [{
 'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
 'md5': '5ff3fff41d3935b9810a9731e485fe66',
 'info_dict': {
 'id': 'MgNrAu2pzNs',
 'ext': 'mp4',
-'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
 'artist': 'Stephen',
 'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
 'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
@@ -22,10 +21,9 @@ class BeatBumpVideoIE(InfoExtractor):
 'alt_title': 'Voyeur Girl',
 'view_count': int,
 'track': 'Voyeur Girl',
-'uploader': 'Stephen - Topic',
+'uploader': 'Stephen',
 'title': 'Voyeur Girl',
 'channel_follower_count': int,
-'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
 'age_limit': 0,
 'availability': 'public',
 'live_status': 'not_live',
@@ -36,7 +34,12 @@ class BeatBumpVideoIE(InfoExtractor):
 'tags': 'count:11',
 'creator': 'Stephen',
 'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
-}
+'channel_is_verified': True,
+'heatmap': 'count:100',
+},
+}, {
+'url': 'https://beatbump.io/listen?id=LDGZAprNGWo',
+'only_matching': True,
 }]
 def _real_extract(self, url):
@@ -45,7 +48,7 @@
 class BeatBumpPlaylistIE(InfoExtractor):
-_VALID_URL = r'https://beatbump\.ml/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
+_VALID_URL = r'https://beatbump\.(?:ml|io)/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
 _TESTS = [{
 'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
 'playlist_count': 50,
@@ -56,25 +59,28 @@ class BeatBumpPlaylistIE(InfoExtractor):
 'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
 'description': '',
 'tags': [],
-'modified_date': '20221223',
-}
+'modified_date': '20231110',
+},
+'expected_warnings': ['YouTube Music is not directly supported'],
 }, {
 'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
 'playlist_mincount': 1,
 'params': {'flatplaylist': True},
 'info_dict': {
 'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
-'uploader_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
+'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
 'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
-'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+'uploader_id': '@NoCopyrightSounds',
 'channel_follower_count': int,
-'title': 'NoCopyrightSounds - Videos',
+'title': 'NoCopyrightSounds',
 'uploader': 'NoCopyrightSounds',
 'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
 'channel': 'NoCopyrightSounds',
-'tags': 'count:12',
+'tags': 'count:65',
 'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+'channel_is_verified': True,
 },
+'expected_warnings': ['YouTube Music is not directly supported'],
 }, {
 'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
 'playlist_mincount': 1,
@@ -84,16 +90,20 @@ class BeatBumpPlaylistIE(InfoExtractor):
 'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
 'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
 'view_count': int,
-'channel_url': 'https://www.youtube.com/@NoCopyrightSounds',
-'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
+'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
+'uploader_id': '@NoCopyrightSounds',
 'title': 'NCS : All Releases 💿',
 'uploader': 'NoCopyrightSounds',
 'availability': 'public',
 'channel': 'NoCopyrightSounds',
 'tags': [],
-'modified_date': '20221225',
+'modified_date': '20231112',
 'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
-}
+},
+'expected_warnings': ['YouTube Music is not directly supported'],
+}, {
+'url': 'https://beatbump.io/playlist/VLPLFCHGavqRG-q_2ZhmgU2XB2--ZY6irT1c',
+'only_matching': True,
 }]
def _real_extract(self, url): def _real_extract(self, url):

View File

@@ -93,7 +93,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
 _VALID_URL = r'''(?ix)
 https?://
 (?:
-(?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player\.html\?)?video|swf)|
+(?:(?:www|touch|geo)\.)?dailymotion\.[a-z]{2,3}/(?:(?:(?:(?:embed|swf|\#)/)|player(?:/\w+)?\.html\?)?video|swf)|
 (?:www\.)?lequipe\.fr/video
 )
 [/=](?P<id>[^/?_&]+)(?:.+?\bplaylist=(?P<playlist_id>x[0-9a-z]+))?
@@ -107,13 +107,17 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
 'id': 'x5kesuj',
 'ext': 'mp4',
 'title': 'Office Christmas Party Review Jason Bateman, Olivia Munn, T.J. Miller',
 'description': 'Office Christmas Party Review - Jason Bateman, Olivia Munn, T.J. Miller',
 'duration': 187,
 'timestamp': 1493651285,
 'upload_date': '20170501',
 'uploader': 'Deadline',
 'uploader_id': 'x1xm8ri',
 'age_limit': 0,
+'view_count': int,
+'like_count': int,
+'tags': ['hollywood', 'celeb', 'celebrity', 'movies', 'red carpet'],
+'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/K456B1aXqIx58LKWQ/x1080',
 },
 }, {
 'url': 'https://geo.dailymotion.com/player.html?video=x89eyek&mute=true',
@@ -132,7 +136,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
 'view_count': int,
 'like_count': int,
 'tags': ['en_quete_d_esprit'],
-'thumbnail': 'https://s2.dmcdn.net/v/Tncwi1YGKdvFbDuDY/x1080',
+'thumbnail': r're:https://(?:s[12]\.)dmcdn\.net/v/Tncwi1YNg_RUl7ueu/x1080',
 }
 }, {
 'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
@@ -201,6 +205,12 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
 }, {
 'url': 'https://www.dailymotion.com/video/x3z49k?playlist=xv4bw',
 'only_matching': True,
+}, {
+'url': 'https://geo.dailymotion.com/player/x86gw.html?video=k46oCapRs4iikoz9DWy',
+'only_matching': True,
+}, {
+'url': 'https://geo.dailymotion.com/player/xakln.html?video=x8mjju4&customConfig%5BcustomParams%5D=%2Ffr-fr%2Ftennis%2Fwimbledon-mens-singles%2Farticles-video',
+'only_matching': True,
 }]
 _GEO_BYPASS = False
 _COMMON_MEDIA_FIELDS = '''description

View File

@@ -1,21 +1,17 @@
-import binascii
-import hashlib
-import re
+import json
+import uuid
 from .common import InfoExtractor
-from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_urllib_parse_unquote
 from ..utils import (
 ExtractorError,
-float_or_none,
 int_or_none,
 mimetype2ext,
-str_or_none,
-traverse_obj,
-unified_timestamp,
+parse_iso8601,
+try_call,
 update_url_query,
 url_or_none,
 )
+from ..utils.traversal import traverse_obj
 SERIES_API = 'https://production-cdn.dr-massive.com/api/page?device=web_browser&item_detail_expand=all&lang=da&max_list_prefetch=3&path=%s'
@@ -24,7 +20,7 @@ class DRTVIE(InfoExtractor):
 _VALID_URL = r'''(?x)
 https?://
 (?:
-(?:www\.)?dr\.dk/(?:tv/se|nyheder|(?P<radio>radio|lyd)(?:/ondemand)?)/(?:[^/]+/)*|
+(?:www\.)?dr\.dk/tv/se(?:/ondemand)?/(?:[^/?#]+/)*|
 (?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/(?:se|episode|program)/
 )
 (?P<id>[\da-z_-]+)
@@ -53,22 +49,6 @@
 },
 'expected_warnings': ['Unable to download f4m manifest'],
 'skip': 'this video has been removed',
-}, {
-# embed
-'url': 'https://www.dr.dk/nyheder/indland/live-christianias-rydning-af-pusher-street-er-i-gang',
-'info_dict': {
-'id': 'urn:dr:mu:programcard:57c926176187a50a9c6e83c6',
-'ext': 'mp4',
-'title': 'christiania pusher street ryddes drdkrjpo',
-'description': 'md5:2a71898b15057e9b97334f61d04e6eb5',
-'timestamp': 1472800279,
-'upload_date': '20160902',
-'duration': 131.4,
-},
-'params': {
-'skip_download': True,
-},
-'expected_warnings': ['Unable to download f4m manifest'],
 }, {
 # with SignLanguage formats
 'url': 'https://www.dr.dk/tv/se/historien-om-danmark/-/historien-om-danmark-stenalder',
@ -87,33 +67,54 @@ class DRTVIE(InfoExtractor):
'season': 'Historien om Danmark', 'season': 'Historien om Danmark',
'series': 'Historien om Danmark', 'series': 'Historien om Danmark',
}, },
'params': { 'skip': 'this video has been removed',
'skip_download': True,
},
}, { }, {
'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/p4-nyheder-2019-06-26-17-30-9', 'url': 'https://www.dr.dk/drtv/se/frank-and-kastaniegaarden_71769',
'only_matching': True,
}, {
'url': 'https://www.dr.dk/drtv/se/bonderoeven_71769',
'info_dict': { 'info_dict': {
'id': '00951930010', 'id': '00951930010',
'ext': 'mp4', 'ext': 'mp4',
'title': 'Bonderøven 2019 (1:8)', 'title': 'Frank & Kastaniegaarden',
'description': 'md5:b6dcfe9b6f0bea6703e9a0092739a5bd', 'description': 'md5:974e1780934cf3275ef10280204bccb0',
'timestamp': 1654856100, 'release_timestamp': 1546545600,
'upload_date': '20220610', 'release_date': '20190103',
'duration': 2576.6, 'duration': 2576,
'season': 'Bonderøven 2019', 'season': 'Frank & Kastaniegaarden',
'season_id': 'urn:dr:mu:bundle:5c201667a11fa01ca4528ce5', 'season_id': '67125',
'release_year': 2019, 'release_year': 2019,
'season_number': 2019, 'season_number': 2019,
'series': 'Frank & Kastaniegaarden', 'series': 'Frank & Kastaniegaarden',
'episode_number': 1, 'episode_number': 1,
'episode': 'Episode 1', 'episode': 'Frank & Kastaniegaarden',
'thumbnail': r're:https?://.+',
}, },
'params': { 'params': {
'skip_download': True, 'skip_download': True,
}, },
}, {
# Foreign and Regular subtitle track
'url': 'https://www.dr.dk/drtv/se/spise-med-price_-pasta-selv_397445',
'info_dict': {
'id': '00212301010',
'ext': 'mp4',
'episode_number': 1,
'title': 'Spise med Price: Pasta Selv',
'alt_title': '1. Pasta Selv',
'release_date': '20230807',
'description': 'md5:2da9060524fed707810d71080b3d0cd8',
'duration': 1750,
'season': 'Spise med Price',
'release_timestamp': 1691438400,
'season_id': '397440',
'episode': 'Spise med Price: Pasta Selv',
'thumbnail': r're:https?://.+',
'season_number': 15,
'series': 'Spise med Price',
'release_year': 2022,
'subtitles': 'mincount:2',
},
'params': {
'skip_download': 'm3u8',
},
}, { }, {
'url': 'https://www.dr.dk/drtv/episode/bonderoeven_71769', 'url': 'https://www.dr.dk/drtv/episode/bonderoeven_71769',
'only_matching': True, 'only_matching': True,
@ -123,226 +124,127 @@ class DRTVIE(InfoExtractor):
}, { }, {
'url': 'https://www.dr.dk/drtv/program/jagten_220924', 'url': 'https://www.dr.dk/drtv/program/jagten_220924',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://www.dr.dk/lyd/p4aarhus/regionale-nyheder-ar4/regionale-nyheder-2022-05-05-12-30-3',
'info_dict': {
'id': 'urn:dr:mu:programcard:6265cb2571401424d0360113',
'title': "Regionale nyheder",
'ext': 'mp4',
'duration': 120.043,
'series': 'P4 Østjylland regionale nyheder',
'timestamp': 1651746600,
'season': 'Regionale nyheder',
'release_year': 0,
'season_id': 'urn:dr:mu:bundle:61c26889539f0201586b73c5',
'description': '',
'upload_date': '20220505',
},
'params': {
'skip_download': True,
},
'skip': 'this video has been removed',
}, {
'url': 'https://www.dr.dk/lyd/p4kbh/regionale-nyheder-kh4/regionale-nyheder-2023-03-14-10-30-9',
'info_dict': {
'ext': 'mp4',
'id': '14802310112',
'timestamp': 1678786200,
'duration': 120.043,
'season_id': 'urn:dr:mu:bundle:63a4f7c87140143504b6710f',
'series': 'P4 København regionale nyheder',
'upload_date': '20230314',
'release_year': 0,
'description': 'Hør seneste regionale nyheder fra P4 København.',
'season': 'Regionale nyheder',
'title': 'Regionale nyheder',
},
}] }]
SUBTITLE_LANGS = {
'DanishLanguageSubtitles': 'da',
'ForeignLanguageSubtitles': 'da_foreign',
'CombinedLanguageSubtitles': 'da_combined',
}
_TOKEN = None
def _real_initialize(self):
if self._TOKEN:
return
token_response = self._download_json(
'https://production.dr-massive.com/api/authorization/anonymous-sso', None,
note='Downloading anonymous token', headers={
'content-type': 'application/json',
}, query={
'device': 'web_browser',
'ff': 'idp,ldp,rpt',
'lang': 'da',
'supportFallbackToken': 'true',
}, data=json.dumps({
'deviceId': str(uuid.uuid4()),
'scopes': ['Catalog'],
'optout': True,
}).encode())
self._TOKEN = traverse_obj(
token_response, (lambda _, x: x['type'] == 'UserAccount', 'value', {str}), get_all=False)
if not self._TOKEN:
raise ExtractorError('Unable to get anonymous token')
def _real_extract(self, url): def _real_extract(self, url):
raw_video_id, is_radio_url = self._match_valid_url(url).group('id', 'radio') url_slug = self._match_id(url)
webpage = self._download_webpage(url, url_slug)
webpage = self._download_webpage(url, raw_video_id) json_data = self._search_json(
r'window\.__data\s*=', webpage, 'data', url_slug, fatal=False) or {}
if '>Programmet er ikke længere tilgængeligt' in webpage: item = traverse_obj(
raise ExtractorError( json_data, ('cache', 'page', ..., (None, ('entries', 0)), 'item', {dict}), get_all=False)
'Video %s is not available' % raw_video_id, expected=True) if item:
item_id = item.get('id')
video_id = self._search_regex(
(r'data-(?:material-identifier|episode-slug)="([^"]+)"',
r'data-resource="[^>"]+mu/programcard/expanded/([^"]+)"'),
webpage, 'video id', default=None)
if not video_id:
video_id = self._search_regex(
r'(urn(?:%3A|:)dr(?:%3A|:)mu(?:%3A|:)programcard(?:%3A|:)[\da-f]+)',
webpage, 'urn', default=None)
if video_id:
video_id = compat_urllib_parse_unquote(video_id)
_PROGRAMCARD_BASE = 'https://www.dr.dk/mu-online/api/1.4/programcard'
query = {'expanded': 'true'}
if video_id:
programcard_url = '%s/%s' % (_PROGRAMCARD_BASE, video_id)
else: else:
programcard_url = _PROGRAMCARD_BASE item_id = url_slug.rsplit('_', 1)[-1]
if is_radio_url: item = self._download_json(
video_id = self._search_nextjs_data( f'https://production-cdn.dr-massive.com/api/items/{item_id}', item_id,
webpage, raw_video_id)['props']['pageProps']['episode']['productionNumber'] note='Attempting to download backup item data', query={
else: 'device': 'web_browser',
json_data = self._search_json( 'expand': 'all',
r'window\.__data\s*=', webpage, 'data', raw_video_id) 'ff': 'idp,ldp,rpt',
video_id = traverse_obj(json_data, ( 'geoLocation': 'dk',
'cache', 'page', ..., (None, ('entries', 0)), 'item', 'customId', 'isDeviceAbroad': 'false',
{lambda x: x.split(':')[-1]}), get_all=False) 'lang': 'da',
if not video_id: 'segments': 'drtv,optedout',
raise ExtractorError('Unable to extract video id') 'sub': 'Anonymous',
query['productionnumber'] = video_id })
data = self._download_json( video_id = try_call(lambda: item['customId'].rsplit(':', 1)[-1]) or item_id
programcard_url, video_id, 'Downloading video JSON', query=query) stream_data = self._download_json(
f'https://production.dr-massive.com/api/account/items/{item_id}/videos', video_id,
supplementary_data = {} note='Downloading stream data', query={
if re.search(r'_\d+$', raw_video_id): 'delivery': 'stream',
supplementary_data = self._download_json( 'device': 'web_browser',
SERIES_API % f'/episode/{raw_video_id}', raw_video_id, fatal=False) or {} 'ff': 'idp,ldp,rpt',
'lang': 'da',
title = str_or_none(data.get('Title')) or re.sub( 'resolution': 'HD-1080',
r'\s*\|\s*(?:TV\s*\|\s*DR|DRTV)$', '', 'sub': 'Anonymous',
self._og_search_title(webpage)) }, headers={'authorization': f'Bearer {self._TOKEN}'})
description = self._og_search_description(
webpage, default=None) or data.get('Description')
timestamp = unified_timestamp(
data.get('PrimaryBroadcastStartTime') or data.get('SortDateTime'))
thumbnail = None
duration = None
restricted_to_denmark = False
formats = [] formats = []
subtitles = {} subtitles = {}
for stream in traverse_obj(stream_data, (lambda _, x: x['url'])):
format_id = stream.get('format', 'na')
access_service = stream.get('accessService')
preference = None
subtitle_suffix = ''
if access_service in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
preference = -1
format_id += f'-{access_service}'
subtitle_suffix = f'-{access_service}'
elif access_service == 'StandardVideo':
preference = 1
fmts, subs = self._extract_m3u8_formats_and_subtitles(
stream.get('url'), video_id, preference=preference, m3u8_id=format_id, fatal=False)
formats.extend(fmts)
assets = [] api_subtitles = traverse_obj(stream, ('subtitles', lambda _, v: url_or_none(v['link']), {dict}))
primary_asset = data.get('PrimaryAsset') if not api_subtitles:
if isinstance(primary_asset, dict): self._merge_subtitles(subs, target=subtitles)
assets.append(primary_asset)
secondary_assets = data.get('SecondaryAssets')
if isinstance(secondary_assets, list):
for secondary_asset in secondary_assets:
if isinstance(secondary_asset, dict):
assets.append(secondary_asset)
def hex_to_bytes(hex): for sub_track in api_subtitles:
return binascii.a2b_hex(hex.encode('ascii')) lang = sub_track.get('language') or 'da'
subtitles.setdefault(self.SUBTITLE_LANGS.get(lang, lang) + subtitle_suffix, []).append({
'url': sub_track['link'],
'ext': mimetype2ext(sub_track.get('format')) or 'vtt'
})
def decrypt_uri(e): if not formats and traverse_obj(item, ('season', 'customFields', 'IsGeoRestricted')):
n = int(e[2:10], 16) self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
a = e[10 + n:]
data = hex_to_bytes(e[10:10 + n])
key = hashlib.sha256(('%s:sRBzYNXBzkKgnjj8pGtkACch' % a).encode('utf-8')).digest()
iv = hex_to_bytes(a)
decrypted = unpad_pkcs7(aes_cbc_decrypt_bytes(data, key, iv))
return decrypted.decode('utf-8').split('?')[0]
for asset in assets:
kind = asset.get('Kind')
if kind == 'Image':
thumbnail = url_or_none(asset.get('Uri'))
elif kind in ('VideoResource', 'AudioResource'):
duration = float_or_none(asset.get('DurationInMilliseconds'), 1000)
restricted_to_denmark = asset.get('RestrictedToDenmark')
asset_target = asset.get('Target')
for link in asset.get('Links', []):
uri = link.get('Uri')
if not uri:
encrypted_uri = link.get('EncryptedUri')
if not encrypted_uri:
continue
try:
uri = decrypt_uri(encrypted_uri)
except Exception:
self.report_warning(
'Unable to decrypt EncryptedUri', video_id)
continue
uri = url_or_none(uri)
if not uri:
continue
target = link.get('Target')
format_id = target or ''
if asset_target in ('SpokenSubtitles', 'SignLanguage', 'VisuallyInterpreted'):
preference = -1
format_id += '-%s' % asset_target
elif asset_target == 'Default':
preference = 1
else:
preference = None
if target == 'HDS':
f4m_formats = self._extract_f4m_formats(
uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43',
video_id, preference, f4m_id=format_id, fatal=False)
if kind == 'AudioResource':
for f in f4m_formats:
f['vcodec'] = 'none'
formats.extend(f4m_formats)
elif target == 'HLS':
fmts, subs = self._extract_m3u8_formats_and_subtitles(
uri, video_id, 'mp4', entry_protocol='m3u8_native',
quality=preference, m3u8_id=format_id, fatal=False)
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
else:
bitrate = link.get('Bitrate')
if bitrate:
format_id += '-%s' % bitrate
formats.append({
'url': uri,
'format_id': format_id,
'tbr': int_or_none(bitrate),
'ext': link.get('FileFormat'),
'vcodec': 'none' if kind == 'AudioResource' else None,
'quality': preference,
})
subtitles_list = asset.get('SubtitlesList') or asset.get('Subtitleslist')
if isinstance(subtitles_list, list):
LANGS = {
'Danish': 'da',
}
for subs in subtitles_list:
if not isinstance(subs, dict):
continue
sub_uri = url_or_none(subs.get('Uri'))
if not sub_uri:
continue
lang = subs.get('Language') or 'da'
subtitles.setdefault(LANGS.get(lang, lang), []).append({
'url': sub_uri,
'ext': mimetype2ext(subs.get('MimeType')) or 'vtt'
})
if not formats and restricted_to_denmark:
self.raise_geo_restricted(
'Unfortunately, DR is not allowed to show this program outside Denmark.',
countries=self._GEO_COUNTRIES)
return { return {
'id': video_id, 'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'timestamp': timestamp,
'duration': duration,
'formats': formats, 'formats': formats,
'subtitles': subtitles, 'subtitles': subtitles,
'series': str_or_none(data.get('SeriesTitle')), **traverse_obj(item, {
'season': str_or_none(data.get('SeasonTitle')), 'title': 'title',
'season_number': int_or_none(data.get('SeasonNumber')), 'alt_title': 'contextualTitle',
'season_id': str_or_none(data.get('SeasonUrn')), 'description': 'description',
'episode': traverse_obj(supplementary_data, ('entries', 0, 'item', 'contextualTitle')) or str_or_none(data.get('EpisodeTitle')), 'thumbnail': ('images', 'wallpaper'),
'episode_number': traverse_obj(supplementary_data, ('entries', 0, 'item', 'episodeNumber')) or int_or_none(data.get('EpisodeNumber')), 'release_timestamp': ('customFields', 'BroadcastTimeDK', {parse_iso8601}),
'release_year': int_or_none(data.get('ProductionYear')), 'duration': ('duration', {int_or_none}),
'series': ('season', 'show', 'title'),
'season': ('season', 'title'),
'season_number': ('season', 'seasonNumber', {int_or_none}),
'season_id': 'seasonId',
'episode': 'episodeName',
'episode_number': ('episodeNumber', {int_or_none}),
'release_year': ('releaseYear', {int_or_none}),
}),
} }
@@ -412,6 +314,8 @@ class DRTVSeasonIE(InfoExtractor):
 'display_id': 'frank-and-kastaniegaarden',
 'title': 'Frank & Kastaniegaarden',
 'series': 'Frank & Kastaniegaarden',
+'season_number': 2008,
+'alt_title': 'Season 2008',
 },
 'playlist_mincount': 8
 }, {
@@ -421,6 +325,8 @@ class DRTVSeasonIE(InfoExtractor):
 'display_id': 'frank-and-kastaniegaarden',
 'title': 'Frank & Kastaniegaarden',
 'series': 'Frank & Kastaniegaarden',
+'season_number': 2009,
+'alt_title': 'Season 2009',
 },
 'playlist_mincount': 19
 }]
@@ -434,6 +340,7 @@ class DRTVSeasonIE(InfoExtractor):
 'url': f'https://www.dr.dk/drtv{episode["path"]}',
 'ie_key': DRTVIE.ie_key(),
 'title': episode.get('title'),
+'alt_title': episode.get('contextualTitle'),
 'episode': episode.get('episodeName'),
 'description': episode.get('shortDescription'),
 'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
@@ -446,6 +353,7 @@ class DRTVSeasonIE(InfoExtractor):
 'id': season_id,
 'display_id': display_id,
 'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
+'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
 'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
 'entries': entries,
 'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
@@ -463,6 +371,7 @@ class DRTVSeriesIE(InfoExtractor):
 'display_id': 'frank-and-kastaniegaarden',
 'title': 'Frank & Kastaniegaarden',
 'series': 'Frank & Kastaniegaarden',
+'alt_title': '',
 },
 'playlist_mincount': 15
 }]
@@ -476,6 +385,7 @@ class DRTVSeriesIE(InfoExtractor):
 'url': f'https://www.dr.dk/drtv{season.get("path")}',
 'ie_key': DRTVSeasonIE.ie_key(),
 'title': season.get('title'),
+'alt_title': season.get('contextualTitle'),
 'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
 'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
 } for season in traverse_obj(data, ('entries', 0, 'item', 'show', 'seasons', 'items'))]
@@ -485,6 +395,7 @@ class DRTVSeriesIE(InfoExtractor):
 'id': series_id,
 'display_id': display_id,
 'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
+'alt_title': traverse_obj(data, ('entries', 0, 'item', 'contextualTitle')),
 'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
 'entries': entries
 }

View File

@ -0,0 +1,62 @@
from .common import InfoExtractor
class ElTreceTVIE(InfoExtractor):
IE_DESC = 'El Trece TV (Argentina)'
_VALID_URL = r'https?://(?:www\.)?eltrecetv\.com\.ar/[\w-]+/capitulos/temporada-\d+/(?P<id>[\w-]+)'
_TESTS = [
{
'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-061023/',
'md5': '71a66673dc63f9a5939d97bfe4b311ba',
'info_dict': {
'id': 'AHCA05102023145553329621094',
'ext': 'mp4',
'title': 'AHORA CAIGO - Programa 06/10/23',
'thumbnail': 'https://thumbs.vodgc.net/AHCA05102023145553329621094.JPG?649339',
}
},
{
'url': 'https://www.eltrecetv.com.ar/poco-correctos/capitulos/temporada-2023/programa-del-250923-invitada-dalia-gutmann/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/argentina-tierra-de-amor-y-venganza/capitulos/temporada-2023/atav-2-capitulo-121-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/ahora-caigo/capitulos/temporada-2023/programa-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/pasaplatos/capitulos/temporada-2023/pasaplatos-el-restaurante-del-250923/',
'only_matching': True,
},
{
'url': 'https://www.eltrecetv.com.ar/el-galpon/capitulos/temporada-2023/programa-del-160923-invitado-raul-lavie/',
'only_matching': True,
}
]
def _real_extract(self, url):
slug = self._match_id(url)
webpage = self._download_webpage(url, slug)
config = self._search_json(
r'Fusion.globalContent\s*=', webpage, 'content', slug)['promo_items']['basic']['embed']['config']
video_url = config['m3u8']
video_id = self._search_regex(r'/(\w+)\.m3u8', video_url, 'video id', default=slug)
formats, subtitles = self._extract_m3u8_formats_and_subtitles(video_url, video_id, 'mp4', m3u8_id='hls')
formats.extend([{
'url': f['url'][:-23],
'format_id': f['format_id'].replace('hls', 'http'),
'width': f.get('width'),
'height': f.get('height'),
} for f in formats if f['url'].endswith('/tracks-v1a1/index.m3u8') and f.get('height') != 1080])
return {
'id': video_id,
'title': config.get('title'),
'thumbnail': config.get('thumbnail'),
'formats': formats,
'subtitles': subtitles,
}

View File

@ -0,0 +1,79 @@
from .common import InfoExtractor
from ..utils import (
js_to_json,
url_or_none,
urlencode_postdata,
urljoin,
)
from ..utils.traversal import traverse_obj
class JioSaavnBaseIE(InfoExtractor):
def _extract_initial_data(self, url, audio_id):
webpage = self._download_webpage(url, audio_id)
return self._search_json(
r'window\.__INITIAL_DATA__\s*=', webpage,
'init json', audio_id, transform_source=js_to_json)
class JioSaavnSongIE(JioSaavnBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?:jiosaavn\.com/song/[^/?#]+/|saavn\.com/s/song/(?:[^/?#]+/){3})(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/song/leja-re/OQsEfQFVUXk',
'md5': '7b1f70de088ede3a152ea34aece4df42',
'info_dict': {
'id': 'OQsEfQFVUXk',
'ext': 'mp3',
'title': 'Leja Re',
'album': 'Leja Re',
'thumbnail': 'https://c.saavncdn.com/258/Leja-Re-Hindi-2018-20181124024539-500x500.jpg',
},
}, {
'url': 'https://www.saavn.com/s/song/hindi/Saathiya/O-Humdum-Suniyo-Re/KAMiazoCblU',
'only_matching': True,
}]
def _real_extract(self, url):
audio_id = self._match_id(url)
song_data = self._extract_initial_data(url, audio_id)['song']['song']
media_data = self._download_json(
'https://www.jiosaavn.com/api.php', audio_id, data=urlencode_postdata({
'__call': 'song.generateAuthToken',
'_format': 'json',
'bitrate': '128',
'url': song_data['encrypted_media_url'],
}))
return {
'id': audio_id,
'url': media_data['auth_url'],
'ext': media_data.get('type'),
'vcodec': 'none',
**traverse_obj(song_data, {
'title': ('title', 'text'),
'album': ('album', 'text'),
'thumbnail': ('image', 0, {url_or_none}),
}),
}
class JioSaavnAlbumIE(JioSaavnBaseIE):
_VALID_URL = r'https?://(?:www\.)?(?:jio)?saavn\.com/album/[^/?#]+/(?P<id>[^/?#]+)'
_TESTS = [{
'url': 'https://www.jiosaavn.com/album/96/buIOjYZDrNA_',
'info_dict': {
'id': 'buIOjYZDrNA_',
'title': '96',
},
'playlist_count': 10,
}]
def _real_extract(self, url):
album_id = self._match_id(url)
album_view = self._extract_initial_data(url, album_id)['albumView']
return self.playlist_from_matches(
traverse_obj(album_view, (
'modules', lambda _, x: x['key'] == 'list', 'data', ..., 'title', 'action', {str})),
album_id, traverse_obj(album_view, ('album', 'title', 'text', {str})), ie=JioSaavnSongIE,
getter=lambda x: urljoin('https://www.jiosaavn.com/', x))

View File

@ -1,82 +0,0 @@
import re
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
get_element_by_class,
urlencode_postdata,
)
class NJPWWorldIE(InfoExtractor):
_VALID_URL = r'https?://(front\.)?njpwworld\.com/p/(?P<id>[a-z0-9_]+)'
IE_DESC = '新日本プロレスワールド'
_NETRC_MACHINE = 'njpwworld'
_TESTS = [{
'url': 'http://njpwworld.com/p/s_series_00155_1_9/',
'info_dict': {
'id': 's_series_00155_1_9',
'ext': 'mp4',
'title': '闘強導夢2000 2000年1月4日 東京ドーム 第9試合 ランディ・サベージ VS リック・スタイナー',
'tags': list,
},
'params': {
'skip_download': True, # AES-encrypted m3u8
},
'skip': 'Requires login',
}, {
'url': 'https://front.njpwworld.com/p/s_series_00563_16_bs',
'info_dict': {
'id': 's_series_00563_16_bs',
'ext': 'mp4',
'title': 'WORLD TAG LEAGUE 2020 & BEST OF THE SUPER Jr.27 2020年12月6日 福岡・福岡国際センター バックステージコメント(字幕あり)',
'tags': ["福岡・福岡国際センター", "バックステージコメント", "2020", "20年代"],
},
'params': {
'skip_download': True,
},
}]
_LOGIN_URL = 'https://front.njpwworld.com/auth/login'
def _perform_login(self, username, password):
# Setup session (will set necessary cookies)
self._request_webpage(
'https://njpwworld.com/', None, note='Setting up session')
webpage, urlh = self._download_webpage_handle(
self._LOGIN_URL, None,
note='Logging in', errnote='Unable to login',
data=urlencode_postdata({'login_id': username, 'pw': password}),
headers={'Referer': 'https://front.njpwworld.com/auth'})
# /auth/login will return 302 for successful logins
if urlh.url == self._LOGIN_URL:
self.report_warning('unable to login')
return False
return True
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
formats = []
for kind, vid in re.findall(r'if\s+\(\s*imageQualityType\s*==\s*\'([^\']+)\'\s*\)\s*{\s*video_id\s*=\s*"(\d+)"', webpage):
player_path = '/intent?id=%s&type=url' % vid
player_url = compat_urlparse.urljoin(url, player_path)
formats += self._extract_m3u8_formats(
player_url, video_id, 'mp4', 'm3u8_native', m3u8_id=kind, fatal=False, quality=int(kind == 'high'))
tag_block = get_element_by_class('tag-block', webpage)
tags = re.findall(
r'<a[^>]+class="tag-[^"]+"[^>]*>([^<]+)</a>', tag_block
) if tag_block else None
return {
'id': video_id,
'title': get_element_by_class('article-title', webpage) or self._og_search_title(webpage),
'formats': formats,
'tags': tags,
}

View File

@@ -84,6 +84,13 @@ class TV5MondePlusIE(InfoExtractor):
 }]
 _GEO_BYPASS = False
+@staticmethod
+def _extract_subtitles(data_captions):
+subtitles = {}
+for f in traverse_obj(data_captions, ('files', lambda _, v: url_or_none(v['file']))):
+subtitles.setdefault(f.get('label') or 'fra', []).append({'url': f['file']})
+return subtitles
 def _real_extract(self, url):
 display_id = self._match_id(url)
 webpage = self._download_webpage(url, display_id)
@@ -176,6 +183,8 @@ class TV5MondePlusIE(InfoExtractor):
 'duration': duration,
 'upload_date': upload_date,
 'formats': formats,
+'subtitles': self._extract_subtitles(self._parse_json(
+traverse_obj(vpl_data, ('data-captions', {str}), default='{}'), display_id, fatal=False)),
 'series': series,
 'episode': episode,
 }

View File

@@ -11,6 +11,7 @@ from ..utils import (
 float_or_none,
 get_element_by_class,
 get_element_by_id,
+int_or_none,
 parse_duration,
 qualities,
 str_to_int,
@@ -241,6 +242,8 @@ class TwitCastingLiveIE(InfoExtractor):
 'expected_exception': 'UserNotLive',
 }]
+_PROTECTED_LIVE_RE = r'(?s)(<span\s*class="tw-movie-thumbnail2-badge"\s*data-status="live">\s*LIVE)'
 def _real_extract(self, url):
 uploader_id = self._match_id(url)
 self.to_screen(
@@ -248,24 +251,27 @@ class TwitCastingLiveIE(InfoExtractor):
 'Pass "https://twitcasting.tv/{0}/show" to download the history'.format(uploader_id))
 webpage = self._download_webpage(url, uploader_id)
-current_live = self._search_regex(
-(r'data-type="movie" data-id="(\d+)">',
-r'tw-sound-flag-open-link" data-id="(\d+)" style=',),
-webpage, 'current live ID', default=None)
-if not current_live:
+is_live = self._search_regex(  # first pattern is for public live
+(r'(data-is-onlive="true")', self._PROTECTED_LIVE_RE), webpage, 'is live?', default=None)
+current_live = int_or_none(self._search_regex(
+(r'data-type="movie" data-id="(\d+)">',  # not available?
+r'tw-sound-flag-open-link" data-id="(\d+)" style=',  # not available?
+r'data-movie-id="(\d+)"'),  # if not currently live, value may be 0
+webpage, 'current live ID', default=None))
+if is_live and not current_live:
 # fetch unfiltered /show to find running livestreams; we can't get ID of the password-protected livestream above
 webpage = self._download_webpage(
 f'https://twitcasting.tv/{uploader_id}/show/', uploader_id,
 note='Downloading live history')
-is_live = self._search_regex(r'(?s)(<span\s*class="tw-movie-thumbnail-badge"\s*data-status="live">\s*LIVE)', webpage, 'is live?', default=None)
+is_live = self._search_regex(self._PROTECTED_LIVE_RE, webpage, 'is live?', default=None)
 if is_live:
 # get the first live; running live is always at the first
 current_live = self._search_regex(
-r'(?s)<a\s+class="tw-movie-thumbnail"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
+r'(?s)<a\s+class="tw-movie-thumbnail2"\s*href="/[^/]+/movie/(?P<video_id>\d+)"\s*>.+?</a>',
 webpage, 'current live ID 2', default=None, group='video_id')
 if not current_live:
 raise UserNotLive(video_id=uploader_id)
-return self.url_result('https://twitcasting.tv/%s/movie/%s' % (uploader_id, current_live))
+return self.url_result(f'https://twitcasting.tv/{uploader_id}/movie/{current_live}', TwitCastingIE)
class TwitCastingUserIE(InfoExtractor): class TwitCastingUserIE(InfoExtractor):
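A minimal sketch of the new livestream detection, using the regexes from the diff against invented HTML fragments (not captured from twitcasting.tv); it only shows which page markers each pattern keys on, not the full extractor flow.

import re

# Regex copied from the diff above; the HTML snippets below are made up.
PROTECTED_LIVE_RE = r'(?s)(<span\s*class="tw-movie-thumbnail2-badge"\s*data-status="live">\s*LIVE)'

public_page = '<div data-is-onlive="true" data-movie-id="123456789"></div>'
protected_page = '<span class="tw-movie-thumbnail2-badge" data-status="live"> LIVE</span>'

# Public livestream: both the "on live" flag and a non-zero movie ID are in the page.
is_live = re.search(r'(data-is-onlive="true")', public_page) or re.search(PROTECTED_LIVE_RE, public_page)
movie_id = re.search(r'data-movie-id="(\d+)"', public_page)
print(bool(is_live), movie_id.group(1) if movie_id else None)  # True 123456789

# Password-protected livestream: only the LIVE badge is visible, so the extractor
# falls back to the /show page to recover the running movie ID.
print(bool(re.search(PROTECTED_LIVE_RE, protected_page)))  # True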

View File

@@ -48,6 +48,7 @@ class KnownDRMIE(UnsupportedInfoExtractor):
         r'joyn\.de',
         r'amazon\.(?:\w{2}\.)?\w+/gp/video',
         r'music\.amazon\.(?:\w{2}\.)?\w+',
+        r'(?:watch|front)\.njpwworld\.com',
     )
 
     _TESTS = [{
@@ -141,6 +142,13 @@ class KnownDRMIE(UnsupportedInfoExtractor):
         # https://github.com/yt-dlp/yt-dlp/issues/5767
         'url': 'https://www.hulu.com/movie/anthem-6b25fac9-da2b-45a3-8e09-e4156b0471cc',
         'only_matching': True,
+    }, {
+        # https://github.com/yt-dlp/yt-dlp/pull/8570
+        'url': 'https://watch.njpwworld.com/player/36447/series?assetType=series',
+        'only_matching': True,
+    }, {
+        'url': 'https://front.njpwworld.com/p/s_series_00563_16_bs',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):
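To show the intent of the new entry, here is a simplified stand-in for how the URL fragments in KnownDRMIE might be OR-ed into a single pattern; the real _VALID_URL construction in yt-dlp handles more schemes, prefixes and paths, so this is only an approximation of the matching behaviour.

import re

# Two fragments from the list above, combined the simplest plausible way.
URLS = (
    r'music\.amazon\.(?:\w{2}\.)?\w+',
    r'(?:watch|front)\.njpwworld\.com',
)
valid_url = r'https?://(?:www\.)?(?:%s)' % '|'.join(URLS)

for url in ('https://watch.njpwworld.com/player/36447/series?assetType=series',
            'https://front.njpwworld.com/p/s_series_00563_16_bs'):
    print(bool(re.match(valid_url, url)))  # True for both -> reported as known-DRM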

View File

@@ -194,7 +194,7 @@ class ZenYandexIE(InfoExtractor):
             'id': '60c7c443da18892ebfe85ed7',
             'ext': 'mp4',
             'title': 'ВОТ ЭТО Focus. Деды Морозы на гидроциклах',
-            'description': 'md5:f3db3d995763b9bbb7b56d4ccdedea89',
+            'description': 'md5:8684912f6086f298f8078d4af0e8a600',
             'thumbnail': 're:^https://avatars.dzeninfra.ru/',
             'uploader': 'AcademeG DailyStream'
         },
@@ -209,7 +209,7 @@ class ZenYandexIE(InfoExtractor):
             'id': '60c7c443da18892ebfe85ed7',
             'ext': 'mp4',
             'title': 'ВОТ ЭТО Focus. Деды Морозы на гидроциклах',
-            'description': 'md5:f3db3d995763b9bbb7b56d4ccdedea89',
+            'description': 'md5:8684912f6086f298f8078d4af0e8a600',
             'thumbnail': r're:^https://avatars\.dzeninfra\.ru/',
             'uploader': 'AcademeG DailyStream',
             'upload_date': '20191111',
@@ -258,7 +258,7 @@ class ZenYandexIE(InfoExtractor):
         video_id = self._match_id(redirect)
         webpage = self._download_webpage(redirect, video_id, note='Redirecting')
         data_json = self._search_json(
-            r'data\s*=', webpage, 'metadata', video_id, contains_pattern=r'{["\']_*serverState_*video.+}')
+            r'("data"\s*:|data\s*=)', webpage, 'metadata', video_id, contains_pattern=r'{["\']_*serverState_*video.+}')
         serverstate = self._search_regex(r'(_+serverState_+video-site_[^_]+_+)',
                                          webpage, 'server state').replace('State', 'Settings')
         uploader = self._search_regex(r'(<a\s*class=["\']card-channel-link[^"\']+["\'][^>]+>)',
@@ -266,22 +266,25 @@ class ZenYandexIE(InfoExtractor):
         uploader_name = extract_attributes(uploader).get('aria-label')
         video_json = try_get(data_json, lambda x: x[serverstate]['exportData']['video'], dict)
         stream_urls = try_get(video_json, lambda x: x['video']['streams'])
-        formats = []
+        formats, subtitles = [], {}
         for s_url in stream_urls:
             ext = determine_ext(s_url)
             if ext == 'mpd':
-                formats.extend(self._extract_mpd_formats(s_url, video_id, mpd_id='dash'))
+                fmts, subs = self._extract_mpd_formats_and_subtitles(s_url, video_id, mpd_id='dash')
             elif ext == 'm3u8':
-                formats.extend(self._extract_m3u8_formats(s_url, video_id, 'mp4'))
+                fmts, subs = self._extract_m3u8_formats_and_subtitles(s_url, video_id, 'mp4')
+            formats.extend(fmts)
+            subtitles = self._merge_subtitles(subtitles, subs)
 
         return {
             'id': video_id,
             'title': video_json.get('title') or self._og_search_title(webpage),
             'formats': formats,
+            'subtitles': subtitles,
             'duration': int_or_none(video_json.get('duration')),
             'view_count': int_or_none(video_json.get('views')),
             'timestamp': int_or_none(video_json.get('publicationDate')),
             'uploader': uploader_name or data_json.get('authorName') or try_get(data_json, lambda x: x['publisher']['name']),
-            'description': self._og_search_description(webpage) or try_get(data_json, lambda x: x['og']['description']),
+            'description': video_json.get('description') or self._og_search_description(webpage),
             'thumbnail': self._og_search_thumbnail(webpage) or try_get(data_json, lambda x: x['og']['imageUrl']),
         }
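The widened _search_json pattern now accepts both the older inline assignment and the newer JSON-embedded key. A small, self-contained demonstration with made-up page snippets (the surrounding server-state structure is invented for illustration):

import re

pattern = r'("data"\s*:|data\s*=)'

old_style = 'var data = {"_serverState_video-site_123_": {"exportData": {"video": {}}}};'
new_style = '{"props": {"data": {"_serverState_video-site_123_": {"exportData": {"video": {}}}}}}'

for page in (old_style, new_style):
    print(re.search(pattern, page).group(1))  # 'data =' then '"data":'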
@@ -296,6 +299,7 @@ class ZenYandexChannelIE(InfoExtractor):
             'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56',
         },
         'playlist_mincount': 169,
+        'skip': 'The page does not exist',
     }, {
         'url': 'https://dzen.ru/tok_media',
         'info_dict': {
@@ -304,6 +308,7 @@ class ZenYandexChannelIE(InfoExtractor):
             'description': 'md5:a9e5b3c247b7fe29fd21371a428bcf56',
         },
         'playlist_mincount': 169,
+        'skip': 'The page does not exist',
     }, {
         'url': 'https://zen.yandex.ru/id/606fd806cc13cb3c58c05cf5',
         'info_dict': {
@@ -318,21 +323,21 @@ class ZenYandexChannelIE(InfoExtractor):
         'url': 'https://zen.yandex.ru/jony_me',
         'info_dict': {
             'id': 'jony_me',
-            'description': 'md5:a2c62b4ef5cf3e3efb13d25f61f739e1',
+            'description': 'md5:ce0a5cad2752ab58701b5497835b2cc5',
             'title': 'JONY ',
         },
-        'playlist_count': 20,
+        'playlist_count': 18,
     }, {
         # Test that the playlist extractor finishes extracting when the
         # channel has more than one page of entries
         'url': 'https://zen.yandex.ru/tatyanareva',
         'info_dict': {
             'id': 'tatyanareva',
-            'description': 'md5:296b588d60841c3756c9105f237b70c6',
+            'description': 'md5:40a1e51f174369ec3ba9d657734ac31f',
             'title': 'Татьяна Рева',
             'entries': 'maxcount:200',
         },
-        'playlist_count': 46,
+        'playlist_mincount': 46,
     }, {
         'url': 'https://dzen.ru/id/606fd806cc13cb3c58c05cf5',
         'info_dict': {
@@ -375,7 +380,7 @@ class ZenYandexChannelIE(InfoExtractor):
         item_id = self._match_id(redirect)
         webpage = self._download_webpage(redirect, item_id, note='Redirecting')
         data = self._search_json(
-            r'var\s+data\s*=', webpage, 'channel data', item_id, contains_pattern=r'{\"__serverState__.+}')
+            r'("data"\s*:|data\s*=)', webpage, 'channel data', item_id, contains_pattern=r'{\"__serverState__.+}')
         server_state_json = traverse_obj(data, lambda k, _: k.startswith('__serverState__'), get_all=False)
         server_settings_json = traverse_obj(data, lambda k, _: k.startswith('__serverSettings__'), get_all=False)

View File

@@ -1,8 +1,8 @@
 # Autogenerated by devscripts/update-version.py
 
-__version__ = '2023.11.14'
+__version__ = '2023.11.16'
 
-RELEASE_GIT_HEAD = 'a9d3f4b20a3533d2a40104c85bc2cc6c2564c800'
+RELEASE_GIT_HEAD = '24f827875c6ba513f12ed09a3aef2bbed223760d'
 
 VARIANT = None
 
@@ -12,4 +12,4 @@ CHANNEL = 'stable'
 
 ORIGIN = 'yt-dlp/yt-dlp'
 
-_pkg_version = '2023.11.14'
+_pkg_version = '2023.11.16'