Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-28 18:21:26 +01:00)

Compare commits: 912ab88517 ... 8e1914b128

No commits in common. "912ab88517757599ebdb3852ece6be545ce58e14" and "8e1914b1280593c3355dccb4b2cf897356902b3e" have entirely different histories.
.github/workflows/release.yml (vendored): 2 changed lines
@@ -266,7 +266,7 @@ jobs:
         run: |
           python devscripts/update-version.py -c "${{ env.channel }}" -r "${{ env.target_repo }}" -s "${{ env.suffix }}" "${{ env.version }}"
           python devscripts/make_lazy_extractors.py
-          sed -i -E '0,/(name = ")[^"]+(")/s//\1${{ env.pypi_project }}\2/' pyproject.toml
+          sed -i -E "s/(name=')[^']+(', # package name)/\1${{ env.pypi_project }}\2/" setup.py

       - name: Build
         run: |
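The two sed one-liners above differ only in where the distribution name lives: the 912ab... side rewrites the first name = "..." assignment in pyproject.toml, while the 8e1914... side rewrites the name='...' keyword in setup.py. A minimal Python sketch of the same first-match-only substitution (an illustration only; pypi_project stands in for the workflow's ${{ env.pypi_project }} variable):

import re
from pathlib import Path

pypi_project = 'yt-dlp'  # placeholder for ${{ env.pypi_project }}

path = Path('pyproject.toml')
text = path.read_text()
# count=1 mirrors the GNU sed `0,/pattern/s//.../` idiom: only the first match is rewritten
text = re.sub(r'(name = ")[^"]+(")', rf'\g<1>{pypi_project}\g<2>', text, count=1)
path.write_text(text)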
@@ -280,7 +280,7 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
 * [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging separate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. License [depends on the build](https://www.ffmpeg.org/legal.html)

-There are bugs in ffmpeg that cause various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds
+There are bugs in ffmpeg that causes various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds

 **Important**: What you need is ffmpeg *binary*, **NOT** [the python package of the same name](https://pypi.org/project/ffmpeg)
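As context for the dependency note above: any format selection that picks separate video and audio streams needs the ffmpeg binary to merge them. A minimal sketch (not from the README) using yt-dlp as a library, where the URL is yt-dlp's usual test video and ffmpeg_location is only needed when ffmpeg is not on PATH:

import yt_dlp

ydl_opts = {
    # "bestvideo+bestaudio" downloads two separate streams, so ffmpeg must merge them
    'format': 'bestvideo+bestaudio/best',
    'merge_output_format': 'mkv',
    # 'ffmpeg_location': '/path/to/ffmpeg',  # uncomment if ffmpeg is not on PATH
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])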
@@ -11,7 +11,7 @@ maintainers = [
     {name = "Grub4K", email = "contact@grub4k.xyz"},
 ]
 description = "A youtube-dl fork with additional features and patches"
-requires-python = ">=3.8"
+requires-python = ">=3.7"
 keywords = [
     "youtube-dl",
     "video-downloader",
@@ -26,6 +26,7 @@ classifiers = [
     "Development Status :: 5 - Production/Stable",
     "Environment :: Console",
     "Programming Language :: Python",
+    "Programming Language :: Python :: 3.7",
     "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
@@ -40,18 +41,20 @@ classifiers = [
 dynamic = ["version", "readme"]

 dependencies = [
-    "pycryptodomex",
-    "websockets",
+    "brotli; implementation_name=='cpython'",
+    "brotlicffi; implementation_name!='cpython'",
+    "certifi",
+    "pycryptodomex",
+    "requests>=2.31.0,<3",
+    "urllib3>=1.26.17,<3",
+    "websockets>=12.0",
+    "secretstorage; sys_platform=='linux' and (implementation_name!='pypy' or python_version>='3.10')",
 ]

 [project.optional-dependencies]
-requests = [
-    "requests>=2.31.0,<3",
-    "urllib3>=1.26.17,<3",
-]
 mutagen = ["mutagen"]
 secretstorage = ["secretstorage"]

 [project.urls]
 Documentation = "https://github.com/yt-dlp/yt-dlp#readme"
@@ -63,7 +66,7 @@ Funding = "https://github.com/yt-dlp/yt-dlp/blob/master/Collaborators.md#collabo
 packages = ["yt_dlp"]

 [tool.setuptools.dynamic]
-version = {attr = "yt_dlp.version._pkg_version"}
+version = {attr = "yt_dlp.version.__version__"}
 readme = {file = ["README.md"], content-type = "text/markdown"}

 [project.scripts]
@@ -2340,58 +2340,6 @@ Line 1
         self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
                          msg='function on a `re.Match` should give group name as well')

-        # Test xml.etree.ElementTree.Element as input obj
-        etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
-        <data>
-            <country name="Liechtenstein">
-                <rank>1</rank>
-                <year>2008</year>
-                <gdppc>141100</gdppc>
-                <neighbor name="Austria" direction="E"/>
-                <neighbor name="Switzerland" direction="W"/>
-            </country>
-            <country name="Singapore">
-                <rank>4</rank>
-                <year>2011</year>
-                <gdppc>59900</gdppc>
-                <neighbor name="Malaysia" direction="N"/>
-            </country>
-            <country name="Panama">
-                <rank>68</rank>
-                <year>2011</year>
-                <gdppc>13600</gdppc>
-                <neighbor name="Costa Rica" direction="W"/>
-                <neighbor name="Colombia" direction="E"/>
-            </country>
-        </data>''')
-        self.assertEqual(traverse_obj(etree, ''), etree,
-                         msg='empty str key should return the element itself')
-        self.assertEqual(traverse_obj(etree, 'country'), list(etree),
-                         msg='str key should lead all children with that tag name')
-        self.assertEqual(traverse_obj(etree, ...), list(etree),
-                         msg='`...` as key should return all children')
-        self.assertEqual(traverse_obj(etree, lambda _, x: x[0].text == '4'), [etree[1]],
-                         msg='function as key should get element as value')
-        self.assertEqual(traverse_obj(etree, lambda i, _: i == 1), [etree[1]],
-                         msg='function as key should get index as key')
-        self.assertEqual(traverse_obj(etree, 0), etree[0],
-                         msg='int key should return the nth child')
-        self.assertEqual(traverse_obj(etree, './/neighbor/@name'),
-                         ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia'],
-                         msg='`@<attribute>` at end of path should give that attribute')
-        self.assertEqual(traverse_obj(etree, '//neighbor/@fail'), [None, None, None, None, None],
-                         msg='`@<nonexistant>` at end of path should give `None`')
-        self.assertEqual(traverse_obj(etree, ('//neighbor/@', 2)), {'name': 'Malaysia', 'direction': 'N'},
-                         msg='`@` should give the full attribute dict')
-        self.assertEqual(traverse_obj(etree, '//year/text()'), ['2008', '2011', '2011'],
-                         msg='`text()` at end of path should give the inner text')
-        self.assertEqual(traverse_obj(etree, '//*[@direction]/@direction'), ['E', 'W', 'N', 'W', 'E'],
-                         msg='full python xpath features should be supported')
-        self.assertEqual(traverse_obj(etree, (0, '@name')), 'Liechtenstein',
-                         msg='special transformations should act on current element')
-        self.assertEqual(traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})), [1, 2008, 141100],
-                         msg='special transformations should act on current element')
-
     def test_http_header_dict(self):
         headers = HTTPHeaderDict()
         headers['ytdl-test'] = b'0'
@@ -186,7 +186,7 @@ def _firefox_browser_dir():
     if sys.platform in ('cygwin', 'win32'):
         return os.path.expandvars(R'%APPDATA%\Mozilla\Firefox\Profiles')
     elif sys.platform == 'darwin':
-        return os.path.expanduser('~/Library/Application Support/Firefox/Profiles')
+        return os.path.expanduser('~/Library/Application Support/Firefox')
     return os.path.expanduser('~/.mozilla/firefox')

@@ -2019,6 +2019,7 @@ from .tunein import (
     TuneInPodcastEpisodeIE,
     TuneInShortenerIE,
 )
+from .turbo import TurboIE
 from .tv2 import (
     TV2IE,
     TV2ArticleIE,
@@ -2222,7 +2223,6 @@ from .viki import (
     VikiIE,
     VikiChannelIE,
 )
-from .viously import ViouslyIE
 from .viqeo import ViqeoIE
 from .viu import (
     ViuIE,
@@ -4,7 +4,6 @@ from functools import partial
 from .common import InfoExtractor
 from ..utils import (
     OnDemandPagedList,
-    bug_reports_message,
     determine_ext,
     int_or_none,
     join_nonempty,

@@ -234,7 +233,7 @@ class ARDBetaMediathekIE(InfoExtractor):
         (?:(?:beta|www)\.)?ardmediathek\.de/
         (?:[^/]+/)?
         (?:player|live|video)/
-        (?:[^?#]+/)?
+        (?:(?P<display_id>[^?#]+)/)?
         (?P<id>[a-zA-Z0-9]+)
         /?(?:[?#]|$)'''
     _GEO_COUNTRIES = ['DE']

@@ -243,8 +242,8 @@ class ARDBetaMediathekIE(InfoExtractor):
         'url': 'https://www.ardmediathek.de/video/filme-im-mdr/liebe-auf-vier-pfoten/mdr-fernsehen/Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0',
         'md5': 'b6e8ab03f2bcc6e1f9e6cef25fcc03c4',
         'info_dict': {
-            'display_id': 'Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0',
-            'id': '12939099',
+            'display_id': 'filme-im-mdr/liebe-auf-vier-pfoten/mdr-fernsehen',
+            'id': 'Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0',
             'title': 'Liebe auf vier Pfoten',
             'description': r're:^Claudia Schmitt, Anwältin in Salzburg',
             'duration': 5222,

@@ -256,7 +255,7 @@ class ARDBetaMediathekIE(InfoExtractor):
             'series': 'Filme im MDR',
             'age_limit': 0,
             'channel': 'MDR',
-            '_old_archive_ids': ['ardbetamediathek Y3JpZDovL21kci5kZS9zZW5kdW5nLzI4MjA0MC80MjIwOTEtNDAyNTM0'],
+            '_old_archive_ids': ['ardbetamediathek 12939099'],
         },
     }, {
         'url': 'https://www.ardmediathek.de/mdr/video/die-robuste-roswita/Y3JpZDovL21kci5kZS9iZWl0cmFnL2Ntcy84MWMxN2MzZC0wMjkxLTRmMzUtODk4ZS0wYzhlOWQxODE2NGI/',

@@ -277,37 +276,37 @@ class ARDBetaMediathekIE(InfoExtractor):
         'url': 'https://www.ardmediathek.de/video/tagesschau-oder-tagesschau-20-00-uhr/das-erste/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
         'md5': '1e73ded21cb79bac065117e80c81dc88',
         'info_dict': {
-            'id': '10049223',
+            'id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
             'ext': 'mp4',
             'title': 'tagesschau, 20:00 Uhr',
             'timestamp': 1636398000,
             'description': 'md5:39578c7b96c9fe50afdf5674ad985e6b',
             'upload_date': '20211108',
-            'display_id': 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll',
+            'display_id': 'tagesschau-oder-tagesschau-20-00-uhr/das-erste',
             'duration': 915,
             'episode': 'tagesschau, 20:00 Uhr',
             'series': 'tagesschau',
             'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:fbb21142783b0a49?w=960&ch=ee69108ae344f678',
             'channel': 'ARD-Aktuell',
-            '_old_archive_ids': ['ardbetamediathek Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll'],
+            '_old_archive_ids': ['ardbetamediathek 10049223'],
         },
     }, {
         'url': 'https://www.ardmediathek.de/video/7-tage/7-tage-unter-harten-jungs/hr-fernsehen/N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3',
         'md5': 'c428b9effff18ff624d4f903bda26315',
         'info_dict': {
-            'id': '94834686',
+            'id': 'N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3',
             'ext': 'mp4',
             'duration': 2700,
             'episode': '7 Tage ... unter harten Jungs',
             'description': 'md5:0f215470dcd2b02f59f4bd10c963f072',
             'upload_date': '20231005',
             'timestamp': 1696491171,
-            'display_id': 'N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3',
+            'display_id': '7-tage/7-tage-unter-harten-jungs/hr-fernsehen',
             'series': '7 Tage ...',
             'channel': 'HR',
             'thumbnail': 'https://api.ardmediathek.de/image-service/images/urn:ard:image:f6e6d5ffac41925c?w=960&ch=fa32ba69bc87989a',
             'title': '7 Tage ... unter harten Jungs',
-            '_old_archive_ids': ['ardbetamediathek N2I2YmM5MzgtNWFlOS00ZGFlLTg2NzMtYzNjM2JlNjk4MDg3'],
+            '_old_archive_ids': ['ardbetamediathek 94834686'],
         },
     }, {
         'url': 'https://beta.ardmediathek.de/ard/video/Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhdG9ydC9mYmM4NGM1NC0xNzU4LTRmZGYtYWFhZS0wYzcyZTIxNGEyMDE',

@@ -358,25 +357,14 @@ class ARDBetaMediathekIE(InfoExtractor):
         }), get_all=False)

     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        video_id, display_id = self._match_valid_url(url).group('id', 'display_id')

         page_data = self._download_json(
-            f'https://api.ardmediathek.de/page-gateway/pages/ard/item/{display_id}', display_id, query={
+            f'https://api.ardmediathek.de/page-gateway/pages/ard/item/{video_id}', video_id, query={
                 'embedded': 'false',
                 'mcV6': 'true',
             })

-        # For user convenience we use the old contentId instead of the longer crid
-        # Ref: https://github.com/yt-dlp/yt-dlp/issues/8731#issuecomment-1874398283
-        old_id = traverse_obj(page_data, ('tracking', 'atiCustomVars', 'contentId', {int}))
-        if old_id is not None:
-            video_id = str(old_id)
-            archive_ids = [make_archive_id(ARDBetaMediathekIE, display_id)]
-        else:
-            self.report_warning(f'Could not extract contentId{bug_reports_message()}')
-            video_id = display_id
-            archive_ids = None
-
         player_data = traverse_obj(
             page_data, ('widgets', lambda _, v: v['type'] in ('player_ondemand', 'player_live'), {dict}), get_all=False)
         is_live = player_data.get('type') == 'player_live'

@@ -431,6 +419,8 @@ class ARDBetaMediathekIE(InfoExtractor):
             })

+        age_limit = traverse_obj(page_data, ('fskRating', {lambda x: remove_start(x, 'FSK')}, {int_or_none}))
+        old_id = traverse_obj(page_data, ('tracking', 'atiCustomVars', 'contentId'))

         return {
             'id': video_id,
             'display_id': display_id,

@@ -448,7 +438,7 @@ class ARDBetaMediathekIE(InfoExtractor):
                 'channel': 'clipSourceName',
             })),
             **self._extract_episode_info(page_data.get('title')),
-            '_old_archive_ids': archive_ids,
+            '_old_archive_ids': [make_archive_id(ARDBetaMediathekIE, old_id)],
         }

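The hunk at -358,25 keeps old download-archive entries valid while the primary ID scheme changes: whichever ID is not current is recorded under _old_archive_ids. A minimal sketch of the mapping, assuming make_archive_id simply joins the lowercased extractor key and the given ID, which is what the '_old_archive_ids' test values above show:

from yt_dlp.extractor.ard import ARDBetaMediathekIE
from yt_dlp.utils import make_archive_id

crid = 'Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll'
# On the 912ab... side the short numeric contentId (e.g. '10049223' in the tagesschau test)
# becomes the primary ID, and the crid is kept as an old archive ID so entries written by
# earlier versions still match --download-archive.
print(make_archive_id(ARDBetaMediathekIE, crid))
# expected, per the test data above: 'ardbetamediathek Y3JpZDovL2Rhc2Vyc3RlLmRlL3RhZ2Vzc2NoYXUvZmM4ZDUxMjgtOTE0ZC00Y2MzLTgzNzAtNDZkNGNiZWJkOTll'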
@@ -29,8 +29,7 @@ class BigoIE(InfoExtractor):

         info_raw = self._download_json(
             'https://ta.bigo.tv/official_website/studio/getInternalStudioInfo',
-            user_id, data=urlencode_postdata({'siteId': user_id}),
-            headers={'Accept': 'application/json'})
+            user_id, data=urlencode_postdata({'siteId': user_id}))

         if not isinstance(info_raw, dict):
             raise ExtractorError('Received invalid JSON data')
@@ -665,7 +665,7 @@ class NhkRadiruLiveIE(InfoExtractor):

         noa_info = self._download_json(
             f'https:{config.find(".//url_program_noa").text}'.format(area=data.find('areakey').text),
-            station, note=f'Downloading {area} station metadata', fatal=False)
+            station, note=f'Downloading {area} station metadata')
         present_info = traverse_obj(noa_info, ('nowonair_list', self._NOA_STATION_IDS.get(station), 'present'))

         return {
yt_dlp/extractor/turbo.py (new file): 64 added lines
@@ -0,0 +1,64 @@
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    qualities,
+    xpath_text,
+)
+
+
+class TurboIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?turbo\.fr/videos-voiture/(?P<id>[0-9]+)-'
+    _API_URL = 'http://www.turbo.fr/api/tv/xml.php?player_generique=player_generique&id={0:}'
+    _TEST = {
+        'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
+        'md5': '33f4b91099b36b5d5a91f84b5bcba600',
+        'info_dict': {
+            'id': '454443',
+            'ext': 'mp4',
+            'duration': 3715,
+            'title': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia... ',
+            'description': 'Turbo du 07/09/2014 : Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...',
+            'thumbnail': r're:^https?://.*\.jpg$',
+        }
+    }
+
+    def _real_extract(self, url):
+        mobj = self._match_valid_url(url)
+        video_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, video_id)
+
+        playlist = self._download_xml(self._API_URL.format(video_id), video_id)
+        item = playlist.find('./channel/item')
+        if item is None:
+            raise ExtractorError('Playlist item was not found', expected=True)
+
+        title = xpath_text(item, './title', 'title')
+        duration = int_or_none(xpath_text(item, './durate', 'duration'))
+        thumbnail = xpath_text(item, './visuel_clip', 'thumbnail')
+        description = self._html_search_meta('description', webpage)
+
+        formats = []
+        get_quality = qualities(['3g', 'sd', 'hq'])
+        for child in item:
+            m = re.search(r'url_video_(?P<quality>.+)', child.tag)
+            if m:
+                quality = compat_str(m.group('quality'))
+                formats.append({
+                    'format_id': quality,
+                    'url': child.text,
+                    'quality': get_quality(quality),
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'duration': duration,
+            'thumbnail': thumbnail,
+            'description': description,
+            'formats': formats,
+        }
@@ -8,6 +8,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
     compat_str,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (

@@ -190,20 +191,6 @@ class TwitchBaseIE(InfoExtractor):
             'url': thumbnail,
         }] if thumbnail else None

-    def _extract_twitch_m3u8_formats(self, video_id, token, signature):
-        """Subclasses must define _M3U8_PATH"""
-        return self._extract_m3u8_formats(
-            f'{self._USHER_BASE}/{self._M3U8_PATH}/{video_id}.m3u8', video_id, 'mp4', query={
-                'allow_source': 'true',
-                'allow_audio_only': 'true',
-                'allow_spectre': 'true',
-                'p': random.randint(1000000, 10000000),
-                'player': 'twitchweb',
-                'playlist_include_framerate': 'true',
-                'sig': signature,
-                'token': token,
-            })
-

 class TwitchVodIE(TwitchBaseIE):
     IE_NAME = 'twitch:vod'

@@ -216,7 +203,6 @@ class TwitchVodIE(TwitchBaseIE):
                         )
                         (?P<id>\d+)
                     '''
-    _M3U8_PATH = 'vod'

     _TESTS = [{
         'url': 'http://www.twitch.tv/riotgames/v/6528877?t=5m10s',

@@ -546,8 +532,20 @@ class TwitchVodIE(TwitchBaseIE):
         info = self._extract_info_gql(video, vod_id)
         access_token = self._download_access_token(vod_id, 'video', 'id')

-        formats = self._extract_twitch_m3u8_formats(
-            vod_id, access_token['value'], access_token['signature'])
+        formats = self._extract_m3u8_formats(
+            '%s/vod/%s.m3u8?%s' % (
+                self._USHER_BASE, vod_id,
+                compat_urllib_parse_urlencode({
+                    'allow_source': 'true',
+                    'allow_audio_only': 'true',
+                    'allow_spectre': 'true',
+                    'player': 'twitchweb',
+                    'playlist_include_framerate': 'true',
+                    'nauth': access_token['value'],
+                    'nauthsig': access_token['signature'],
+                })),
+            vod_id, 'mp4', entry_protocol='m3u8_native')
+
         formats.extend(self._extract_storyboard(vod_id, video.get('storyboard'), info.get('duration')))

         self._prefer_source(formats)

@@ -926,7 +924,6 @@ class TwitchStreamIE(TwitchBaseIE):
                         )
                         (?P<id>[^/#?]+)
                     '''
-    _M3U8_PATH = 'api/channel/hls'

     _TESTS = [{
         'url': 'http://www.twitch.tv/shroomztv',

@@ -1029,10 +1026,23 @@ class TwitchStreamIE(TwitchBaseIE):

         access_token = self._download_access_token(
             channel_name, 'stream', 'channelName')
+        token = access_token['value']

         stream_id = stream.get('id') or channel_name
-        formats = self._extract_twitch_m3u8_formats(
-            channel_name, access_token['value'], access_token['signature'])
+        query = {
+            'allow_source': 'true',
+            'allow_audio_only': 'true',
+            'allow_spectre': 'true',
+            'p': random.randint(1000000, 10000000),
+            'player': 'twitchweb',
+            'playlist_include_framerate': 'true',
+            'segment_preference': '4',
+            'sig': access_token['signature'].encode('utf-8'),
+            'token': token.encode('utf-8'),
+        }
+        formats = self._extract_m3u8_formats(
+            '%s/api/channel/hls/%s.m3u8' % (self._USHER_BASE, channel_name),
+            stream_id, 'mp4', query=query)
         self._prefer_source(formats)

         view_count = stream.get('viewers')
@@ -1,60 +0,0 @@
-import base64
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    extract_attributes,
-    int_or_none,
-    parse_iso8601,
-)
-from ..utils.traversal import traverse_obj
-
-
-class ViouslyIE(InfoExtractor):
-    _VALID_URL = False
-    _WEBPAGE_TESTS = [{
-        'url': 'http://www.turbo.fr/videos-voiture/454443-turbo-du-07-09-2014-renault-twingo-3-bentley-continental-gt-speed-ces-guide-achat-dacia.html',
-        'md5': '37a6c3381599381ff53a7e1e0575c0bc',
-        'info_dict': {
-            'id': 'F_xQzS2jwb3',
-            'ext': 'mp4',
-            'title': 'Turbo du 07/09/2014\xa0: Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...',
-            'description': 'Turbo du 07/09/2014\xa0: Renault Twingo 3, Bentley Continental GT Speed, CES, Guide Achat Dacia...',
-            'age_limit': 0,
-            'upload_date': '20230328',
-            'timestamp': 1680037507,
-            'duration': 3716,
-            'categories': ['motors'],
-        }
-    }]
-
-    def _extract_from_webpage(self, url, webpage):
-        viously_players = re.findall(r'<div[^>]*class="(?:[^"]*\s)?v(?:iou)?sly-player(?:\s[^"]*)?"[^>]*>', webpage)
-        if not viously_players:
-            return
-
-        def custom_decode(text):
-            STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
-            CUSTOM_ALPHABET = 'VIOUSLYABCDEFGHJKMNPQRTWXZviouslyabcdefghjkmnpqrtwxz9876543210+/='
-            data = base64.b64decode(text.translate(str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)))
-            return data.decode('utf-8').strip('\x00')
-
-        for video_id in traverse_obj(viously_players, (..., {extract_attributes}, 'id')):
-            formats = self._extract_m3u8_formats(
-                f'https://www.viously.com/video/hls/{video_id}/index.m3u8', video_id, fatal=False)
-            if not formats:
-                continue
-            data = self._download_json(
-                f'https://www.viously.com/export/json/{video_id}', video_id,
-                transform_source=custom_decode, fatal=False)
-            yield {
-                'id': video_id,
-                'formats': formats,
-                **traverse_obj(data, ('video', {
-                    'title': ('title', {str}),
-                    'description': ('description', {str}),
-                    'duration': ('duration', {int_or_none}),
-                    'timestamp': ('iso_date', {parse_iso8601}),
-                    'categories': ('category', 'name', {str}, {lambda x: [x] if x else None}),
-                })),
-            }
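The deleted ViouslyIE reads the player metadata from an obfuscated JSON export: the payload is ordinary base64, but written in a vanity alphabet that starts with "VIOUSLY". A small self-contained sketch of the same trick, with a hypothetical custom_encode added only to demonstrate the round trip (it is not part of the extractor):

import base64

STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/='
CUSTOM_ALPHABET = 'VIOUSLYABCDEFGHJKMNPQRTWXZviouslyabcdefghjkmnpqrtwxz9876543210+/='


def custom_encode(data):
    # hypothetical inverse for demonstration: standard base64, then map to the custom alphabet
    return base64.b64encode(data).decode().translate(str.maketrans(STANDARD_ALPHABET, CUSTOM_ALPHABET))


def custom_decode(text):
    # same logic as the deleted extractor: map the custom alphabet back before decoding
    data = base64.b64decode(text.translate(str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)))
    return data.decode('utf-8').strip('\x00')


print(custom_decode(custom_encode(b'{"video": {"title": "demo"}}')))  # -> {"video": {"title": "demo"}}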
@@ -3,7 +3,6 @@ import contextlib
 import inspect
 import itertools
 import re
-import xml.etree.ElementTree

 from ._utils import (
     IDENTITY,

@@ -119,7 +118,7 @@ def traverse_obj(
             branching = True
             if isinstance(obj, collections.abc.Mapping):
                 result = obj.values()
-            elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element):
+            elif is_iterable_like(obj):
                 result = obj
             elif isinstance(obj, re.Match):
                 result = obj.groups()

@@ -133,7 +132,7 @@ def traverse_obj(
             branching = True
             if isinstance(obj, collections.abc.Mapping):
                 iter_obj = obj.items()
-            elif is_iterable_like(obj) or isinstance(obj, xml.etree.ElementTree.Element):
+            elif is_iterable_like(obj):
                 iter_obj = enumerate(obj)
             elif isinstance(obj, re.Match):
                 iter_obj = itertools.chain(

@@ -169,7 +168,7 @@ def traverse_obj(
                 result = next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)

         elif isinstance(key, (int, slice)):
-            if is_iterable_like(obj, (collections.abc.Sequence, xml.etree.ElementTree.Element)):
+            if is_iterable_like(obj, collections.abc.Sequence):
                 branching = isinstance(key, slice)
                 with contextlib.suppress(IndexError):
                     result = obj[key]

@@ -177,34 +176,6 @@ def traverse_obj(
                 with contextlib.suppress(IndexError):
                     result = str(obj)[key]

-        elif isinstance(obj, xml.etree.ElementTree.Element) and isinstance(key, str):
-            xpath, _, special = key.rpartition('/')
-            if not special.startswith('@') and special != 'text()':
-                xpath = key
-                special = None
-
-            # Allow abbreviations of relative paths, absolute paths error
-            if xpath.startswith('/'):
-                xpath = f'.{xpath}'
-            elif xpath and not xpath.startswith('./'):
-                xpath = f'./{xpath}'
-
-            def apply_specials(element):
-                if special is None:
-                    return element
-                if special == '@':
-                    return element.attrib
-                if special.startswith('@'):
-                    return try_call(element.attrib.get, args=(special[1:],))
-                if special == 'text()':
-                    return element.text
-                assert False, f'apply_specials is missing case for {special!r}'
-
-            if xpath:
-                result = list(map(apply_specials, obj.iterfind(xpath)))
-            else:
-                result = apply_specials(obj)
-
         return branching, result if branching else (result,)

     def lazy_last(iterable):
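The branch deleted in the last hunk is what gives traverse_obj its xpath-style handling of ElementTree elements, the behaviour exercised by the removed tests further up. A minimal sketch of that behaviour, built directly from the test data above (it only works on the 912ab... side, where the feature exists):

import xml.etree.ElementTree

from yt_dlp.utils.traversal import traverse_obj

etree = xml.etree.ElementTree.fromstring(
    '<data><country name="Liechtenstein"><rank>1</rank><year>2008</year>'
    '<neighbor name="Austria" direction="E"/></country></data>')

print(traverse_obj(etree, './/neighbor/@name'))  # ['Austria']: '@<attribute>' at the end of a path gives that attribute
print(traverse_obj(etree, '//year/text()'))      # ['2008']: 'text()' at the end of a path gives the inner text
print(traverse_obj(etree, (0, '@name')))         # 'Liechtenstein': specials act on the current element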