Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-26 09:11:25 +01:00)

Compare commits: 94189f3847...fd8d1c18e2 (1 commit: fd8d1c18e2)
@@ -481,7 +481,7 @@ class TestTraversalHelpers:
             'id': 'name',
             'data': 'content',
             'url': 'url',
-        }, all, {subs_list_to_dict(lang=None)}]) == {
+        }, all, {subs_list_to_dict}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass'}],
             'en': [{'data': 'content'}],
         }, 'subs with mandatory items missing should be filtered'
@@ -507,54 +507,6 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
             {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
         ]}, '`quality` key should sort subtitle list accordingly'
-        assert traverse_obj([
-            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
-            {'name': 'de'},
-            {'name': 'en', 'content': 'content'},
-            {'url': 'https://example.com/subs/en'},
-        ], [..., {
-            'id': 'name',
-            'url': 'url',
-            'data': 'content',
-        }, all, {subs_list_to_dict(lang='en')}]) == {
-            'de': [{'url': 'https://example.com/subs/de.ass'}],
-            'en': [
-                {'data': 'content'},
-                {'url': 'https://example.com/subs/en'},
-            ],
-        }, 'optionally provided lang should be used if no id available'
-        assert traverse_obj([
-            {'name': 1, 'url': 'https://example.com/subs/de1'},
-            {'name': {}, 'url': 'https://example.com/subs/de2'},
-            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
-            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
-        ], [..., {
-            'id': 'name',
-            'url': 'url',
-            'ext': 'ext',
-        }, all, {subs_list_to_dict(lang=None)}]) == {
-            'de': [
-                {'url': 'https://example.com/subs/de3'},
-                {'url': 'https://example.com/subs/de4'},
-            ],
-        }, 'non str types should be ignored for id and ext'
-        assert traverse_obj([
-            {'name': 1, 'url': 'https://example.com/subs/de1'},
-            {'name': {}, 'url': 'https://example.com/subs/de2'},
-            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
-            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
-        ], [..., {
-            'id': 'name',
-            'url': 'url',
-            'ext': 'ext',
-        }, all, {subs_list_to_dict(lang='de')}]) == {
-            'de': [
-                {'url': 'https://example.com/subs/de1'},
-                {'url': 'https://example.com/subs/de2'},
-                {'url': 'https://example.com/subs/de3'},
-                {'url': 'https://example.com/subs/de4'},
-            ],
-        }, 'non str types should be replaced by default id'
 
     def test_trim_str(self):
         with pytest.raises(TypeError):
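Note: the assertions removed in this hunk all exercise the `lang` keyword of `subs_list_to_dict`. A rough standalone sketch of the behaviour they describe, assuming the `-` side variant that still accepts `lang` (input data borrowed from the removed test):

    # Only meaningful with the subs_list_to_dict variant that accepts `lang`.
    from yt_dlp.utils.traversal import subs_list_to_dict, traverse_obj

    subs = [
        {'name': 'en', 'content': 'content'},
        {'url': 'https://example.com/subs/en'},  # no usable id
    ]
    result = traverse_obj(subs, [..., {
        'id': 'name',
        'url': 'url',
        'data': 'content',
    }, all, {subs_list_to_dict(lang='en')}])
    # lang='en' keeps the id-less entry under 'en'; with lang=None it would be
    # dropped, and non-string ids are handled the same way.
    print(result)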
@@ -573,7 +525,7 @@ class TestTraversalHelpers:
     def test_unpack(self):
         assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
         assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
-        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
+        assert unpack(join_nonempty(delim=' '))([1, 2, 3]) == '1 2 3'
         with pytest.raises(TypeError):
             unpack(join_nonempty)()
         with pytest.raises(TypeError):
@@ -72,6 +72,7 @@ from yt_dlp.utils import (
     intlist_to_bytes,
     iri_to_uri,
     is_html,
+    join_nonempty,
     js_to_json,
     limit_length,
     locked_file,
@@ -2157,6 +2158,10 @@ Line 1
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
         assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
 
+        assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
+        assert callable(join_nonempty()), 'varargs positional should apply partially'
+        assert join_nonempty(None, delim=', ') == '', 'passed varargs should call the function'
+
 
 if __name__ == '__main__':
     unittest.main()
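Note: the three added assertions document how `@partial_application` treats a varargs function: with keyword arguments only, `join_nonempty` returns a partially applied callable, while any positional argument (even `None`) triggers a real call. A small sketch of that behaviour, assuming the `+` side where the decorator and the varargs rule are in place:

    # Assumes the yt-dlp revision where join_nonempty is decorated with
    # @partial_application and *values counts as a required positional.
    from yt_dlp.utils import join_nonempty

    partial = join_nonempty(delim=', ')            # keyword-only call -> partial
    assert callable(partial)
    assert partial('a', None, 'b') == 'a, b'       # the partial joins when called
    assert join_nonempty(None, delim=', ') == ''   # positional arg -> immediate call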
@@ -4381,9 +4381,7 @@ class YoutubeDL:
             return None
 
         for idx, t in list(enumerate(thumbnails))[::-1]:
-            thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
-            if multiple:
-                thumb_ext = f'{t["id"]}.{thumb_ext}'
+            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
             thumb_display_id = f'{label} thumbnail {t["id"]}'
             thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
             thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
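Note: the two sides compute the thumbnail filename extension differently: the `-` side lets an explicit `ext` on the thumbnail dict override what `determine_ext` infers from the URL, while the `+` side always derives it from the URL. A minimal illustration with hypothetical thumbnail data:

    from yt_dlp.utils import determine_ext

    t = {'id': '0', 'url': 'https://example.com/thumb.php', 'ext': 'jpg'}
    multiple = False

    # "-" side: an explicit 'ext' wins over the URL-derived extension
    thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
    if multiple:
        thumb_ext = f'{t["id"]}.{thumb_ext}'
    assert thumb_ext == 'jpg'

    # "+" side: the extension always comes from the URL, here 'php'
    assert (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg') == 'php'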
@@ -66,14 +66,6 @@ class AfreecaTVBaseIE(InfoExtractor):
             extensions={'legacy_ssl': True}), display_id,
             'Downloading API JSON', 'Unable to download API JSON')
 
-    @staticmethod
-    def _fixup_thumb(thumb_url):
-        if not url_or_none(thumb_url):
-            return None
-        # Core would determine_ext as 'php' from the url, so we need to provide the real ext
-        # See: https://github.com/yt-dlp/yt-dlp/issues/11537
-        return [{'url': thumb_url, 'ext': 'jpg'}]
-
 
 class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'soop'
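Note: the removed `_fixup_thumb` helper wraps a thumbnail URL into a `thumbnails` list that carries an explicit `ext`, because `determine_ext` would infer `php` from the URL (see the issue linked in the removed comment). A standalone sketch of the same idea, outside the extractor class:

    from yt_dlp.utils import url_or_none

    def fixup_thumb(thumb_url):  # hypothetical standalone copy of the removed helper
        if not url_or_none(thumb_url):
            return None
        # the site serves images from .php URLs, so the real extension is set explicitly
        return [{'url': thumb_url, 'ext': 'jpg'}]

    assert fixup_thumb('https://example.com/thumb.php?no=1') == [
        {'url': 'https://example.com/thumb.php?no=1', 'ext': 'jpg'}]
    assert fixup_thumb('not a url') is None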
@@ -163,7 +155,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
             'duration': ('total_file_duration', {int_or_none(scale=1000)}),
-            'thumbnails': ('thumb', {self._fixup_thumb}),
+            'thumbnail': ('thumb', {url_or_none}),
         })
 
         entries = []
@@ -234,7 +226,8 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
 
         return self.playlist_result(self._entries(data), video_id)
 
-    def _entries(self, data):
+    @staticmethod
+    def _entries(data):
         # 'files' is always a list with 1 element
         yield from traverse_obj(data, (
             'data', lambda _, v: v['story_type'] == 'catch',
@@ -245,7 +238,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
                 'title': ('title', {str}),
                 'uploader': ('writer_nick', {str}),
                 'uploader_id': ('writer_id', {str}),
-                'thumbnails': ('thumb', {self._fixup_thumb}),
+                'thumbnail': ('thumb', {url_or_none}),
                 'timestamp': ('write_timestamp', {int_or_none}),
             }))
 
@@ -279,7 +279,6 @@ class InfoExtractor:
     thumbnails:     A list of dictionaries, with the following entries:
                         * "id" (optional, string) - Thumbnail format ID
                         * "url"
-                        * "ext" (optional, string) - actual image extension if not given in URL
                        * "preference" (optional, int) - quality of the image
                         * "width" (optional, int)
                         * "height" (optional, int)
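Note: the removed docstring line documents an optional `ext` field on thumbnail dictionaries (the actual image extension when the URL does not reveal it). A minimal, illustrative entry relying on it, only honoured on the `-` side of these changes:

    thumbnail = {
        'id': '0',
        'url': 'https://example.com/thumb.php?no=123',
        'ext': 'jpg',  # actual image extension; the URL alone would suggest 'php'
    }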
@@ -50,7 +50,6 @@ from ..utils import (
     parse_iso8601,
     parse_qs,
     qualities,
-    remove_end,
     remove_start,
     smuggle_url,
     str_or_none,
@@ -516,8 +515,6 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
     _YT_HANDLE_RE = r'@[\w.-]{3,30}'  # https://support.google.com/youtube/answer/11585688?hl=en
     _YT_CHANNEL_UCID_RE = r'UC[\w-]{22}'
 
-    _NETRC_MACHINE = 'youtube'
-
     def ucid_or_none(self, ucid):
         return self._search_regex(rf'^({self._YT_CHANNEL_UCID_RE})$', ucid, 'UC-id', default=None)
 
@@ -576,19 +573,10 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         self._initialize_consent()
         self._check_login_required()
 
-    def _perform_login(self, username, password):
-        if username.startswith('oauth'):
-            raise ExtractorError(
-                f'Login with OAuth is no longer supported. {self._youtube_login_hint}', expected=True)
-
-        self.report_warning(
-            f'Login with password is not supported for YouTube. {self._youtube_login_hint}')
-
     @property
     def _youtube_login_hint(self):
-        return (f'{self._login_hint(method="cookies")}. Also see '
-                'https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies '
-                'for tips on effectively exporting YouTube cookies')
+        return (f'{self._login_hint(method="cookies")}. '
+                'See https://github.com/yt-dlp/yt-dlp/wiki/Extractors#exporting-youtube-cookies for help with cookies')
 
     def _check_login_required(self):
         if self._LOGIN_REQUIRED and not self.is_authenticated:
@@ -863,7 +851,11 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
         for alert_type, alert_message in (warnings + errors[:-1]):
             self.report_warning(f'YouTube said: {alert_type} - {alert_message}', only_once=only_once)
         if errors:
-            raise ExtractorError(f'YouTube said: {errors[-1][1]}', expected=expected)
+            msg = errors[-1][1]
+            if msg and 'sign in' in msg.lower():
+                expected = True
+                msg += '\n' + self._youtube_login_hint
+            raise ExtractorError(f'YouTube said: {msg}', expected=expected)
 
     def _extract_and_report_alerts(self, data, *args, **kwargs):
         return self._report_alerts(self._extract_alerts(data), *args, **kwargs)
@@ -4462,9 +4454,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 self.raise_geo_restricted(subreason, countries, metadata_available=True)
             reason += f'. {subreason}'
         if reason:
-            if 'sign in' in reason.lower():
-                reason = remove_end(reason, 'This helps protect our community. Learn more')
-                reason = f'{remove_end(reason.strip(), ".")}. {self._youtube_login_hint}'
             self.raise_no_formats(reason, expected=True)
 
         keywords = get_first(video_details, 'keywords', expected_type=list) or []
@@ -6984,7 +6973,7 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
         raise ExtractorError('Unable to recognize tab page')
 
 
-class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
+class YoutubePlaylistIE(InfoExtractor):
     IE_DESC = 'YouTube playlists'
     _VALID_URL = r'''(?x)(?:
                         (?:https?://)?
@@ -7098,7 +7087,7 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
         return self.url_result(url, ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
-class YoutubeYtBeIE(YoutubeBaseInfoExtractor):
+class YoutubeYtBeIE(InfoExtractor):
     IE_DESC = 'youtu.be'
     _VALID_URL = rf'https?://youtu\.be/(?P<id>[0-9A-Za-z_-]{{11}})/*?.*?\blist=(?P<playlist_id>{YoutubeBaseInfoExtractor._PLAYLIST_ID_RE})'
     _TESTS = [{
@@ -7149,7 +7138,7 @@ class YoutubeYtBeIE(YoutubeBaseInfoExtractor):
         }), ie=YoutubeTabIE.ie_key(), video_id=playlist_id)
 
 
-class YoutubeLivestreamEmbedIE(YoutubeBaseInfoExtractor):
+class YoutubeLivestreamEmbedIE(InfoExtractor):
     IE_DESC = 'YouTube livestream embeds'
     _VALID_URL = r'https?://(?:\w+\.)?youtube\.com/embed/live_stream/?\?(?:[^#]+&)?channel=(?P<id>[^&#]+)'
     _TESTS = [{
@@ -7164,7 +7153,7 @@ class YoutubeLivestreamEmbedIE(YoutubeBaseInfoExtractor):
             ie=YoutubeTabIE.ie_key(), video_id=channel_id)
 
 
-class YoutubeYtUserIE(YoutubeBaseInfoExtractor):
+class YoutubeYtUserIE(InfoExtractor):
     IE_DESC = 'YouTube user videos; "ytuser:" prefix'
     IE_NAME = 'youtube:user'
     _VALID_URL = r'ytuser:(?P<id>.+)'
@@ -7451,7 +7440,7 @@ class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
         return self.playlist_result(self._search_results(query, params, default_client='web_music'), title, title)
 
 
-class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
+class YoutubeFeedsInfoExtractor(InfoExtractor):
     """
     Base class for feed extractors
     Subclasses must re-define the _FEED_NAME property.
@@ -7468,7 +7457,7 @@ class YoutubeFeedsInfoExtractor(YoutubeBaseInfoExtractor):
             f'https://www.youtube.com/feed/{self._FEED_NAME}', ie=YoutubeTabIE.ie_key())
 
 
-class YoutubeWatchLaterIE(YoutubeBaseInfoExtractor):
+class YoutubeWatchLaterIE(InfoExtractor):
     IE_NAME = 'youtube:watchlater'
     IE_DESC = 'Youtube watch later list; ":ytwatchlater" keyword (requires cookies)'
     _VALID_URL = r':ytwatchlater'
@@ -7522,7 +7511,7 @@ class YoutubeHistoryIE(YoutubeFeedsInfoExtractor):
     }]
 
 
-class YoutubeShortsAudioPivotIE(YoutubeBaseInfoExtractor):
+class YoutubeShortsAudioPivotIE(InfoExtractor):
     IE_DESC = 'YouTube Shorts audio pivot (Shorts using audio of a given video)'
     IE_NAME = 'youtube:shorts:pivot:audio'
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/source/(?P<id>[\w-]{11})/shorts'
@@ -7546,7 +7535,7 @@ class YoutubeShortsAudioPivotIE(YoutubeBaseInfoExtractor):
             ie=YoutubeTabIE)
 
 
-class YoutubeTruncatedURLIE(YoutubeBaseInfoExtractor):
+class YoutubeTruncatedURLIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_url'
     IE_DESC = False  # Do not list
     _VALID_URL = r'''(?x)
@@ -7705,7 +7694,7 @@ class YoutubeConsentRedirectIE(YoutubeBaseInfoExtractor):
         return self.url_result(redirect_url)
 
 
-class YoutubeTruncatedIDIE(YoutubeBaseInfoExtractor):
+class YoutubeTruncatedIDIE(InfoExtractor):
     IE_NAME = 'youtube:truncated_id'
     IE_DESC = False  # Do not list
     _VALID_URL = r'https?://(?:www\.)?youtube\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{1,10})$'
@@ -216,7 +216,7 @@ def partial_application(func):
     sig = inspect.signature(func)
     required_args = [
         param.name for param in sig.parameters.values()
-        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
+        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
         if param.default is inspect.Parameter.empty
     ]
 
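Note: the single changed line decides whether `*args`-style parameters count as required positionals. When `VAR_POSITIONAL` is included, a decorated varargs function called without positional arguments is partially applied instead of executed. A simplified stand-in (not yt-dlp's actual wrapper body) to show the effect:

    import functools
    import inspect

    def partial_application(func):  # simplified sketch, not the real implementation
        sig = inspect.signature(func)
        required_args = [
            param.name for param in sig.parameters.values()
            if param.kind in (inspect.Parameter.POSITIONAL_ONLY,
                              inspect.Parameter.POSITIONAL_OR_KEYWORD,
                              inspect.Parameter.VAR_POSITIONAL)  # the toggled line
            if param.default is inspect.Parameter.empty
        ]

        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            if len(args) < len(required_args):  # required positionals still missing
                return functools.partial(func, *args, **kwargs)
            return func(*args, **kwargs)

        return wrapped

    @partial_application
    def join(*values, delim='-'):
        return delim.join(str(v) for v in values if v is not None)

    assert callable(join(delim=', '))              # varargs counted -> partial
    assert join(1, None, 2, delim=', ') == '1, 2'  # positional args -> direct call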
@@ -4837,6 +4837,7 @@ def number_of_digits(number):
     return len('%d' % number)
 
 
+@partial_application
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
         values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
@@ -332,14 +332,14 @@ class _RequiredError(ExtractorError):
 
 
 @typing.overload
-def subs_list_to_dict(*, lang: str | None = 'und', ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
+def subs_list_to_dict(*, ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
 
 
 @typing.overload
-def subs_list_to_dict(subs: list[dict] | None, /, *, lang: str | None = 'und', ext: str | None = None) -> dict[str, list[dict]]: ...
+def subs_list_to_dict(subs: list[dict] | None, /, *, ext: str | None = None) -> dict[str, list[dict]]: ...
 
 
-def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None):
+def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
     """
     Convert subtitles from a traversal into a subtitle dict.
     The path should have an `all` immediately before this function.
@@ -352,7 +352,7 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None
     `quality`   The sort order for each subtitle
     """
     if subs is None:
-        return functools.partial(subs_list_to_dict, lang=lang, ext=ext)
+        return functools.partial(subs_list_to_dict, ext=ext)
 
     result = collections.defaultdict(list)
 
@@ -360,16 +360,10 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None
         if not url_or_none(sub.get('url')) and not sub.get('data'):
             continue
         sub_id = sub.pop('id', None)
-        if not isinstance(sub_id, str):
-            if not lang:
-                continue
-            sub_id = lang
-        sub_ext = sub.get('ext')
-        if not isinstance(sub_ext, str):
-            if not ext:
-                sub.pop('ext', None)
-            else:
-                sub['ext'] = ext
+        if sub_id is None:
+            continue
+        if ext is not None and not sub.get('ext'):
+            sub['ext'] = ext
         result[sub_id].append(sub)
     result = dict(result)
 
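Note: beyond the `lang` handling, the two bodies also differ in how `ext` is applied: the `-` side replaces any non-string `ext` with the provided default (or drops it), while the `+` side only fills `ext` in when the entry has none. A quick illustration, assuming the respective variant is installed:

    from yt_dlp.utils.traversal import subs_list_to_dict

    subs = [{'id': 'en', 'url': 'https://example.com/subs/en', 'ext': 4}]
    # "-" side: the non-string ext 4 is replaced, giving {'en': [{..., 'ext': 'vtt'}]}
    # "+" side: 'ext' is already set, so the 4 is left as-is
    print(subs_list_to_dict(subs, ext='vtt'))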
@@ -458,9 +452,9 @@ def trim_str(*, start=None, end=None):
     return trim
 
 
-def unpack(func, **kwargs):
+def unpack(func):
     @functools.wraps(func)
-    def inner(items):
+    def inner(items, **kwargs):
         return func(*items, **kwargs)
 
     return inner
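Note: the two `unpack` variants accept extra keyword arguments in different places: on the `-` side they are bound when `unpack` itself is called, on the `+` side they are passed to the returned wrapper at call time (which is why the corresponding test pre-binds with `join_nonempty(delim=' ')` instead). Both variants, copied from the hunk and renamed so they can coexist in one sketch:

    import functools

    def unpack_minus(func, **kwargs):  # "-" side
        @functools.wraps(func)
        def inner(items):
            return func(*items, **kwargs)
        return inner

    def unpack_plus(func):             # "+" side
        @functools.wraps(func)
        def inner(items, **kwargs):
            return func(*items, **kwargs)
        return inner

    def cat(*values, sep=''):
        return sep.join(map(str, values))

    assert unpack_minus(cat, sep=' ')([1, 2, 3]) == '1 2 3'
    assert unpack_plus(cat)([1, 2, 3], sep=' ') == '1 2 3'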