mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-26 01:01:25 +01:00)

Compare commits: 731870007b ... fb79dbeef2 (6 commits)
Commits in this range (SHA1):
  fb79dbeef2
  c699bafc50
  eb64ae7d5d
  c014fbcddc
  39d79c9b9c
  637ccf3523
test/test_traversal.py

@@ -481,7 +481,7 @@ class TestTraversalHelpers:
             'id': 'name',
             'data': 'content',
             'url': 'url',
-        }, all, {subs_list_to_dict}]) == {
+        }, all, {subs_list_to_dict(lang=None)}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass'}],
             'en': [{'data': 'content'}],
         }, 'subs with mandatory items missing should be filtered'
@@ -507,6 +507,54 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
             {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
         ]}, '`quality` key should sort subtitle list accordingly'
+        assert traverse_obj([
+            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+            {'name': 'de'},
+            {'name': 'en', 'content': 'content'},
+            {'url': 'https://example.com/subs/en'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'data': 'content',
+        }, all, {subs_list_to_dict(lang='en')}]) == {
+            'de': [{'url': 'https://example.com/subs/de.ass'}],
+            'en': [
+                {'data': 'content'},
+                {'url': 'https://example.com/subs/en'},
+            ],
+        }, 'optionally provided lang should be used if no id available'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang=None)}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be ignored for id and ext'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang='de')}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de1'},
+                {'url': 'https://example.com/subs/de2'},
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be replaced by default id'
 
     def test_trim_str(self):
         with pytest.raises(TypeError):
@@ -525,7 +573,7 @@ class TestTraversalHelpers:
     def test_unpack(self):
         assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
         assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
-        assert unpack(join_nonempty(delim=' '))([1, 2, 3]) == '1 2 3'
+        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
         with pytest.raises(TypeError):
            unpack(join_nonempty)()
        with pytest.raises(TypeError):
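Note on the new `lang` parameter: it supplies a fallback subtitle key when an entry has no usable string `id`, while `lang=None` filters such entries out. A minimal extractor-style sketch of the same traversal pattern the tests above use (the input data and field names are invented for illustration):

from yt_dlp.utils.traversal import subs_list_to_dict, traverse_obj

api_subs = [
    {'lang': 'en', 'src': 'https://example.com/subs/en.vtt'},
    {'src': 'https://example.com/subs/unknown.vtt'},  # no language given
]
subtitles = traverse_obj(api_subs, (..., {
    'id': 'lang',
    'url': 'src',
}, all, {subs_list_to_dict(lang='en', ext='vtt')}))
# -> {'en': [{'url': '.../en.vtt', 'ext': 'vtt'}, {'url': '.../unknown.vtt', 'ext': 'vtt'}]}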
test/test_utils.py

@@ -72,7 +72,6 @@ from yt_dlp.utils import (
     intlist_to_bytes,
     iri_to_uri,
     is_html,
-    join_nonempty,
     js_to_json,
     limit_length,
     locked_file,
@@ -2158,10 +2157,6 @@ Line 1
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
         assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
 
-        assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
-        assert callable(join_nonempty()), 'varargs positional should apply partially'
-        assert join_nonempty(None, delim=', ') == '', 'passed varargs should call the function'
-
 
 if __name__ == '__main__':
     unittest.main()
|
yt_dlp/YoutubeDL.py

@@ -4381,7 +4381,9 @@ class YoutubeDL:
             return None
 
         for idx, t in list(enumerate(thumbnails))[::-1]:
-            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
+            thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
+            if multiple:
+                thumb_ext = f'{t["id"]}.{thumb_ext}'
             thumb_display_id = f'{label} thumbnail {t["id"]}'
             thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
             thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
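Background for the `ext` handling here and in the extractor/docs changes below: `determine_ext()` only inspects the URL path, so a thumbnail served from a script endpoint used to be written with a bogus extension (see the afreecatv comment referencing issue 11537). A small sketch, with a made-up URL, of the fallback the loop above now applies:

from yt_dlp.utils import determine_ext

determine_ext('https://example.com/thumb/image.php', 'jpg')  # -> 'php' (guessed from the URL path)

# simplified model of the loop above: an explicit 'ext' wins over the guess
t = {'url': 'https://example.com/thumb/image.php', 'ext': 'jpg'}
thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')    # -> 'jpg'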
yt_dlp/extractor/afreecatv.py

@@ -66,6 +66,14 @@ class AfreecaTVBaseIE(InfoExtractor):
                 extensions={'legacy_ssl': True}), display_id,
             'Downloading API JSON', 'Unable to download API JSON')
 
+    @staticmethod
+    def _fixup_thumb(thumb_url):
+        if not url_or_none(thumb_url):
+            return None
+        # Core would determine_ext as 'php' from the url, so we need to provide the real ext
+        # See: https://github.com/yt-dlp/yt-dlp/issues/11537
+        return [{'url': thumb_url, 'ext': 'jpg'}]
+
 
 class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'soop'
@@ -155,7 +163,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
             'duration': ('total_file_duration', {int_or_none(scale=1000)}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
         })
 
         entries = []
@@ -226,8 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
 
         return self.playlist_result(self._entries(data), video_id)
 
-    @staticmethod
-    def _entries(data):
+    def _entries(self, data):
         # 'files' is always a list with 1 element
         yield from traverse_obj(data, (
             'data', lambda _, v: v['story_type'] == 'catch',
@@ -238,7 +245,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
             'title': ('title', {str}),
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('writer_id', {str}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
             'timestamp': ('write_timestamp', {int_or_none}),
         }))
 
yt_dlp/extractor/common.py

@@ -279,6 +279,7 @@ class InfoExtractor:
     thumbnails:     A list of dictionaries, with the following entries:
                         * "id" (optional, string) - Thumbnail format ID
                         * "url"
+                        * "ext" (optional, string) - actual image extension if not given in URL
                         * "preference" (optional, int) - quality of the image
                         * "width" (optional, int)
                         * "height" (optional, int)
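Illustration of the new field from the extractor side, a hypothetical `info_dict` fragment (URL invented); core now prefers the declared `ext` over the extension guessed from the URL:

thumbnail_info = {
    'thumbnails': [{
        'id': '0',
        'url': 'https://example.com/thumbnail.php?no=12345',
        'ext': 'jpg',  # actual image type, since the URL alone suggests 'php'
    }],
}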
yt_dlp/extractor/manyvids.py

@@ -1,21 +1,17 @@
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    extract_attributes,
-    int_or_none,
-    str_to_int,
-    url_or_none,
-    urlencode_postdata,
-)
+from .. import traverse_obj
+from ..utils import determine_ext, int_or_none, parse_count, parse_duration, parse_iso8601, url_or_none
 
 
 class ManyVidsIE(InfoExtractor):
-    _WORKING = False
+    _WORKING = True
     _VALID_URL = r'(?i)https?://(?:www\.)?manyvids\.com/video/(?P<id>\d+)'
-    _TESTS = [{
-        # preview video
-        'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/',
-        'md5': '03f11bb21c52dd12a05be21a5c7dcc97',
-        'info_dict': {
+    _TESTS = [
+        {
+            # Dead preview video
+            'skip': True,
+            'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/',
+            'md5': '03f11bb21c52dd12a05be21a5c7dcc97',
+            'info_dict': {
@@ -26,7 +22,27 @@ class ManyVidsIE(InfoExtractor):
                 'view_count': int,
                 'like_count': int,
             },
-    }, {
+        },
+        {
+            # preview video
+            'url': 'https://www.manyvids.com/Video/530341/mv-tips-tricks',
+            'md5': '738dc723f7735ee9602f7ea352a6d058',
+            'info_dict': {
+                'id': '530341',
+                'ext': 'mp4',
+                'title': 'MV Tips & Tricks (Preview)',
+                'description': 'md5:c3bae98c0f9453237c28b0f8795d9f83',
+                'thumbnail': 'https://cdn5.manyvids.com/php_uploads/video_images/DestinyDiaz/thumbs/thumb_Hs26ATOO7fcZaI9sx3XT_screenshot_001.jpg',
+                'uploader': 'DestinyDiaz',
+                'view_count': int,
+                'like_count': int,
+                'release_timestamp': 1508419904,
+                'tags': ['AdultSchool', 'BBW', 'SFW', 'TeacherFetish'],
+                'release_date': '20171019',
+                'duration': 3167.0,
+            },
+        },
+        {
             # full video
             'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/',
             'md5': 'bb47bab0e0802c2a60c24ef079dfe60f',
@@ -35,79 +51,50 @@ class ManyVidsIE(InfoExtractor):
                 'ext': 'mp4',
                 'title': 'MY FACE REVEAL',
                 'description': 'md5:ec5901d41808b3746fed90face161612',
+                'thumbnail': 'https://ods.manyvids.com/1001061960/3aa5397f2a723ec4597e344df66ab845/screenshots/thumbs/custom_1_180_5be09c1dcce03.jpg',
                 'uploader': 'Sarah Calanthe',
                 'view_count': int,
                 'like_count': int,
+                'release_date': '20181110',
+                'tags': ['EyeContact', 'Interviews', 'MaskFetish', 'MouthFetish', 'Redhead'],
+                'release_timestamp': 1541851200,
+                'duration': 224.0,
             },
-    }]
+        },
+    ]
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
 
-        real_url = f'https://www.manyvids.com/video/{video_id}/gtm.js'
-        try:
-            webpage = self._download_webpage(real_url, video_id)
-        except Exception:
-            # probably useless fallback
-            webpage = self._download_webpage(url, video_id)
-
-        info = self._search_regex(
-            r'''(<div\b[^>]*\bid\s*=\s*(['"])pageMetaDetails\2[^>]*>)''',
-            webpage, 'meta details', default='')
-        info = extract_attributes(info)
-
-        player = self._search_regex(
-            r'''(<div\b[^>]*\bid\s*=\s*(['"])rmpPlayerStream\2[^>]*>)''',
-            webpage, 'player details', default='')
-        player = extract_attributes(player)
+        info = traverse_obj(
+            self._download_json(f'https://www.manyvids.com/bff/store/video/{video_id}', video_id),
+            ('data', {dict})) or {}
+
+        video_urls = traverse_obj(
+            self._download_json(f'https://www.manyvids.com/bff/store/video/{video_id}/private', video_id),
+            ('data', {dict})) or {}
 
         video_urls_and_ids = (
-            (info.get('data-meta-video'), 'video'),
-            (player.get('data-video-transcoded'), 'transcoded'),
-            (player.get('data-video-filepath'), 'filepath'),
-            (self._og_search_video_url(webpage, secure=False, default=None), 'og_video'),
+            (traverse_obj(video_urls, ('teaser', 'filepath')), 'preview'),
+            (video_urls.get('transcodedFilepath'), 'transcoded'),
+            (video_urls.get('filepath'), 'filepath'),
         )
 
-        def txt_or_none(s, default=None):
-            return (s.strip() or default) if isinstance(s, str) else default
+        title = info.get('title')
+        uploader = traverse_obj(info, ('model', 'displayName'))
+        description = info.get('description')
+        likes = parse_count(info.get('likes'))
+        views = parse_count(info.get('views'))
+        thumbnail = url_or_none(info.get('screenshot')) or url_or_none(info.get('thumbnail'))
+        release_timestamp = parse_iso8601(info.get('launchDate'))
+        duration = parse_duration(info.get('videoDuration'))
+        tags = [t.get('label') for t in info.get('tagList')]
 
-        uploader = txt_or_none(info.get('data-meta-author'))
-
-        def mung_title(s):
-            if uploader:
-                s = re.sub(rf'^\s*{re.escape(uploader)}\s+[|-]', '', s)
-            return txt_or_none(s)
-
-        title = (
-            mung_title(info.get('data-meta-title'))
-            or self._html_search_regex(
-                (r'<span[^>]+class=["\']item-title[^>]+>([^<]+)',
-                 r'<h2[^>]+class=["\']h2 m-0["\'][^>]*>([^<]+)'),
-                webpage, 'title', default=None)
-            or self._html_search_meta(
-                'twitter:title', webpage, 'title', fatal=True))
-
-        title = re.sub(r'\s*[|-]\s+ManyVids\s*$', '', title) or title
-
-        if any(p in webpage for p in ('preview_videos', '_preview.mp4')):
+        # If the video formats JSON only contains a teaser object, then it is a preview
+        if video_urls.get('teaser') and not video_urls.get('filepath'):
             title += ' (Preview)'
-
-        mv_token = self._search_regex(
-            r'data-mvtoken=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
-            'mv token', default=None, group='value')
-
-        if mv_token:
-            # Sets some cookies
-            self._download_webpage(
-                'https://www.manyvids.com/includes/ajax_repository/you_had_me_at_hello.php',
-                video_id, note='Setting format cookies', fatal=False,
-                data=urlencode_postdata({
-                    'mvtoken': mv_token,
-                    'vid': video_id,
-                }), headers={
-                    'Referer': url,
-                    'X-Requested-With': 'XMLHttpRequest',
-                })
+            self.report_warning(
+                f'Only extracting preview. Video may be paid or subscription only. {self._login_hint()}')
 
         formats = []
         for v_url, fmt in video_urls_and_ids:
@@ -130,33 +117,21 @@ class ManyVidsIE(InfoExtractor):
             if f.get('height') is None:
                 f['height'] = int_or_none(
                     self._search_regex(r'_(\d{2,3}[02468])_', f['url'], 'video height', default=None))
-            if '/preview/' in f['url']:
-                f['format_id'] = '_'.join(filter(None, (f.get('format_id'), 'preview')))
+            if 'preview' in f['format_id']:
                 f['preference'] = -10
             if 'transcoded' in f['format_id']:
                 f['preference'] = f.get('preference', -1) - 1
 
-        def get_likes():
-            likes = self._search_regex(
-                rf'''(<a\b[^>]*\bdata-id\s*=\s*(['"]){video_id}\2[^>]*>)''',
-                webpage, 'likes', default='')
-            likes = extract_attributes(likes)
-            return int_or_none(likes.get('data-likes'))
-
-        def get_views():
-            return str_to_int(self._html_search_regex(
-                r'''(?s)<span\b[^>]*\bclass\s*=["']views-wrapper\b[^>]+>.+?<span\b[^>]+>\s*(\d[\d,.]*)\s*</span>''',
-                webpage, 'view count', default=None))
-
         return {
             'id': video_id,
             'title': title,
             'formats': formats,
-            'description': txt_or_none(info.get('data-meta-description')),
-            'uploader': txt_or_none(info.get('data-meta-author')),
-            'thumbnail': (
-                url_or_none(info.get('data-meta-image'))
-                or url_or_none(player.get('data-video-screenshot'))),
-            'view_count': get_views(),
-            'like_count': get_likes(),
+            'description': description,
+            'uploader': uploader,
+            'thumbnail': thumbnail,
+            'view_count': views,
+            'like_count': likes,
+            'release_timestamp': release_timestamp,
+            'duration': duration,
+            'tags': tags,
         }
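Summary sketch of the reworked ManyVids flow (endpoint paths are taken from the hunk above; everything else here is an illustrative model, not the extractor code itself):

video_id = '12345'  # hypothetical

# metadata: title, description, model.displayName, likes, views, launchDate, videoDuration, tagList
metadata_url = f'https://www.manyvids.com/bff/store/video/{video_id}'
# stream info: teaser.filepath, transcodedFilepath, filepath
stream_url = f'https://www.manyvids.com/bff/store/video/{video_id}/private'

def is_preview_only(video_urls: dict) -> bool:
    # only a teaser is exposed when the full video is not accessible
    return bool(video_urls.get('teaser')) and not video_urls.get('filepath')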
yt_dlp/utils/_utils.py

@@ -216,7 +216,7 @@ def partial_application(func):
     sig = inspect.signature(func)
     required_args = [
         param.name for param in sig.parameters.values()
-        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
+        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
         if param.default is inspect.Parameter.empty
     ]
 
@@ -4837,7 +4837,6 @@ def number_of_digits(number):
     return len('%d' % number)
 
 
-@partial_application
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
         values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
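Net effect of dropping `@partial_application` from `join_nonempty` (and of no longer counting `VAR_POSITIONAL` parameters in `partial_application`): calling it with only keyword arguments now simply joins an empty value list instead of returning a partially applied callable. A small sketch of the expected behaviour, matching the removed test_utils asserts:

from yt_dlp.utils import join_nonempty

join_nonempty('a', None, 'b')        # -> 'a-b'
join_nonempty('a', 'b', delim=', ')  # -> 'a, b'
join_nonempty(delim=', ')            # -> '' (no longer a partially applied callable)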
yt_dlp/utils/traversal.py

@@ -332,14 +332,14 @@ class _RequiredError(ExtractorError):
 
 
 @typing.overload
-def subs_list_to_dict(*, ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
+def subs_list_to_dict(*, lang: str | None = 'und', ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
 
 
 @typing.overload
-def subs_list_to_dict(subs: list[dict] | None, /, *, ext: str | None = None) -> dict[str, list[dict]]: ...
+def subs_list_to_dict(subs: list[dict] | None, /, *, lang: str | None = 'und', ext: str | None = None) -> dict[str, list[dict]]: ...
 
 
-def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
+def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None):
     """
     Convert subtitles from a traversal into a subtitle dict.
     The path should have an `all` immediately before this function.
@@ -352,7 +352,7 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
     `quality`   The sort order for each subtitle
     """
     if subs is None:
-        return functools.partial(subs_list_to_dict, ext=ext)
+        return functools.partial(subs_list_to_dict, lang=lang, ext=ext)
 
     result = collections.defaultdict(list)
 
@@ -360,9 +360,15 @@
         if not url_or_none(sub.get('url')) and not sub.get('data'):
             continue
         sub_id = sub.pop('id', None)
-        if sub_id is None:
+        if not isinstance(sub_id, str):
+            if not lang:
                 continue
-        if ext is not None and not sub.get('ext'):
+            sub_id = lang
+        sub_ext = sub.get('ext')
+        if not isinstance(sub_ext, str):
+            if not ext:
+                sub.pop('ext', None)
+            else:
                 sub['ext'] = ext
         result[sub_id].append(sub)
     result = dict(result)
@@ -452,9 +458,9 @@ def trim_str(*, start=None, end=None):
     return trim
 
 
-def unpack(func):
+def unpack(func, **kwargs):
     @functools.wraps(func)
-    def inner(items, **kwargs):
+    def inner(items):
         return func(*items, **kwargs)
 
     return inner
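With `unpack(func, **kwargs)`, keyword arguments are bound when the wrapper is created rather than passed through the inner call, which is what the updated `test_unpack` above exercises. A short usage sketch (import paths as used by the test modules):

from yt_dlp.utils import join_nonempty
from yt_dlp.utils.traversal import unpack

unpack(join_nonempty)([1, 2, 3])             # -> '1-2-3'
unpack(join_nonempty, delim=' ')([1, 2, 3])  # -> '1 2 3'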