Compare commits

...

9 Commits

Author SHA1 Message Date

Michael Skyba
2afd1b3742  Merge 2be0c24897 into c699bafc50
2024-11-15 16:51:58 -06:00

bashonly
c699bafc50  [ie/soop] Fix thumbnail extraction (#11545)
Closes #11537
Authored by: bashonly
2024-11-15 22:51:55 +00:00

bashonly
eb64ae7d5d  [ie] Allow ext override for thumbnails (#11545)
Authored by: bashonly
2024-11-15 22:51:55 +00:00

Simon Sawicki
c014fbcddc  [utils] subs_list_to_dict: Add lang default parameter (#11508)
Authored by: Grub4K
2024-11-15 23:25:52 +01:00

Simon Sawicki
39d79c9b9c  [utils] Fix join_nonempty, add **kwargs to unpack (#11559)
Authored by: Grub4K
2024-11-15 22:06:15 +01:00

Michael Skyba
2be0c24897  [ie/suno] add fallbacks for basic page metadata
2024-11-04 23:54:46 -05:00

Michael Skyba
10a1a93352  [ie/suno] use regexes for thumbnail match tests
2024-11-04 23:54:45 -05:00

Michael Skyba
6ec19e942d  [ie/suno] add playlist extractor
2024-11-04 23:54:43 -05:00

Michael Skyba
3c59d3e7a2  [ie/suno] add /song mp3 extractor
2024-11-04 20:18:18 -05:00
9 changed files with 235 additions and 24 deletions

test/test_traversal.py

@@ -481,7 +481,7 @@ class TestTraversalHelpers:
             'id': 'name',
             'data': 'content',
             'url': 'url',
-        }, all, {subs_list_to_dict}]) == {
+        }, all, {subs_list_to_dict(lang=None)}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass'}],
             'en': [{'data': 'content'}],
         }, 'subs with mandatory items missing should be filtered'
@@ -507,6 +507,54 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
             {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
         ]}, '`quality` key should sort subtitle list accordingly'
+        assert traverse_obj([
+            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+            {'name': 'de'},
+            {'name': 'en', 'content': 'content'},
+            {'url': 'https://example.com/subs/en'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'data': 'content',
+        }, all, {subs_list_to_dict(lang='en')}]) == {
+            'de': [{'url': 'https://example.com/subs/de.ass'}],
+            'en': [
+                {'data': 'content'},
+                {'url': 'https://example.com/subs/en'},
+            ],
+        }, 'optionally provided lang should be used if no id available'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang=None)}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be ignored for id and ext'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang='de')}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de1'},
+                {'url': 'https://example.com/subs/de2'},
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be replaced by default id'

     def test_trim_str(self):
         with pytest.raises(TypeError):
@@ -525,7 +573,7 @@ class TestTraversalHelpers:
     def test_unpack(self):
         assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
         assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
-        assert unpack(join_nonempty(delim=' '))([1, 2, 3]) == '1 2 3'
+        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
         with pytest.raises(TypeError):
             unpack(join_nonempty)()
         with pytest.raises(TypeError):

test/test_utils.py

@@ -72,7 +72,6 @@ from yt_dlp.utils import (
     intlist_to_bytes,
     iri_to_uri,
     is_html,
-    join_nonempty,
     js_to_json,
     limit_length,
     locked_file,
@@ -2158,10 +2157,6 @@ Line 1
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
         assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'

-        assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
-        assert callable(join_nonempty()), 'varargs positional should apply partially'
-        assert join_nonempty(None, delim=', ') == '', 'passed varargs should call the function'
-

 if __name__ == '__main__':
     unittest.main()

yt_dlp/YoutubeDL.py

@@ -4381,7 +4381,9 @@ class YoutubeDL:
             return None

         for idx, t in list(enumerate(thumbnails))[::-1]:
-            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
+            thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
+            if multiple:
+                thumb_ext = f'{t["id"]}.{thumb_ext}'
             thumb_display_id = f'{label} thumbnail {t["id"]}'
             thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
             thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
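
Not from the patch: a minimal sketch of the precedence this hunk establishes, using an invented thumbnail entry. An extractor-supplied `ext` now wins, and the extension guessed from the URL is only a fallback.

    from yt_dlp.utils import determine_ext

    # Hypothetical thumbnail entry, as an extractor could now return it
    t = {'id': '0', 'url': 'https://example.com/thumb.php', 'ext': 'jpg'}

    # Guessing from the URL alone (the old behaviour) yields the wrong extension
    assert determine_ext(t['url'], 'jpg') == 'php'

    # The new logic prefers the declared 'ext' and only falls back to the URL guess
    thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
    assert thumb_ext == 'jpg'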

yt_dlp/extractor/_extractors.py

@@ -1983,6 +1983,7 @@ from .stretchinternet import StretchInternetIE
 from .stripchat import StripchatIE
 from .stv import STVPlayerIE
 from .substack import SubstackIE
+from .suno import SunoIE, SunoPlaylistIE
 from .sunporno import SunPornoIE
 from .sverigesradio import (
     SverigesRadioEpisodeIE,

yt_dlp/extractor/afreecatv.py

@@ -66,6 +66,14 @@ class AfreecaTVBaseIE(InfoExtractor):
                 extensions={'legacy_ssl': True}), display_id,
             'Downloading API JSON', 'Unable to download API JSON')

+    @staticmethod
+    def _fixup_thumb(thumb_url):
+        if not url_or_none(thumb_url):
+            return None
+        # Core would determine_ext as 'php' from the url, so we need to provide the real ext
+        # See: https://github.com/yt-dlp/yt-dlp/issues/11537
+        return [{'url': thumb_url, 'ext': 'jpg'}]
+

 class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'soop'
@@ -155,7 +163,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
             'duration': ('total_file_duration', {int_or_none(scale=1000)}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
         })

         entries = []
@@ -226,8 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
         return self.playlist_result(self._entries(data), video_id)

-    @staticmethod
-    def _entries(data):
+    def _entries(self, data):
         # 'files' is always a list with 1 element
         yield from traverse_obj(data, (
             'data', lambda _, v: v['story_type'] == 'catch',
@@ -238,7 +245,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
                 'title': ('title', {str}),
                 'uploader': ('writer_nick', {str}),
                 'uploader_id': ('writer_id', {str}),
-                'thumbnail': ('thumb', {url_or_none}),
+                'thumbnails': ('thumb', {self._fixup_thumb}),
                 'timestamp': ('write_timestamp', {int_or_none}),
             }))
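
A quick check of the new helper's contract, outside the patch and with invented URLs: anything that is not a valid URL is dropped, and a valid URL comes back as a one-element thumbnail list that declares `jpg` explicitly, so the core no longer infers `php` from the URL.

    from yt_dlp.extractor.afreecatv import AfreecaTVBaseIE

    assert AfreecaTVBaseIE._fixup_thumb(None) is None
    assert AfreecaTVBaseIE._fixup_thumb('not a url') is None
    assert AfreecaTVBaseIE._fixup_thumb('https://example.com/thumb.php') == [
        {'url': 'https://example.com/thumb.php', 'ext': 'jpg'},
    ]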

yt_dlp/extractor/common.py

@@ -279,6 +279,7 @@ class InfoExtractor:
     thumbnails:     A list of dictionaries, with the following entries:
                         * "id" (optional, string) - Thumbnail format ID
                         * "url"
+                        * "ext" (optional, string) - actual image extension if not given in URL
                         * "preference" (optional, int) - quality of the image
                         * "width" (optional, int)
                         * "height" (optional, int)
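
For extractor authors, the new field lets a thumbnail entry spell out its real image type when the URL is misleading. A hedged sketch with invented values, not taken from any extractor:

    # Hypothetical info dict fragment using the new optional "ext" field
    info_dict = {
        'id': 'example',
        'title': 'Example video',
        'thumbnails': [{
            'id': '0',
            'url': 'https://example.com/cover.php',  # URL suggests .php ...
            'ext': 'jpg',                            # ... but the payload is really a JPEG
            'preference': 1,
        }],
    }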

yt_dlp/extractor/suno.py (new file, 152 additions)

@@ -0,0 +1,152 @@
import re

from .common import InfoExtractor
from ..utils import ExtractorError, unescapeHTML


class SunoBaseIE(InfoExtractor):
    def _get_title(self, webpage):
        return self._html_search_meta(
            ['og:title', 'twitter:title'], webpage, 'title',
            default=None) or self._html_extract_title(webpage)

    def _get_description(self, webpage):
        return self._html_search_meta(
            ['og:description', 'description', 'twitter:description'],
            webpage, 'description', default=None)

    def _get_thumbnail(self, webpage):
        return self._html_search_meta(
            ['og:image', 'twitter:image'], webpage, 'thumbnail', default=None)


class SunoIE(SunoBaseIE):
    _VALID_URL = r'https?://(?:www\.)?suno\.com/song/(?P<id>[-a-f0-9]+)'
    _TESTS = [
        {
            'url': 'https://suno.com/song/ab39a04d-b2e6-463b-9b8e-ddea725422f5',
            'md5': 'ef850763b175d8a3c7fba5e2dbdc6bc5',
            'info_dict': {
                'id': 'ab39a04d-b2e6-463b-9b8e-ddea725422f5',
                'title': 'Life\'s a Soundtrack · AI Funk Factory @ YT by @funk | Suno',
                'description': 'groovy funk, melodic song. Listen and make your own with Suno.',
                'thumbnail': r're:https?://.*903f2bd7-ccc0-4029-a76a-887f07ebc2df.*\.jpeg$',
                'ext': 'mp3',
            },
        },
        {
            'url': 'https://suno.com/song/9cbcb5f4-f367-4f1c-8a32-23ec62bdc47e',
            'md5': '2f038badef88d189891d5f8cd8d8804d',
            'info_dict': {
                'id': '9cbcb5f4-f367-4f1c-8a32-23ec62bdc47e',
                'title': 'Pequenos Prazeres da Vida by @groovebot | Suno',
                'description': 'pop bossa nova song. Listen and make your own with Suno.',
                'thumbnail': r're:https?://.*9cbcb5f4-f367-4f1c-8a32-23ec62bdc47e.*\.jpeg$',
                'ext': 'mp3',
            },
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        url = self._html_search_meta(
            ['og:audio', 'twitter:player:stream'], webpage, 'url', default=None)

        return {
            'id': video_id,
            'title': self._get_title(webpage),
            'description': self._get_description(webpage),
            'thumbnail': self._get_thumbnail(webpage),
            'url': url,
        }


class SunoPlaylistIE(SunoBaseIE):
    _VALID_URL = r'https?://(?:www\.)?suno\.com/playlist/(?P<id>[-a-f0-9]+)'
    _TESTS = [
        {
            'url': 'https://suno.com/playlist/01f2ac32-c32e-4d26-b10c-221107c02946',
            'info_dict': {
                'id': '01f2ac32-c32e-4d26-b10c-221107c02946',
                'title': 'Main 0 by @contemplativetranspositions367 | Suno',
                'description': 'Hopefully the test case passed',
                'thumbnail': r're:https?://.*19d6d518-1b87-43b3-90b9-2a476ca5824a.*\.jpeg$',
            },
            'playlist': [{
                'info_dict': {
                    'id': '19d6d518-1b87-43b3-90b9-2a476ca5824a',
                    'title': 'Ceaseless <Echoes>',
                    'ext': 'mp3',
                },
            }],
            'playlist_count': 1,
        },
        {
            'url': 'https://www.suno.com/playlist/568eeaab-dfbf-4da6-aa0a-0fb1a32330de',
            'info_dict': {
                'id': '568eeaab-dfbf-4da6-aa0a-0fb1a32330de',
                'title': 'Piano by @kunal | Suno',
                'description': 'Here are some good piano',
                'thumbnail': r're:https?://.*0ecc0956-3b17-4d4b-8504-55849dd75e22.*\.jpeg$',
            },
            'playlist': [
                {
                    'info_dict': {
                        'id': '0ecc0956-3b17-4d4b-8504-55849dd75e22',
                        'title': 'ST',
                        'ext': 'mp3',
                    },
                },
                {
                    'info_dict': {
                        'id': '3fef7d44-c5a3-4181-9de3-d81542af23ef',
                        'title': 'ST',
                        'ext': 'mp3',
                    },
                },
                {
                    'info_dict': {
                        'id': '15e797fa-06c0-4e11-8cc0-3b2580476039',
                        'title': 'ST - 2',
                        'ext': 'mp3',
                    },
                },
            ],
            'playlist_count': 3,
        },
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        # There are <a>s whose href is a song/ID path. The <span>s directly
        # within them have the respective song title as their innerHTML.
        # Alternatively, this info can be extracted through parsing an escaped
        # JSON object inside a <script> array, though that seems even less stable
        # than this HTML.
        songs_regex = r'/song/(?P<id>[-a-f0-9]+)["\'][^>]*>\s*<span[^>]*>\s*(?P<title>[^<]+)</span>'
        songs = re.findall(songs_regex, webpage)

        og_audio_regex = self._og_regexes('audio')[0]
        audio_urls = [matches[0] for matches in re.findall(og_audio_regex, webpage)]

        if len(songs) != len(audio_urls):
            raise ExtractorError('Unexpected mismatch between song HTML list and og audio URLs')

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': self._get_title(webpage),
            'description': self._get_description(webpage),
            'thumbnail': self._get_thumbnail(webpage),
            'entries': [{
                'id': song_tuple[0],
                'title': unescapeHTML(song_tuple[1]),
                'url': audio_urls[i],
            } for i, song_tuple in enumerate(songs)],
        }
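
To make the comment about the expected markup concrete, here is a self-contained check of `songs_regex` against an invented HTML fragment shaped like the structure described above; `unescapeHTML` then restores the literal title seen in the first playlist test.

    import re

    songs_regex = r'/song/(?P<id>[-a-f0-9]+)["\'][^>]*>\s*<span[^>]*>\s*(?P<title>[^<]+)</span>'

    # Invented fragment mirroring the <a href="/song/ID"><span>TITLE</span></a> shape
    fragment = (
        '<a href="/song/19d6d518-1b87-43b3-90b9-2a476ca5824a" class="card">'
        '<span class="title">Ceaseless &lt;Echoes&gt;</span></a>'
    )

    assert re.findall(songs_regex, fragment) == [
        ('19d6d518-1b87-43b3-90b9-2a476ca5824a', 'Ceaseless &lt;Echoes&gt;'),
    ]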

yt_dlp/utils/_utils.py

@@ -216,7 +216,7 @@ def partial_application(func):
     sig = inspect.signature(func)
     required_args = [
         param.name for param in sig.parameters.values()
-        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
+        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
         if param.default is inspect.Parameter.empty
     ]
@@ -4837,7 +4837,6 @@ def number_of_digits(number):
     return len('%d' % number)


-@partial_application
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
         values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
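
Since `join_nonempty` no longer goes through `partial_application` (and `*args` parameters no longer count as required for partial application), a call that passes only keyword arguments now runs immediately instead of returning a callable. A short sketch of the intended behaviour:

    from yt_dlp.utils import join_nonempty

    # Normal use is unchanged
    assert join_nonempty('a', None, 'b') == 'a-b'
    assert join_nonempty(1, 2, 3, delim=' ') == '1 2 3'

    # Previously join_nonempty(delim=', ') returned a partially applied callable;
    # now an empty varargs call simply joins nothing
    assert join_nonempty(delim=', ') == ''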

yt_dlp/utils/traversal.py

@@ -332,14 +332,14 @@ class _RequiredError(ExtractorError):

 @typing.overload
-def subs_list_to_dict(*, ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
+def subs_list_to_dict(*, lang: str | None = 'und', ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...


 @typing.overload
-def subs_list_to_dict(subs: list[dict] | None, /, *, ext: str | None = None) -> dict[str, list[dict]]: ...
+def subs_list_to_dict(subs: list[dict] | None, /, *, lang: str | None = 'und', ext: str | None = None) -> dict[str, list[dict]]: ...


-def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
+def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None):
     """
     Convert subtitles from a traversal into a subtitle dict.
     The path should have an `all` immediately before this function.
@@ -352,7 +352,7 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
     `quality`   The sort order for each subtitle
     """
     if subs is None:
-        return functools.partial(subs_list_to_dict, ext=ext)
+        return functools.partial(subs_list_to_dict, lang=lang, ext=ext)

     result = collections.defaultdict(list)

@@ -360,9 +360,15 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
         if not url_or_none(sub.get('url')) and not sub.get('data'):
             continue
         sub_id = sub.pop('id', None)
-        if sub_id is None:
-            continue
-        if ext is not None and not sub.get('ext'):
-            sub['ext'] = ext
+        if not isinstance(sub_id, str):
+            if not lang:
+                continue
+            sub_id = lang
+        sub_ext = sub.get('ext')
+        if not isinstance(sub_ext, str):
+            if not ext:
+                sub.pop('ext', None)
+            else:
+                sub['ext'] = ext
         result[sub_id].append(sub)
     result = dict(result)
@@ -452,9 +458,9 @@ def trim_str(*, start=None, end=None):
     return trim


-def unpack(func):
+def unpack(func, **kwargs):
     @functools.wraps(func)
-    def inner(items, **kwargs):
+    def inner(items):
         return func(*items, **kwargs)
     return inner
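
Taken together with the tests earlier in this compare, the two helpers now behave roughly as follows. This is a sketch with invented subtitle data, not part of the patch:

    from yt_dlp.utils import join_nonempty
    from yt_dlp.utils.traversal import subs_list_to_dict, unpack

    # `lang` supplies a fallback key for entries without a usable string id;
    # the default 'und' keeps them, while lang=None restores the old dropping behaviour
    assert subs_list_to_dict([
        {'id': 'en', 'url': 'https://example.com/en.vtt'},
        {'url': 'https://example.com/unknown.vtt'},
    ]) == {
        'en': [{'url': 'https://example.com/en.vtt'}],
        'und': [{'url': 'https://example.com/unknown.vtt'}],
    }
    assert subs_list_to_dict([{'url': 'https://example.com/unknown.vtt'}], lang=None) == {}

    # unpack() now takes keyword arguments itself and forwards them on each call
    assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'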