Compare commits

...

31 Commits

Author SHA1 Message Date
Kieran
f13468b272
Merge 7bac30e475 into 1d253b0a27 2024-11-17 02:29:17 +05:30
Jackson Humphrey
1d253b0a27
[ie/patreon] Fix comments extraction (#11530)
Closes #11483
Authored by: jshumphrey, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2024-11-16 20:02:14 +00:00
powergold1
720b3dc453
[ie/chaturbate] Extract from API and support impersonation (#11555)
Closes #6546, Closes #10359
Authored by: powergold1
2024-11-16 19:55:40 +00:00
Jackson Humphrey
d215fba7ed
[ie/RedGifsUser] Fix extraction (#11531)
Closes #7382, Closes #9131
Authored by: jshumphrey
2024-11-16 19:50:17 +00:00
Jackson Humphrey
8388ec256f
[ie/spankbang] Support browser impersonation (#11542)
Closes #6545
Authored by: jshumphrey
2024-11-16 19:48:47 +00:00
sepro
6365e92589
[ie/bandlab] Add extractors (#11535)
Closes #7750
Authored by: seproDev
2024-11-16 17:56:43 +01:00
Alessandro Campolo
70c55cb08f
[ie/RadioRadicale] Add extractor (#5607)
Authored by: a13ssandr0, pzhlkj6612

Co-authored-by: Mozi <29089388+pzhlkj6612@users.noreply.github.com>
2024-11-16 13:56:15 +01:00
bashonly
c699bafc50 [ie/soop] Fix thumbnail extraction (#11545)
Closes #11537

Authored by: bashonly
2024-11-15 22:51:55 +00:00
bashonly
eb64ae7d5d [ie] Allow ext override for thumbnails (#11545)
Authored by: bashonly
2024-11-15 22:51:55 +00:00
Simon Sawicki
c014fbcddc
[utils] subs_list_to_dict: Add lang default parameter (#11508)
Authored by: Grub4K
2024-11-15 23:25:52 +01:00
Simon Sawicki
39d79c9b9c
[utils] Fix join_nonempty, add **kwargs to unpack (#11559)
Authored by: Grub4K
2024-11-15 22:06:15 +01:00
Kieran Eglin
7bac30e475
Merge branch 'master' into ke/refactor-move-files-pp 2024-11-08 09:52:59 -08:00
bashonly
81d8716a40
Merge branch 'master' into ke/refactor-move-files-pp 2024-07-08 00:13:14 -05:00
bashonly
b3f1ef087a
(merge conflict resolution) don't pass the dot to replace_extension 2024-07-08 05:11:49 +00:00
Kieran Eglin
7316fc5703
Missed one linting item 2024-06-14 12:50:24 -07:00
Kieran Eglin
15c7bef8d0
Linting 2024-06-14 12:47:59 -07:00
Kieran Eglin
dd4475bd33
Merged parent 2024-06-14 12:38:09 -07:00
Kieran Eglin
6c8ede8188
Fixed embedding filepath issue for subs and infojson 2024-04-26 16:16:56 -07:00
Kieran Eglin
3046c17822
Fixed filepath bug when embedding thumbnails 2024-04-26 15:51:37 -07:00
Kieran Eglin
dd986a4149
Linter 2024-04-26 15:31:22 -07:00
Kieran Eglin
c3fccc58cf
Updated logic for determining file extensions 2024-04-26 15:26:26 -07:00
Kieran Eglin
28d5051546
Reverted pre/post_process function signature 2024-04-26 14:17:18 -07:00
Kieran Eglin
a1ff1d4272
Reverted unrelated changes 2024-04-26 13:55:07 -07:00
Kieran Eglin
0a3c5aceb5
Removed now-unneeded thumbnail/subtitle return values 2024-04-24 11:08:21 -07:00
Kieran Eglin
9c3b227db8
Removed files_to_move logic 2024-04-24 11:02:26 -07:00
Kieran Eglin
ea2a085397
Fixed up tests and linting 2024-04-24 10:57:39 -07:00
Kieran Eglin
44bb6c2056
[WIP] got refactor of file mover basically working 2024-04-24 09:47:58 -07:00
Kieran Eglin
fe4a15ff75
First pass at test feedback 2024-04-23 11:54:38 -07:00
Kieran Eglin
5d51ddbbfc
Removed unneeded conditionals + return 2024-04-23 11:04:46 -07:00
Kieran Eglin
c9d8184fe6
Ran flake8 2024-04-23 11:00:15 -07:00
Kieran Eglin
c574be85f1
Refactored MoveFilesPP to respect non-video files 2024-04-23 10:22:36 -07:00
18 changed files with 854 additions and 131 deletions

test/test_YoutubeDL.py

@@ -929,32 +929,33 @@ class TestYoutubeDL(unittest.TestCase):
         }), r'^30fps$')

     def test_postprocessors(self):
-        filename = 'post-processor-testfile.mp4'
-        audiofile = filename + '.mp3'
+        filename = 'post-processor-testfile'
+        video_file = filename + '.mp4'
+        audio_file = filename + '.mp3'

         class SimplePP(PostProcessor):
             def run(self, info):
-                with open(audiofile, 'w') as f:
+                with open(audio_file, 'w') as f:
                     f.write('EXAMPLE')
                 return [info['filepath']], info

         def run_pp(params, pp):
-            with open(filename, 'w') as f:
+            with open(video_file, 'w') as f:
                 f.write('EXAMPLE')
             ydl = YoutubeDL(params)
             ydl.add_post_processor(pp())
-            ydl.post_process(filename, {'filepath': filename})
+            ydl.post_process(video_file, {'filepath': video_file})

-        run_pp({'keepvideo': True}, SimplePP)
-        self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
-        self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
-        os.unlink(filename)
-        os.unlink(audiofile)
+        run_pp({'keepvideo': True, 'outtmpl': filename}, SimplePP)
+        self.assertTrue(os.path.exists(video_file), f'{video_file} doesn\'t exist')
+        self.assertTrue(os.path.exists(audio_file), f'{audio_file} doesn\'t exist')
+        os.unlink(video_file)
+        os.unlink(audio_file)

-        run_pp({'keepvideo': False}, SimplePP)
-        self.assertFalse(os.path.exists(filename), f'{filename} exists')
-        self.assertTrue(os.path.exists(audiofile), f'{audiofile} doesn\'t exist')
-        os.unlink(audiofile)
+        run_pp({'keepvideo': False, 'outtmpl': filename}, SimplePP)
+        self.assertFalse(os.path.exists(video_file), f'{video_file} exists')
+        self.assertTrue(os.path.exists(audio_file), f'{audio_file} doesn\'t exist')
+        os.unlink(audio_file)

         class ModifierPP(PostProcessor):
             def run(self, info):
@@ -962,9 +963,9 @@ class TestYoutubeDL(unittest.TestCase):
                     f.write('MODIFIED')
                 return [], info

-        run_pp({'keepvideo': False}, ModifierPP)
-        self.assertTrue(os.path.exists(filename), f'{filename} doesn\'t exist')
-        os.unlink(filename)
+        run_pp({'keepvideo': False, 'outtmpl': filename}, ModifierPP)
+        self.assertTrue(os.path.exists(video_file), f'{video_file} doesn\'t exist')
+        os.unlink(video_file)

     def test_match_filter(self):
         first = {

test/test_traversal.py

@@ -481,7 +481,7 @@ class TestTraversalHelpers:
             'id': 'name',
             'data': 'content',
             'url': 'url',
-        }, all, {subs_list_to_dict}]) == {
+        }, all, {subs_list_to_dict(lang=None)}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass'}],
             'en': [{'data': 'content'}],
         }, 'subs with mandatory items missing should be filtered'
@@ -507,6 +507,54 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
             {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
         ]}, '`quality` key should sort subtitle list accordingly'
+        assert traverse_obj([
+            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+            {'name': 'de'},
+            {'name': 'en', 'content': 'content'},
+            {'url': 'https://example.com/subs/en'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'data': 'content',
+        }, all, {subs_list_to_dict(lang='en')}]) == {
+            'de': [{'url': 'https://example.com/subs/de.ass'}],
+            'en': [
+                {'data': 'content'},
+                {'url': 'https://example.com/subs/en'},
+            ],
+        }, 'optionally provided lang should be used if no id available'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang=None)}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be ignored for id and ext'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang='de')}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de1'},
+                {'url': 'https://example.com/subs/de2'},
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be replaced by default id'

     def test_trim_str(self):
         with pytest.raises(TypeError):
@@ -525,7 +573,7 @@ class TestTraversalHelpers:
     def test_unpack(self):
         assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
         assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
-        assert unpack(join_nonempty(delim=' '))([1, 2, 3]) == '1 2 3'
+        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
         with pytest.raises(TypeError):
             unpack(join_nonempty)()
         with pytest.raises(TypeError):
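
For reference, the semantics pinned down by test_unpack can be sketched outside the test suite. This is a hypothetical reimplementation of the reworked unpack helper, shown only to illustrate the new keyword-argument forwarding; join_nonempty is likewise a simplified stand-in for the real yt-dlp utility:

import functools


def join_nonempty(*values, delim='-'):
    # Simplified stand-in for yt_dlp.utils.join_nonempty
    return delim.join(str(v) for v in values if v)


def unpack(func, **kwargs):
    # Hypothetical sketch: spread an iterable into positional arguments,
    # forwarding any extra keyword arguments to the wrapped function
    @functools.wraps(func)
    def inner(items, **additional_kwargs):
        return func(*items, **kwargs, **additional_kwargs)
    return inner


assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'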

test/test_utils.py

@@ -72,7 +72,6 @@ from yt_dlp.utils import (
     intlist_to_bytes,
     iri_to_uri,
     is_html,
-    join_nonempty,
     js_to_json,
     limit_length,
     locked_file,
@@ -2158,10 +2157,6 @@ Line 1
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
         assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'

-        assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
-        assert callable(join_nonempty()), 'varargs positional should apply partially'
-        assert join_nonempty(None, delim=', ') == '', 'passed varargs should call the function'

 if __name__ == '__main__':
     unittest.main()

yt_dlp/YoutubeDL.py

@@ -3239,7 +3239,6 @@ class YoutubeDL:
         # info_dict['_filename'] needs to be set for backward compatibility
         info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
         temp_filename = self.prepare_filename(info_dict, 'temp')
-        files_to_move = {}

         # Forced printings
         self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
@@ -3267,13 +3266,11 @@ class YoutubeDL:
         sub_files = self._write_subtitles(info_dict, temp_filename)
         if sub_files is None:
             return
-        files_to_move.update(dict(sub_files))

         thumb_files = self._write_thumbnails(
             'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
         if thumb_files is None:
             return
-        files_to_move.update(dict(thumb_files))

         infofn = self.prepare_filename(info_dict, 'infojson')
         _infojson_written = self._write_info_json('video', info_dict, infofn)
@@ -3347,13 +3344,12 @@ class YoutubeDL:
                for link_type, should_write in write_links.items()):
            return

-        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
+        new_info, _ = self.pre_process(info_dict, 'before_dl')
         replace_info_dict(new_info)

         if self.params.get('skip_download'):
             info_dict['filepath'] = temp_filename
             info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
-            info_dict['__files_to_move'] = files_to_move
             replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
             info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
         else:
@@ -3467,9 +3463,6 @@ class YoutubeDL:
                         info_dict['__files_to_merge'] = downloaded
                         # Even if there were no downloads, it is being merged only now
                         info_dict['__real_download'] = True
-                    else:
-                        for file in downloaded:
-                            files_to_move[file] = None
                 else:
                     # Just a single file
                     dl_filename = existing_video_file(full_filename, temp_filename)
@@ -3483,7 +3476,6 @@ class YoutubeDL:
                 dl_filename = dl_filename or temp_filename
                 info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

             except network_exceptions as err:
                 self.report_error(f'unable to download video data: {err}')
                 return
@@ -3554,7 +3546,7 @@ class YoutubeDL:
                     fixup()

                 try:
-                    replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
+                    replace_info_dict(self.post_process(dl_filename, info_dict))
                 except PostProcessingError as err:
                     self.report_error(f'Postprocessing: {err}')
                     return
@@ -3677,8 +3669,6 @@ class YoutubeDL:
                 os.remove(filename)
             except OSError:
                 self.report_warning(f'Unable to delete file {filename}')
-            if filename in info.get('__files_to_move', []):  # NB: Delete even if None
-                del info['__files_to_move'][filename]

     @staticmethod
     def post_extract(info_dict):
@@ -3695,8 +3685,7 @@ class YoutubeDL:
     def run_pp(self, pp, infodict):
         files_to_delete = []
-        if '__files_to_move' not in infodict:
-            infodict['__files_to_move'] = {}
         try:
             files_to_delete, infodict = pp.run(infodict)
         except PostProcessingError as e:
@@ -3708,10 +3697,7 @@ class YoutubeDL:
         if not files_to_delete:
             return infodict
-        if self.params.get('keepvideo', False):
-            for f in files_to_delete:
-                infodict['__files_to_move'].setdefault(f, '')
-        else:
+        if not self.params.get('keepvideo', False):
             self._delete_downloaded_files(
                 *files_to_delete, info=infodict, msg='Deleting original file %s (pass -k to keep)')
         return infodict
@@ -3724,23 +3710,27 @@ class YoutubeDL:
         return info

     def pre_process(self, ie_info, key='pre_process', files_to_move=None):
+        if files_to_move is not None:
+            self.report_warning('[pre_process] "files_to_move" is deprecated and may be removed in a future version')
         info = dict(ie_info)
-        info['__files_to_move'] = files_to_move or {}
         try:
             info = self.run_all_pps(key, info)
         except PostProcessingError as err:
             msg = f'Preprocessing: {err}'
             info.setdefault('__pending_error', msg)
             self.report_error(msg, is_error=False)
-        return info, info.pop('__files_to_move', None)
+        return info, files_to_move

     def post_process(self, filename, info, files_to_move=None):
         """Run all the postprocessors on the given file."""
+        if files_to_move is not None:
+            self.report_warning('[post_process] "files_to_move" is deprecated and may be removed in a future version')
         info['filepath'] = filename
-        info['__files_to_move'] = files_to_move or {}
         info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
         info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
-        del info['__files_to_move']
+        info.pop('__multiple_thumbnails', None)
         return self.run_all_pps('after_move', info)

     def _make_archive_id(self, info_dict):
@@ -4325,10 +4315,11 @@ class YoutubeDL:
                 sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
                 sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
                 existing_sub = self.existing_file((sub_filename_final, sub_filename))
+
                 if existing_sub:
                     self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                     sub_info['filepath'] = existing_sub
-                    ret.append((existing_sub, sub_filename_final))
+                    ret.append(existing_sub)
                     continue

                 self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
@@ -4339,7 +4330,7 @@ class YoutubeDL:
                         with open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                             subfile.write(sub_info['data'])
                         sub_info['filepath'] = sub_filename
-                        ret.append((sub_filename, sub_filename_final))
+                        ret.append(sub_filename)
                         continue
                     except OSError:
                         self.report_error(f'Cannot write video subtitles file {sub_filename}')
@@ -4350,7 +4341,7 @@ class YoutubeDL:
                     sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                     self.dl(sub_filename, sub_copy, subtitle=True)
                     sub_info['filepath'] = sub_filename
-                    ret.append((sub_filename, sub_filename_final))
+                    ret.append(sub_filename)
                 except (DownloadError, ExtractorError, OSError, ValueError, *network_exceptions) as err:
                     msg = f'Unable to download video subtitles for {sub_lang!r}: {err}'
                     if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
@@ -4370,6 +4361,7 @@ class YoutubeDL:
             self.to_screen(f'[info] There are no {label} thumbnails to download')
             return ret
         multiple = write_all and len(thumbnails) > 1
+        info_dict['__multiple_thumbnails'] = multiple

         if thumb_filename_base is None:
             thumb_filename_base = filename
@@ -4381,7 +4373,9 @@ class YoutubeDL:
             return None

         for idx, t in list(enumerate(thumbnails))[::-1]:
-            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
+            thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
+            if multiple:
+                thumb_ext = f'{t["id"]}.{thumb_ext}'
             thumb_display_id = f'{label} thumbnail {t["id"]}'
             thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
             thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
@@ -4391,7 +4385,7 @@ class YoutubeDL:
                 self.to_screen('[info] {} is already present'.format((
                     thumb_display_id if multiple else f'{label} thumbnail').capitalize()))
                 t['filepath'] = existing_thumb
-                ret.append((existing_thumb, thumb_filename_final))
+                ret.append(existing_thumb)
             else:
                 self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                 try:
@@ -4399,7 +4393,7 @@ class YoutubeDL:
                         self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                         with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                             shutil.copyfileobj(uf, thumbf)
-                        ret.append((thumb_filename, thumb_filename_final))
+                        ret.append(thumb_filename)
                         t['filepath'] = thumb_filename
                     except network_exceptions as err:
                         if isinstance(err, HTTPError) and err.status == 404:
@@ -4409,4 +4403,5 @@ class YoutubeDL:
                 thumbnails.pop(idx)
             if ret and not write_all:
                 break
+
         return ret
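
The user-visible effect of the pre_process/post_process hunks above: both methods keep their old signatures for backward compatibility, but the files_to_move argument is now inert and only triggers a deprecation warning. A minimal sketch of embedding code against the new behavior (the info dict is a placeholder):

from yt_dlp import YoutubeDL

with YoutubeDL() as ydl:
    info = {'id': 'example', 'title': 'Example', 'ext': 'mp4'}
    # New style: the second return value just echoes files_to_move (None here)
    new_info, _ = ydl.pre_process(info)
    # Old style still runs, but warns that "files_to_move" is deprecated
    new_info, _ = ydl.pre_process(info, 'pre_process', {'old.tmp': 'new.mp4'})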

yt_dlp/extractor/_extractors.py

@@ -208,6 +208,10 @@ from .bandcamp import (
     BandcampUserIE,
     BandcampWeeklyIE,
 )
+from .bandlab import (
+    BandlabIE,
+    BandlabPlaylistIE,
+)
 from .bannedvideo import BannedVideoIE
 from .bbc import (
     BBCIE,
@@ -1649,6 +1653,7 @@ from .radiokapital import (
     RadioKapitalIE,
     RadioKapitalShowIE,
 )
+from .radioradicale import RadioRadicaleIE
 from .radiozet import RadioZetPodcastIE
 from .radlive import (
     RadLiveChannelIE,

yt_dlp/extractor/afreecatv.py

@@ -66,6 +66,14 @@ class AfreecaTVBaseIE(InfoExtractor):
                 extensions={'legacy_ssl': True}), display_id,
             'Downloading API JSON', 'Unable to download API JSON')

+    @staticmethod
+    def _fixup_thumb(thumb_url):
+        if not url_or_none(thumb_url):
+            return None
+        # Core would determine_ext as 'php' from the url, so we need to provide the real ext
+        # See: https://github.com/yt-dlp/yt-dlp/issues/11537
+        return [{'url': thumb_url, 'ext': 'jpg'}]
+

 class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'soop'
@@ -155,7 +163,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
             'duration': ('total_file_duration', {int_or_none(scale=1000)}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
         })

         entries = []
@@ -226,8 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):

         return self.playlist_result(self._entries(data), video_id)

-    @staticmethod
-    def _entries(data):
+    def _entries(self, data):
         # 'files' is always a list with 1 element
         yield from traverse_obj(data, (
             'data', lambda _, v: v['story_type'] == 'catch',
@@ -238,7 +245,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
                 'title': ('title', {str}),
                 'uploader': ('writer_nick', {str}),
                 'uploader_id': ('writer_id', {str}),
-                'thumbnail': ('thumb', {url_or_none}),
+                'thumbnails': ('thumb', {self._fixup_thumb}),
                 'timestamp': ('write_timestamp', {int_or_none}),
             }))
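
The issue being worked around is that determine_ext guesses the extension from the URL path, and SOOP's thumbnail endpoints are PHP scripts. A rough illustration of the failure mode, using a simplified stand-in for yt_dlp.utils.determine_ext and a made-up URL:

from urllib.parse import urlparse


def determine_ext(url, default_ext='unknown_video'):
    # Simplified stand-in: take whatever follows the last dot in the path
    _, _, ext = urlparse(url).path.rpartition('.')
    return ext if ext.isalnum() else default_ext


thumb_url = 'https://example.com/video_thumbnail.php?id=12345'  # hypothetical
print(determine_ext(thumb_url))  # 'php', wrong for what is really a JPEG
# Hence _fixup_thumb returns [{'url': thumb_url, 'ext': 'jpg'}] so the core
# trusts the declared ext instead of guessing from the URL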

yt_dlp/extractor/bandlab.py (new file, 438 lines)

@@ -0,0 +1,438 @@
from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    format_field,
    int_or_none,
    parse_iso8601,
    parse_qs,
    truncate_string,
    url_or_none,
)
from ..utils.traversal import traverse_obj, value


class BandlabBaseIE(InfoExtractor):
    def _call_api(self, endpoint, asset_id, **kwargs):
        headers = kwargs.pop('headers', None) or {}
        return self._download_json(
            f'https://www.bandlab.com/api/v1.3/{endpoint}/{asset_id}',
            asset_id, headers={
                'accept': 'application/json',
                'referer': 'https://www.bandlab.com/',
                'x-client-id': 'BandLab-Web',
                'x-client-version': '10.1.124',
                **headers,
            }, **kwargs)

    def _parse_revision(self, revision_data, url=None):
        return {
            'vcodec': 'none',
            'media_type': 'revision',
            'extractor_key': BandlabIE.ie_key(),
            'extractor': BandlabIE.IE_NAME,
            **traverse_obj(revision_data, {
                'webpage_url': (
                    'id', ({value(url)}, {format_field(template='https://www.bandlab.com/revision/%s')}), filter, any),
                'id': (('revisionId', 'id'), {str}, any),
                'title': ('song', 'name', {str}),
                'track': ('song', 'name', {str}),
                'url': ('mixdown', 'file', {url_or_none}),
                'thumbnail': ('song', 'picture', 'url', {url_or_none}),
                'description': ('description', {str}),
                'uploader': ('creator', 'name', {str}),
                'uploader_id': ('creator', 'username', {str}),
                'timestamp': ('createdOn', {parse_iso8601}),
                'duration': ('mixdown', 'duration', {float_or_none}),
                'view_count': ('counters', 'plays', {int_or_none}),
                'like_count': ('counters', 'likes', {int_or_none}),
                'comment_count': ('counters', 'comments', {int_or_none}),
                'genres': ('genres', ..., 'name', {str}),
            }),
        }

    def _parse_track(self, track_data, url=None):
        return {
            'vcodec': 'none',
            'media_type': 'track',
            'extractor_key': BandlabIE.ie_key(),
            'extractor': BandlabIE.IE_NAME,
            **traverse_obj(track_data, {
                'webpage_url': (
                    'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any),
                'id': (('revisionId', 'id'), {str}, any),
                'url': ('track', 'sample', 'audioUrl', {url_or_none}),
                'title': ('track', 'name', {str}),
                'track': ('track', 'name', {str}),
                'description': ('caption', {str}),
                'thumbnail': ('track', 'picture', ('original', 'url'), {url_or_none}, any),
                'view_count': ('counters', 'plays', {int_or_none}),
                'like_count': ('counters', 'likes', {int_or_none}),
                'comment_count': ('counters', 'comments', {int_or_none}),
                'duration': ('track', 'sample', 'duration', {float_or_none}),
                'uploader': ('creator', 'name', {str}),
                'uploader_id': ('creator', 'username', {str}),
                'timestamp': ('createdOn', {parse_iso8601}),
            }),
        }

    def _parse_video(self, video_data, url=None):
        return {
            'media_type': 'video',
            'extractor_key': BandlabIE.ie_key(),
            'extractor': BandlabIE.IE_NAME,
            **traverse_obj(video_data, {
                'id': ('id', {str}),
                'webpage_url': (
                    'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any),
                'url': ('video', 'url', {url_or_none}),
                'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}),
                'description': ('caption', {str}),
                'thumbnail': ('video', 'picture', 'url', {url_or_none}),
                'view_count': ('video', 'counters', 'plays', {int_or_none}),
                'like_count': ('video', 'counters', 'likes', {int_or_none}),
                'comment_count': ('counters', 'comments', {int_or_none}),
                'duration': ('video', 'duration', {float_or_none}),
                'uploader': ('creator', 'name', {str}),
                'uploader_id': ('creator', 'username', {str}),
            }),
        }


class BandlabIE(BandlabBaseIE):
    _VALID_URL = [
        r'https?://(?:www\.)?bandlab.com/(?P<url_type>track|post|revision)/(?P<id>[\da-f_-]+)',
        r'https?://(?:www\.)?bandlab.com/(?P<url_type>embed)/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)',
    ]
    _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]']
    _TESTS = [{
        'url': 'https://www.bandlab.com/track/04b37e88dba24967b9dac8eb8567ff39_07d7f906fc96ee11b75e000d3a428fff',
        'md5': '46f7b43367dd268bbcf0bbe466753b2c',
        'info_dict': {
            'id': '02d7f906-fc96-ee11-b75e-000d3a428fff',
            'ext': 'm4a',
            'uploader_id': 'ender_milze',
            'track': 'sweet black',
            'description': 'composed by juanjn3737',
            'timestamp': 1702171963,
            'view_count': int,
            'like_count': int,
            'duration': 54.629999999999995,
            'title': 'sweet black',
            'upload_date': '20231210',
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
            'genres': ['Lofi'],
            'uploader': 'ender milze',
            'comment_count': int,
            'media_type': 'revision',
        },
    }, {
        # Same track as above but post URL
        'url': 'https://www.bandlab.com/post/07d7f906-fc96-ee11-b75e-000d3a428fff',
        'md5': '46f7b43367dd268bbcf0bbe466753b2c',
        'info_dict': {
            'id': '02d7f906-fc96-ee11-b75e-000d3a428fff',
            'ext': 'm4a',
            'uploader_id': 'ender_milze',
            'track': 'sweet black',
            'description': 'composed by juanjn3737',
            'timestamp': 1702171973,
            'view_count': int,
            'like_count': int,
            'duration': 54.629999999999995,
            'title': 'sweet black',
            'upload_date': '20231210',
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/',
            'genres': ['Lofi'],
            'uploader': 'ender milze',
            'comment_count': int,
            'media_type': 'revision',
        },
    }, {
        # SharedKey Example
        'url': 'https://www.bandlab.com/track/048916c2-c6da-ee11-85f9-6045bd2e11f9?sharedKey=0NNWX8qYAEmI38lWAzCNDA',
        'md5': '15174b57c44440e2a2008be9cae00250',
        'info_dict': {
            'id': '038916c2-c6da-ee11-85f9-6045bd2e11f9',
            'ext': 'm4a',
            'comment_count': int,
            'genres': ['Other'],
            'uploader_id': 'user8353034818103753',
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/',
            'timestamp': 1709625771,
            'track': 'PodcastMaerchen4b',
            'duration': 468.14,
            'view_count': int,
            'description': 'Podcast: Neues aus der Märchenwelt',
            'like_count': int,
            'upload_date': '20240305',
            'uploader': 'Erna Wageneder',
            'title': 'PodcastMaerchen4b',
            'media_type': 'revision',
        },
    }, {
        # Different Revision selected
        'url': 'https://www.bandlab.com/track/130343fc-148b-ea11-96d2-0003ffd1fc09?revId=110343fc-148b-ea11-96d2-0003ffd1fc09',
        'md5': '74e055ef9325d63f37088772fbfe4454',
        'info_dict': {
            'id': '110343fc-148b-ea11-96d2-0003ffd1fc09',
            'ext': 'm4a',
            'timestamp': 1588273294,
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/',
            'description': 'Final Revision.',
            'title': 'Replay ( Instrumental)',
            'uploader': 'David R Sparks',
            'uploader_id': 'davesnothome69',
            'view_count': int,
            'comment_count': int,
            'track': 'Replay ( Instrumental)',
            'genres': ['Rock'],
            'upload_date': '20200430',
            'like_count': int,
            'duration': 279.43,
            'media_type': 'revision',
        },
    }, {
        # Video
        'url': 'https://www.bandlab.com/post/5cdf9036-3857-ef11-991a-6045bd36e0d9',
        'md5': '8caa2ef28e86c1dacf167293cfdbeba9',
        'info_dict': {
            'id': '5cdf9036-3857-ef11-991a-6045bd36e0d9',
            'ext': 'mp4',
            'duration': 44.705,
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/',
            'comment_count': int,
            'title': 'backing vocals',
            'uploader_id': 'marliashya',
            'uploader': 'auraa',
            'like_count': int,
            'description': 'backing vocals',
            'media_type': 'video',
        },
    }, {
        # Embed Example
        'url': 'https://www.bandlab.com/embed/?blur=false&id=014de0a4-7d82-ea11-a94c-0003ffd19c0f',
        'md5': 'a4ad05cb68c54faaed9b0a8453a8cf4a',
        'info_dict': {
            'id': '014de0a4-7d82-ea11-a94c-0003ffd19c0f',
            'ext': 'm4a',
            'comment_count': int,
            'genres': ['Electronic'],
            'uploader': 'Charlie Henson',
            'timestamp': 1587328674,
            'upload_date': '20200419',
            'view_count': int,
            'track': 'Positronic Meltdown',
            'duration': 318.55,
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/',
            'description': 'Checkout my tracks at AOMX http://aomxsounds.com/',
            'uploader_id': 'microfreaks',
            'title': 'Positronic Meltdown',
            'like_count': int,
            'media_type': 'revision',
        },
    }, {
        # Track without revisions available
        'url': 'https://www.bandlab.com/track/55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5',
        'md5': 'f05d68a3769952c2d9257c473e14c15f',
        'info_dict': {
            'id': '55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5',
            'ext': 'm4a',
            'track': 'insame',
            'like_count': int,
            'duration': 84.03,
            'title': 'insame',
            'view_count': int,
            'comment_count': int,
            'uploader': 'Sorakime',
            'uploader_id': 'sorakime',
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/',
            'timestamp': 1691162128,
            'upload_date': '20230804',
            'media_type': 'track',
        },
    }, {
        'url': 'https://www.bandlab.com/revision/014de0a4-7d82-ea11-a94c-0003ffd19c0f',
        'only_matching': True,
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://phantomluigi.github.io/',
        'info_dict': {
            'id': 'e14223c3-7871-ef11-bdfd-000d3a980db3',
            'ext': 'm4a',
            'view_count': int,
            'upload_date': '20240913',
            'uploader_id': 'phantommusicofficial',
            'timestamp': 1726194897,
            'uploader': 'Phantom',
            'comment_count': int,
            'genres': ['Progresive Rock'],
            'description': 'md5:a38cd668f7a2843295ef284114f18429',
            'duration': 225.23,
            'like_count': int,
            'title': 'Vermilion Pt. 2 (Cover)',
            'track': 'Vermilion Pt. 2 (Cover)',
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/62b10750-7aef-4f42-ad08-1af52f577e97/',
            'media_type': 'revision',
        },
    }]

    def _real_extract(self, url):
        display_id, url_type = self._match_valid_url(url).group('id', 'url_type')

        qs = parse_qs(url)
        revision_id = traverse_obj(qs, (('revId', 'id'), 0, any))
        if url_type == 'revision':
            revision_id = display_id

        revision_data = None
        if not revision_id:
            post_data = self._call_api(
                'posts', display_id, note='Downloading post data',
                query=traverse_obj(qs, {'sharedKey': ('sharedKey', 0)}))

            revision_id = traverse_obj(post_data, (('revisionId', ('revision', 'id')), {str}, any))
            revision_data = traverse_obj(post_data, ('revision', {dict}))

            if not revision_data and not revision_id:
                post_type = post_data.get('type')
                if post_type == 'Video':
                    return self._parse_video(post_data, url=url)
                if post_type == 'Track':
                    return self._parse_track(post_data, url=url)
                raise ExtractorError(f'Could not extract data for post type {post_type!r}')

        if not revision_data:
            revision_data = self._call_api(
                'revisions', revision_id, note='Downloading revision data', query={'edit': 'false'})

        return self._parse_revision(revision_data, url=url)


class BandlabPlaylistIE(BandlabBaseIE):
    _VALID_URL = [
        r'https?://(?:www\.)?bandlab.com/(?:[\w]+/)?(?P<type>albums|collections)/(?P<id>[\da-f-]+)',
        r'https?://(?:www\.)?bandlab.com/(?P<type>embed)/collection/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)',
    ]
    _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]']
    _TESTS = [{
        'url': 'https://www.bandlab.com/davesnothome69/albums/89b79ea6-de42-ed11-b495-00224845aac7',
        'info_dict': {
            'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/69507ff3-579a-45be-afca-9e87eddec944/',
            'release_date': '20221003',
            'title': 'Remnants',
            'album': 'Remnants',
            'like_count': int,
            'album_type': 'LP',
            'description': 'A collection of some feel good, rock hits.',
            'comment_count': int,
            'view_count': int,
            'id': '89b79ea6-de42-ed11-b495-00224845aac7',
            'uploader': 'David R Sparks',
            'uploader_id': 'davesnothome69',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.bandlab.com/slytheband/collections/955102d4-1040-ef11-86c3-000d3a42581b',
        'info_dict': {
            'id': '955102d4-1040-ef11-86c3-000d3a42581b',
            'timestamp': 1720762659,
            'view_count': int,
            'title': 'My Shit 🖤',
            'uploader_id': 'slytheband',
            'uploader': '𝓢𝓛𝓨',
            'upload_date': '20240712',
            'like_count': int,
            'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/collections/2c64ca12-b180-4b76-8587-7a8da76bddc8/',
        },
        'playlist_count': 15,
    }, {
        # Embeds can contain both albums and collections with the same URL pattern. This is an album
        'url': 'https://www.bandlab.com/embed/collection/?id=12cc6f7f-951b-ee11-907c-00224844f303',
        'info_dict': {
            'id': '12cc6f7f-951b-ee11-907c-00224844f303',
            'release_date': '20230706',
            'description': 'This is a collection of songs I created when I had an Amiga computer.',
            'view_count': int,
            'title': 'Mark Salud The Amiga Collection',
            'uploader_id': 'mssirmooth1962',
            'comment_count': int,
            'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/d618bd7b-0537-40d5-bdd8-61b066e77d59/',
            'like_count': int,
            'uploader': 'Mark Salud',
            'album': 'Mark Salud The Amiga Collection',
            'album_type': 'LP',
        },
        'playlist_count': 24,
    }, {
        # Tracks without revision id
        'url': 'https://www.bandlab.com/embed/collection/?id=e98aafb5-d932-ee11-b8f0-00224844c719',
        'info_dict': {
            'like_count': int,
            'uploader_id': 'sorakime',
            'comment_count': int,
            'uploader': 'Sorakime',
            'view_count': int,
            'description': 'md5:4ec31c568a5f5a5a2b17572ea64c3825',
            'release_date': '20230812',
            'title': 'Art',
            'album': 'Art',
            'album_type': 'Album',
            'id': 'e98aafb5-d932-ee11-b8f0-00224844c719',
            'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/20c890de-e94a-4422-828a-2da6377a13c8/',
        },
        'playlist_count': 13,
    }, {
        'url': 'https://www.bandlab.com/albums/89b79ea6-de42-ed11-b495-00224845aac7',
        'only_matching': True,
    }]

    def _entries(self, album_data):
        for post in traverse_obj(album_data, ('posts', lambda _, v: v['type'])):
            post_type = post['type']
            if post_type == 'Revision':
                yield self._parse_revision(post.get('revision'))
            elif post_type == 'Track':
                yield self._parse_track(post)
            elif post_type == 'Video':
                yield self._parse_video(post)
            else:
                self.report_warning(f'Skipping unknown post type: "{post_type}"')

    def _real_extract(self, url):
        playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type')

        endpoints = {
            'albums': ['albums'],
            'collections': ['collections'],
            'embed': ['collections', 'albums'],
        }.get(playlist_type)
        for endpoint in endpoints:
            playlist_data = self._call_api(
                endpoint, playlist_id, note=f'Downloading {endpoint[:-1]} data',
                fatal=False, expected_status=404)
            if not playlist_data.get('errorCode'):
                playlist_type = endpoint
                break
        if error_code := playlist_data.get('errorCode'):
            raise ExtractorError(f'Could not find playlist data. Error code: "{error_code}"')

        return self.playlist_result(
            self._entries(playlist_data), playlist_id,
            **traverse_obj(playlist_data, {
                'title': ('name', {str}),
                'description': ('description', {str}),
                'uploader': ('creator', 'name', {str}),
                'uploader_id': ('creator', 'username', {str}),
                'timestamp': ('createdOn', {parse_iso8601}),
                'release_date': ('releaseDate', {lambda x: x.replace('-', '')}, filter),
                'thumbnail': ('picture', ('original', 'url'), {url_or_none}, any),
                'like_count': ('counters', 'likes', {int_or_none}),
                'comment_count': ('counters', 'comments', {int_or_none}),
                'view_count': ('counters', 'plays', {int_or_none}),
            }),
            **(traverse_obj(playlist_data, {
                'album': ('name', {str}),
                'album_type': ('type', {str}),
            }) if playlist_type == 'albums' else {}))
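
A quick way to smoke-test the new extractor from Python, using one of the URLs from _TESTS above (assumes a yt-dlp build that includes this branch):

import yt_dlp

URL = 'https://www.bandlab.com/post/5cdf9036-3857-ef11-991a-6045bd36e0d9'
with yt_dlp.YoutubeDL() as ydl:
    # Extract metadata only; BandlabIE should report media_type 'video' here
    info = ydl.extract_info(URL, download=False)
    print(info['id'], info.get('media_type'), info.get('title'))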

yt_dlp/extractor/chaturbate.py

@@ -5,6 +5,7 @@ from ..utils import (
     ExtractorError,
     lowercase_escape,
     url_or_none,
+    urlencode_postdata,
 )


@@ -40,14 +41,48 @@ class ChaturbateIE(InfoExtractor):
         'only_matching': True,
     }]

-    _ROOM_OFFLINE = 'Room is currently offline'
+    _ERROR_MAP = {
+        'offline': 'Room is currently offline',
+        'private': 'Room is currently in a private show',
+        'away': 'Performer is currently away',
+        'password protected': 'Room is password protected',
+        'hidden': 'Hidden session in progress',
+    }

-    def _real_extract(self, url):
-        video_id, tld = self._match_valid_url(url).group('id', 'tld')
+    def _extract_from_api(self, video_id, tld):
+        response = self._download_json(
+            f'https://chaturbate.{tld}/get_edge_hls_url_ajax/', video_id,
+            data=urlencode_postdata({'room_slug': video_id}),
+            headers={
+                **self.geo_verification_headers(),
+                'X-Requested-With': 'XMLHttpRequest',
+                'Accept': 'application/json',
+            }, fatal=False, impersonate=True) or {}
+
+        status = response.get('room_status')
+        if status != 'public':
+            if error := self._ERROR_MAP.get(status):
+                raise ExtractorError(error, expected=True)
+            self.report_warning('Falling back to webpage extraction')
+            return None
+
+        m3u8_url = response.get('url')
+        if not m3u8_url:
+            self.raise_geo_restricted()
+
+        return {
+            'id': video_id,
+            'title': video_id,
+            'thumbnail': f'https://roomimg.stream.highwebmedia.com/ri/{video_id}.jpg',
+            'is_live': True,
+            'age_limit': 18,
+            'formats': self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True),
+        }
+
+    def _extract_from_webpage(self, video_id, tld):
         webpage = self._download_webpage(
             f'https://chaturbate.{tld}/{video_id}/', video_id,
-            headers=self.geo_verification_headers())
+            headers=self.geo_verification_headers(), impersonate=True)

         found_m3u8_urls = []

@@ -85,8 +120,8 @@ class ChaturbateIE(InfoExtractor):
             webpage, 'error', group='error', default=None)
         if not error:
             if any(p in webpage for p in (
-                    self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')):
-                error = self._ROOM_OFFLINE
+                    self._ERROR_MAP['offline'], 'offline_tipping', 'tip_offline')):
+                error = self._ERROR_MAP['offline']
         if error:
             raise ExtractorError(error, expected=True)
         raise ExtractorError('Unable to find stream URL')
@@ -113,3 +148,7 @@ class ChaturbateIE(InfoExtractor):
             'is_live': True,
             'formats': formats,
         }
+
+    def _real_extract(self, url):
+        video_id, tld = self._match_valid_url(url).group('id', 'tld')
+        return self._extract_from_api(video_id, tld) or self._extract_from_webpage(video_id, tld)

yt_dlp/extractor/common.py

@@ -279,6 +279,7 @@ class InfoExtractor:
     thumbnails:     A list of dictionaries, with the following entries:
                         * "id" (optional, string) - Thumbnail format ID
                         * "url"
+                        * "ext" (optional, string) - actual image extension if not given in URL
                         * "preference" (optional, int) - quality of the image
                         * "width" (optional, int)
                         * "height" (optional, int)

yt_dlp/extractor/patreon.py

@@ -16,10 +16,10 @@ from ..utils import (
     parse_iso8601,
     smuggle_url,
     str_or_none,
-    traverse_obj,
     url_or_none,
     urljoin,
 )
+from ..utils.traversal import traverse_obj, value


 class PatreonBaseIE(InfoExtractor):
@@ -252,6 +252,27 @@ class PatreonIE(PatreonBaseIE):
             'thumbnail': r're:^https?://.+',
         },
         'skip': 'Patron-only content',
+    }, {
+        # Contains a comment reply in the 'included' section
+        'url': 'https://www.patreon.com/posts/114721679',
+        'info_dict': {
+            'id': '114721679',
+            'ext': 'mp4',
+            'upload_date': '20241025',
+            'uploader': 'Japanalysis',
+            'like_count': int,
+            'thumbnail': r're:^https?://.+',
+            'comment_count': int,
+            'title': 'Karasawa Part 2',
+            'description': 'Part 2 of this video https://www.youtube.com/watch?v=Azms2-VTASk',
+            'uploader_url': 'https://www.patreon.com/japanalysis',
+            'uploader_id': '80504268',
+            'channel_url': 'https://www.patreon.com/japanalysis',
+            'channel_follower_count': int,
+            'timestamp': 1729897015,
+            'channel_id': '9346307',
+        },
+        'params': {'getcomments': True},
     }]
     _RETURN_TYPE = 'video'

@@ -404,26 +425,24 @@ class PatreonIE(PatreonBaseIE):
                 f'posts/{post_id}/comments', post_id, query=params, note=f'Downloading comments page {page}')

             cursor = None
-            for comment in traverse_obj(response, (('data', ('included', lambda _, v: v['type'] == 'comment')), ...)):
+            for comment in traverse_obj(response, (('data', 'included'), lambda _, v: v['type'] == 'comment' and v['id'])):
                 count += 1
-                comment_id = comment.get('id')
-                attributes = comment.get('attributes') or {}
-                if comment_id is None:
-                    continue
                 author_id = traverse_obj(comment, ('relationships', 'commenter', 'data', 'id'))
-                author_info = traverse_obj(
-                    response, ('included', lambda _, v: v['id'] == author_id and v['type'] == 'user', 'attributes'),
-                    get_all=False, expected_type=dict, default={})

                 yield {
-                    'id': comment_id,
-                    'text': attributes.get('body'),
-                    'timestamp': parse_iso8601(attributes.get('created')),
-                    'parent': traverse_obj(comment, ('relationships', 'parent', 'data', 'id'), default='root'),
-                    'author_is_uploader': attributes.get('is_by_creator'),
+                    **traverse_obj(comment, {
+                        'id': ('id', {str_or_none}),
+                        'text': ('attributes', 'body', {str}),
+                        'timestamp': ('attributes', 'created', {parse_iso8601}),
+                        'parent': ('relationships', 'parent', 'data', ('id', {value('root')}), {str}, any),
+                        'author_is_uploader': ('attributes', 'is_by_creator', {bool}),
+                    }),
+                    **traverse_obj(response, (
+                        'included', lambda _, v: v['id'] == author_id and v['type'] == 'user', 'attributes', {
+                            'author': ('full_name', {str}),
+                            'author_thumbnail': ('image_url', {url_or_none}),
+                        }), get_all=False),
                     'author_id': author_id,
-                    'author': author_info.get('full_name'),
-                    'author_thumbnail': author_info.get('image_url'),
                 }

             if count < traverse_obj(response, ('meta', 'count')):
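
The value() helper imported at the top of the file is what makes the declarative fallback possible: inside a traverse_obj branch it yields a constant, so the 'parent' mapping above should resolve to the parent comment's id when one exists and to the literal 'root' otherwise. A small sketch under the assumption that a yt-dlp build with this PR is installed (value comes from yt_dlp.utils.traversal):

from yt_dlp.utils.traversal import traverse_obj, value

comment = {
    'id': '123',
    'relationships': {'parent': {'data': None}},  # a top-level comment
}
parent = traverse_obj(
    comment, ('relationships', 'parent', 'data', ('id', {value('root')}), {str}, any))
print(parent)  # expected: 'root' (constant fallback when no parent id)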

yt_dlp/extractor/radioradicale.py (new file, 105 lines)

@@ -0,0 +1,105 @@
from .common import InfoExtractor
from ..utils import url_or_none
from ..utils.traversal import traverse_obj


class RadioRadicaleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?radioradicale\.it/scheda/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.radioradicale.it/scheda/471591',
        'md5': 'eb0fbe43a601f1a361cbd00f3c45af4a',
        'info_dict': {
            'id': '471591',
            'ext': 'mp4',
            'title': 'md5:e8fbb8de57011a3255db0beca69af73d',
            'description': 'md5:5e15a789a2fe4d67da8d1366996e89ef',
            'location': 'Napoli',
            'duration': 2852.0,
            'timestamp': 1459987200,
            'upload_date': '20160407',
            'thumbnail': 'https://www.radioradicale.it/photo400/0/0/9/0/1/00901768.jpg',
        },
    }, {
        'url': 'https://www.radioradicale.it/scheda/742783/parlamento-riunito-in-seduta-comune-11a-della-xix-legislatura',
        'info_dict': {
            'id': '742783',
            'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            'description': '-) Votazione per l\'elezione di un giudice della Corte Costituzionale (nono scrutinio)',
            'location': 'CAMERA',
            'duration': 5868.0,
            'timestamp': 1730246400,
            'upload_date': '20241030',
        },
        'playlist': [{
            'md5': 'aa48de55dcc45478e4cd200f299aab7d',
            'info_dict': {
                'id': '742783-0',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'be915c189c70ad2920e5810f32260ff5',
            'info_dict': {
                'id': '742783-1',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }, {
            'md5': 'f0ee4047342baf8ed3128a8417ac5e0a',
            'info_dict': {
                'id': '742783-2',
                'ext': 'mp4',
                'title': 'Parlamento riunito in seduta comune (11ª della XIX legislatura)',
            },
        }],
    }]

    def _entries(self, videos_info, page_id):
        for idx, video in enumerate(traverse_obj(
                videos_info, ('playlist', lambda _, v: v['sources']))):
            video_id = f'{page_id}-{idx}'
            formats = []
            subtitles = {}
            for m3u8_url in traverse_obj(video, ('sources', ..., 'src', {url_or_none})):
                fmts, subs = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id)
                formats.extend(fmts)
                self._merge_subtitles(subs, target=subtitles)
            for sub in traverse_obj(video, ('subtitles', ..., lambda _, v: url_or_none(v['src']))):
                self._merge_subtitles({sub.get('srclang') or 'und': [{
                    'url': sub['src'],
                    'name': sub.get('label'),
                }]}, target=subtitles)
            yield {
                'id': video_id,
                'title': video.get('title'),
                'formats': formats,
                'subtitles': subtitles,
            }

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)

        videos_info = self._search_json(
            r'jQuery\.extend\(Drupal\.settings\s*,',
            webpage, 'videos_info', page_id)['RRscheda']

        entries = list(self._entries(videos_info, page_id))

        common_info = {
            'id': page_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'location': videos_info.get('luogo'),
            **self._search_json_ld(webpage, page_id),
        }

        if len(entries) == 1:
            return {
                **entries[0],
                **common_info,
            }

        return self.playlist_result(entries, multi_video=True, **common_info)

yt_dlp/extractor/redgifs.py

@@ -213,7 +213,7 @@ class RedGifsSearchIE(RedGifsBaseInfoExtractor):
 class RedGifsUserIE(RedGifsBaseInfoExtractor):
     IE_DESC = 'Redgifs user'
     _VALID_URL = r'https?://(?:www\.)?redgifs\.com/users/(?P<username>[^/?#]+)(?:\?(?P<query>[^#]+))?'
-    _PAGE_SIZE = 30
+    _PAGE_SIZE = 80
     _TESTS = [
         {
             'url': 'https://www.redgifs.com/users/lamsinka89',
@@ -222,7 +222,7 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                 'title': 'lamsinka89',
                 'description': 'RedGifs user lamsinka89, ordered by recent',
             },
-            'playlist_mincount': 100,
+            'playlist_mincount': 391,
         },
         {
             'url': 'https://www.redgifs.com/users/lamsinka89?page=3',
@@ -231,7 +231,7 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                 'title': 'lamsinka89',
                 'description': 'RedGifs user lamsinka89, ordered by recent',
             },
-            'playlist_count': 30,
+            'playlist_count': 80,
         },
         {
             'url': 'https://www.redgifs.com/users/lamsinka89?order=best&type=g',
@@ -240,7 +240,17 @@ class RedGifsUserIE(RedGifsBaseInfoExtractor):
                 'title': 'lamsinka89',
                 'description': 'RedGifs user lamsinka89, ordered by best',
             },
-            'playlist_mincount': 100,
+            'playlist_mincount': 391,
+        },
+        {
+            'url': 'https://www.redgifs.com/users/ignored52',
+            'note': 'https://github.com/yt-dlp/yt-dlp/issues/7382',
+            'info_dict': {
+                'id': 'ignored52',
+                'title': 'ignored52',
+                'description': 'RedGifs user ignored52, ordered by recent',
+            },
+            'playlist_mincount': 121,
         },
     ]

yt_dlp/extractor/spankbang.py

@@ -71,9 +71,11 @@ class SpankBangIE(InfoExtractor):
     def _real_extract(self, url):
         mobj = self._match_valid_url(url)
         video_id = mobj.group('id') or mobj.group('id_2')
+        country = self.get_param('geo_bypass_country') or 'US'
+        self._set_cookie('.spankbang.com', 'country', country.upper())
         webpage = self._download_webpage(
             url.replace(f'/{video_id}/embed', f'/{video_id}/video'),
-            video_id, headers={'Cookie': 'country=US'})
+            video_id, impersonate=True)

         if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage):
             raise ExtractorError(
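
Downstream, region selection now flows through the standard option rather than a hardcoded cookie. A usage sketch:

import yt_dlp

# geo_bypass_country is the documented yt-dlp option; with this change,
# SpankBangIE sets a matching country cookie on .spankbang.com before
# fetching the page (and uses impersonation for the request itself)
ydl = yt_dlp.YoutubeDL({'geo_bypass_country': 'DE'})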

yt_dlp/postprocessor/embedthumbnail.py

@@ -231,4 +231,8 @@ class EmbedThumbnailPP(FFmpegPostProcessor):
             thumbnail_filename if converted or not self._already_have_thumbnail else None,
             original_thumbnail if converted and not self._already_have_thumbnail else None,
             info=info)
+
+        if not self._already_have_thumbnail:
+            info['thumbnails'][idx].pop('filepath', None)
+
         return [], info

yt_dlp/postprocessor/ffmpeg.py

@@ -663,6 +663,10 @@ class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
         self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
         os.replace(temp_filename, filename)

+        if not self._already_have_subtitle:
+            for _, subtitle in subtitles.items():
+                subtitle.pop('filepath', None)
+
         files_to_delete = [] if self._already_have_subtitle else sub_filenames
         return files_to_delete, info

@@ -699,6 +703,7 @@ class FFmpegMetadataPP(FFmpegPostProcessor):
         infojson_filename = info.get('infojson_filename')
         options.extend(self._get_infojson_opts(info, infojson_filename))
         if not infojson_filename:
+            info.pop('infojson_filename', None)
             files_to_delete.append(info.get('infojson_filename'))
         elif self._add_infojson is True:
             self.to_screen('The info-json can only be attached to mkv/mka files')
@@ -1016,9 +1021,6 @@ class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
                 'filepath': new_file,
             }

-            info['__files_to_move'][new_file] = replace_extension(
-                info['__files_to_move'][sub['filepath']], new_ext)

         return sub_filenames, info

@@ -1083,16 +1085,15 @@ class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
         return imghdr.what(path) == 'webp'

     def fixup_webp(self, info, idx=-1):
-        thumbnail_filename = info['thumbnails'][idx]['filepath']
+        thumbnail = info['thumbnails'][idx]
+        thumbnail_filename = thumbnail['filepath']
         _, thumbnail_ext = os.path.splitext(thumbnail_filename)
         if thumbnail_ext:
             if thumbnail_ext.lower() != '.webp' and imghdr.what(thumbnail_filename) == 'webp':
                 self.to_screen(f'Correcting thumbnail "{thumbnail_filename}" extension to webp')
                 webp_filename = replace_extension(thumbnail_filename, 'webp')
                 os.replace(thumbnail_filename, webp_filename)
-                info['thumbnails'][idx]['filepath'] = webp_filename
-                info['__files_to_move'][webp_filename] = replace_extension(
-                    info['__files_to_move'].pop(thumbnail_filename), 'webp')
+                thumbnail['filepath'] = webp_filename

     @staticmethod
     def _options(target_ext):
@@ -1130,8 +1131,6 @@ class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
                 continue
             thumbnail_dict['filepath'] = self.convert_thumbnail(original_thumbnail, target_ext)
             files_to_delete.append(original_thumbnail)
-            info['__files_to_move'][thumbnail_dict['filepath']] = replace_extension(
-                info['__files_to_move'][original_thumbnail], target_ext)

         if not has_thumbnail:
             self.to_screen('There aren\'t any thumbnails to convert')

yt_dlp/postprocessor/movefilesafterdownload.py

@@ -1,16 +1,22 @@
 import os
+from pathlib import Path

 from .common import PostProcessor
 from ..compat import shutil
 from ..utils import (
     PostProcessingError,
-    decodeFilename,
-    encodeFilename,
     make_dir,
+    replace_extension,
 )


 class MoveFilesAfterDownloadPP(PostProcessor):
+    # Map of the keys that contain moveable files and the 'type' of the file
+    # for generating the output filename
+    CHILD_KEYS = {
+        'thumbnails': 'thumbnail',
+        'requested_subtitles': 'subtitle',
+    }

     def __init__(self, downloader=None, downloaded=True):
         PostProcessor.__init__(self, downloader)
@@ -20,33 +26,77 @@ class MoveFilesAfterDownloadPP(PostProcessor):
     def pp_key(cls):
         return 'MoveFiles'

+    def move_file_and_write_to_info(self, info_dict, relevant_dict=None, output_file_type=None):
+        relevant_dict = relevant_dict or info_dict
+        if 'filepath' not in relevant_dict:
+            return
+
+        output_file_type = output_file_type or ''
+        current_filepath, final_filepath = self.determine_filepath(info_dict, relevant_dict, output_file_type)
+        move_result = self.move_file(info_dict, current_filepath, final_filepath)
+        if move_result:
+            relevant_dict['filepath'] = move_result
+        else:
+            del relevant_dict['filepath']
+
+    def determine_filepath(self, info_dict, relevant_dict, output_file_type):
+        current_filepath = relevant_dict['filepath']
+        prepared_filepath = self._downloader.prepare_filename(info_dict, output_file_type)
+
+        if (output_file_type == 'thumbnail' and info_dict['__multiple_thumbnails']) or output_file_type == 'subtitle':
+            desired_extension = ''.join(Path(current_filepath).suffixes[-2:])
+        else:
+            desired_extension = Path(current_filepath).suffix
+
+        return current_filepath, replace_extension(prepared_filepath, desired_extension[1:])
+
+    def move_file(self, info_dict, current_filepath, final_filepath):
+        if not current_filepath or not final_filepath:
+            return
+
+        dl_parent_folder = os.path.split(info_dict['filepath'])[0]
+        finaldir = info_dict.get('__finaldir', os.path.abspath(dl_parent_folder))
+
+        if not os.path.isabs(current_filepath):
+            current_filepath = os.path.join(finaldir, current_filepath)
+
+        if not os.path.isabs(final_filepath):
+            final_filepath = os.path.join(finaldir, final_filepath)
+
+        if current_filepath == final_filepath:
+            return final_filepath
+
+        if not os.path.exists(current_filepath):
+            self.report_warning(f'File "{current_filepath}" cannot be found')
+            return
+
+        if os.path.exists(final_filepath):
+            if self.get_param('overwrites', True):
+                self.report_warning(f'Replacing existing file "{final_filepath}"')
+                os.remove(final_filepath)
+            else:
+                self.report_warning(f'Cannot move file "{current_filepath}" out of temporary directory since "{final_filepath}" already exists. ')
+                return
+
+        make_dir(final_filepath, PostProcessingError)
+        self.to_screen(f'Moving file "{current_filepath}" to "{final_filepath}"')
+        shutil.move(current_filepath, final_filepath)  # os.rename cannot move between volumes
+
+        return final_filepath
+
     def run(self, info):
-        dl_path, dl_name = os.path.split(encodeFilename(info['filepath']))
-        finaldir = info.get('__finaldir', dl_path)
-        finalpath = os.path.join(finaldir, dl_name)
-        if self._downloaded:
-            info['__files_to_move'][info['filepath']] = decodeFilename(finalpath)
+        # This represents the main media file (using the 'filepath' key)
+        self.move_file_and_write_to_info(info)

-        make_newfilename = lambda old: decodeFilename(os.path.join(finaldir, os.path.basename(encodeFilename(old))))
-        for oldfile, newfile in info['__files_to_move'].items():
-            if not newfile:
-                newfile = make_newfilename(oldfile)
-            if os.path.abspath(encodeFilename(oldfile)) == os.path.abspath(encodeFilename(newfile)):
-                continue
-            if not os.path.exists(encodeFilename(oldfile)):
-                self.report_warning(f'File "{oldfile}" cannot be found')
-                continue
-            if os.path.exists(encodeFilename(newfile)):
-                if self.get_param('overwrites', True):
-                    self.report_warning(f'Replacing existing file "{newfile}"')
-                    os.remove(encodeFilename(newfile))
-                else:
-                    self.report_warning(
-                        f'Cannot move file "{oldfile}" out of temporary directory since "{newfile}" already exists. ')
-                    continue
-            make_dir(newfile, PostProcessingError)
-            self.to_screen(f'Moving file "{oldfile}" to "{newfile}"')
-            shutil.move(oldfile, newfile)  # os.rename cannot move between volumes
+        for key, output_file_type in self.CHILD_KEYS.items():
+            if key not in info:
+                continue

-        info['filepath'] = finalpath
+            if isinstance(info[key], (dict, list)):
+                iterable = info[key].values() if isinstance(info[key], dict) else info[key]
+                for file_dict in iterable:
+                    self.move_file_and_write_to_info(info, file_dict, output_file_type)

         return [], info
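Sketch of the info dict shapes the rewritten run() walks (keys taken from CHILD_KEYS above; filenames are hypothetical): 'thumbnails' is a list of dicts while 'requested_subtitles' is a dict keyed by language, which is why both shapes are handled.

    # Hypothetical layout consumed by the rewritten run():
    info = {
        'filepath': 'Video Title.mkv',    # main media file, moved first
        'thumbnails': [                   # list -> iterated directly
            {'filepath': 'Video Title.webp'},
        ],
        'requested_subtitles': {          # dict -> its .values() are iterated
            'en': {'filepath': 'Video Title.en.vtt'},
        },
    }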

View File: yt_dlp/utils/_utils.py

@@ -216,7 +216,7 @@ def partial_application(func):
     sig = inspect.signature(func)
     required_args = [
         param.name for param in sig.parameters.values()
-        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
+        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
         if param.default is inspect.Parameter.empty
     ]

@@ -4837,7 +4837,6 @@ def number_of_digits(number):
     return len('%d' % number)


-@partial_application
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
         values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)
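With @partial_application removed, join_nonempty is always eager again. A quick sanity check of the expected behavior (outputs in comments, assuming the function's usual filtering of falsy values):

    from yt_dlp.utils import join_nonempty

    print(join_nonempty('a', None, '', 'b'))    # 'a-b'
    print(join_nonempty('a', 'b', delim=', '))  # 'a, b'
    print(repr(join_nonempty(delim=', ')))      # '' -- a string, not a partial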

View File: yt_dlp/utils/traversal.py

@@ -332,14 +332,14 @@ class _RequiredError(ExtractorError):
 @typing.overload
-def subs_list_to_dict(*, ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
+def subs_list_to_dict(*, lang: str | None = 'und', ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...


 @typing.overload
-def subs_list_to_dict(subs: list[dict] | None, /, *, ext: str | None = None) -> dict[str, list[dict]]: ...
+def subs_list_to_dict(subs: list[dict] | None, /, *, lang: str | None = 'und', ext: str | None = None) -> dict[str, list[dict]]: ...


-def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
+def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None):
     """
     Convert subtitles from a traversal into a subtitle dict.
     The path should have an `all` immediately before this function.
@@ -352,7 +352,7 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
     `quality`  The sort order for each subtitle
     """
     if subs is None:
-        return functools.partial(subs_list_to_dict, ext=ext)
+        return functools.partial(subs_list_to_dict, lang=lang, ext=ext)

     result = collections.defaultdict(list)
@@ -360,10 +360,16 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
         if not url_or_none(sub.get('url')) and not sub.get('data'):
             continue
         sub_id = sub.pop('id', None)
-        if sub_id is None:
-            continue
-        if ext is not None and not sub.get('ext'):
-            sub['ext'] = ext
+        if not isinstance(sub_id, str):
+            if not lang:
+                continue
+            sub_id = lang
+        sub_ext = sub.get('ext')
+        if not isinstance(sub_ext, str):
+            if not ext:
+                sub.pop('ext', None)
+            else:
+                sub['ext'] = ext
         result[sub_id].append(sub)
     result = dict(result)
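Effect of the new lang default, sketched with hypothetical subtitle entries (the result is also quality-sorted further down in the function, omitted here):

    from yt_dlp.utils.traversal import subs_list_to_dict

    subs = [
        {'url': 'https://example.com/auto.vtt'},                          # no id: now kept under 'und'
        {'id': 'en', 'url': 'https://example.com/en.srt', 'ext': 'srt'},
        {'id': 'en', 'data': 'WEBVTT ...'},                               # no ext: filled from ext=
    ]
    print(subs_list_to_dict(subs, ext='vtt'))
    # {'und': [{'url': 'https://example.com/auto.vtt', 'ext': 'vtt'}],
    #  'en': [{'url': 'https://example.com/en.srt', 'ext': 'srt'},
    #         {'data': 'WEBVTT ...', 'ext': 'vtt'}]}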
@@ -452,9 +458,9 @@ def trim_str(*, start=None, end=None):
     return trim


-def unpack(func):
+def unpack(func, **kwargs):
     @functools.wraps(func)
-    def inner(items, **kwargs):
+    def inner(items):
         return func(*items, **kwargs)
     return inner
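The practical difference in unpack: keyword arguments are now bound when the wrapper is built rather than accepted at call time. A minimal sketch:

    from yt_dlp.utils import join_nonempty
    from yt_dlp.utils.traversal import unpack

    join_with_space = unpack(join_nonempty, delim=' ')  # kwargs fixed up front
    print(join_with_space(['yt-dlp', '2024.11']))       # 'yt-dlp 2024.11'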