Compare commits


27 Commits

Author SHA1 Message Date
N/Ame
a4f83648f7
Merge 1ef35f1c00 into c699bafc50 2024-11-16 20:14:12 +13:00
bashonly
c699bafc50 [ie/soop] Fix thumbnail extraction (#11545)
Closes #11537

Authored by: bashonly
2024-11-15 22:51:55 +00:00
bashonly
eb64ae7d5d [ie] Allow ext override for thumbnails (#11545)
Authored by: bashonly
2024-11-15 22:51:55 +00:00
Simon Sawicki
c014fbcddc
[utils] subs_list_to_dict: Add lang default parameter (#11508)
Authored by: Grub4K
2024-11-15 23:25:52 +01:00
Simon Sawicki
39d79c9b9c
[utils] Fix join_nonempty, add **kwargs to unpack (#11559)
Authored by: Grub4K
2024-11-15 22:06:15 +01:00
grqx_termux
1ef35f1c00 update docstring 2024-10-02 17:52:29 +13:00
grqx
3068d9897d Merge branch 'master' into GoogleDriveFolderFix 2024-10-02 17:47:09 +13:00
grqx
b3534df159 Merge branch 'master' into GoogleDriveFolderFix
This may fix the failing CI
2024-10-02 17:43:36 +13:00
grqx
3582a238a0 update url and video id instead of returning a url result 2024-10-01 23:51:00 +13:00
grqx_wsl
23ea25196d update docstring 2024-09-30 12:24:13 +13:00
grqx_wsl
83e0860835 fix my-drive extraction 2024-09-30 06:36:05 +13:00
grqx_wsl
8d827d2460 Update tests 2024-09-29 00:24:50 +12:00
N/Ame
beb76094fa
Update yt_dlp/extractor/googledrive.py 2024-09-28 20:59:50 +12:00
grqx_wsl
d133c2c7f7 [GoogleDriveFolderIE] revert part of last commit
- change folder info json metadata extraction default approach to ds match
2024-08-18 03:15:45 +12:00
grqx_wsl
4a76306868 [GoogleDriveFolderIE] support private folders and my-drive 2024-08-18 03:08:45 +12:00
grqx_wsl
b81a41d5ff [GoogleDriveFolderIE] raise for access denied 2024-08-18 00:01:03 +12:00
grqx_wsl
28ed64d87a [GoogleDriveFolderIE] add bare raise in the except block 2024-08-17 23:49:56 +12:00
grqx_wsl
8623ada293 [GoogleDriveFolderIE] Several fixes
- Update test: GoogleDriveFolder_1
- Raise for log-in required
- catch HTTP Error 404
2024-08-17 23:49:00 +12:00
grqx_wsl
6e98d99dd5 [GoogleDriveFolderIE] improve code readability
- Combined `_extract_json_ds` and `_extract_json_hash` into one method (`_extract_json_meta`)
- Improved `item_url_getter`'s traversal path of item info
- Add comments to improve code readability
2024-08-17 20:21:02 +12:00
grqx_wsl
99d9105f33 [GoogleDrive] add support for shortcut urls: follow redirection
Example url: https://drive.google.com/file/d/17OrYKQBPRm4J_D1rsGbo4eOmm1_SfoIY/
Redirects to (real video):
https://drive.google.com/file/d/1Jp0I0tS-qMxtXNehGQW5_hWhwgC0FeeB/edit
2024-07-28 03:36:55 +12:00
grqx_wsl
017997068b read shortcuts url 2024-07-28 03:18:17 +12:00
grqx_wsl
9962859595 Replace function make_playlist with a more concise generator expression to improve code readability.
All tests and the code format checker pass
        modified:   yt_dlp/extractor/googledrive.py
2024-07-24 23:16:44 +12:00
grqx_wsl
cbe698b4b0 Add support for empty folder 2024-07-22 17:28:45 +12:00
grqx_wsl
64d4e93516 add support for subfolders (recursive) 2024-07-22 16:51:04 +12:00
grqx_wsl
a917af960c fix code formatting, fix test folder title 2024-07-19 16:48:36 +12:00
grqx_wsl
05403ea5ad add tests, fix regex, improve stability. 2024-07-19 16:31:08 +12:00
grqx_wsl
d65753ce05 [GoogleDriveFolder] Fix Extractor 2024-07-19 04:49:09 +12:00
8 changed files with 180 additions and 62 deletions

View File: test/test_traversal.py

@@ -481,7 +481,7 @@ class TestTraversalHelpers:
             'id': 'name',
             'data': 'content',
             'url': 'url',
-        }, all, {subs_list_to_dict}]) == {
+        }, all, {subs_list_to_dict(lang=None)}]) == {
             'de': [{'url': 'https://example.com/subs/de.ass'}],
             'en': [{'data': 'content'}],
         }, 'subs with mandatory items missing should be filtered'
@@ -507,6 +507,54 @@ class TestTraversalHelpers:
             {'url': 'https://example.com/subs/en1', 'ext': 'ext'},
             {'url': 'https://example.com/subs/en2', 'ext': 'ext'},
         ]}, '`quality` key should sort subtitle list accordingly'
+        assert traverse_obj([
+            {'name': 'de', 'url': 'https://example.com/subs/de.ass'},
+            {'name': 'de'},
+            {'name': 'en', 'content': 'content'},
+            {'url': 'https://example.com/subs/en'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'data': 'content',
+        }, all, {subs_list_to_dict(lang='en')}]) == {
+            'de': [{'url': 'https://example.com/subs/de.ass'}],
+            'en': [
+                {'data': 'content'},
+                {'url': 'https://example.com/subs/en'},
+            ],
+        }, 'optionally provided lang should be used if no id available'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang=None)}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be ignored for id and ext'
+        assert traverse_obj([
+            {'name': 1, 'url': 'https://example.com/subs/de1'},
+            {'name': {}, 'url': 'https://example.com/subs/de2'},
+            {'name': 'de', 'ext': 1, 'url': 'https://example.com/subs/de3'},
+            {'name': 'de', 'ext': {}, 'url': 'https://example.com/subs/de4'},
+        ], [..., {
+            'id': 'name',
+            'url': 'url',
+            'ext': 'ext',
+        }, all, {subs_list_to_dict(lang='de')}]) == {
+            'de': [
+                {'url': 'https://example.com/subs/de1'},
+                {'url': 'https://example.com/subs/de2'},
+                {'url': 'https://example.com/subs/de3'},
+                {'url': 'https://example.com/subs/de4'},
+            ],
+        }, 'non str types should be replaced by default id'

     def test_trim_str(self):
         with pytest.raises(TypeError):
@@ -525,7 +573,7 @@ class TestTraversalHelpers:
     def test_unpack(self):
         assert unpack(lambda *x: ''.join(map(str, x)))([1, 2, 3]) == '123'
         assert unpack(join_nonempty)([1, 2, 3]) == '1-2-3'
-        assert unpack(join_nonempty(delim=' '))([1, 2, 3]) == '1 2 3'
+        assert unpack(join_nonempty, delim=' ')([1, 2, 3]) == '1 2 3'
         with pytest.raises(TypeError):
             unpack(join_nonempty)()
         with pytest.raises(TypeError):
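
Reviewer note: the changed assertion captures the new calling convention. Keyword arguments now bind when `unpack` wraps the function, instead of relying on `join_nonempty` partially applying itself (that behavior is removed further down). A minimal sketch, assuming the import paths used by these tests:

    from yt_dlp.utils import join_nonempty
    from yt_dlp.utils.traversal import unpack

    # kwargs are stored by unpack and forwarded on every call
    joiner = unpack(join_nonempty, delim=' ')
    assert joiner([1, 2, 3]) == '1 2 3'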

View File: test/test_utils.py

@@ -72,7 +72,6 @@ from yt_dlp.utils import (
     intlist_to_bytes,
     iri_to_uri,
     is_html,
-    join_nonempty,
     js_to_json,
     limit_length,
     locked_file,
@@ -2158,10 +2157,6 @@ Line 1
         assert int_or_none(v=10) == 10, 'keyword passed positional should call function'
         assert int_or_none(scale=0.1)(10) == 100, 'call after partial application should call the function'
-        assert callable(join_nonempty(delim=', ')), 'varargs positional should apply partially'
-        assert callable(join_nonempty()), 'varargs positional should apply partially'
-        assert join_nonempty(None, delim=', ') == '', 'passed varargs should call the function'


 if __name__ == '__main__':
     unittest.main()
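
With `@partial_application` removed from `join_nonempty` (see the utils hunk further down), a keyword-only call no longer returns a partially applied callable, which is why the assertions above were deleted. A sketch of the resulting behavior:

    from yt_dlp.utils import join_nonempty

    # join_nonempty now always joins immediately
    assert join_nonempty('a', None, 'b', delim=', ') == 'a, b'
    # A keyword-only call joins an empty sequence rather than
    # returning a callable
    assert join_nonempty(delim=', ') == ''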

View File: yt_dlp/YoutubeDL.py

@@ -4381,7 +4381,9 @@ class YoutubeDL:
             return None

         for idx, t in list(enumerate(thumbnails))[::-1]:
-            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
+            thumb_ext = t.get('ext') or determine_ext(t['url'], 'jpg')
+            if multiple:
+                thumb_ext = f'{t["id"]}.{thumb_ext}'
             thumb_display_id = f'{label} thumbnail {t["id"]}'
             thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
             thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))
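
Context for this hunk: `determine_ext` guesses the extension from the URL path, so a thumbnail served from a `.php` endpoint used to be written with a `.php` extension; the new `t.get('ext')` lookup lets extractors override the guess. A small illustration with a hypothetical URL:

    from yt_dlp.utils import determine_ext

    # URL-based guessing picks up the script extension, not the image type
    assert determine_ext('https://example.com/thumb.php?id=1', 'jpg') == 'php'
    # With this change, a thumbnail entry can declare the real extension
    # and core will trust it over the URL
    thumbnail = {'url': 'https://example.com/thumb.php?id=1', 'ext': 'jpg'}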

View File: yt_dlp/extractor/afreecatv.py

@@ -66,6 +66,14 @@ class AfreecaTVBaseIE(InfoExtractor):
             extensions={'legacy_ssl': True}), display_id,
             'Downloading API JSON', 'Unable to download API JSON')

+    @staticmethod
+    def _fixup_thumb(thumb_url):
+        if not url_or_none(thumb_url):
+            return None
+        # Core would determine_ext as 'php' from the url, so we need to provide the real ext
+        # See: https://github.com/yt-dlp/yt-dlp/issues/11537
+        return [{'url': thumb_url, 'ext': 'jpg'}]
+

 class AfreecaTVIE(AfreecaTVBaseIE):
     IE_NAME = 'soop'
@@ -155,7 +163,7 @@ class AfreecaTVIE(AfreecaTVBaseIE):
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('bj_id', {str}),
             'duration': ('total_file_duration', {int_or_none(scale=1000)}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
         })

         entries = []
@@ -226,8 +234,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
         return self.playlist_result(self._entries(data), video_id)

-    @staticmethod
-    def _entries(data):
+    def _entries(self, data):
         # 'files' is always a list with 1 element
         yield from traverse_obj(data, (
             'data', lambda _, v: v['story_type'] == 'catch',
@@ -238,7 +245,7 @@ class AfreecaTVCatchStoryIE(AfreecaTVBaseIE):
             'title': ('title', {str}),
             'uploader': ('writer_nick', {str}),
             'uploader_id': ('writer_id', {str}),
-            'thumbnail': ('thumb', {url_or_none}),
+            'thumbnails': ('thumb', {self._fixup_thumb}),
             'timestamp': ('write_timestamp', {int_or_none}),
         }))

View File: yt_dlp/extractor/common.py

@@ -279,6 +279,7 @@ class InfoExtractor:
     thumbnails:     A list of dictionaries, with the following entries:
                         * "id" (optional, string) - Thumbnail format ID
                         * "url"
+                        * "ext" (optional, string) - actual image extension if not given in URL
                         * "preference" (optional, int) - quality of the image
                         * "width" (optional, int)
                         * "height" (optional, int)

View File: yt_dlp/extractor/googledrive.py

@@ -3,6 +3,7 @@ import urllib.parse

 from .common import InfoExtractor
 from .youtube import YoutubeIE
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     bug_reports_message,
@@ -12,6 +13,7 @@ from ..utils import (
     get_element_html_by_id,
     int_or_none,
     lowercase_escape,
+    traverse_obj,
     try_get,
     update_url_query,
 )
@@ -51,6 +53,17 @@ class GoogleDriveIE(InfoExtractor):
             'duration': 184,
             'thumbnail': 'https://drive.google.com/thumbnail?id=1IP0o8dHcQrIHGgVyp0Ofvx2cGfLzyO1x',
         },
+    }, {
+        # shortcut url
+        'url': 'https://drive.google.com/file/d/1_n3-8ZwEUV4OniMsLAJ_C1JEjuT2u5Pk/view?usp=drivesdk',
+        'md5': '43d34f7be1acc0262f337a039d1ad12d',
+        'info_dict': {
+            'id': '1J1RCw2jcgUngrZRdpza-IHXYkardZ-4l',
+            'ext': 'webm',
+            'title': 'Forrest walk with Best Mind Refresh Music Mithran [tEvJKrE4cS0].webm',
+            'duration': 512,
+            'thumbnail': 'https://drive.google.com/thumbnail?id=1J1RCw2jcgUngrZRdpza-IHXYkardZ-4l',
+        },
     }, {
         # video can't be watched anonymously due to view count limit reached,
         # but can be downloaded (see https://github.com/ytdl-org/youtube-dl/issues/14046)
@@ -166,6 +179,17 @@ class GoogleDriveIE(InfoExtractor):
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        try:
+            _, webpage_urlh = self._download_webpage_handle(url, video_id)
+        except ExtractorError as e:
+            if isinstance(e.cause, HTTPError):
+                if e.cause.status in (401, 403):
+                    self.raise_login_required('Access Denied')
+            raise
+        if webpage_urlh.url != url:
+            url = webpage_urlh.url
+            video_id = self._match_id(url)
+
         video_info = urllib.parse.parse_qs(self._download_webpage(
             'https://drive.google.com/get_video_info',
             video_id, 'Downloading video webpage', query={'docid': video_id}))
@@ -289,7 +313,7 @@ class GoogleDriveIE(InfoExtractor):

 class GoogleDriveFolderIE(InfoExtractor):
     IE_NAME = 'GoogleDrive:Folder'
-    _VALID_URL = r'https?://(?:docs|drive)\.google\.com/drive/folders/(?P<id>[\w-]{28,})'
+    _VALID_URL = r'https?://(?:docs|drive)\.google\.com/drive/(?:folders/(?P<id>[\w-]{19,})|my-drive)'
     _TESTS = [{
         'url': 'https://drive.google.com/drive/folders/1dQ4sx0-__Nvg65rxTSgQrl7VyW_FZ9QI',
         'info_dict': {
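
The loosened `_VALID_URL` accepts shorter (19+ character) folder IDs and the literal `my-drive` path, which has no ID group; hence the `self._match_id(url) or 'my-drive'` fallback in the hunk below. A quick self-contained check:

    import re

    _VALID_URL = r'https?://(?:docs|drive)\.google\.com/drive/(?:folders/(?P<id>[\w-]{19,})|my-drive)'
    # my-drive URLs match, but leave the id group as None
    assert re.match(_VALID_URL, 'https://drive.google.com/drive/my-drive')
    assert re.match(_VALID_URL, 'https://docs.google.com/drive/folders/1dQ4sx0-__Nvg65rxTSgQrl7VyW_FZ9QI')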
@@ -297,47 +321,83 @@ class GoogleDriveFolderIE(InfoExtractor):
             'title': 'Forrest',
         },
         'playlist_count': 3,
+    }, {
+        'note': 'Contains various formats and a subfolder; the folder name was formerly mismatched. '
+                'Also contains a loop shortcut, shortcuts to non-downloadable files, etc.',
+        'url': 'https://docs.google.com/drive/folders/1jjrhqi94d8TSHSVMSdBjD49MOiHYpHfF',
+        'info_dict': {
+            'id': '1jjrhqi94d8TSHSVMSdBjD49MOiHYpHfF',
+            'title': '], sideChannel: {}});',
+        },
+        'playlist_count': 8,
     }]

-    _BOUNDARY = '=====vc17a3rwnndj====='
-    _REQUEST = "/drive/v2beta/files?openDrive=true&reason=102&syncType=0&errorRecovery=false&q=trashed%20%3D%20false%20and%20'{folder_id}'%20in%20parents&fields=kind%2CnextPageToken%2Citems(kind%2CmodifiedDate%2CmodifiedByMeDate%2ClastViewedByMeDate%2CfileSize%2Cowners(kind%2CpermissionId%2Cid)%2ClastModifyingUser(kind%2CpermissionId%2Cid)%2ChasThumbnail%2CthumbnailVersion%2Ctitle%2Cid%2CresourceKey%2Cshared%2CsharedWithMeDate%2CuserPermission(role)%2CexplicitlyTrashed%2CmimeType%2CquotaBytesUsed%2Ccopyable%2CfileExtension%2CsharingUser(kind%2CpermissionId%2Cid)%2Cspaces%2Cversion%2CteamDriveId%2ChasAugmentedPermissions%2CcreatedDate%2CtrashingUser(kind%2CpermissionId%2Cid)%2CtrashedDate%2Cparents(id)%2CshortcutDetails(targetId%2CtargetMimeType%2CtargetLookupStatus)%2Ccapabilities(canCopy%2CcanDownload%2CcanEdit%2CcanAddChildren%2CcanDelete%2CcanRemoveChildren%2CcanShare%2CcanTrash%2CcanRename%2CcanReadTeamDrive%2CcanMoveTeamDriveItem)%2Clabels(starred%2Ctrashed%2Crestricted%2Cviewed))%2CincompleteSearch&appDataFilter=NO_APP_DATA&spaces=drive&pageToken={page_token}&maxResults=50&supportsTeamDrives=true&includeItemsFromAllDrives=true&corpora=default&orderBy=folder%2Ctitle_natural%20asc&retryCount=0&key={key} HTTP/1.1"
-    _DATA = f'''--{_BOUNDARY}
-content-type: application/http
-content-transfer-encoding: binary
-
-GET %s
-
---{_BOUNDARY}
-'''
-
-    def _call_api(self, folder_id, key, data, **kwargs):
-        response = self._download_webpage(
-            'https://clients6.google.com/batch/drive/v2beta',
-            folder_id, data=data.encode(),
-            headers={
-                'Content-Type': 'text/plain;charset=UTF-8;',
-                'Origin': 'https://drive.google.com',
-            }, query={
-                '$ct': f'multipart/mixed; boundary="{self._BOUNDARY}"',
-                'key': key,
-            }, **kwargs)
-        return self._search_json('', response, 'api response', folder_id, **kwargs) or {}
-
-    def _get_folder_items(self, folder_id, key):
-        page_token = ''
-        while page_token is not None:
-            request = self._REQUEST.format(folder_id=folder_id, page_token=page_token, key=key)
-            page = self._call_api(folder_id, key, self._DATA % request)
-            yield from page['items']
-            page_token = page.get('nextPageToken')
+    def _extract_json_meta(self, webpage, video_id, dsval=None, hashval=None, name=None, **kwargs):
+        """
+        Uses regex to search for JSON metadata with a 'ds' value (0-5) or 'hash' value (1-6)
+        in the webpage.
+        logged out: folder info: ds0, hash1; items: ds4, hash6
+        logged in: folder info: ds0, hash1; items: ds5, hash6
+        my-drive: folder info: ds0, hash1 or ds0, hash4; items: ds5, hash6
+        For example, if the webpage contains the line below, the empty data array
+        can be extracted by passing dsval=3 or hashval=2 to this method:
+            AF_initDataCallback({key: 'ds:3', hash: '2', data:[], sideChannel: {}});
+        """
+        _ARRAY_RE = r'\[(?s:.+)\]'
+        _META_END_RE = r', sideChannel: \{\}\}\);'  # greedy match to deal with the 2nd test case
+        if dsval is not None:
+            if not name:
+                name = f'webpage JSON metadata ds:{dsval}'
+            return self._search_json(
+                rf'''key\s*?:\s*?(['"])ds:\s*?{dsval}\1,[^\[]*?data:''', webpage, name, video_id,
+                end_pattern=_META_END_RE, contains_pattern=_ARRAY_RE, **kwargs)
+        elif hashval is not None:
+            if not name:
+                name = f'webpage JSON metadata hash:{hashval}'
+            return self._search_json(
+                rf'''hash\s*?:\s*?(['"]){hashval}\1,[^\[]*?data:''', webpage, name, video_id,
+                end_pattern=_META_END_RE, contains_pattern=_ARRAY_RE, **kwargs)

     def _real_extract(self, url):
-        folder_id = self._match_id(url)
+        def item_url_getter(item, video_id):
+            if not isinstance(item, list):
+                return None
+            available_IEs = (GoogleDriveFolderIE, GoogleDriveIE)  # subfolder or item
+            if 'application/vnd.google-apps.shortcut' in item:  # extract real link
+                entry_url = traverse_obj(
+                    item,
+                    (..., ..., lambda _, v: any(ie.suitable(v) for ie in available_IEs), any))
+            else:
+                entry_url = traverse_obj(
+                    item,
+                    (lambda _, v: any(ie.suitable(v) for ie in available_IEs), any))
+            if not entry_url:
+                return None
+            return self.url_result(entry_url, video_id=video_id, video_title=item[2])

-        webpage = self._download_webpage(url, folder_id)
-        key = self._search_regex(r'"(\w{39})"', webpage, 'key')
+        folder_id = self._match_id(url) or 'my-drive'
+        headers = self.geo_verification_headers()

-        folder_info = self._call_api(folder_id, key, self._DATA % f'/drive/v2beta/files/{folder_id} HTTP/1.1', fatal=False)
+        try:
+            webpage, urlh = self._download_webpage_handle(url, folder_id, headers=headers)
+        except ExtractorError as e:
+            if isinstance(e.cause, HTTPError):
+                if e.cause.status == 404:
+                    self.raise_no_formats(e.cause.msg, expected=True)
+                elif e.cause.status == 403:
+                    # logged in with an account without access
+                    self.raise_login_required('Access Denied')
+            raise
+        if urllib.parse.urlparse(urlh.url).netloc == 'accounts.google.com':
+            # not logged in when visiting a private folder
+            self.raise_login_required('Access Denied')

-        return self.playlist_from_matches(
-            self._get_folder_items(folder_id, key), folder_id, folder_info.get('title'),
-            ie=GoogleDriveIE, getter=lambda item: f'https://drive.google.com/file/d/{item["id"]}')
+        title = self._extract_json_meta(webpage, folder_id, dsval=0, name='folder info')[1][2]
+        items = self._extract_json_meta(webpage, folder_id, hashval=6, name='folder items')[-1]
+
+        if items is False:  # empty folder
+            return self.playlist_result([], folder_id, title)
+        return self.playlist_result(
+            (entry for item in items if (entry := item_url_getter(item, folder_id))),
+            folder_id, title)
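
How the new extraction works: folder pages inline their metadata in `AF_initDataCallback({key: 'ds:N', hash: 'M', data:[...], sideChannel: {}});` calls, and `_extract_json_meta` anchors on either the `ds` or the `hash` value to pull out the `data` array; the folder title is then read from the `ds:0` array and the item list from the `hash:6` array. A toy sketch of the mechanism (hypothetical page snippet; the extractor builds its actual patterns through `_search_json` as shown above):

    import json
    import re

    webpage = ("AF_initDataCallback({key: 'ds:0', hash: '1', "
               'data:[null, ["Forrest"]], sideChannel: {}});')

    # Anchor on the ds key, then capture the JSON array between
    # "data:" and ", sideChannel"
    m = re.search(
        r"key\s*:\s*(['\"])ds:\s*0\1,[^\[]*?data:(\[.*?\]), sideChannel",
        webpage)
    assert json.loads(m.group(2)) == [None, ['Forrest']]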

View File: yt_dlp/utils/_utils.py

@@ -216,7 +216,7 @@ def partial_application(func):
     sig = inspect.signature(func)
     required_args = [
         param.name for param in sig.parameters.values()
-        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.VAR_POSITIONAL)
+        if param.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD)
         if param.default is inspect.Parameter.empty
     ]
@@ -4837,7 +4837,6 @@ def number_of_digits(number):
     return len('%d' % number)


-@partial_application
 def join_nonempty(*values, delim='-', from_dict=None):
     if from_dict is not None:
         values = (traversal.traverse_obj(from_dict, variadic(v)) for v in values)

View File: yt_dlp/utils/traversal.py

@@ -332,14 +332,14 @@ class _RequiredError(ExtractorError):

 @typing.overload
-def subs_list_to_dict(*, ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...
+def subs_list_to_dict(*, lang: str | None = 'und', ext: str | None = None) -> collections.abc.Callable[[list[dict]], dict[str, list[dict]]]: ...


 @typing.overload
-def subs_list_to_dict(subs: list[dict] | None, /, *, ext: str | None = None) -> dict[str, list[dict]]: ...
+def subs_list_to_dict(subs: list[dict] | None, /, *, lang: str | None = 'und', ext: str | None = None) -> dict[str, list[dict]]: ...


-def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
+def subs_list_to_dict(subs: list[dict] | None = None, /, *, lang='und', ext=None):
     """
     Convert subtitles from a traversal into a subtitle dict.
     The path should have an `all` immediately before this function.
@@ -352,7 +352,7 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
     `quality`   The sort order for each subtitle
     """
     if subs is None:
-        return functools.partial(subs_list_to_dict, ext=ext)
+        return functools.partial(subs_list_to_dict, lang=lang, ext=ext)

     result = collections.defaultdict(list)
@@ -360,10 +360,16 @@ def subs_list_to_dict(subs: list[dict] | None = None, /, *, ext=None):
         if not url_or_none(sub.get('url')) and not sub.get('data'):
             continue
         sub_id = sub.pop('id', None)
-        if sub_id is None:
-            continue
-        if ext is not None and not sub.get('ext'):
-            sub['ext'] = ext
+        if not isinstance(sub_id, str):
+            if not lang:
+                continue
+            sub_id = lang
+        sub_ext = sub.get('ext')
+        if not isinstance(sub_ext, str):
+            if not ext:
+                sub.pop('ext', None)
+            else:
+                sub['ext'] = ext
         result[sub_id].append(sub)
     result = dict(result)
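
The tests added in test/test_traversal.py pin down the new `lang` handling: entries whose `id` is missing or not a string fall back to `lang` (default `'und'`), and are only dropped when `lang=None` is passed explicitly. A minimal sketch, assuming the test suite's import path:

    from yt_dlp.utils.traversal import subs_list_to_dict

    subs = [
        {'id': 'de', 'url': 'https://example.com/subs/de.srt'},
        {'url': 'https://example.com/subs/unknown.srt'},  # no language id
    ]
    # Unidentified subs are filed under the provided lang;
    # lang=None would skip them instead
    assert subs_list_to_dict(subs, lang='en') == {
        'de': [{'url': 'https://example.com/subs/de.srt'}],
        'en': [{'url': 'https://example.com/subs/unknown.srt'}],
    }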
@@ -452,9 +458,9 @@ def trim_str(*, start=None, end=None):
     return trim


-def unpack(func):
+def unpack(func, **kwargs):
     @functools.wraps(func)
-    def inner(items, **kwargs):
+    def inner(items):
         return func(*items, **kwargs)
     return inner