Compare commits

...

6 Commits

Author SHA1 Message Date
Lucas Rademaker
d42fc5bf72
Merge 2eaf303b63 into f2a4983df7 2024-11-14 03:16:51 +01:00
Jackson Humphrey
f2a4983df7
[ie/archive.org] Fix comments extraction (#11527)
Closes #11526
Authored by: jshumphrey
2024-11-12 23:26:18 +00:00
bashonly
bacc31b05a
[ie/facebook] Fix formats extraction (#11513)
Closes #11497
Authored by: bashonly
2024-11-12 23:23:10 +00:00
Lucas Rademaker
2eaf303b63 [ie/Zoom] gh-7784 fix password handling logic 2024-10-18 11:56:22 +07:00
Lucas Rademaker
367ec929f4 [Zoom] skip expired video test 2024-10-18 09:06:35 +07:00
Lucas Rademaker
b901e4fb8a [Zoom] add interpreter audio formats
Use the options `-f "best+mergeall[vcodec=none]" --audio-multistreams` to merge all interpreter tracks alongside the main video
2024-10-08 18:01:09 +05:45
3 changed files with 106 additions and 26 deletions

View File

@ -205,6 +205,26 @@ class ArchiveOrgIE(InfoExtractor):
}, },
}, },
], ],
}, {
# The reviewbody is None for one of the reviews; just need to extract data without crashing
'url': 'https://archive.org/details/gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
'info_dict': {
'id': 'gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
'ext': 'mp3',
'title': 'Stuck Inside of Mobile with the Memphis Blues Again',
'creators': ['Grateful Dead'],
'duration': 338.31,
'track': 'Stuck Inside of Mobile with the Memphis Blues Again',
'description': 'md5:764348a470b986f1217ffd38d6ac7b72',
'display_id': 'gd95-04-02d1t04.shn',
'location': 'Pyramid Arena',
'uploader': 'jon@archive.org',
'album': '1995-04-02 - Pyramid Arena',
'upload_date': '20040519',
'track_number': 4,
'release_date': '19950402',
'timestamp': 1084927901,
},
}] }]
@staticmethod @staticmethod
@ -335,7 +355,7 @@ class ArchiveOrgIE(InfoExtractor):
info['comments'].append({ info['comments'].append({
'id': review.get('review_id'), 'id': review.get('review_id'),
'author': review.get('reviewer'), 'author': review.get('reviewer'),
'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'), 'text': join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'),
'timestamp': unified_timestamp(review.get('createdate')), 'timestamp': unified_timestamp(review.get('createdate')),
'parent': 'root'}) 'parent': 'root'})

View File

@ -563,13 +563,13 @@ class FacebookIE(InfoExtractor):
return extract_video_data(try_get( return extract_video_data(try_get(
js_data, lambda x: x['jsmods']['instances'], list) or []) js_data, lambda x: x['jsmods']['instances'], list) or [])
def extract_dash_manifest(video, formats): def extract_dash_manifest(vid_data, formats, mpd_url=None):
dash_manifest = traverse_obj( dash_manifest = traverse_obj(
video, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', expected_type=str) vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
if dash_manifest: if dash_manifest:
formats.extend(self._parse_mpd_formats( formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)), compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
mpd_url=url_or_none(video.get('dash_manifest_url')))) mpd_url=url_or_none(video.get('dash_manifest_url')) or mpd_url))
def process_formats(info): def process_formats(info):
# Downloads with browser's User-Agent are rate limited. Working around # Downloads with browser's User-Agent are rate limited. Working around
@ -619,9 +619,12 @@ class FacebookIE(InfoExtractor):
video = video['creation_story'] video = video['creation_story']
video['owner'] = traverse_obj(video, ('short_form_video_context', 'video_owner')) video['owner'] = traverse_obj(video, ('short_form_video_context', 'video_owner'))
video.update(reel_info) video.update(reel_info)
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
formats = [] formats = []
q = qualities(['sd', 'hd']) q = qualities(['sd', 'hd'])
# Legacy formats extraction
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'), for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'),
('playable_url_dash', ''), ('browser_native_hd_url', 'hd'), ('playable_url_dash', ''), ('browser_native_hd_url', 'hd'),
('browser_native_sd_url', 'sd')): ('browser_native_sd_url', 'sd')):
@ -629,7 +632,7 @@ class FacebookIE(InfoExtractor):
if not playable_url: if not playable_url:
continue continue
if determine_ext(playable_url) == 'mpd': if determine_ext(playable_url) == 'mpd':
formats.extend(self._extract_mpd_formats(playable_url, video_id)) formats.extend(self._extract_mpd_formats(playable_url, video_id, fatal=False))
else: else:
formats.append({ formats.append({
'format_id': format_id, 'format_id': format_id,
@ -638,6 +641,28 @@ class FacebookIE(InfoExtractor):
'url': playable_url, 'url': playable_url,
}) })
extract_dash_manifest(fmt_data, formats) extract_dash_manifest(fmt_data, formats)
# New videoDeliveryResponse formats extraction
fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
dash_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
for idx, dash_manifest in enumerate(dash_manifests):
extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
if not dash_manifests:
# Only extract from MPD URLs if the manifests are not already provided
for mpd_url in mpd_urls:
formats.extend(self._extract_mpd_formats(mpd_url, video_id, fatal=False))
for prog_fmt in traverse_obj(fmt_data, ('progressive_urls', lambda _, v: v['progressive_url'])):
format_id = traverse_obj(prog_fmt, ('metadata', 'quality', {str.lower}))
formats.append({
'format_id': format_id,
# sd, hd formats w/o resolution info should be deprioritized below DASH
'quality': q(format_id) - 3,
'url': prog_fmt['progressive_url'],
})
for m3u8_url in traverse_obj(fmt_data, ('hls_playlist_urls', ..., 'hls_playlist_url', {url_or_none})):
formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', fatal=False, m3u8_id='hls'))
if not formats: if not formats:
# Do not append false positive entry w/o any formats # Do not append false positive entry w/o any formats
return return

View File

@ -7,6 +7,7 @@ from ..utils import (
parse_resolution, parse_resolution,
str_or_none, str_or_none,
traverse_obj, traverse_obj,
update_url,
url_basename, url_basename,
urlencode_postdata, urlencode_postdata,
urljoin, urljoin,
@ -34,6 +35,7 @@ class ZoomIE(InfoExtractor):
'ext': 'mp4', 'ext': 'mp4',
'title': 'Prépa AF2023 - Séance 5 du 11 avril - R20/VM/GO', 'title': 'Prépa AF2023 - Séance 5 du 11 avril - R20/VM/GO',
}, },
'skip': 'This recording has expired',
}, { }, {
# share URL # share URL
'url': 'https://us02web.zoom.us/rec/share/hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8', 'url': 'https://us02web.zoom.us/rec/share/hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8',
@ -61,41 +63,59 @@ class ZoomIE(InfoExtractor):
return self._search_json( return self._search_json(
r'window\.__data__\s*=', webpage, 'data', video_id, transform_source=js_to_json) r'window\.__data__\s*=', webpage, 'data', video_id, transform_source=js_to_json)
def _get_real_webpage(self, url, base_url, video_id, url_type): def _try_login(self, url, base_url, video_id, form):
webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage') # This will most likely only work for password-protected meetings
try:
form = self._form_hidden_inputs('password_form', webpage)
except ExtractorError:
return webpage
password = self.get_param('videopassword') password = self.get_param('videopassword')
if not password: if not password:
raise ExtractorError( raise ExtractorError(
'This video is protected by a passcode, use the --video-password option', expected=True) 'This video is protected by a passcode, use the --video-password option', expected=True)
is_meeting = form.get('useWhichPasswd') == 'meeting' is_meeting = form.get('useWhichPasswd') == 'meeting'
validation = self._download_json( validation = self._download_json(
base_url + 'rec/validate%s_passwd' % ('_meet' if is_meeting else ''), base_url + 'nws/recording/1.0/validate%s-passwd' % ('-meeting' if is_meeting else ''),
video_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({ video_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({
'id': form[('meet' if is_meeting else 'file') + 'Id'], 'id': form[('meeting' if is_meeting else 'file') + '_id'],
'passwd': password, 'passwd': password,
'action': form.get('action'), 'action': form.get('action'),
})) }))
if not validation.get('status'): if not validation.get('status'):
raise ExtractorError(validation['errorMessage'], expected=True) raise ExtractorError(validation['errorMessage'], expected=True)
return self._download_webpage(url, video_id, note=f'Re-downloading {url_type} webpage')
def _get_real_webpage(self, url, base_url, video_id, url_type):
webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage')
data = self._get_page_data(webpage, video_id)
if data.get('componentName') != 'need-password': # not password protected
return webpage
# Password-protected:
self._try_login(url, base_url, video_id, form=data)
# Return the new HTML document
new_url = f"{base_url}rec/share/{data['meeting_id']}"
return self._download_webpage(new_url, video_id, note=f'Re-downloading {url_type} webpage')
def _get_share_redirect_url(self, url, base_url, video_id):
"""Converts a `/rec/share` url to the corresponding `/rec/play` url, performs login if necessary"""
webpage = self._get_real_webpage(url, base_url, video_id, 'share')
meeting_id = self._get_page_data(webpage, video_id)['meetingId']
redirect_dict = self._download_json(
f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}',
video_id, note='Downloading share info JSON')['result']
redirect_path = redirect_dict.pop('redirectUrl')
url = update_url(urljoin(base_url, redirect_path), query_update=redirect_dict)
if redirect_dict.get('componentName') == 'need-password':
# First login, then return redirection URL
return self._get_share_redirect_url(url, base_url, video_id)
return url
def _real_extract(self, url): def _real_extract(self, url):
base_url, url_type, video_id = self._match_valid_url(url).group('base_url', 'type', 'id') base_url, url_type, video_id = self._match_valid_url(url).group('base_url', 'type', 'id')
query = {}
if url_type == 'share': if url_type == 'share':
webpage = self._get_real_webpage(url, base_url, video_id, 'share') url = self._get_share_redirect_url(url, base_url, video_id)
meeting_id = self._get_page_data(webpage, video_id)['meetingId']
redirect_path = self._download_json(
f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}',
video_id, note='Downloading share info JSON')['result']['redirectUrl']
url = urljoin(base_url, redirect_path)
query['continueMode'] = 'true'
webpage = self._get_real_webpage(url, base_url, video_id, 'play') webpage = self._get_real_webpage(url, base_url, video_id, 'play')
file_id = self._get_page_data(webpage, video_id)['fileId'] file_id = self._get_page_data(webpage, video_id)['fileId']
@ -104,10 +124,12 @@ class ZoomIE(InfoExtractor):
raise ExtractorError('Unable to extract file ID') raise ExtractorError('Unable to extract file ID')
data = self._download_json( data = self._download_json(
f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id, query=query, f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id, query={
'continueMode': 'true', # Makes this return value include interpreter audio information
},
note='Downloading play info JSON')['result'] note='Downloading play info JSON')['result']
subtitles = {} subtitles = {}
# XXX: Would be more appropriate to parse chapters separate from subtitles
for _type in ('transcript', 'cc', 'chapter'): for _type in ('transcript', 'cc', 'chapter'):
if data.get(f'{_type}Url'): if data.get(f'{_type}Url'):
subtitles[_type] = [{ subtitles[_type] = [{
@ -117,6 +139,19 @@ class ZoomIE(InfoExtractor):
formats = [] formats = []
if data.get('interpreterAudioList'):
for audio in data.get('interpreterAudioList'):
formats.append({
'format_note': f'Intepreter: {audio["languageText"]}',
'url': audio['audioUrl'],
'format_id': f'interpreter-{ audio["icon"].lower()}',
'ext': 'm4a',
# There doesn't seem to be an explicit field for a standardized language code,
# sometimes the `language` field may be more accurate than `icon`
'language': audio['icon'].lower(),
'vcodec': 'none',
})
if data.get('viewMp4Url'): if data.get('viewMp4Url'):
formats.append({ formats.append({
'format_note': 'Camera stream', 'format_note': 'Camera stream',