mirror of
https://github.com/yt-dlp/yt-dlp.git
synced 2024-11-26 09:11:25 +01:00
Compare commits
15 Commits
fb27062bdb
...
e7b5c45ec0
Author | SHA1 | Date | |
---|---|---|---|
|
e7b5c45ec0 | ||
|
f2a4983df7 | ||
|
bacc31b05a | ||
|
6abf89a9c7 | ||
|
fd6ad217b1 | ||
|
0b6807cdb0 | ||
|
cb13b03b3e | ||
|
9989f2ab3b | ||
|
e0eefb2c5a | ||
|
c9da74e5e7 | ||
|
e3a2c06121 | ||
|
23f070dee8 | ||
|
32298c6d97 | ||
|
0d3a6f2c2a | ||
|
f80ba18ee9 |
|
@ -1468,6 +1468,8 @@
|
|||
- **ThisVid**
|
||||
- **ThisVidMember**
|
||||
- **ThisVidPlaylist**
|
||||
- **Threads**
|
||||
- **ThreadsIOS**: Threads' iOS `barcelona://` URL
|
||||
- **ThreeSpeak**
|
||||
- **ThreeSpeakUser**
|
||||
- **TikTok**
|
||||
|
|
|
@ -2090,6 +2090,10 @@ from .thisvid import (
|
|||
ThisVidMemberIE,
|
||||
ThisVidPlaylistIE,
|
||||
)
|
||||
from .threads import (
|
||||
ThreadsIE,
|
||||
ThreadsIOSIE,
|
||||
)
|
||||
from .threeqsdn import ThreeQSDNIE
|
||||
from .threespeak import (
|
||||
ThreeSpeakIE,
|
||||
|
|
|
@ -205,6 +205,26 @@ class ArchiveOrgIE(InfoExtractor):
|
|||
},
|
||||
},
|
||||
],
|
||||
}, {
|
||||
# The reviewbody is None for one of the reviews; just need to extract data without crashing
|
||||
'url': 'https://archive.org/details/gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
|
||||
'info_dict': {
|
||||
'id': 'gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
|
||||
'ext': 'mp3',
|
||||
'title': 'Stuck Inside of Mobile with the Memphis Blues Again',
|
||||
'creators': ['Grateful Dead'],
|
||||
'duration': 338.31,
|
||||
'track': 'Stuck Inside of Mobile with the Memphis Blues Again',
|
||||
'description': 'md5:764348a470b986f1217ffd38d6ac7b72',
|
||||
'display_id': 'gd95-04-02d1t04.shn',
|
||||
'location': 'Pyramid Arena',
|
||||
'uploader': 'jon@archive.org',
|
||||
'album': '1995-04-02 - Pyramid Arena',
|
||||
'upload_date': '20040519',
|
||||
'track_number': 4,
|
||||
'release_date': '19950402',
|
||||
'timestamp': 1084927901,
|
||||
},
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
|
@ -335,7 +355,7 @@ class ArchiveOrgIE(InfoExtractor):
|
|||
info['comments'].append({
|
||||
'id': review.get('review_id'),
|
||||
'author': review.get('reviewer'),
|
||||
'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
|
||||
'text': join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'),
|
||||
'timestamp': unified_timestamp(review.get('createdate')),
|
||||
'parent': 'root'})
|
||||
|
||||
|
|
|
@ -563,13 +563,13 @@ class FacebookIE(InfoExtractor):
|
|||
return extract_video_data(try_get(
|
||||
js_data, lambda x: x['jsmods']['instances'], list) or [])
|
||||
|
||||
def extract_dash_manifest(video, formats):
|
||||
def extract_dash_manifest(vid_data, formats, mpd_url=None):
|
||||
dash_manifest = traverse_obj(
|
||||
video, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', expected_type=str)
|
||||
vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
|
||||
if dash_manifest:
|
||||
formats.extend(self._parse_mpd_formats(
|
||||
compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
|
||||
mpd_url=url_or_none(video.get('dash_manifest_url'))))
|
||||
mpd_url=url_or_none(video.get('dash_manifest_url')) or mpd_url))
|
||||
|
||||
def process_formats(info):
|
||||
# Downloads with browser's User-Agent are rate limited. Working around
|
||||
|
@ -619,9 +619,12 @@ class FacebookIE(InfoExtractor):
|
|||
video = video['creation_story']
|
||||
video['owner'] = traverse_obj(video, ('short_form_video_context', 'video_owner'))
|
||||
video.update(reel_info)
|
||||
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
|
||||
|
||||
formats = []
|
||||
q = qualities(['sd', 'hd'])
|
||||
|
||||
# Legacy formats extraction
|
||||
fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
|
||||
for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'),
|
||||
('playable_url_dash', ''), ('browser_native_hd_url', 'hd'),
|
||||
('browser_native_sd_url', 'sd')):
|
||||
|
@ -629,7 +632,7 @@ class FacebookIE(InfoExtractor):
|
|||
if not playable_url:
|
||||
continue
|
||||
if determine_ext(playable_url) == 'mpd':
|
||||
formats.extend(self._extract_mpd_formats(playable_url, video_id))
|
||||
formats.extend(self._extract_mpd_formats(playable_url, video_id, fatal=False))
|
||||
else:
|
||||
formats.append({
|
||||
'format_id': format_id,
|
||||
|
@ -638,6 +641,28 @@ class FacebookIE(InfoExtractor):
|
|||
'url': playable_url,
|
||||
})
|
||||
extract_dash_manifest(fmt_data, formats)
|
||||
|
||||
# New videoDeliveryResponse formats extraction
|
||||
fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
|
||||
mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
|
||||
dash_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
|
||||
for idx, dash_manifest in enumerate(dash_manifests):
|
||||
extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
|
||||
if not dash_manifests:
|
||||
# Only extract from MPD URLs if the manifests are not already provided
|
||||
for mpd_url in mpd_urls:
|
||||
formats.extend(self._extract_mpd_formats(mpd_url, video_id, fatal=False))
|
||||
for prog_fmt in traverse_obj(fmt_data, ('progressive_urls', lambda _, v: v['progressive_url'])):
|
||||
format_id = traverse_obj(prog_fmt, ('metadata', 'quality', {str.lower}))
|
||||
formats.append({
|
||||
'format_id': format_id,
|
||||
# sd, hd formats w/o resolution info should be deprioritized below DASH
|
||||
'quality': q(format_id) - 3,
|
||||
'url': prog_fmt['progressive_url'],
|
||||
})
|
||||
for m3u8_url in traverse_obj(fmt_data, ('hls_playlist_urls', ..., 'hls_playlist_url', {url_or_none})):
|
||||
formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', fatal=False, m3u8_id='hls'))
|
||||
|
||||
if not formats:
|
||||
# Do not append false positive entry w/o any formats
|
||||
return
|
||||
|
|
158
yt_dlp/extractor/threads.py
Normal file
158
yt_dlp/extractor/threads.py
Normal file
|
@ -0,0 +1,158 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
remove_end,
|
||||
strftime_or_none,
|
||||
strip_or_none,
|
||||
)
|
||||
from ..utils.traversal import traverse_obj
|
||||
|
||||
|
||||
class ThreadsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?threads\.net/(?P<uploader>[^/]+)/post/(?P<id>[^/?#&]+)/?(?P<embed>embed.*?)?'

    _TESTS = [{
        'url': 'https://www.threads.net/@tntsportsbr/post/C6cqebdCfBi',
        'info_dict': {
            'id': 'C6cqebdCfBi',
            'ext': 'mp4',
            'title': 'md5:062673d04195aa2d99b8d7a11798cb9d',
            'description': 'md5:fe0c73f9a892fb92efcc67cc075561b0',
            'uploader': 'TNT Sports Brasil',
            'uploader_id': 'tntsportsbr',
            'uploader_url': 'https://www.threads.net/@tntsportsbr',
            'channel': 'tntsportsbr',
            'channel_url': 'https://www.threads.net/@tntsportsbr',
            'timestamp': 1714613811,
            'upload_date': '20240502',
            'like_count': int,
            'channel_is_verified': bool,
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'https://www.threads.net/@felipebecari/post/C6cM_yNPHCF',
        'info_dict': {
            'id': 'C6cM_yNPHCF',
            'ext': 'mp4',
            'title': '@felipebecari • Sobre o futuro dos dois últimos resgatados: tem muita notícia boa! 🐶❤️',
            'description': 'Sobre o futuro dos dois últimos resgatados: tem muita notícia boa! 🐶❤️',
            'uploader': 'Felipe Becari',
            'uploader_id': 'felipebecari',
            'uploader_url': 'https://www.threads.net/@felipebecari',
            'channel': 'felipebecari',
            'channel_url': 'https://www.threads.net/@felipebecari',
            'timestamp': 1714598318,
            'upload_date': '20240501',
            'like_count': int,
            'channel_is_verified': bool,
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The post data is embedded in a <script> blob containing the post's
        # shortcode; narrow it down to the "result" JSON object inside.
        json_data = self._search_regex(
            rf'<script[^>]+>(.*"code":"{video_id}".*)</script>',
            webpage, 'main json', fatal=True)
        result = self._search_json(
            r'"result":', json_data,
            'result data', video_id, fatal=True)

        # Initialize before the search loop so that a page without a matching
        # post still returns cleanly instead of raising NameError below.
        formats = []
        thumbnails = []
        metadata = {}

        # Walk every thread item until we find the post whose shortcode matches
        # the URL. `or []` guards avoid crashing when any level is absent
        # (traverse_obj returns None for missing paths).
        for node in traverse_obj(result, ('data', 'data', 'edges')) or []:
            for item in traverse_obj(node, ('node', 'thread_items')) or []:
                post = item.get('post')
                if not post or post.get('code') != video_id:
                    continue

                # Carousel posts carry several media entries; single-video
                # posts keep the media fields on the post object itself.
                for media in post.get('carousel_media') or [post]:
                    for video in media.get('video_versions') or []:
                        formats.append({
                            'format_id': '{}-{}'.format(media.get('pk'), video['type']),  # id-type
                            'url': video['url'],
                            'width': media.get('original_width'),
                            'height': media.get('original_height'),
                        })

                # Thumbnails
                for thumb in traverse_obj(post, ('image_versions2', 'candidates')) or []:
                    thumbnails.append({
                        'url': thumb['url'],
                        'width': thumb.get('width'),
                        'height': thumb.get('height'),
                    })

                # Metadata from the post JSON; setdefault keeps the first
                # matching post's values if several items match.
                username = traverse_obj(post, ('user', 'username'))
                metadata.setdefault('uploader_id', username)
                metadata.setdefault('channel_is_verified', traverse_obj(post, ('user', 'is_verified')))
                if username:
                    # Only build the profile URL when we actually have a
                    # username, to avoid emitting ".../@None"
                    metadata.setdefault('uploader_url', f'https://www.threads.net/@{username}')
                metadata.setdefault('timestamp', post.get('taken_at'))
                metadata.setdefault('like_count', post.get('like_count'))

        # Fallback metadata scraped from the page markup itself
        metadata['id'] = video_id
        metadata['title'] = strip_or_none(remove_end(self._html_extract_title(webpage), '• Threads'))
        metadata['description'] = self._og_search_description(webpage)

        metadata['channel'] = metadata.get('uploader_id')
        metadata['channel_url'] = metadata.get('uploader_url')
        metadata['uploader'] = self._search_regex(
            r'(.*?) \(', self._og_search_title(webpage), 'uploader', metadata.get('uploader_id'))
        metadata['upload_date'] = strftime_or_none(metadata.get('timestamp'))

        return {
            **metadata,
            'formats': formats,
            'thumbnails': thumbnails,
        }
|
||||
|
||||
|
||||
class ThreadsIOSIE(InfoExtractor):
    IE_DESC = 'IOS barcelona:// URL'
    _VALID_URL = r'barcelona://media\?shortcode=(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'barcelona://media?shortcode=C6fDehepo5D',
        'info_dict': {
            'id': 'C6fDehepo5D',
            'ext': 'mp4',
            'title': 'md5:dc92f960981b8b3a33eba9681e9fdfc6',
            'description': 'md5:0c36a7e67e1517459bc0334dba932164',
            'uploader': 'Sa\u0303o Paulo Futebol Clube',
            'uploader_id': 'saopaulofc',
            'uploader_url': 'https://www.threads.net/@saopaulofc',
            'channel': 'saopaulofc',
            'channel_url': 'https://www.threads.net/@saopaulofc',
            'timestamp': 1714694014,
            'upload_date': '20240502',
            'like_count': int,
            'channel_is_verified': bool,
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'add_ie': ['Threads'],
    }]

    def _real_extract(self, url):
        shortcode = self._match_id(url)
        # Threads ignores the username path segment and redirects to the
        # canonical post URL, so a '**' placeholder stands in for the real
        # handle and we never need to resolve it ourselves.
        return self.url_result(f'http://www.threads.net/**/post/{shortcode}', ThreadsIE, shortcode)
|
Loading…
Reference in New Issue
Block a user