Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-26 09:11:25 +01:00)
Compare commits: 58cfbf52e8...f4727b099f (8 commits)
Commits (SHA1):
f4727b099f
f2a4983df7
bacc31b05a
b76f13229d
8bdf94aee3
11daaaf060
80c2305d45
513ea81155
yt_dlp/extractor/_extractors.py

@@ -582,6 +582,7 @@ from .egghead import (
 )
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
+from .elemental_tv import ElementalTVIE
 from .elementorembed import ElementorEmbedIE
 from .elonet import ElonetIE
 from .elpais import ElPaisIE
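Registering the module in yt_dlp/extractor/_extractors.py is what makes the new extractor visible to yt-dlp's URL matching. A quick sanity check of the _VALID_URL pattern (a minimal sketch, assuming a checkout that already contains the new elemental_tv module):

    from yt_dlp.extractor.elemental_tv import ElementalTVIE

    # suitable() tests a URL against the extractor's _VALID_URL regex
    print(ElementalTVIE.suitable('https://play.elemental.tv/channel/573f5a14761973ec1d502507'))  # True
    print(ElementalTVIE.suitable('https://play.elemental.tv/settings'))  # False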
yt_dlp/extractor/archiveorg.py

@@ -205,6 +205,26 @@ class ArchiveOrgIE(InfoExtractor):
             },
         },
         ],
+    }, {
+        # The reviewbody is None for one of the reviews; just need to extract data without crashing
+        'url': 'https://archive.org/details/gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
+        'info_dict': {
+            'id': 'gd95-04-02.sbd.11622.sbeok.shnf/gd95-04-02d1t04.shn',
+            'ext': 'mp3',
+            'title': 'Stuck Inside of Mobile with the Memphis Blues Again',
+            'creators': ['Grateful Dead'],
+            'duration': 338.31,
+            'track': 'Stuck Inside of Mobile with the Memphis Blues Again',
+            'description': 'md5:764348a470b986f1217ffd38d6ac7b72',
+            'display_id': 'gd95-04-02d1t04.shn',
+            'location': 'Pyramid Arena',
+            'uploader': 'jon@archive.org',
+            'album': '1995-04-02 - Pyramid Arena',
+            'upload_date': '20040519',
+            'track_number': 4,
+            'release_date': '19950402',
+            'timestamp': 1084927901,
+        },
     }]
 
     @staticmethod
@@ -335,7 +355,7 @@ class ArchiveOrgIE(InfoExtractor):
                 info['comments'].append({
                     'id': review.get('review_id'),
                     'author': review.get('reviewer'),
-                    'text': str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody'),
+                    'text': join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'),
                     'timestamp': unified_timestamp(review.get('createdate')),
                     'parent': 'root'})
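Why the join_nonempty() change matters: with the old expression, a review whose reviewbody is None made the '+' concatenation raise a TypeError, while join_nonempty() simply drops empty values. A minimal sketch with a made-up review dict, assuming the usual behaviour of yt-dlp's join_nonempty() helper:

    from yt_dlp.utils import join_nonempty

    review = {'reviewtitle': 'Great tape', 'reviewbody': None}  # hypothetical review entry

    # Old code: str_or_none(review.get('reviewtitle'), '') + '\n\n' + review.get('reviewbody')
    # raises TypeError here, because str + None is not allowed.

    # New code: falsy values are skipped, so only the title survives
    print(join_nonempty('reviewtitle', 'reviewbody', from_dict=review, delim='\n\n'))  # -> 'Great tape'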
yt_dlp/extractor/elemental_tv.py (new file, 107 lines)

@@ -0,0 +1,107 @@
import json
import re
import time

from ..extractor.common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import ExtractorError


class ElementalTVIE(InfoExtractor):
    _LOGIN_REQUIRED = True
    _NETRC_MACHINE = 'elemental_tv'
    _VALID_URL = r'https?://play\.elemental\.tv/channel/[0-9a-f]{24}'
    _TESTS = [{
        'url': 'https://play.elemental.tv/channel/573f5a14761973ec1d502507',
        'info_dict': {
            'id': '573f5a14761973ec1d502507',
            'ext': 'mp4',
            'title': 'БНТ 1 HD',
            'thumbnail': 'https://play.elemental.tv/v1/tumblrs/573f5a14761973ec1d502507',
            'age_limit': 0,
        },
    }]

    access_token = ''
    channel_id = ''

    def _get_channel_id(self, url):
        url_parts = re.search('(?<=channel/)[0-9a-f]{24}', url)

        if not url_parts or not url_parts.group(0):
            return None

        self.channel_id = url_parts.group(0)
        self.write_debug(f'Channel ID: {self.channel_id}')

        if not self.channel_id:
            raise ExtractorError('Channel ID not found')

    def _get_stream_metadata(self):
        try:
            headers = {
                'Authorization': 'Bearer ' + self.access_token,
            }
            res_api = self._download_json(
                'https://play.elemental.tv/v1/channels', self.channel_id, headers=headers)
            data = res_api.get('data').get(self.channel_id)

            if not data:
                self.write_debug('Getting metadata failed')
                return {}

            return {
                'title': data.get('name'),
                'age_limit': data.get('age'),
                'thumbnail': data.get('tumblrurl'),
            }
        except Exception:
            self.write_debug('Getting metadata failed')
            return {}

    def _get_stream_url(self):
        # Stream URL needs current epoch time rounded to 10000s
        begin = int((time.time() - 60) / 10000) * 10000
        stream_url = 'https://play.elemental.tv/v1/playlists/%s/playlist.m3u8?begin=%d&access_token=%s' % (self.channel_id, begin, self.access_token)

        if not stream_url or '.m3u8' not in stream_url:
            raise ExtractorError('Unable to get stream URL')

        return stream_url

    def _perform_login(self, username, password):
        post_data = {
            'email': str(username),
            'grant_type': 'client_credentials',
            'password': str(password),
            'rememberme': 'true',
        }

        try:
            res_api = self._download_json(
                'https://play.elemental.tv/v1/users/login', self.channel_id, data=json.dumps(post_data).encode()).get('data')
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                error_message = self._parse_json(e.cause.response.read().decode(), self.channel_id).get('error_info').get('description')
                raise ExtractorError(error_message, expected=True)

        if not res_api or not res_api.get('access_token'):
            raise ExtractorError('Accessing login token failed')

        self.access_token = res_api.get('access_token')

        if res_api.get('token_type') != 'Bearer':
            raise ExtractorError('Unknown login token type')

    def _real_extract(self, url):
        self._get_channel_id(url)
        stream_url = self._get_stream_url()
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, self.channel_id, ext='mp4')

        return {
            'id': self.channel_id,
            'is_live': True,
            'formats': formats,
            'subtitles': subtitles,
            **self._get_stream_metadata(),
        }
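For reference, the begin parameter built in _get_stream_url() above is the current epoch time, minus a minute of slack, floored to a 10,000-second boundary. A standalone sketch of that arithmetic:

    import time

    now = time.time()                        # e.g. 1716112345.6
    begin = int((now - 60) / 10000) * 10000  # floor to a multiple of 10000, e.g. 1716110000
    print(begin % 10000 == 0)                # True: always lands on a 10000-second boundary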
yt_dlp/extractor/facebook.py

@@ -563,13 +563,13 @@ class FacebookIE(InfoExtractor):
             return extract_video_data(try_get(
                 js_data, lambda x: x['jsmods']['instances'], list) or [])
 
-        def extract_dash_manifest(video, formats):
+        def extract_dash_manifest(vid_data, formats, mpd_url=None):
             dash_manifest = traverse_obj(
-                video, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', expected_type=str)
+                vid_data, 'dash_manifest', 'playlist', 'dash_manifest_xml_string', 'manifest_xml', expected_type=str)
             if dash_manifest:
                 formats.extend(self._parse_mpd_formats(
                     compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
-                    mpd_url=url_or_none(video.get('dash_manifest_url'))))
+                    mpd_url=url_or_none(video.get('dash_manifest_url')) or mpd_url))
 
         def process_formats(info):
             # Downloads with browser's User-Agent are rate limited. Working around
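The extra 'manifest_xml' key works because traverse_obj() treats multiple paths as fallbacks and returns the first one that resolves, so the same helper can now read both the legacy payload and the new dash_manifests entries. A minimal sketch with made-up payloads:

    from yt_dlp.utils import traverse_obj

    legacy = {'dash_manifest': '<MPD>legacy</MPD>'}
    new_style = {'manifest_xml': '<MPD>new</MPD>'}

    # The first path that yields a value of the expected type wins
    print(traverse_obj(legacy, 'dash_manifest', 'manifest_xml', expected_type=str))     # <MPD>legacy</MPD>
    print(traverse_obj(new_style, 'dash_manifest', 'manifest_xml', expected_type=str))  # <MPD>new</MPD>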
@@ -619,9 +619,12 @@ class FacebookIE(InfoExtractor):
             video = video['creation_story']
             video['owner'] = traverse_obj(video, ('short_form_video_context', 'video_owner'))
             video.update(reel_info)
-            fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
+
             formats = []
             q = qualities(['sd', 'hd'])
+
+            # Legacy formats extraction
+            fmt_data = traverse_obj(video, ('videoDeliveryLegacyFields', {dict})) or video
             for key, format_id in (('playable_url', 'sd'), ('playable_url_quality_hd', 'hd'),
                                    ('playable_url_dash', ''), ('browser_native_hd_url', 'hd'),
                                    ('browser_native_sd_url', 'sd')):
@@ -629,7 +632,7 @@ class FacebookIE(InfoExtractor):
                 if not playable_url:
                     continue
                 if determine_ext(playable_url) == 'mpd':
-                    formats.extend(self._extract_mpd_formats(playable_url, video_id))
+                    formats.extend(self._extract_mpd_formats(playable_url, video_id, fatal=False))
                 else:
                     formats.append({
                         'format_id': format_id,
@@ -638,6 +641,28 @@ class FacebookIE(InfoExtractor):
                         'url': playable_url,
                     })
             extract_dash_manifest(fmt_data, formats)
+
+            # New videoDeliveryResponse formats extraction
+            fmt_data = traverse_obj(video, ('videoDeliveryResponseFragment', 'videoDeliveryResponseResult'))
+            mpd_urls = traverse_obj(fmt_data, ('dash_manifest_urls', ..., 'manifest_url', {url_or_none}))
+            dash_manifests = traverse_obj(fmt_data, ('dash_manifests', lambda _, v: v['manifest_xml']))
+            for idx, dash_manifest in enumerate(dash_manifests):
+                extract_dash_manifest(dash_manifest, formats, mpd_url=traverse_obj(mpd_urls, idx))
+            if not dash_manifests:
+                # Only extract from MPD URLs if the manifests are not already provided
+                for mpd_url in mpd_urls:
+                    formats.extend(self._extract_mpd_formats(mpd_url, video_id, fatal=False))
+            for prog_fmt in traverse_obj(fmt_data, ('progressive_urls', lambda _, v: v['progressive_url'])):
+                format_id = traverse_obj(prog_fmt, ('metadata', 'quality', {str.lower}))
+                formats.append({
+                    'format_id': format_id,
+                    # sd, hd formats w/o resolution info should be deprioritized below DASH
+                    'quality': q(format_id) - 3,
+                    'url': prog_fmt['progressive_url'],
+                })
+            for m3u8_url in traverse_obj(fmt_data, ('hls_playlist_urls', ..., 'hls_playlist_url', {url_or_none})):
+                formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', fatal=False, m3u8_id='hls'))
+
             if not formats:
                 # Do not append false positive entry w/o any formats
                 return
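On the 'quality': q(format_id) - 3 line above: qualities() returns a ranking function over the listed IDs, with unknown IDs ranking lowest, so shifting the progressive sd/hd entries down by 3 pushes them below the DASH/HLS formats, as the inline comment says. A small sketch, assuming the helper's usual index-based behaviour:

    from yt_dlp.utils import qualities

    q = qualities(['sd', 'hd'])
    print(q('sd'), q('hd'), q('unknown'))  # 0 1 -1
    print(q('sd') - 3, q('hd') - 3)        # -3 -2: lower than anything q() itself assigns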