Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-24 16:21:24 +01:00)

Compare commits: 36 commits, f069573aed ... 3c48f31fe5

Commits:
3c48f31fe5, da252d9d32, e079ffbda6, 2009cb27e1, f351440f1d, c59ce7d6a6,
bd857a06a0, c58ee488a9, eacad11a5a, d69a1be537, 5cbf04763b, 901e78af62,
9a6f9843c0, 8ef2294282, 0e344b806f, 60b763c50f, 195af478f3, 8a1daf41ab,
0f9b09842e, 1066a94acf, aa34d34596, 0e1851bc34, a886439396, 38383ea313,
28a1163010, cee1c763e4, bbb121c2af, 6beca5eb57, 82d7e40908, 5b1b5bb1b6,
445531c5a0, 16d68723dc, 5b962d70de, 98d9edf823, 6d2de79b7a, a8769f672b
@@ -342,8 +342,9 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
                                     extractor plugins; postprocessor plugins can
                                     only be loaded from the default plugin
                                     directories
    --flat-playlist                  Do not extract the videos of a playlist,
                                     only list them
    --flat-playlist                  Do not extract a playlist's URL result
                                     entries; some entry metadata may be missing
                                     and downloading may be bypassed
    --no-flat-playlist               Fully extract the videos of a playlist
                                     (default)
    --live-from-start                Download livestreams from the start.
@@ -1869,6 +1870,9 @@ The following extractors use this feature:

#### digitalconcerthall
* `prefer_combined_hls`: Prefer extracting combined/pre-merged video and audio HLS formats. This will exclude 4K/HEVC video and lossless/FLAC audio formats, which are only available as split video/audio HLS formats

#### sonylivseries
* `sort_order`: Episode sort order for series extraction - one of `asc` (ascending, oldest first) or `desc` (descending, newest first). Default is `asc`

**Note**: These options may be changed/removed in the future without concern for backward compatibility

<!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->
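Both new extractor arguments can be exercised without touching the code. A minimal sketch via yt-dlp's Python embedding API, equivalent to `--extractor-args "sonylivseries:sort_order=desc"` on the CLI; the show URL and the `prefer_combined_hls` value are placeholders/assumptions, not taken from this diff:

```python
# Hedged sketch: passing the newly documented extractor arguments.
# The dict-of-lists form mirrors the CLI --extractor-args syntax; the URL
# below is a placeholder, not a known-working page.
import yt_dlp

ydl_opts = {
    'extractor_args': {
        'sonylivseries': {'sort_order': ['desc']},                # newest episodes first
        'digitalconcerthall': {'prefer_combined_hls': ['true']},  # assumed truthy value
    },
}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    info = ydl.extract_info('https://www.sonyliv.com/shows/placeholder-series-id', download=False)
    for entry in info.get('entries') or []:
        print(entry.get('title'))
```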
@@ -234,5 +234,10 @@
        "when": "57212a5f97ce367590aaa5c3e9a135eead8f81f7",
        "short": "[ie/vimeo] Fix API retries (#11351)",
        "authors": ["bashonly"]
    },
    {
        "action": "add",
        "when": "52c0ffe40ad6e8404d93296f575007b05b04c686",
        "short": "[priority] **Login with OAuth is no longer supported for YouTube**\nDue to a change made by the site, yt-dlp is no longer able to support OAuth login for YouTube. [Read more](https://github.com/yt-dlp/yt-dlp/issues/11462#issuecomment-2471703090)"
    }
]
@@ -285,6 +285,16 @@ from .bloomberg import BloombergIE
from .bluesky import BlueskyIE
from .bokecc import BokeCCIE
from .bongacams import BongaCamsIE
from .boomplay import (
    BoomplayEpisodeIE,
    BoomplayGenericPlaylistIE,
    BoomplayMusicIE,
    BoomplayPlaylistIE,
    BoomplayPodcastIE,
    BoomplaySearchIE,
    BoomplaySearchURLIE,
    BoomplayVideoIE,
)
from .boosty import BoostyIE
from .bostonglobe import BostonGlobeIE
from .box import BoxIE
@@ -1,4 +1,3 @@

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
yt_dlp/extractor/boomplay.py (new file, 511 lines)

@@ -0,0 +1,511 @@
import base64
import functools
import json
import re
import urllib.parse

from .common import InfoExtractor, SearchInfoExtractor
from ..aes import aes_cbc_decrypt_bytes, aes_cbc_encrypt_bytes, unpad_pkcs7
from ..utils import (
    ExtractorError,
    classproperty,
    clean_html,
    extract_attributes,
    get_elements_text_and_html_by_attribute,
    int_or_none,
    join_nonempty,
    merge_dicts,
    parse_count,
    parse_duration,
    smuggle_url,
    strip_or_none,
    unified_strdate,
    unsmuggle_url,
    url_or_none,
    urlencode_postdata,
    urljoin,
    variadic,
)
from ..utils.traversal import traverse_obj


class BoomplayBaseIE(InfoExtractor):
    # Calculated from const values, see lhx.AESUtils.encrypt in public.js
    # Note that the real key/iv differs from `lhx.AESUtils.key`/`lhx.AESUtils.iv`
    _KEY = b'boomplayVr3xopAM'
    _IV = b'boomplay8xIsKTn9'
    _BASE = 'https://www.boomplay.com'
    _MEDIA_TYPES = ('songs', 'video', 'episode', 'podcasts', 'playlists', 'artists', 'albums')
    _GEO_COUNTRIES = ['NG']

    @staticmethod
    def __yield_elements_text_and_html_by_class_and_tag(class_, tag, html):
        """
        Yields content of all elements matching `tag.class_` in html
        class_ must be re escaped
        """
        # get_elements_text_and_html_by_attribute returns a generator
        return get_elements_text_and_html_by_attribute(
            attribute='class', value=rf'''[^'"]*(?<=['"\s]){class_}(?=['"\s])[^'"]*''', html=html,
            tag=tag, escape_value=False)

    @classmethod
    def __yield_elements_by_class_and_tag(cls, *args, **kwargs):
        return (content for content, _ in cls.__yield_elements_text_and_html_by_class_and_tag(*args, **kwargs))

    @classmethod
    def __yield_elements_html_by_class_and_tag(cls, *args, **kwargs):
        return (whole for _, whole in cls.__yield_elements_text_and_html_by_class_and_tag(*args, **kwargs))

    @classmethod
    def _get_elements_by_class_and_tag(cls, class_, tag, html):
        return list(cls.__yield_elements_by_class_and_tag(class_, tag, html))

    @classmethod
    def _get_element_by_class_and_tag(cls, class_, tag, html):
        return next(cls.__yield_elements_by_class_and_tag(class_, tag, html), None)

    @classmethod
    def _urljoin(cls, path):
        return url_or_none(urljoin(base=cls._BASE, path=path))

    def _get_playurl(self, item_id, item_type):
        resp = self._download_json(
            'https://www.boomplay.com/getResourceAddr', item_id,
            note='Downloading play URL', errnote='Failed to download play URL',
            data=urlencode_postdata({
                'param': base64.b64encode(aes_cbc_encrypt_bytes(json.dumps({
                    'itemID': item_id,
                    'itemType': item_type,
                }).encode(), self._KEY, self._IV)).decode(),
            }), headers={
                'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            })
        if not (source := resp.get('source')) and (code := resp.get('code')):
            if 'unavailable in your country' in ((desc := resp.get('desc')) or ''):
                # since NG must have failed ...
                self.raise_geo_restricted(countries=['GH', 'KE', 'TZ', 'CM', 'CI'])
            else:
                raise ExtractorError(desc or f'Failed to get play url, code: {code}')
        return unpad_pkcs7(aes_cbc_decrypt_bytes(
            base64.b64decode(source),
            self._KEY, self._IV)).decode()

    def _extract_formats(self, item_id, item_type='MUSIC', **kwargs):
        if url := url_or_none(self._get_playurl(item_id, item_type)):
            return [{
                'format_id': '0',
                'url': url,
                'http_headers': {
                    'Origin': 'https://www.boomplay.com',
                    'Referer': 'https://www.boomplay.com',
                    'X-Boomplay-Ref': 'Boomplay_WEBV1',
                },
                **kwargs,
            }]
        else:
            self.raise_no_formats('No formats found')

    def _extract_page_metadata(self, webpage, item_id):
        metadata_div = self._get_element_by_class_and_tag('summary', 'div', webpage) or ''
        metadata_entries = re.findall(r'(?si)<strong>(?P<entry>.*?)</strong>', metadata_div) or []
        description = re.sub(
            r'(?i)Listen and download music for free on Boomplay!', '',
            clean_html(self._get_element_by_class_and_tag(
                'description_content', 'span', webpage)) or '') or None

        details_section = self._get_element_by_class_and_tag('songDetailInfo', 'section', webpage) or ''
        metadata_entries.extend(re.findall(r'(?si)<li>(?P<entry>.*?)</li>', details_section) or [])
        page_metadata = {
            'id': item_id,
            **self._extract_title_from_webpage(webpage),
            'thumbnail': self._html_search_meta(['og:image', 'twitter:image'],
                                                webpage, 'thumbnail', default=None),
            'like_count': parse_count(self._get_element_by_class_and_tag('btn_favorite', 'button', metadata_div)),
            'repost_count': parse_count(self._get_element_by_class_and_tag('btn_share', 'button', metadata_div)),
            'comment_count': parse_count(self._get_element_by_class_and_tag('btn_comment', 'button', metadata_div)),
            'duration': parse_duration(self._get_element_by_class_and_tag('btn_duration', 'button', metadata_div)),
            'upload_date': unified_strdate(strip_or_none(
                self._get_element_by_class_and_tag('btn_pubDate', 'button', metadata_div))),
            'description': description,
        }
        for metadata_entry in metadata_entries:
            if ':' not in metadata_entry:
                continue
            k, v = clean_html(metadata_entry).split(':', 1)
            v = v.strip()
            if 'artist' in k.lower():
                page_metadata['artists'] = [v]
            elif 'album' in k.lower():
                page_metadata['album'] = v
            elif 'genre' in k.lower():
                page_metadata['genres'] = [v]
            elif 'year of release' in k.lower():
                page_metadata['release_year'] = int_or_none(v)
        return page_metadata

    def _extract_title_from_webpage(self, webpage):
        if h1_title := self._html_search_regex(r'(?i)<h1[^>]*>([^<]+)</h1>', webpage, 'title', default=None):
            return {'title': h1_title}
        else:
            return self._fix_title(
                self._html_search_meta(['og:title', 'twitter:title'], webpage, 'title', default=None)
                or self._html_search_regex(r'(?i)<title[^>]*>([^<]+)</title>', webpage, 'title', default=None))

    @staticmethod
    def _fix_title(title):
        """
        fix various types of titles(og:title, twitter:title, title tag in html head)
        """
        if not title:
            return {}

        title_patterns = (
            r'^(?P<title>(?P<artist>.+)) Songs MP3 Download, New Songs \& Albums \| Boomplay$',  # artists
            r'^(?P<artist>.+?) - (?P<title>.+) MP3\ Download \& Lyrics \| Boomplay$',  # music
            r'^Download (?P<artist>.+) album songs: (?P<title>.+?) \| Boomplay Music$',  # album
            r'^Search:(?P<title>.+) \| Boomplay Music$',  # search url
            r'^(?P<title>.+) \| Podcast \| Boomplay$',  # podcast, episode
            r'^(?P<title>.+) \| Boomplay(?: Music)?$',  # video, playlist, generic playlists
        )

        for pattern in title_patterns:
            if match := re.search(pattern, title):
                return {
                    'title': match.group('title'),
                    'artists': [match.group('artist')] if 'artist' in match.groupdict() else None,
                }

        return {'title': title}

    @classmethod
    def _extract_from_webpage(cls, url, webpage, **kwargs):
        if kwargs:
            url = smuggle_url(url, kwargs)
        return super()._extract_from_webpage(url, webpage)

    @classmethod
    def _extract_embed_urls(cls, url, webpage):
        url, smuggled_data = unsmuggle_url(url)
        media_types = variadic(smuggled_data.get('media_types', cls._MEDIA_TYPES))
        media_types = join_nonempty(*(
            re.escape(v) for v in media_types if v in cls._MEDIA_TYPES),
            delim='|')

        for mobj in re.finditer(
                rf'''(?ix)
                    <a
                        (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                        (?<=\s)href\s*=\s*(?P<_q>['"])
                            (?P<href>/(?:{media_types})/\d+/?[\-\w=?&#:;@]*)
                        (?P=_q)
                        (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                    >''', webpage):
            if url := cls._urljoin(mobj.group('href')):
                yield url

    @classmethod
    def _extract_playlist_entries(cls, webpage, media_types, warn=True):
        song_list = strip_or_none(
            cls._get_element_by_class_and_tag('morePart_musics', 'ol', webpage)
            or cls._get_element_by_class_and_tag('morePart', 'ol', webpage)
            or '')

        entries = traverse_obj(cls.__yield_elements_html_by_class_and_tag(
            'songName', 'a', song_list),
            (..., {extract_attributes}, 'href', {cls._urljoin}, {cls.url_result}))
        if not entries:
            if warn:
                cls.report_warning('Failed to extract playlist entries, finding suitable links instead!')

            def strip_ie(entry):
                # All our IEs have a _VALID_URL and set a key: don't use it
                entry.pop('ie_key', None)
                return entry

            return (strip_ie(result) for result in
                    cls._extract_from_webpage(cls._BASE, webpage, media_types=media_types))

        return entries


class BoomplayMusicIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/songs/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.boomplay.com/songs/165481965',
        'md5': 'c5fb4f23e6aae98064230ef3c39c2178',
        'info_dict': {
            'title': 'Rise of the Fallen Heroes',
            'ext': 'mp3',
            'id': '165481965',
            'artists': ['fatbunny'],
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/04/29/375ecda38f6f48179a93c72ab909118f_464_464.jpg',
            'channel_url': 'https://www.boomplay.com/artists/52723101',
            'duration': 125.0,
            'release_year': 2024,
            'comment_count': int,
            'like_count': int,
            'repost_count': int,
            'album': 'Legendary Battle',
            'genres': ['Metal'],
        },
    }]

    def _real_extract(self, url):
        song_id = self._match_id(url)
        webpage = self._download_webpage(url, song_id)
        ld_json_meta = next(self._yield_json_ld(webpage, song_id))
        # TODO: extract comments(and lyrics? they don't have timestamps)
        # example: https://www.boomplay.com/songs/96352673?from=home
        return merge_dicts(
            self._extract_page_metadata(webpage, song_id),
            traverse_obj(ld_json_meta, {
                'title': 'name',
                'thumbnail': 'image',
                'channel_url': ('byArtist', 0, '@id'),
                'artists': ('byArtist', ..., 'name'),
                'duration': ('duration', {parse_duration}),
            }), {
                'formats': self._extract_formats(song_id, 'MUSIC', vcodec='none'),
            })


class BoomplayVideoIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.boomplay.com/video/1154892',
        'md5': 'd9b67ad333d2292a82922062d065352d',
        'info_dict': {
            'id': '1154892',
            'ext': 'mp4',
            'title': 'Autumn blues',
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/10/10/2171dee9e1f8452e84021560729edb88.jpg',
            'upload_date': '20241010',
            'timestamp': 1728599214,
            'view_count': int,
            'duration': 177.0,
            'description': 'Autumn blues by Lugo',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return merge_dicts(
            self._extract_page_metadata(webpage, video_id),
            self._search_json_ld(webpage, video_id), {
                'formats': self._extract_formats(video_id, 'VIDEO', ext='mp4'),
            })


class BoomplayEpisodeIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/episode/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.boomplay.com/episode/7132706',
        'md5': 'f26e236b764baa53d7a2cbb7e9ce6dc4',
        'info_dict': {
            'id': '7132706',
            'ext': 'mp3',
            'title': 'Letting Go',
            'repost_count': int,
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/05/06/fc535eaa25714b43a47185a9831887a5_320_320.jpg',
            'comment_count': int,
            'duration': 921.0,
            'upload_date': '20240506',
            'description': 'md5:5ec684b281fa0f9e4c31b3ee20c5e57a',
        },
    }]

    def _real_extract(self, url):
        ep_id = self._match_id(url)
        webpage = self._download_webpage(url, ep_id)
        return merge_dicts(
            self._extract_page_metadata(webpage, ep_id), {
                'description': self._html_search_meta(
                    ['description', 'og:description', 'twitter:description'], webpage),
                'formats': self._extract_formats(ep_id, 'EPISODE', vcodec='none'),
            })


class BoomplayPodcastIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/podcasts/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.boomplay.com/podcasts/5372',
        'playlist_count': 200,
        'info_dict': {
            'id': '5372',
            'title': 'TED Talks Daily',
            'description': r're:(?s)Every weekday, TED Talks Daily brings you the latest talks .{328} learn something new\.$',
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/12/22/6f9cf97ad6f846a0a7882c98dfcf4f8c_320_320.jpg',
            'repost_count': int,
            'comment_count': int,
            'like_count': int,
        },
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        song_list = self._get_element_by_class_and_tag('morePart_musics', 'ol', webpage)
        song_list = traverse_obj(re.finditer(
            r'''(?ix)
                <li
                    (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                    \sdata-id\s*=\s*
                    (?P<_q>['"]?)
                        (?P<id>\d+)
                    (?P=_q)
                    (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
                >''',
            song_list),
            (..., 'id', {
                lambda x: self.url_result(
                    f'https://www.boomplay.com/episode/{x}', BoomplayEpisodeIE, x),
            }))
        return self.playlist_result(
            song_list, playlist_id,
            **self._extract_page_metadata(webpage, playlist_id))


class BoomplayPlaylistIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/(?:playlists|artists|albums)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.boomplay.com/playlists/33792494',
        'info_dict': {
            'id': '33792494',
            'title': 'Daily Trending Indonesia',
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/08/19/d05d431ee616412caeacd7f78f4f68f5_320_320.jpeg',
            'repost_count': int,
            'comment_count': int,
            'like_count': int,
            'description': 'md5:7ebdffc5137c77acb62acb3c89248445',
        },
        'playlist_count': 10,
    }, {
        'url': 'https://www.boomplay.com/artists/52723101',
        'only_matching': True,
    }, {
        'url': 'https://www.boomplay.com/albums/89611238?from=home#google_vignette',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)
        json_ld_metadata = next(self._yield_json_ld(webpage, playlist_id))
        # schema `MusicGroup` not supported by self._json_ld()

        return self.playlist_result(**merge_dicts(
            self._extract_page_metadata(webpage, playlist_id),
            traverse_obj(json_ld_metadata, {
                'entries': ('track', ..., 'url', {
                    functools.partial(self.url_result, ie=BoomplayMusicIE),
                }),
                'playlist_title': 'name',
                'thumbnail': 'image',
                'artists': ('byArtist', ..., 'name'),
                'channel_url': ('byArtist', 0, '@id'),
            })))


class BoomplayGenericPlaylistIE(BoomplayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?boomplay\.com/.+'
    _TESTS = [{
        'url': 'https://www.boomplay.com/new-songs',
        'playlist_mincount': 20,
        'info_dict': {
            'id': 'new-songs',
            'title': 'New Songs',
            'thumbnail': 'http://www.boomplay.com/pc/img/og_default_v3.jpg',
        },
    }, {
        'url': 'https://www.boomplay.com/trending-songs',
        'playlist_mincount': 20,
        'info_dict': {
            'id': 'trending-songs',
            'title': 'Trending Songs',
            'thumbnail': 'http://www.boomplay.com/pc/img/og_default_v3.jpg',
        },
    }]

    @classmethod
    def suitable(cls, url):
        return super().suitable(url) and all(not ie.suitable(url) for ie in (
            BoomplayEpisodeIE,
            BoomplayMusicIE,
            BoomplayPlaylistIE,
            BoomplayPodcastIE,
            BoomplaySearchURLIE,
            BoomplayVideoIE,
        ))

    def _real_extract(self, url):
        playlist_id = self._generic_id(url)
        webpage = self._download_webpage(url, playlist_id)
        return self.playlist_result(
            self._extract_playlist_entries(webpage, self._MEDIA_TYPES),
            **self._extract_page_metadata(webpage, playlist_id))


class BoomplaySearchURLIE(BoomplayBaseIE):
    _TESTS = [{
        'url': 'https://www.boomplay.com/search/default/%20Rise%20of%20the%20Falletesn%20Heroes%20fatbunny',
        'md5': 'c5fb4f23e6aae98064230ef3c39c2178',
        'info_dict': {
            'id': '165481965',
            'ext': 'mp3',
            'title': 'Rise of the Fallen Heroes',
            'duration': 125.0,
            'genres': ['Metal'],
            'artists': ['fatbunny'],
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/04/29/375ecda38f6f48179a93c72ab909118f_464_464.jpg',
            'channel_url': 'https://www.boomplay.com/artists/52723101',
            'comment_count': int,
            'repost_count': int,
            'album': 'Legendary Battle',
            'release_year': 2024,
            'like_count': int,
        },
    }, {
        'url': 'https://www.boomplay.com/search/video/%20Autumn%20blues',
        'md5': 'd9b67ad333d2292a82922062d065352d',
        'info_dict': {
            'id': '1154892',
            'title': 'Autumn blues',
            'ext': 'mp4',
            'timestamp': 1728599214,
            'view_count': int,
            'thumbnail': 'https://source.boomplaymusic.com/group10/M00/10/10/2171dee9e1f8452e84021560729edb88.jpg',
            'description': 'Autumn blues by Lugo',
            'upload_date': '20241010',
            'duration': 177.0,
        },
        'params': {'playlist_items': '1'},
    }]

    @classproperty
    def _VALID_URL(cls):
        return r'https?://(?:www\.)?boomplay\.com/search/(?P<media_type>default|video|episode|podcasts|playlists|artists|albums)/(?P<query>[^?&#/]+)'

    def _real_extract(self, url):
        media_type, query = self._match_valid_url(url).group('media_type', 'query')
        if media_type == 'default':
            media_type = 'songs'
        webpage = self._download_webpage(url, query)
        return self.playlist_result(
            self._extract_playlist_entries(webpage, media_type, warn=media_type == 'songs'),
            **self._extract_page_metadata(webpage, query))


class BoomplaySearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'boomplaysearch'
    _RETURN_TYPE = 'url'
    _TESTS = [{
        'url': 'boomplaysearch:rise of the fallen heroes',
        'only_matching': True,
    }]

    def _search_results(self, query):
        yield self.url_result(
            f'https://www.boomplay.com/search/default/{urllib.parse.quote(query)}',
            BoomplaySearchURLIE)
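With this branch installed, the new Boomplay extractors can be smoke-tested through the embedding API. A minimal sketch (the song URL is the one used in the `BoomplayMusicIE` test above; Boomplay is geo-targeted, so extraction may fail or be geo-restricted depending on your location):

```python
# Hedged sketch: exercising BoomplayMusicIE and BoomplaySearchIE.
import yt_dlp

with yt_dlp.YoutubeDL({'extract_flat': 'in_playlist'}) as ydl:
    # Direct song page, handled by BoomplayMusicIE
    song = ydl.extract_info('https://www.boomplay.com/songs/165481965', download=False)
    print(song.get('title'), song.get('artists'))

    # Search shorthand, handled by BoomplaySearchIE; with extract_flat the
    # entries stay as unresolved URL results pointing back at boomplay.com
    search = ydl.extract_info('boomplaysearch:rise of the fallen heroes', download=False)
    for entry in search.get('entries') or []:
        print(entry.get('url'))
```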
@@ -3767,7 +3767,7 @@ class InfoExtractor:
        """ Merge subtitle dictionaries, language by language. """
        if target is None:
            target = {}
        for d in dicts:
        for d in filter(None, dicts):
            for lang, subs in d.items():
                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
        return target
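The one-line change above (`filter(None, dicts)`) lets callers pass `None` placeholders among the subtitle dicts without crashing the merge. A standalone illustration of the behaviour (not the `InfoExtractor` method itself):

```python
# Why filter(None, ...): None (or empty) subtitle dicts are now skipped
# instead of raising AttributeError on .items().
dicts = [{'en': [{'url': 'https://example.invalid/en.vtt'}]}, None, {}]

merged = {}
for d in filter(None, dicts):  # drops the None and the empty dict
    for lang, subs in d.items():
        merged.setdefault(lang, []).extend(subs)

print(merged)  # {'en': [{'url': 'https://example.invalid/en.vtt'}]}
```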
@@ -1,11 +1,24 @@
import json
import re
import urllib.parse

from .common import InfoExtractor
from ..utils import orderedSet
from .ninecninemedia import NineCNineMediaIE
from ..utils import extract_attributes, orderedSet
from ..utils.traversal import find_element, traverse_obj


class CTVNewsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)(?:$|[#?&])'
    _BASE_REGEX = r'https?://(?:[^.]+\.)?ctvnews\.ca/'
    _VIDEO_ID_RE = r'(?P<id>\d{5,})'
    _PLAYLIST_ID_RE = r'(?P<id>\d\.\d{5,})'
    _VALID_URL = [
        rf'{_BASE_REGEX}video/c{_VIDEO_ID_RE}',
        rf'{_BASE_REGEX}video(?:-gallery)?/?\?clipId={_VIDEO_ID_RE}',
        rf'{_BASE_REGEX}video/?\?(?:playlist|bin)Id={_PLAYLIST_ID_RE}',
        rf'{_BASE_REGEX}(?!video/)[^?#]*?{_PLAYLIST_ID_RE}/?(?:$|[?#])',
        rf'{_BASE_REGEX}(?!video/)[^?#]+\?binId={_PLAYLIST_ID_RE}',
    ]
    _TESTS = [{
        'url': 'http://www.ctvnews.ca/video?clipId=901995',
        'md5': 'b608f466c7fa24b9666c6439d766ab7e',

@@ -17,13 +30,32 @@ class CTVNewsIE(InfoExtractor):
            'timestamp': 1467286284,
            'upload_date': '20160630',
            'categories': [],
            'tags': [],
            'season_id': 57981,
            'duration': 764.631,
            'series': 'CTV News National story',
            'thumbnail': r're:^https?://.*\.jpg$',
            'season': 'Season 0',
            'season_number': 0,
            'season': 'Season 0',
            'tags': [],
            'series': 'CTV News National | Archive | Stories 2',
            'season_id': '57981',
            'thumbnail': r're:https?://.*\.jpg$',
            'duration': 764.631,
        },
    }, {
        'url': 'https://barrie.ctvnews.ca/video/c3030933-here_s-what_s-making-news-for-nov--15?binId=1272429',
        'md5': '8b8c2b33c5c1803e3c26bc74ff8694d5',
        'info_dict': {
            'id': '3030933',
            'ext': 'flv',
            'title': 'Here’s what’s making news for Nov. 15',
            'description': 'Here are the top stories we’re working on for CTV News at 11 for Nov. 15',
            'thumbnail': 'http://images2.9c9media.com/image_asset/2021_2_22_a602e68e-1514-410e-a67a-e1f7cccbacab_png_2000x1125.jpg',
            'season_id': '58104',
            'season_number': 0,
            'tags': [],
            'season': 'Season 0',
            'categories': [],
            'series': 'CTV News Barrie',
            'upload_date': '20241116',
            'duration': 42.943,
            'timestamp': 1731722452,
        },
    }, {
        'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224',

@@ -46,6 +78,65 @@
            'id': '1.5736957',
        },
        'playlist_mincount': 6,
    }, {
        'url': 'https://www.ctvnews.ca/business/respondents-to-bank-of-canada-questionnaire-largely-oppose-creating-a-digital-loonie-1.6665797',
        'md5': '24bc4b88cdc17d8c3fc01dfc228ab72c',
        'info_dict': {
            'id': '2695026',
            'ext': 'flv',
            'season_id': '89852',
            'series': 'From CTV News Channel',
            'description': 'md5:796a985a23cacc7e1e2fafefd94afd0a',
            'season': '2023',
            'title': 'Bank of Canada asks public about digital currency',
            'categories': [],
            'tags': [],
            'upload_date': '20230526',
            'season_number': 2023,
            'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
            'timestamp': 1685105157,
            'duration': 253.553,
        },
    }, {
        'url': 'https://stox.ctvnews.ca/video-gallery?clipId=582589',
        'md5': '135cc592df607d29dddc931f1b756ae2',
        'info_dict': {
            'id': '582589',
            'ext': 'flv',
            'categories': [],
            'timestamp': 1427906183,
            'season_number': 0,
            'duration': 125.559,
            'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
            'series': 'CTV News Stox',
            'description': 'CTV original footage of the rise and fall of the Berlin Wall.',
            'title': 'Berlin Wall',
            'season_id': '63817',
            'season': 'Season 0',
            'tags': [],
            'upload_date': '20150401',
        },
    }, {
        'url': 'https://ottawa.ctvnews.ca/features/regional-contact/regional-contact-archive?binId=1.1164587#3023759',
        'md5': 'a14c0603557decc6531260791c23cc5e',
        'info_dict': {
            'id': '3023759',
            'ext': 'flv',
            'season_number': 2024,
            'timestamp': 1731798000,
            'season': '2024',
            'episode': 'Episode 125',
            'description': 'CTV News Ottawa at Six',
            'duration': 2712.076,
            'episode_number': 125,
            'upload_date': '20241116',
            'title': 'CTV News Ottawa at Six for Saturday, November 16, 2024',
            'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
            'categories': [],
            'tags': [],
            'series': 'CTV News Ottawa at Six',
            'season_id': '92667',
        },
    }, {
        'url': 'http://www.ctvnews.ca/1.810401',
        'only_matching': True,

@@ -57,29 +148,35 @@
        'only_matching': True,
    }]

    def _ninecninemedia_url_result(self, clip_id):
        return self.url_result(f'9c9media:ctvnews_web:{clip_id}', NineCNineMediaIE, clip_id)

    def _real_extract(self, url):
        page_id = self._match_id(url)

        def ninecninemedia_url_result(clip_id):
            return {
                '_type': 'url_transparent',
                'id': clip_id,
                'url': f'9c9media:ctvnews_web:{clip_id}',
                'ie_key': 'NineCNineMedia',
            }
        if mobj := re.fullmatch(self._VIDEO_ID_RE, urllib.parse.urlparse(url).fragment):
            page_id = mobj.group('id')

        if page_id.isdigit():
            return ninecninemedia_url_result(page_id)
        else:
            webpage = self._download_webpage(f'http://www.ctvnews.ca/{page_id}', page_id, query={
        if re.fullmatch(self._VIDEO_ID_RE, page_id):
            return self._ninecninemedia_url_result(page_id)

        webpage = self._download_webpage(f'https://www.ctvnews.ca/{page_id}', page_id, query={
            'ot': 'example.AjaxPageLayout.ot',
            'maxItemsPerPage': 1000000,
        })
        entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet(
            re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
        entries = [self._ninecninemedia_url_result(clip_id)
                   for clip_id in orderedSet(re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
        if not entries:
            webpage = self._download_webpage(url, page_id)
            if 'getAuthStates("' in webpage:
                entries = [ninecninemedia_url_result(clip_id) for clip_id in
                entries = [self._ninecninemedia_url_result(clip_id) for clip_id in
                           self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')]
            else:
                entries = [
                    self._ninecninemedia_url_result(clip_id) for clip_id in
                    traverse_obj(webpage, (
                        {find_element(tag='jasper-player-container', html=True)},
                        {extract_attributes}, 'axis-ids', {json.loads}, ..., 'axisId', {str}))
                ]

        return self.playlist_result(entries, page_id)
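The rewritten CTVNews `_VALID_URL` is now a list built from `_VIDEO_ID_RE` and `_PLAYLIST_ID_RE`. A standalone sketch of how those patterns classify the URLs used in the tests above (this simply re-runs the regexes outside yt-dlp):

```python
# Standalone sketch of the new CTVNews URL matching; mirrors the regexes
# added above but runs independently of yt-dlp.
import re

BASE = r'https?://(?:[^.]+\.)?ctvnews\.ca/'
VIDEO_ID = r'(?P<id>\d{5,})'
PLAYLIST_ID = r'(?P<id>\d\.\d{5,})'
PATTERNS = [
    rf'{BASE}video/c{VIDEO_ID}',
    rf'{BASE}video(?:-gallery)?/?\?clipId={VIDEO_ID}',
    rf'{BASE}video/?\?(?:playlist|bin)Id={PLAYLIST_ID}',
    rf'{BASE}(?!video/)[^?#]*?{PLAYLIST_ID}/?(?:$|[?#])',
    rf'{BASE}(?!video/)[^?#]+\?binId={PLAYLIST_ID}',
]

for url in (
    'http://www.ctvnews.ca/video?clipId=901995',          # single clip -> 901995
    'http://www.ctvnews.ca/video?playlistId=1.2966224',   # playlist   -> 1.2966224
):
    match = next(filter(None, (re.match(p, url) for p in PATTERNS)), None)
    print(url, '->', match and match.group('id'))
```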
@@ -569,7 +569,7 @@ class FacebookIE(InfoExtractor):
            if dash_manifest:
                formats.extend(self._parse_mpd_formats(
                    compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
                    mpd_url=url_or_none(video.get('dash_manifest_url')) or mpd_url))
                    mpd_url=url_or_none(vid_data.get('dash_manifest_url')) or mpd_url))

        def process_formats(info):
            # Downloads with browser's User-Agent are rate limited. Working around
@@ -1,30 +1,32 @@
import json
import uuid

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    join_nonempty,
    smuggle_url,
    traverse_obj,
    try_call,
    unsmuggle_url,
    urljoin,
)


class LiTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)'

    _URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?content_id=%s'

    _VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:[^/?#]+/watch/|vod/[^/?#]+/content\.do\?content_id=)(?P<id>[\w-]+)'
    _URL_TEMPLATE = 'https://www.litv.tv/%s/watch/%s'
    _GEO_COUNTRIES = ['TW']
    _TESTS = [{
        'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
        'url': 'https://www.litv.tv/drama/watch/VOD00041610',
        'info_dict': {
            'id': 'VOD00041606',
            'title': '花千骨',
        },
        'playlist_count': 51,  # 50 episodes + 1 trailer
    }, {
        'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
        'url': 'https://www.litv.tv/drama/watch/VOD00041610',
        'md5': 'b90ff1e9f1d8f5cfcd0a44c3e2b34c7a',
        'info_dict': {
            'id': 'VOD00041610',

@@ -32,16 +34,15 @@ class LiTVIE(InfoExtractor):
            'title': '花千骨第1集',
            'thumbnail': r're:https?://.*\.jpg$',
            'description': '《花千骨》陸劇線上看。十六年前,平靜的村莊內,一名女嬰隨異相出生,途徑此地的蜀山掌門清虛道長算出此女命運非同一般,她體內散發的異香易招惹妖魔。一念慈悲下,他在村莊周邊設下結界阻擋妖魔入侵,讓其年滿十六後去蜀山,並賜名花千骨。',
            'categories': ['奇幻', '愛情', '中國', '仙俠'],
            'categories': ['奇幻', '愛情', '仙俠', '古裝'],
            'episode': 'Episode 1',
            'episode_number': 1,
        },
        'params': {
            'noplaylist': True,
        },
        'skip': 'Georestricted to Taiwan',
    }, {
        'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&',
        'url': 'https://www.litv.tv/drama/watch/VOD00044841',
        'md5': '88322ea132f848d6e3e18b32a832b918',
        'info_dict': {
            'id': 'VOD00044841',

@@ -55,94 +56,62 @@ class LiTVIE(InfoExtractor):
    def _extract_playlist(self, playlist_data, content_type):
        all_episodes = [
            self.url_result(smuggle_url(
                self._URL_TEMPLATE % (content_type, episode['contentId']),
                self._URL_TEMPLATE % (content_type, episode['content_id']),
                {'force_noplaylist': True}))  # To prevent infinite recursion
            for episode in traverse_obj(playlist_data, ('seasons', ..., 'episode', lambda _, v: v['contentId']))]
            for episode in traverse_obj(playlist_data, ('seasons', ..., 'episodes', lambda _, v: v['content_id']))]

        return self.playlist_result(all_episodes, playlist_data['contentId'], playlist_data.get('title'))
        return self.playlist_result(all_episodes, playlist_data['content_id'], playlist_data.get('title'))

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})

        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        vod_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']

        if self._search_regex(
                r'(?i)<meta\s[^>]*http-equiv="refresh"\s[^>]*content="[0-9]+;\s*url=https://www\.litv\.tv/"',
                webpage, 'meta refresh redirect', default=False, group=0):
            raise ExtractorError('No such content found', expected=True)
        program_info = traverse_obj(vod_data, ('programInformation', {dict})) or {}
        playlist_data = traverse_obj(vod_data, ('seriesTree'))
        if playlist_data and self._yes_playlist(program_info.get('series_id'), video_id, smuggled_data):
            return self._extract_playlist(playlist_data, program_info.get('content_type'))

        program_info = self._parse_json(self._search_regex(
            r'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
            video_id)

        # In browsers `getProgramInfo` request is always issued. Usually this
        # endpoint gives the same result as the data embedded in the webpage.
        # If, for some reason, there are no embedded data, we do an extra request.
        if 'assetId' not in program_info:
            program_info = self._download_json(
                'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
                query={'contentId': video_id},
                headers={'Accept': 'application/json'})

        series_id = program_info['seriesId']
        if self._yes_playlist(series_id, video_id, smuggled_data):
            playlist_data = self._download_json(
                'https://www.litv.tv/vod/ajax/getSeriesTree', video_id,
                query={'seriesId': series_id}, headers={'Accept': 'application/json'})
            return self._extract_playlist(playlist_data, program_info['contentType'])

        video_data = self._parse_json(self._search_regex(
            r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);',
            webpage, 'video data', default='{}'), video_id)
        if not video_data:
            payload = {'assetId': program_info['assetId']}
        asset_id = traverse_obj(program_info, ('assets', 0, 'asset_id', {str}))
        if asset_id:  # This is a VOD
            media_type = 'vod'
        else:  # This is a live stream
            asset_id = program_info['content_id']
            media_type = program_info['content_type']
        puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
        if puid:
            payload.update({
                'type': 'auth',
                'puid': puid,
            })
            endpoint = 'getUrl'
            endpoint = 'get-urls'
        else:
            payload.update({
                'watchDevices': program_info['watchDevices'],
                'contentType': program_info['contentType'],
            })
            endpoint = 'getMainUrlNoAuth'
            puid = str(uuid.uuid4())
            endpoint = 'get-urls-no-auth'
        video_data = self._download_json(
            f'https://www.litv.tv/vod/ajax/{endpoint}', video_id,
            data=json.dumps(payload).encode(),
            f'https://www.litv.tv/api/{endpoint}', video_id,
            data=json.dumps({'AssetId': asset_id, 'MediaType': media_type, 'puid': puid}).encode(),
            headers={'Content-Type': 'application/json'})

        if not video_data.get('fullpath'):
            error_msg = video_data.get('errorMessage')
            if error_msg == 'vod.error.outsideregionerror':
        if error := traverse_obj(video_data, ('error', {dict})):
            error_msg = traverse_obj(error, ('message', {str}))
            if error_msg and 'OutsideRegionError' in error_msg:
                self.raise_geo_restricted('This video is available in Taiwan only')
            if error_msg:
            elif error_msg:
                raise ExtractorError(f'{self.IE_NAME} said: {error_msg}', expected=True)
            raise ExtractorError(f'Unexpected result from {self.IE_NAME}')
            raise ExtractorError(f'Unexpected error from {self.IE_NAME}')

        formats = self._extract_m3u8_formats(
            video_data['fullpath'], video_id, ext='mp4',
            entry_protocol='m3u8_native', m3u8_id='hls')
            video_data['result']['AssetURLs'][0], video_id, ext='mp4', m3u8_id='hls')
        for a_format in formats:
            # LiTV HLS segments doesn't like compressions
            a_format.setdefault('http_headers', {})['Accept-Encoding'] = 'identity'

        title = program_info['title'] + program_info.get('secondaryMark', '')
        description = program_info.get('description')
        thumbnail = program_info.get('imageFile')
        categories = [item['name'] for item in program_info.get('category', [])]
        episode = int_or_none(program_info.get('episode'))

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'categories': categories,
            'episode_number': episode,
            'title': join_nonempty('title', 'secondary_mark', delim='', from_dict=program_info),
            **traverse_obj(program_info, {
                'description': ('description', {str}),
                'thumbnail': ('picture', {urljoin('https://p-cdnstatic.svc.litv.tv/')}),
                'categories': ('genres', ..., 'name', {str}),
                'episode_number': ('episode', {int_or_none}),
            }),
        }
@@ -13,7 +13,10 @@ from ..utils import (
    unified_timestamp,
    url_or_none,
)
from ..utils.traversal import traverse_obj
from ..utils.traversal import (
    subs_list_to_dict,
    traverse_obj,
)


class RutubeBaseIE(InfoExtractor):

@@ -92,11 +95,11 @@ class RutubeBaseIE(InfoExtractor):
                hls_url, video_id, 'mp4', fatal=False, m3u8_id='hls')
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        for caption in traverse_obj(options, ('captions', lambda _, v: url_or_none(v['file']))):
            subtitles.setdefault(caption.get('code') or 'ru', []).append({
                'url': caption['file'],
                'name': caption.get('langTitle'),
            })
        self._merge_subtitles(traverse_obj(options, ('captions', ..., {
            'id': 'code',
            'url': 'file',
            'name': ('langTitle', {str}),
        }, all, {subs_list_to_dict(lang='ru')})), target=subtitles)
        return formats, subtitles

    def _download_and_extract_formats_and_subtitles(self, video_id, query=None):
@@ -199,8 +199,9 @@ class SonyLIVSeriesIE(InfoExtractor):
        },
    }]
    _API_BASE = 'https://apiv2.sonyliv.com/AGL'
    _SORT_ORDERS = ('asc', 'desc')

    def _entries(self, show_id):
    def _entries(self, show_id, sort_order):
        headers = {
            'Accept': 'application/json, text/plain, */*',
            'Referer': 'https://www.sonyliv.com',

@@ -215,6 +216,9 @@ class SonyLIVSeriesIE(InfoExtractor):
                'from': '0',
                'to': '49',
            }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))

        if sort_order == 'desc':
            seasons = reversed(seasons)
        for season in seasons:
            season_id = str(season['id'])
            note = traverse_obj(season, ('metadata', 'title', {str})) or 'season'

@@ -226,7 +230,7 @@ class SonyLIVSeriesIE(InfoExtractor):
                    'from': str(cursor),
                    'to': str(cursor + 99),
                    'orderBy': 'episodeNumber',
                    'sortOrder': 'asc',
                    'sortOrder': sort_order,
                }), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
                if not episodes:
                    break

@@ -237,4 +241,10 @@ class SonyLIVSeriesIE(InfoExtractor):

    def _real_extract(self, url):
        show_id = self._match_id(url)
        return self.playlist_result(self._entries(show_id), playlist_id=show_id)

        sort_order = self._configuration_arg('sort_order', [self._SORT_ORDERS[0]])[0]
        if sort_order not in self._SORT_ORDERS:
            raise ValueError(
                f'Invalid sort order "{sort_order}". Allowed values are: {", ".join(self._SORT_ORDERS)}')

        return self.playlist_result(self._entries(show_id, sort_order), playlist_id=show_id)
@@ -241,7 +241,7 @@ class SoundcloudBaseIE(InfoExtractor):
            format_urls.add(format_url)
            formats.append({
                'format_id': 'download',
                'ext': urlhandle_detect_ext(urlh) or 'mp3',
                'ext': urlhandle_detect_ext(urlh, default='mp3'),
                'filesize': int_or_none(urlh.headers.get('Content-Length')),
                'url': format_url,
                'quality': 10,
@@ -419,7 +419,9 @@ def create_parser():
    general.add_option(
        '--flat-playlist',
        action='store_const', dest='extract_flat', const='in_playlist', default=False,
        help='Do not extract the videos of a playlist, only list them')
        help=(
            'Do not extract a playlist\'s URL result entries; '
            'some entry metadata may be missing and downloading may be bypassed'))
    general.add_option(
        '--no-flat-playlist',
        action='store_false', dest='extract_flat',
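The reworded `--flat-playlist` help matches what `extract_flat='in_playlist'` does internally. A minimal sketch of the same behaviour through the embedding API (the playlist URL is a placeholder):

```python
# Hedged sketch: extract_flat='in_playlist' corresponds to --flat-playlist.
# Playlist entries come back as unresolved URL results, so per-entry metadata
# may be missing and nothing is downloaded here.
import yt_dlp

with yt_dlp.YoutubeDL({'extract_flat': 'in_playlist'}) as ydl:
    playlist = ydl.extract_info('https://www.youtube.com/playlist?list=PLACEHOLDER', download=False)
    for entry in playlist.get('entries') or []:
        print(entry.get('_type'), entry.get('url'))
```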