Compare commits

...

8 Commits

Author SHA1 Message Date
bashonly
5f8e3632ae
Merge fca99e1433 into da252d9d32 2024-11-18 02:47:53 +02:00
bashonly
da252d9d32
[cleanup] Misc (#11554)
Closes #6884
Authored by: bashonly, Grub4K, seproDev

Co-authored-by: Simon Sawicki <contact@grub4k.xyz>
Co-authored-by: sepro <sepro@sepr0.com>
2024-11-17 23:25:05 +00:00
gillux
e079ffbda6
[ie/litv] Fix extractor (#11071)
Authored by: jiru
2024-11-17 21:37:15 +00:00
bashonly
2009cb27e1
[ie/SonyLIVSeries] Add sort_order extractor-arg (#11569)
Authored by: bashonly
2024-11-17 21:16:22 +00:00
Jackson Humphrey
f351440f1d
[ie/ctvnews] Fix extractor (#11534)
Closes #8689
Authored by: jshumphrey, bashonly

Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2024-11-17 21:06:50 +00:00
bashonly
fca99e1433
Fix token caching
Authored by: bashonly
2024-11-17 12:39:54 -06:00
bashonly
2c7a1dc392
Merge branch 'yt-dlp:master' into ie/shouttv 2024-11-17 11:22:20 -06:00
bashonly
134cb81bc9
[ie/shouttv] Add extractors
Authored by: bashonly
2024-10-30 00:36:32 -05:00
13 changed files with 570 additions and 124 deletions

View File

@ -342,8 +342,9 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
extractor plugins; postprocessor plugins can
only be loaded from the default plugin
directories
--flat-playlist Do not extract the videos of a playlist,
only list them
--flat-playlist Do not extract a playlist's URL result
entries; some entry metadata may be missing
and downloading may be bypassed
--no-flat-playlist Fully extract the videos of a playlist
(default)
--live-from-start Download livestreams from the start.
@ -1869,6 +1870,9 @@ The following extractors use this feature:
#### digitalconcerthall
* `prefer_combined_hls`: Prefer extracting combined/pre-merged video and audio HLS formats. This will exclude 4K/HEVC video and lossless/FLAC audio formats, which are only available as split video/audio HLS formats
#### sonylivseries
* `sort_order`: Episode sort order for series extraction - one of `asc` (ascending, oldest first) or `desc` (descending, newest first). Default is `asc`
**Note**: These options may be changed/removed in the future without concern for backward compatibility
<!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->

View File

@ -234,5 +234,10 @@
"when": "57212a5f97ce367590aaa5c3e9a135eead8f81f7",
"short": "[ie/vimeo] Fix API retries (#11351)",
"authors": ["bashonly"]
},
{
"action": "add",
"when": "52c0ffe40ad6e8404d93296f575007b05b04c686",
"short": "[priority] **Login with OAuth is no longer supported for YouTube**\nDue to a change made by the site, yt-dlp is no longer able to support OAuth login for YouTube. [Read more](https://github.com/yt-dlp/yt-dlp/issues/11462#issuecomment-2471703090)"
}
]

View File

@ -1849,6 +1849,11 @@ from .shahid import (
from .sharepoint import SharePointIE
from .sharevideos import ShareVideosEmbedIE
from .shemaroome import ShemarooMeIE
from .shouttv import (
ShoutTVIE,
ShoutTVSeasonIE,
ShoutTVSeriesIE,
)
from .showroomlive import ShowRoomLiveIE
from .sibnet import SibnetEmbedIE
from .simplecast import (

View File

@ -1,4 +1,3 @@
from .common import InfoExtractor
from ..utils import (
ExtractorError,

View File

@ -3767,7 +3767,7 @@ class InfoExtractor:
""" Merge subtitle dictionaries, language by language. """
if target is None:
target = {}
for d in dicts:
for d in filter(None, dicts):
for lang, subs in d.items():
target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
return target

View File

@ -1,11 +1,24 @@
import json
import re
import urllib.parse
from .common import InfoExtractor
from ..utils import orderedSet
from .ninecninemedia import NineCNineMediaIE
from ..utils import extract_attributes, orderedSet
from ..utils.traversal import find_element, traverse_obj
class CTVNewsIE(InfoExtractor):
_VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)(?:$|[#?&])'
_BASE_REGEX = r'https?://(?:[^.]+\.)?ctvnews\.ca/'
_VIDEO_ID_RE = r'(?P<id>\d{5,})'
_PLAYLIST_ID_RE = r'(?P<id>\d\.\d{5,})'
_VALID_URL = [
rf'{_BASE_REGEX}video/c{_VIDEO_ID_RE}',
rf'{_BASE_REGEX}video(?:-gallery)?/?\?clipId={_VIDEO_ID_RE}',
rf'{_BASE_REGEX}video/?\?(?:playlist|bin)Id={_PLAYLIST_ID_RE}',
rf'{_BASE_REGEX}(?!video/)[^?#]*?{_PLAYLIST_ID_RE}/?(?:$|[?#])',
rf'{_BASE_REGEX}(?!video/)[^?#]+\?binId={_PLAYLIST_ID_RE}',
]
_TESTS = [{
'url': 'http://www.ctvnews.ca/video?clipId=901995',
'md5': 'b608f466c7fa24b9666c6439d766ab7e',
@ -17,13 +30,32 @@ class CTVNewsIE(InfoExtractor):
'timestamp': 1467286284,
'upload_date': '20160630',
'categories': [],
'tags': [],
'season_id': 57981,
'duration': 764.631,
'series': 'CTV News National story',
'thumbnail': r're:^https?://.*\.jpg$',
'season': 'Season 0',
'season_number': 0,
'season': 'Season 0',
'tags': [],
'series': 'CTV News National | Archive | Stories 2',
'season_id': '57981',
'thumbnail': r're:https?://.*\.jpg$',
'duration': 764.631,
},
}, {
'url': 'https://barrie.ctvnews.ca/video/c3030933-here_s-what_s-making-news-for-nov--15?binId=1272429',
'md5': '8b8c2b33c5c1803e3c26bc74ff8694d5',
'info_dict': {
'id': '3030933',
'ext': 'flv',
'title': 'Heres whats making news for Nov. 15',
'description': 'Here are the top stories were working on for CTV News at 11 for Nov. 15',
'thumbnail': 'http://images2.9c9media.com/image_asset/2021_2_22_a602e68e-1514-410e-a67a-e1f7cccbacab_png_2000x1125.jpg',
'season_id': '58104',
'season_number': 0,
'tags': [],
'season': 'Season 0',
'categories': [],
'series': 'CTV News Barrie',
'upload_date': '20241116',
'duration': 42.943,
'timestamp': 1731722452,
},
}, {
'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224',
@ -46,6 +78,65 @@ class CTVNewsIE(InfoExtractor):
'id': '1.5736957',
},
'playlist_mincount': 6,
}, {
'url': 'https://www.ctvnews.ca/business/respondents-to-bank-of-canada-questionnaire-largely-oppose-creating-a-digital-loonie-1.6665797',
'md5': '24bc4b88cdc17d8c3fc01dfc228ab72c',
'info_dict': {
'id': '2695026',
'ext': 'flv',
'season_id': '89852',
'series': 'From CTV News Channel',
'description': 'md5:796a985a23cacc7e1e2fafefd94afd0a',
'season': '2023',
'title': 'Bank of Canada asks public about digital currency',
'categories': [],
'tags': [],
'upload_date': '20230526',
'season_number': 2023,
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'timestamp': 1685105157,
'duration': 253.553,
},
}, {
'url': 'https://stox.ctvnews.ca/video-gallery?clipId=582589',
'md5': '135cc592df607d29dddc931f1b756ae2',
'info_dict': {
'id': '582589',
'ext': 'flv',
'categories': [],
'timestamp': 1427906183,
'season_number': 0,
'duration': 125.559,
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'series': 'CTV News Stox',
'description': 'CTV original footage of the rise and fall of the Berlin Wall.',
'title': 'Berlin Wall',
'season_id': '63817',
'season': 'Season 0',
'tags': [],
'upload_date': '20150401',
},
}, {
'url': 'https://ottawa.ctvnews.ca/features/regional-contact/regional-contact-archive?binId=1.1164587#3023759',
'md5': 'a14c0603557decc6531260791c23cc5e',
'info_dict': {
'id': '3023759',
'ext': 'flv',
'season_number': 2024,
'timestamp': 1731798000,
'season': '2024',
'episode': 'Episode 125',
'description': 'CTV News Ottawa at Six',
'duration': 2712.076,
'episode_number': 125,
'upload_date': '20241116',
'title': 'CTV News Ottawa at Six for Saturday, November 16, 2024',
'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg',
'categories': [],
'tags': [],
'series': 'CTV News Ottawa at Six',
'season_id': '92667',
},
}, {
'url': 'http://www.ctvnews.ca/1.810401',
'only_matching': True,
@ -57,29 +148,35 @@ class CTVNewsIE(InfoExtractor):
'only_matching': True,
}]
def _ninecninemedia_url_result(self, clip_id):
return self.url_result(f'9c9media:ctvnews_web:{clip_id}', NineCNineMediaIE, clip_id)
def _real_extract(self, url):
page_id = self._match_id(url)
def ninecninemedia_url_result(clip_id):
return {
'_type': 'url_transparent',
'id': clip_id,
'url': f'9c9media:ctvnews_web:{clip_id}',
'ie_key': 'NineCNineMedia',
}
if mobj := re.fullmatch(self._VIDEO_ID_RE, urllib.parse.urlparse(url).fragment):
page_id = mobj.group('id')
if page_id.isdigit():
return ninecninemedia_url_result(page_id)
else:
webpage = self._download_webpage(f'http://www.ctvnews.ca/{page_id}', page_id, query={
'ot': 'example.AjaxPageLayout.ot',
'maxItemsPerPage': 1000000,
})
entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet(
re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
if not entries:
webpage = self._download_webpage(url, page_id)
if 'getAuthStates("' in webpage:
entries = [ninecninemedia_url_result(clip_id) for clip_id in
self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')]
return self.playlist_result(entries, page_id)
if re.fullmatch(self._VIDEO_ID_RE, page_id):
return self._ninecninemedia_url_result(page_id)
webpage = self._download_webpage(f'https://www.ctvnews.ca/{page_id}', page_id, query={
'ot': 'example.AjaxPageLayout.ot',
'maxItemsPerPage': 1000000,
})
entries = [self._ninecninemedia_url_result(clip_id)
for clip_id in orderedSet(re.findall(r'clip\.id\s*=\s*(\d+);', webpage))]
if not entries:
webpage = self._download_webpage(url, page_id)
if 'getAuthStates("' in webpage:
entries = [self._ninecninemedia_url_result(clip_id) for clip_id in
self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')]
else:
entries = [
self._ninecninemedia_url_result(clip_id) for clip_id in
traverse_obj(webpage, (
{find_element(tag='jasper-player-container', html=True)},
{extract_attributes}, 'axis-ids', {json.loads}, ..., 'axisId', {str}))
]
return self.playlist_result(entries, page_id)

View File

@ -569,7 +569,7 @@ class FacebookIE(InfoExtractor):
if dash_manifest:
formats.extend(self._parse_mpd_formats(
compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest)),
mpd_url=url_or_none(video.get('dash_manifest_url')) or mpd_url))
mpd_url=url_or_none(vid_data.get('dash_manifest_url')) or mpd_url))
def process_formats(info):
# Downloads with browser's User-Agent are rate limited. Working around

View File

@ -1,30 +1,32 @@
import json
import uuid
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
join_nonempty,
smuggle_url,
traverse_obj,
try_call,
unsmuggle_url,
urljoin,
)
class LiTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:vod|promo)/[^/]+/(?:content\.do)?\?.*?\b(?:content_)?id=(?P<id>[^&]+)'
_URL_TEMPLATE = 'https://www.litv.tv/vod/%s/content.do?content_id=%s'
_VALID_URL = r'https?://(?:www\.)?litv\.tv/(?:[^/?#]+/watch/|vod/[^/?#]+/content\.do\?content_id=)(?P<id>[\w-]+)'
_URL_TEMPLATE = 'https://www.litv.tv/%s/watch/%s'
_GEO_COUNTRIES = ['TW']
_TESTS = [{
'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
'url': 'https://www.litv.tv/drama/watch/VOD00041610',
'info_dict': {
'id': 'VOD00041606',
'title': '花千骨',
},
'playlist_count': 51, # 50 episodes + 1 trailer
}, {
'url': 'https://www.litv.tv/vod/drama/content.do?brc_id=root&id=VOD00041610&isUHEnabled=true&autoPlay=1',
'url': 'https://www.litv.tv/drama/watch/VOD00041610',
'md5': 'b90ff1e9f1d8f5cfcd0a44c3e2b34c7a',
'info_dict': {
'id': 'VOD00041610',
@ -32,16 +34,15 @@ class LiTVIE(InfoExtractor):
'title': '花千骨第1集',
'thumbnail': r're:https?://.*\.jpg$',
'description': '《花千骨》陸劇線上看。十六年前,平靜的村莊內,一名女嬰隨異相出生,途徑此地的蜀山掌門清虛道長算出此女命運非同一般,她體內散發的異香易招惹妖魔。一念慈悲下,他在村莊周邊設下結界阻擋妖魔入侵,讓其年滿十六後去蜀山,並賜名花千骨。',
'categories': ['奇幻', '愛情', '中國', '仙俠'],
'categories': ['奇幻', '愛情', '仙俠', '古裝'],
'episode': 'Episode 1',
'episode_number': 1,
},
'params': {
'noplaylist': True,
},
'skip': 'Georestricted to Taiwan',
}, {
'url': 'https://www.litv.tv/promo/miyuezhuan/?content_id=VOD00044841&',
'url': 'https://www.litv.tv/drama/watch/VOD00044841',
'md5': '88322ea132f848d6e3e18b32a832b918',
'info_dict': {
'id': 'VOD00044841',
@ -55,94 +56,62 @@ class LiTVIE(InfoExtractor):
def _extract_playlist(self, playlist_data, content_type):
all_episodes = [
self.url_result(smuggle_url(
self._URL_TEMPLATE % (content_type, episode['contentId']),
self._URL_TEMPLATE % (content_type, episode['content_id']),
{'force_noplaylist': True})) # To prevent infinite recursion
for episode in traverse_obj(playlist_data, ('seasons', ..., 'episode', lambda _, v: v['contentId']))]
for episode in traverse_obj(playlist_data, ('seasons', ..., 'episodes', lambda _, v: v['content_id']))]
return self.playlist_result(all_episodes, playlist_data['contentId'], playlist_data.get('title'))
return self.playlist_result(all_episodes, playlist_data['content_id'], playlist_data.get('title'))
def _real_extract(self, url):
url, smuggled_data = unsmuggle_url(url, {})
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
vod_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']
if self._search_regex(
r'(?i)<meta\s[^>]*http-equiv="refresh"\s[^>]*content="[0-9]+;\s*url=https://www\.litv\.tv/"',
webpage, 'meta refresh redirect', default=False, group=0):
raise ExtractorError('No such content found', expected=True)
program_info = traverse_obj(vod_data, ('programInformation', {dict})) or {}
playlist_data = traverse_obj(vod_data, ('seriesTree'))
if playlist_data and self._yes_playlist(program_info.get('series_id'), video_id, smuggled_data):
return self._extract_playlist(playlist_data, program_info.get('content_type'))
program_info = self._parse_json(self._search_regex(
r'var\s+programInfo\s*=\s*([^;]+)', webpage, 'VOD data', default='{}'),
video_id)
asset_id = traverse_obj(program_info, ('assets', 0, 'asset_id', {str}))
if asset_id: # This is a VOD
media_type = 'vod'
else: # This is a live stream
asset_id = program_info['content_id']
media_type = program_info['content_type']
puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
if puid:
endpoint = 'get-urls'
else:
puid = str(uuid.uuid4())
endpoint = 'get-urls-no-auth'
video_data = self._download_json(
f'https://www.litv.tv/api/{endpoint}', video_id,
data=json.dumps({'AssetId': asset_id, 'MediaType': media_type, 'puid': puid}).encode(),
headers={'Content-Type': 'application/json'})
# In browsers `getProgramInfo` request is always issued. Usually this
# endpoint gives the same result as the data embedded in the webpage.
# If, for some reason, there are no embedded data, we do an extra request.
if 'assetId' not in program_info:
program_info = self._download_json(
'https://www.litv.tv/vod/ajax/getProgramInfo', video_id,
query={'contentId': video_id},
headers={'Accept': 'application/json'})
series_id = program_info['seriesId']
if self._yes_playlist(series_id, video_id, smuggled_data):
playlist_data = self._download_json(
'https://www.litv.tv/vod/ajax/getSeriesTree', video_id,
query={'seriesId': series_id}, headers={'Accept': 'application/json'})
return self._extract_playlist(playlist_data, program_info['contentType'])
video_data = self._parse_json(self._search_regex(
r'uiHlsUrl\s*=\s*testBackendData\(([^;]+)\);',
webpage, 'video data', default='{}'), video_id)
if not video_data:
payload = {'assetId': program_info['assetId']}
puid = try_call(lambda: self._get_cookies('https://www.litv.tv/')['PUID'].value)
if puid:
payload.update({
'type': 'auth',
'puid': puid,
})
endpoint = 'getUrl'
else:
payload.update({
'watchDevices': program_info['watchDevices'],
'contentType': program_info['contentType'],
})
endpoint = 'getMainUrlNoAuth'
video_data = self._download_json(
f'https://www.litv.tv/vod/ajax/{endpoint}', video_id,
data=json.dumps(payload).encode(),
headers={'Content-Type': 'application/json'})
if not video_data.get('fullpath'):
error_msg = video_data.get('errorMessage')
if error_msg == 'vod.error.outsideregionerror':
if error := traverse_obj(video_data, ('error', {dict})):
error_msg = traverse_obj(error, ('message', {str}))
if error_msg and 'OutsideRegionError' in error_msg:
self.raise_geo_restricted('This video is available in Taiwan only')
if error_msg:
elif error_msg:
raise ExtractorError(f'{self.IE_NAME} said: {error_msg}', expected=True)
raise ExtractorError(f'Unexpected result from {self.IE_NAME}')
raise ExtractorError(f'Unexpected error from {self.IE_NAME}')
formats = self._extract_m3u8_formats(
video_data['fullpath'], video_id, ext='mp4',
entry_protocol='m3u8_native', m3u8_id='hls')
video_data['result']['AssetURLs'][0], video_id, ext='mp4', m3u8_id='hls')
for a_format in formats:
# LiTV HLS segments doesn't like compressions
a_format.setdefault('http_headers', {})['Accept-Encoding'] = 'identity'
title = program_info['title'] + program_info.get('secondaryMark', '')
description = program_info.get('description')
thumbnail = program_info.get('imageFile')
categories = [item['name'] for item in program_info.get('category', [])]
episode = int_or_none(program_info.get('episode'))
return {
'id': video_id,
'formats': formats,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'episode_number': episode,
'title': join_nonempty('title', 'secondary_mark', delim='', from_dict=program_info),
**traverse_obj(program_info, {
'description': ('description', {str}),
'thumbnail': ('picture', {urljoin('https://p-cdnstatic.svc.litv.tv/')}),
'categories': ('genres', ..., 'name', {str}),
'episode_number': ('episode', {int_or_none}),
}),
}

View File

@ -13,7 +13,10 @@ from ..utils import (
unified_timestamp,
url_or_none,
)
from ..utils.traversal import traverse_obj
from ..utils.traversal import (
subs_list_to_dict,
traverse_obj,
)
class RutubeBaseIE(InfoExtractor):
@ -92,11 +95,11 @@ class RutubeBaseIE(InfoExtractor):
hls_url, video_id, 'mp4', fatal=False, m3u8_id='hls')
formats.extend(fmts)
self._merge_subtitles(subs, target=subtitles)
for caption in traverse_obj(options, ('captions', lambda _, v: url_or_none(v['file']))):
subtitles.setdefault(caption.get('code') or 'ru', []).append({
'url': caption['file'],
'name': caption.get('langTitle'),
})
self._merge_subtitles(traverse_obj(options, ('captions', ..., {
'id': 'code',
'url': 'file',
'name': ('langTitle', {str}),
}, all, {subs_list_to_dict(lang='ru')})), target=subtitles)
return formats, subtitles
def _download_and_extract_formats_and_subtitles(self, video_id, query=None):

352
yt_dlp/extractor/shouttv.py Normal file
View File

@ -0,0 +1,352 @@
import itertools
import json
import re
import time
from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
ExtractorError,
filter_dict,
int_or_none,
jwt_decode_hs256,
parse_age_limit,
str_or_none,
url_or_none,
)
from ..utils.traversal import traverse_obj
class ShoutTVBaseIE(InfoExtractor):
    """Base class for Shout! TV extractors: shared auth, token caching and API helpers."""
    _NETRC_MACHINE = 'shouttv'
    _API_BASE_URL = 'https://dce-frontoffice.imggaming.com/api'
    # version and key taken from https://watch.shout-tv.com/code/js/app.3b4ef510d0048f672e1d.js
    _APP_VERSION = '6.57.10.65bab8b'  # 'OUTPUT_FOLDER' in JS
    _API_KEY = '857a1e5d-e35e-4fdf-805b-a87b6f8364bf'  # 'API_KEY' in JS
    _API_REALM = 'dce.shout'
    _API_HEADERS = {
        'Accept': 'application/json',
        'Origin': 'https://watch.shout-tv.com',
        'Referer': 'https://watch.shout-tv.com/',
        'app': 'dice',
        'x-api-key': _API_KEY,
        'x-app-var': _APP_VERSION,
    }
    # Auth state lives on the base class so every Shout! TV extractor instance
    # in a single run shares the same tokens
    _ACCESS_TOKEN = None
    _ACCESS_EXPIRY = 0  # unix timestamp, read from the access token's JWT 'exp' claim
    _REFRESH_TOKEN = None
    _is_logged_in = False

    def _set_tokens(self, auth_data):
        """Adopt tokens from an API auth response; persist them to cache when logged in."""
        ShoutTVBaseIE._ACCESS_TOKEN = auth_data['authorisationToken']  # 10 minute TTL
        ShoutTVBaseIE._ACCESS_EXPIRY = jwt_decode_hs256(ShoutTVBaseIE._ACCESS_TOKEN)['exp']
        if refresh_token := traverse_obj(auth_data, ('refreshToken', {str})):
            self.write_debug('New refresh token granted')
            ShoutTVBaseIE._REFRESH_TOKEN = refresh_token  # 2 month TTL
        username, _ = self._get_login_info()
        if username and ShoutTVBaseIE._is_logged_in:
            # Cache tokens per-username so a later run can skip the login request entirely
            tokens = self.cache.load(self._NETRC_MACHINE, 'tokens', default={})
            tokens[username] = [ShoutTVBaseIE._ACCESS_TOKEN, ShoutTVBaseIE._REFRESH_TOKEN]
            self.cache.store(self._NETRC_MACHINE, 'tokens', tokens)

    def _fetch_access_token(self, content_id=None):
        """Ensure a usable access token, hitting the 'init' endpoint when missing/expired."""
        # 10-second leeway so the token cannot expire mid-request
        if ShoutTVBaseIE._ACCESS_TOKEN and ShoutTVBaseIE._ACCESS_EXPIRY - 10 > time.time():
            return
        headers = self._API_HEADERS.copy()
        if ShoutTVBaseIE._REFRESH_TOKEN and ShoutTVBaseIE._ACCESS_TOKEN:
            # Sending both tokens turns the init call into a session refresh
            headers.update({
                'Authorization': f'Mixed {ShoutTVBaseIE._ACCESS_TOKEN} {ShoutTVBaseIE._REFRESH_TOKEN}',
                'Realm': self._API_REALM,
            })
        self._set_tokens(self._download_json(
            f'{self._API_BASE_URL}/v1/init/', content_id,
            'Fetching access token', 'Unable to fetch token',
            headers=headers, query={
                'lk': 'language',
                'pk': ['subTitleLanguage', 'audioLanguage', 'autoAdvance', 'pluginAccessTokens'],
                'readLicences': 'true',
                'countEvents': 'LIVE',
                'menuTargetPlatform': 'WEB',
            })['authentication'])

    def _perform_login(self, username, password):
        """Log in with username/password, reusing cached tokens when still valid."""
        self.report_login()
        cached_tokens = self.cache.load(self._NETRC_MACHINE, 'tokens', default={}).get(username) or []
        # Reuse the cached pair when the refresh token has at least 60s of validity left
        if (len(cached_tokens) == 2) and (jwt_decode_hs256(cached_tokens[1])['exp'] - 60 > time.time()):
            ShoutTVBaseIE._ACCESS_TOKEN, ShoutTVBaseIE._REFRESH_TOKEN = cached_tokens
            ShoutTVBaseIE._is_logged_in = True
            self.write_debug('Cached refresh token is still valid')
            return
        self._fetch_access_token()
        try:
            login_data = self._download_json(
                f'{self._API_BASE_URL}/v2/login', None, 'Submitting credentials',
                'Unable to log in', headers={
                    **self._API_HEADERS,
                    'Authorization': f'Bearer {ShoutTVBaseIE._ACCESS_TOKEN}',
                    'Content-Type': 'application/json',
                    'Realm': self._API_REALM,
                }, data=json.dumps({
                    'id': username,
                    'secret': password,
                }, separators=(',', ':')).encode())
        except ExtractorError as e:
            # The API answers 404 (not 401/403) for rejected credentials
            if isinstance(e.cause, HTTPError) and e.cause.status == 404:
                raise ExtractorError('Invalid username or password', expected=True)
            raise
        ShoutTVBaseIE._is_logged_in = True
        self._set_tokens(login_data)

    def _call_api(self, content_id, content_type, note='API JSON', query=None, headers=None):
        """Call the v4 API for content_id, mapping content_type onto the API's endpoint name."""
        endpoint = {
            'video': 'vod',
            'live': 'event',
        }.get(content_type, content_type)
        self._fetch_access_token(content_id)
        return self._download_json(
            f'{self._API_BASE_URL}/v4/{endpoint}/{content_id}', content_id,
            f'Downloading {note}', f'Unable to download {note}', query=query,
            headers={
                **self._API_HEADERS,
                'Authorization': f'Bearer {ShoutTVBaseIE._ACCESS_TOKEN}',
                **(headers or {}),
                'Realm': self._API_REALM,
            })

    @staticmethod
    def _parse_details(details):
        """Map an API details dict onto yt-dlp info-dict fields."""
        return traverse_obj(details, {
            'id': ('id', {int}, {str_or_none}),
            'title': ('title', {str}),
            'description': ('description', {str}),
            'duration': ('duration', {int_or_none}),
            'categories': ('categories', ..., {str}, filter),
            'thumbnails': (('thumbnailUrl', 'posterUrl', 'coverUrl'), {'url': {url_or_none}}),
            'age_limit': (('rating', 'contentRating'), 'rating', {parse_age_limit}, any),
            'episode_number': ('episodeInformation', 'episodeNumber', {int_or_none}),
            'season_number': ('episodeInformation', 'seasonNumber', {int_or_none}),
            'season_id': ('episodeInformation', 'season', {int}, {str_or_none}),
            'season': ('episodeInformation', 'seasonTitle', {str}),
            'series_id': ('episodeInformation', 'seriesInformation', 'id', {int}, {str_or_none}),
            'series': ('episodeInformation', 'seriesInformation', 'title', {str}),
        })

    def _extract_vod_formats_and_subtitles(self, player, video_id):
        """Extract HLS + DASH formats (and subtitles) from a player callback response."""
        formats, subtitles = [], {}
        # XXX: 'subtitles' array fields are alongside the 'url' fields in both 'hls' and 'dash',
        # but couldn't find any examples where the arrays were not empty
        for idx, m3u8_url in enumerate(traverse_obj(player, ('hls', ..., 'url', {url_or_none})), start=1):
            fmts, subs = self._extract_m3u8_formats_and_subtitles(
                m3u8_url, video_id, 'mp4', m3u8_id=f'hls-{idx}', fatal=False)
            # This site's HLS manifests do not provide any audio codec/bitrate info
            # The audio formats are given a GROUP-ID to pair them to video formats w/the same GROUP-ID
            # Worst quality audio is paired to worst quality video, ba paired to bv, etc
            # 'audio-1' is usually the worst quality and 'audio-3' is usually the best quality
            for fmt in fmts:
                if mobj := re.search(r'-audio-(?P<quality>\d+)', fmt['format_id']):
                    fmt['quality'] = int(mobj.group('quality'))
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        for idx, mpd_url in enumerate(traverse_obj(player, ('dash', ..., 'url', {url_or_none})), start=1):
            fmts, subs = self._extract_mpd_formats_and_subtitles(
                mpd_url, video_id, mpd_id=f'dash-{idx}', fatal=False)
            # DASH audio formats will always be sorted below HLS unless we also set 'quality'
            for q, fmt in enumerate(sorted(traverse_obj(fmts, (
                    lambda _, v: v['tbr'] and v['vcodec'] == 'none')), key=lambda x: x['tbr'])):
                fmt['quality'] = q
            formats.extend(fmts)
            self._merge_subtitles(subs, target=subtitles)
        return formats, subtitles
class ShoutTVIE(ShoutTVBaseIE):
    """Extractor for a single Shout! TV VOD video or live channel."""
    IE_NAME = 'shouttv'
    # NOTE(review): 'on live channels' reads like a typo for 'and live channels' — confirm
    IE_DESC = 'Shout! TV video-on-demand on live channels'
    _VALID_URL = r'https?://watch\.shout-tv\.com/(?P<type>video|live)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://watch.shout-tv.com/video/640292',
        'info_dict': {
            'id': '640292',
            'ext': 'mp4',
            'title': 'Pee-wee\'s Playhouse Christmas Special',
            'description': 'Pee-Wee Herman throws a Christmas party at his playhouse with his friends and some celebrity guests.',
            'duration': 2879,
            'thumbnail': 'https://dve-images.imggaming.com/original/p/2024/05/30/HE2kpg3EjcjrQJb2dSctUVhzpyz7rCXn-1717027231485.jpg',
            'age_limit': 0,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://watch.shout-tv.com/video/691416?seasonId=26337',
        'info_dict': {
            'id': '691416',
            'ext': 'mp4',
            'title': 'The Commish: S1 E1 - In The Best Of Families',
            'description': 'md5:d61403fb8ddaaeb1100228ac146f5a0c',
            'episode': 'Episode 0',
            'episode_number': 0,
            'season': 'The Commish: Season 1',
            'season_number': 1,
            'season_id': '26337',
            'series': 'The Commish',
            'series_id': '2695',
            'duration': 2785,
            'thumbnail': 'https://dve-images.imggaming.com/original/p/2024/09/23/wgpGQ1Vr3DPG6sHnmbGpsKZqMkcosJND-1727130957509.jpg',
            'age_limit': 14,
        },
        'params': {'skip_download': 'm3u8'},
    }, {
        'url': 'https://watch.shout-tv.com/live/265692',
        'info_dict': {
            'id': '265692',
            'ext': 'mp4',
            'title': r're:MST3K \d{4}-\d{2}-\d{2} \d{2}:\d{2}',
            'live_status': 'is_live',
            'thumbnail': 'https://img.dge-prod.dicelaboratory.com/thumbnails/265692/original/latest.jpg',
        },
        'params': {'skip_download': 'livestream'},
    }]

    def _real_extract(self, url):
        video_id, video_type = self._match_valid_url(url).group('id', 'type')
        # The CM-* headers presumably mirror the device/consent metadata the web
        # player sends (values match a desktop browser) — TODO confirm which are required
        details = self._call_api(
            video_id, video_type, query={'includePlaybackDetails': 'URL'},
            headers={
                'CM-APP-NAME': 'Website',
                'CM-APP-VERSION': self._APP_VERSION,
                'CM-CST-TCF': '',
                'CM-CST-USP': '',
                'CM-DVC-DNT': '1',
                'CM-DVC-H': '1080',
                'CM-DVC-LANG': 'en-US',
                'CM-DVC-OS': '14',
                'CM-DVC-TYPE': '2',
                'CM-DVC-W': '1920',
                'CM-WEB-MBL': '0',
                'CM-WEB-PAGE': f'/{video_type}/{video_id}',
            })
        access_level = traverse_obj(details, ('accessLevel', {str}))
        if access_level == 'GRANTED_ON_SIGN_IN':
            self.raise_login_required(method='password')
        elif access_level != 'GRANTED':
            # Unrecognized value: warn but still attempt extraction
            self.report_warning(f'Unknown access level "{access_level}"')
        player = self._download_json(
            details['playerUrlCallback'], video_id, 'Downloading player JSON',
            'Unable to download player JSON', headers={'Accept': 'application/json'})
        # Trust the API's 'type' field when present; otherwise fall back to the URL path
        is_live = {
            'VOD': False,
            'LIVE': True,
        }.get(details.get('type'), video_type == 'live')
        if is_live:
            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                player['hlsUrl'], video_id, 'mp4', m3u8_id='hls', live=True)
        else:
            formats, subtitles = self._extract_vod_formats_and_subtitles(player, video_id)
        return {
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
            'is_live': is_live,
            **self._parse_details(details),
        }
class ShoutTVPlaylistBaseIE(ShoutTVBaseIE):
    """Base class for paginated Shout! TV playlists.

    Subclasses must set _PAGE_SIZE, _PLAYLIST_TYPE, _ENTRIES_KEY
    and implement _create_entry().
    """

    def _create_entry(self, entry):
        # Turn a single API listing item into a url_result — subclass responsibility
        raise NotImplementedError('This method must be implemented by subclasses')

    def _fetch_page(self, playlist_id, page=1, last_seen=None):
        # 'lastSeen' is the API's pagination cursor; it is absent on the first page
        query = filter_dict({'rpp': self._PAGE_SIZE, 'lastSeen': last_seen})
        return self._call_api(
            playlist_id, self._PLAYLIST_TYPE,
            note=f'{self._PLAYLIST_TYPE} page {page}', query=query)

    def _entries(self, playlist_id, first_page):
        """Yield playlist entries, following the cursor until the API reports no more data."""
        page_data = first_page
        for page_number in itertools.count(2):
            for item in traverse_obj(page_data, (self._ENTRIES_KEY, lambda _, v: v['id'] is not None)):
                yield self._create_entry(item)
            cursor = traverse_obj(page_data, ('paging', 'lastSeen', {int}, {str_or_none}))
            has_more = traverse_obj(page_data, ('paging', 'moreDataAvailable', {bool}))
            if not (has_more and cursor):
                return
            page_data = self._fetch_page(playlist_id, page_number, cursor)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        initial_page = self._fetch_page(playlist_id)
        metadata = traverse_obj(initial_page, {
            'title': ('title', {str}),
            'description': ('description', {str}),
            'thumbnails': (('titleUrl', 'posterUrl', 'coverUrl'), {'url': {url_or_none}}),
            'series': ('series', 'title', {str}),
            'series_id': ('series', 'seriesId', {int}, {str_or_none}),
            # Only season pages carry a meaningful season number
            'season_number': (
                'seasonNumber', {int_or_none},
                {lambda x: x if self._PLAYLIST_TYPE == 'season' else None}),
        })
        return self.playlist_result(
            self._entries(playlist_id, initial_page), playlist_id, **metadata)
class ShoutTVSeasonIE(ShoutTVPlaylistBaseIE):
    """Extractor for Shout! TV season pages (paginated episode listings)."""
    IE_NAME = 'shouttv:season'
    IE_DESC = 'Shout! TV seasons'
    _VALID_URL = r'https?://watch\.shout-tv\.com/season/(?P<id>\d+)'

    _PAGE_SIZE = 5
    _PLAYLIST_TYPE = 'season'
    _ENTRIES_KEY = 'episodes'

    _TESTS = [{
        'url': 'https://watch.shout-tv.com/season/26338',
        'info_dict': {
            'id': '26338',
            'title': 'The Commish: Season 2',
            'description': 'md5:a5f99159e36d23af97a63137712c3b04',
            'series': 'The Commish',
            'series_id': '2695',
            'season_number': 2,
        },
        'playlist_count': 22,
    }]

    def _create_entry(self, entry):
        # Delegate each episode to the single-video extractor, pre-populating it
        # with whatever metadata the season listing already provides
        episode_url = f'https://watch.shout-tv.com/video/{entry["id"]}'
        return self.url_result(episode_url, ShoutTVIE, **self._parse_details(entry))
class ShoutTVSeriesIE(ShoutTVPlaylistBaseIE):
    """Extractor for Shout! TV series pages (paginated season listings)."""
    IE_NAME = 'shouttv:series'
    IE_DESC = 'Shout! TV series'
    _VALID_URL = r'https?://watch\.shout-tv\.com/series/(?P<id>\d+)'

    _PAGE_SIZE = 20
    _PLAYLIST_TYPE = 'series'
    _ENTRIES_KEY = 'seasons'

    _TESTS = [{
        'url': 'https://watch.shout-tv.com/series/2695',
        'info_dict': {
            'id': '2695',
            'title': 'The Commish',
            'description': 'md5:a5f99159e36d23af97a63137712c3b04',
        },
        'playlist_count': 5,
    }]

    def _create_entry(self, entry):
        # Delegate each season to ShoutTVSeasonIE; the item's numeric id serves
        # as both the playlist id and the season_id
        season_meta = traverse_obj(entry, {
            'id': ('id', {int}, {str_or_none}),
            'title': ('title', {str}),
            'description': ('description', {str}),
            'season_id': ('id', {int}, {str_or_none}),
            'season_number': ('seasonNumber', {int_or_none}),
        })
        return self.url_result(
            f'https://watch.shout-tv.com/season/{entry["id"]}', ShoutTVSeasonIE, **season_meta)

View File

@ -199,8 +199,9 @@ class SonyLIVSeriesIE(InfoExtractor):
},
}]
_API_BASE = 'https://apiv2.sonyliv.com/AGL'
_SORT_ORDERS = ('asc', 'desc')
def _entries(self, show_id):
def _entries(self, show_id, sort_order):
headers = {
'Accept': 'application/json, text/plain, */*',
'Referer': 'https://www.sonyliv.com',
@ -215,6 +216,9 @@ class SonyLIVSeriesIE(InfoExtractor):
'from': '0',
'to': '49',
}), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
if sort_order == 'desc':
seasons = reversed(seasons)
for season in seasons:
season_id = str(season['id'])
note = traverse_obj(season, ('metadata', 'title', {str})) or 'season'
@ -226,7 +230,7 @@ class SonyLIVSeriesIE(InfoExtractor):
'from': str(cursor),
'to': str(cursor + 99),
'orderBy': 'episodeNumber',
'sortOrder': 'asc',
'sortOrder': sort_order,
}), ('resultObj', 'containers', 0, 'containers', lambda _, v: int_or_none(v['id'])))
if not episodes:
break
@ -237,4 +241,10 @@ class SonyLIVSeriesIE(InfoExtractor):
def _real_extract(self, url):
show_id = self._match_id(url)
return self.playlist_result(self._entries(show_id), playlist_id=show_id)
sort_order = self._configuration_arg('sort_order', [self._SORT_ORDERS[0]])[0]
if sort_order not in self._SORT_ORDERS:
raise ValueError(
f'Invalid sort order "{sort_order}". Allowed values are: {", ".join(self._SORT_ORDERS)}')
return self.playlist_result(self._entries(show_id, sort_order), playlist_id=show_id)

View File

@ -241,7 +241,7 @@ class SoundcloudBaseIE(InfoExtractor):
format_urls.add(format_url)
formats.append({
'format_id': 'download',
'ext': urlhandle_detect_ext(urlh) or 'mp3',
'ext': urlhandle_detect_ext(urlh, default='mp3'),
'filesize': int_or_none(urlh.headers.get('Content-Length')),
'url': format_url,
'quality': 10,

View File

@ -419,7 +419,9 @@ def create_parser():
general.add_option(
'--flat-playlist',
action='store_const', dest='extract_flat', const='in_playlist', default=False,
help='Do not extract the videos of a playlist, only list them')
help=(
'Do not extract a playlist\'s URL result entries; '
'some entry metadata may be missing and downloading may be bypassed'))
general.add_option(
'--no-flat-playlist',
action='store_false', dest='extract_flat',