Compare commits

...

4 Commits

Author SHA1 Message Date

SirElderling
6a592aeacc
Merge ab81421072 into be3579aaf0
2024-11-09 21:41:16 +05:30

Steve Ovens
be3579aaf0
[ie/GameDevTV] Add extractor (#11368)
Authored by: stratus-ss, bashonly
Co-authored-by: bashonly <88596187+bashonly@users.noreply.github.com>
2024-11-06 21:58:44 +00:00

bashonly
85fdc66b6e
[ie/adobepass] Fix provider requests (#11472)
Fix bug in dcfeea4dd5
Closes #11469
Authored by: bashonly
2024-11-06 21:26:05 +00:00

SirElderling
ab81421072
[ie/NYTimes] - add audio extraction
2024-04-21 13:53:54 +01:00

4 changed files with 365 additions and 1 deletion

View File

@@ -708,6 +708,7 @@ from .gab import (
    GabTVIE,
)
from .gaia import GaiaIE
from .gamedevtv import GameDevTVDashboardIE
from .gamejolt import (
    GameJoltCommunityIE,
    GameJoltGameIE,
@@ -1416,6 +1417,7 @@ from .nuum import (
from .nuvid import NuvidIE
from .nytimes import (
    NYTimesArticleIE,
    NYTimesAudioIE,
    NYTimesCookingIE,
    NYTimesCookingRecipeIE,
    NYTimesIE,
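
For orientation (not part of the diff): the two hunks above register the new GameDevTVDashboardIE and NYTimesAudioIE entries in _extractors.py, which is what makes them resolvable through yt-dlp's extractor lookup. A minimal sketch, assuming a yt-dlp build that includes this branch:

from yt_dlp.extractor import get_info_extractor

ie = get_info_extractor('GameDevTVDashboard')  # lookup name is the class name without the 'IE' suffix
print(ie.suitable('https://www.gamedev.tv/dashboard/courses/25'))  # True
print(ie.suitable('https://www.gamedev.tv/courses/25'))            # False: only /dashboard/ URLs match _VALID_URL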

View File

@@ -1362,7 +1362,7 @@ class AdobePassIE(InfoExtractor): # XXX: Conventionally, base classes should en
    def _download_webpage_handle(self, *args, **kwargs):
        headers = self.geo_verification_headers()
        headers.update(kwargs.get('headers', {}))
        headers.update(kwargs.get('headers') or {})
        kwargs['headers'] = headers
        return super()._download_webpage_handle(
            *args, **kwargs)
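
A note on the adobepass hunk above (the first of the two unmarked headers.update(...) lines is the old code, the second is its replacement): dict.get('headers', {}) only falls back to {} when the key is absent, so a caller passing headers=None explicitly made update() raise, while kwargs.get('headers') or {} also covers that case. A minimal standalone sketch, not taken from the diff:

kwargs = {'headers': None}  # e.g. a provider request that passes headers=None explicitly

headers = {'User-Agent': 'example'}
# Old form: the default only applies when the key is missing, so this would be
# headers.update(None) and raise TypeError.
# headers.update(kwargs.get('headers', {}))

# Fixed form: `or {}` also replaces falsy values such as None.
headers.update(kwargs.get('headers') or {})
print(headers)  # {'User-Agent': 'example'}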

View File

@@ -0,0 +1,141 @@
import json

from .common import InfoExtractor
from ..networking.exceptions import HTTPError
from ..utils import (
    ExtractorError,
    clean_html,
    int_or_none,
    join_nonempty,
    parse_iso8601,
    str_or_none,
    url_or_none,
)
from ..utils.traversal import traverse_obj


class GameDevTVDashboardIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?gamedev\.tv/dashboard/courses/(?P<course_id>\d+)(?:/(?P<lecture_id>\d+))?'
    _NETRC_MACHINE = 'gamedevtv'
    _TESTS = [{
        'url': 'https://www.gamedev.tv/dashboard/courses/25',
        'info_dict': {
            'id': '25',
            'title': 'Complete Blender Creator 3: Learn 3D Modelling for Beginners',
            'tags': ['blender', 'course', 'all', 'box modelling', 'sculpting'],
            'categories': ['Blender', '3D Art'],
            'thumbnail': 'https://gamedev-files.b-cdn.net/courses/qisc9pmu1jdc.jpg',
            'upload_date': '20220516',
            'timestamp': 1652694420,
            'modified_date': '20241027',
            'modified_timestamp': 1730049658,
        },
        'playlist_count': 100,
    }, {
        'url': 'https://www.gamedev.tv/dashboard/courses/63/2279',
        'info_dict': {
            'id': 'df04f4d8-68a4-4756-a71b-9ca9446c3a01',
            'ext': 'mp4',
            'modified_timestamp': 1701695752,
            'upload_date': '20230504',
            'episode': 'MagicaVoxel Community Course Introduction',
            'series_id': '63',
            'title': 'MagicaVoxel Community Course Introduction',
            'timestamp': 1683195397,
            'modified_date': '20231204',
            'categories': ['3D Art', 'MagicaVoxel'],
            'season': 'MagicaVoxel Community Course',
            'tags': ['MagicaVoxel', 'all', 'course'],
            'series': 'MagicaVoxel 3D Art Mini Course',
            'duration': 1405,
            'episode_number': 1,
            'season_number': 1,
            'season_id': '219',
            'description': 'md5:a378738c5bbec1c785d76c067652d650',
            'display_id': '63-219-2279',
            'alt_title': '1_CC_MVX MagicaVoxel Community Course Introduction.mp4',
            'thumbnail': 'https://vz-23691c65-6fa.b-cdn.net/df04f4d8-68a4-4756-a71b-9ca9446c3a01/thumbnail.jpg',
        },
    }]
    _API_HEADERS = {}

    def _perform_login(self, username, password):
        try:
            response = self._download_json(
                'https://api.gamedev.tv/api/students/login', None, 'Logging in',
                headers={'Content-Type': 'application/json'},
                data=json.dumps({
                    'email': username,
                    'password': password,
                    'cart_items': [],
                }).encode())
        except ExtractorError as e:
            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                raise ExtractorError('Invalid username/password', expected=True)
            raise

        self._API_HEADERS['Authorization'] = f'{response["token_type"]} {response["access_token"]}'

    def _real_initialize(self):
        if not self._API_HEADERS.get('Authorization'):
            self.raise_login_required(
                'This content is only available with purchase', method='password')

    def _entries(self, data, course_id, course_info, selected_lecture):
        for section in traverse_obj(data, ('sections', ..., {dict})):
            section_info = traverse_obj(section, {
                'season_id': ('id', {str_or_none}),
                'season': ('title', {str}),
                'season_number': ('order', {int_or_none}),
            })
            for lecture in traverse_obj(section, ('lectures', lambda _, v: url_or_none(v['video']['playListUrl']))):
                if selected_lecture and str(lecture.get('id')) != selected_lecture:
                    continue
                display_id = join_nonempty(course_id, section_info.get('season_id'), lecture.get('id'))
                formats, subtitles = self._extract_m3u8_formats_and_subtitles(
                    lecture['video']['playListUrl'], display_id, 'mp4', m3u8_id='hls')
                yield {
                    **course_info,
                    **section_info,
                    'id': display_id,  # fallback
                    'display_id': display_id,
                    'formats': formats,
                    'subtitles': subtitles,
                    'series': course_info.get('title'),
                    'series_id': course_id,
                    **traverse_obj(lecture, {
                        'id': ('video', 'guid', {str}),
                        'title': ('title', {str}),
                        'alt_title': ('video', 'title', {str}),
                        'description': ('description', {clean_html}),
                        'episode': ('title', {str}),
                        'episode_number': ('order', {int_or_none}),
                        'duration': ('video', 'duration_in_sec', {int_or_none}),
                        'timestamp': ('video', 'created_at', {parse_iso8601}),
                        'modified_timestamp': ('video', 'updated_at', {parse_iso8601}),
                        'thumbnail': ('video', 'thumbnailUrl', {url_or_none}),
                    }),
                }

    def _real_extract(self, url):
        course_id, lecture_id = self._match_valid_url(url).group('course_id', 'lecture_id')
        data = self._download_json(
            f'https://api.gamedev.tv/api/courses/my/{course_id}', course_id,
            headers=self._API_HEADERS)['data']
        course_info = traverse_obj(data, {
            'title': ('title', {str}),
            'tags': ('tags', ..., 'name', {str}),
            'categories': ('categories', ..., 'title', {str}),
            'timestamp': ('created_at', {parse_iso8601}),
            'modified_timestamp': ('updated_at', {parse_iso8601}),
            'thumbnail': ('image', {url_or_none}),
        })

        entries = self._entries(data, course_id, course_info, lecture_id)
        if lecture_id:
            lecture = next(entries, None)
            if not lecture:
                raise ExtractorError('Lecture not found')
            return lecture
        return self.playlist_result(entries, course_id, **course_info)
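
For context (not part of the diff): GameDevTVDashboardIE logs in once via _perform_login, stores the bearer token in _API_HEADERS, and then fetches the course JSON in _real_extract, yielding one entry per lecture. A hedged sketch of driving the new extractor through yt-dlp's Python API; the credentials are placeholders and the course URL is the one from the first test above:

import yt_dlp

opts = {
    'username': 'you@example.com',  # placeholder GameDev.tv account that owns the course
    'password': 'hunter2',          # placeholder
}
with yt_dlp.YoutubeDL(opts) as ydl:
    # Without a lecture id the extractor returns a playlist of the whole course.
    info = ydl.extract_info('https://www.gamedev.tv/dashboard/courses/25', download=False)
    for entry in info.get('entries') or []:
        print(entry.get('episode_number'), entry.get('title'))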

View File

@@ -10,6 +10,7 @@ from ..utils import (
    float_or_none,
    get_elements_html_by_class,
    int_or_none,
    js_to_json,
    merge_dicts,
    mimetype2ext,
    parse_iso8601,
@@ -418,3 +419,223 @@ class NYTimesCookingRecipeIE(InfoExtractor):
            'thumbnails': [{'url': thumb_url} for thumb_url in traverse_obj(
                recipe_data, ('image', 'crops', 'recipe', ..., {url_or_none}))],
        }


class NYTimesAudioIE(NYTimesBaseIE):
    _VALID_URL = r"https?://(?:www\.)?nytimes\.com/\d{4}/\d{2}/\d{2}/(?:podcasts|books)/(?:[\w-]+/)?(?P<id>[^./?#]+)(?:\.html)?"
    _TESTS = [
        {
            "url": "http://www.nytimes.com/2016/10/14/podcasts/revelations-from-the-final-weeks.html",
            "md5": "cd402e44a059c8caf3b5f514c9264d0f",
            "info_dict": {
                "id": "100000004709062",
                "title": "Revelations From the Final Weeks",
                "ext": "mp3",
                "description": "md5:fb5c6b93b12efc51649b4847fe066ee4",
                "timestamp": 1476448332,
                "upload_date": "20161014",
                "creators": [''],
                "series": "The Run-Up",
                "episode": "He Was Like an Octopus",
                "episode_number": 20,
                "duration": 2130,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
        },
        {
            "url": "https://www.nytimes.com/2023/11/25/podcasts/poultry-slam.html",
            "info_dict": {
                "id": "100000009191248",
                "title": "Poultry Slam",
                "ext": "mp3",
                "description": "md5:1e6f16b21bb9287b8a1fe563145a72fe",
                "timestamp": 1700911084,
                "upload_date": "20231125",
                "creators": [],
                "series": "This American Life",
                "episode": "Poultry Slam",
                "duration": 3523,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.png",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://www.nytimes.com/2016/10/16/books/review/inside-the-new-york-times-book-review-the-rise-of-hitler.html",
            "info_dict": {
                "id": "100000004709479",
                "title": "Inside The New York Times Book Review: The Rise of Hitler",
                "ext": "mp3",
                "description": "md5:288161c98c098a0c24f07a94af7108c3",
                "timestamp": 1476461513,
                "upload_date": "20161014",
                "creators": ['Pamela Paul'],
                "series": "",
                "episode": "The Rise of Hitler",
                "duration": 3475,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "https://www.nytimes.com/2023/12/07/podcasts/the-daily/nikki-haley.html",
            "info_dict": {
                "id": "100000009214128",
                "title": "Nikki Haleys Moment",
                "ext": "mp3",
                "description": "md5:bf9f532fe689967ef1c458bcb057f3e5",
                "timestamp": 1701946819,
                "upload_date": "20231207",
                "creators": [],
                "series": "The Daily",
                "episode": "Listen to The Daily: Nikki Haleys Moment",
                "duration": 1908,
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "https://www.nytimes.com/2023/12/18/podcasts/israel-putin.html",
            "md5": "708b4fd393ca103280fe9e56d91b08b5",
            "info_dict": {
                "id": "100000009227362",
                "title": "Pressure Mounts on Israel, and Putin Profits Off Boycott",
                "ext": "mp3",
                "description": "Hear the news in five minutes.",
                "timestamp": 1702897212,
                "upload_date": "20231218",
                "creators": [],
                "series": "The Headlines",
                "episode": "The Headlines",
                "duration": 298,
                "thumbnail": r"re:https?://\w+\.nyt.com/images/.*\.jpg",
            },
        },
    ]

    def _extract_content_from_block(self, block):
        return traverse_obj(
            block,
            {
                "creators": ("data", "track", "credit", all),
                "duration": (
                    ("data", "media"),
                    ("track", "length"),
                    ("duration", None),
                    {int_or_none},
                ),
                "series": (
                    ("data", "media"),
                    ("podcast", "podcastSeries"),
                    ("title", None),
                    {str_or_none},
                ),
                "episode": (
                    ("data", "media"),
                    ("track", "headline"),
                    ("title", "default"), {str}),
                "episode_number": (
                    "data",
                    "podcast",
                    "episode",
                    {lambda v: v.split()[1]},
                    {int_or_none},
                ),
                "url": (
                    ("data", "media"),
                    ("track", "fileUrl"),
                    ("source", None),
                    {url_or_none},
                ),
                "vcodec": "none",
            },
            get_all=False,
        )

    def _real_extract(self, url):
        page_id = self._match_id(url)
        webpage = self._download_webpage(url, page_id)
        art_json = self._search_json(
            r"window\.__preloadedData\s*=",
            webpage,
            "media details",
            page_id,
            transform_source=js_to_json,
        )["initialData"]["data"]["article"]

        blocks = traverse_obj(
            art_json,
            (
                "sprinkledBody",
                "content",
                lambda _, v: v["__typename"]
                in ("InteractiveBlock", "HeaderMultimediaBlock"),
                "media",
            ),
        )
        if not blocks:
            raise ExtractorError("Unable to extract any media blocks from webpage")

        common_info = {
            "title": remove_end(
                self._html_extract_title(webpage), " - The New York Times"
            ),
            "description": self._html_search_meta(
                ["og:description", "twitter:description"], webpage
            ),
            "id": traverse_obj(
                art_json, ("sourceId")
            ),  # poltry slam is under art_json > 'sourceId'
            **traverse_obj(
                art_json,
                {
                    "id": (
                        "sprinkledBody",
                        "content",
                        ...,
                        "media",
                        "sourceId",
                        any,
                        {str},
                    ),
                    "title": ("headline", "default"),
                    "description": ("summary"),
                    "timestamp": ("firstPublished", {parse_iso8601}),
                    "thumbnails": (
                        "promotionalMedia",
                        "assetCrops",
                        ...,
                        "renditions",
                        ...,
                        all,
                        {self._extract_thumbnails},
                    ),
                },
            ),
        }

        entries = []
        for block in blocks:
            if block.get("html"):
                block = self._search_json(
                    r"function\s+getFlexData\(\)\s*\{\s*return",
                    block.get("html"),
                    "Retrieve the inner JSON",
                    page_id,
                )
            entries.append(
                merge_dicts(self._extract_content_from_block(block), common_info)
            )

        if len(entries) > 1:
            return self.playlist_result(entries, page_id, **common_info)

        return {
            "id": page_id,
            **entries[0],
        }
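
A closing note on _extract_content_from_block (not part of the diff): it leans on yt-dlp's traverse_obj branching, where an inner tuple lists alternative keys to try at that step, {func} applies a conversion, None keeps the current value, and get_all=False takes the first branch that yields something, so one mapping serves both the podcast-style and header-style media blocks. A minimal sketch with made-up sample dicts shaped only loosely like those blocks:

from yt_dlp.utils import int_or_none
from yt_dlp.utils.traversal import traverse_obj

podcast_block = {'data': {'track': {'length': '2130', 'fileUrl': 'https://example.com/a.mp3'}}}
header_block = {'media': {'duration': 298, 'source': 'https://example.com/b.mp3'}}

for block in (podcast_block, header_block):
    print(traverse_obj(block, {
        # at each step: try the first key, fall back to the alternative
        'duration': (('data', 'media'), ('track', 'duration'), ('length', None), {int_or_none}),
        'url': (('data', 'media'), ('track', 'source'), ('fileUrl', None)),
    }, get_all=False))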