Compare commits

...

7 Commits

Author SHA1 Message Date
zer0-delta
2b9ed41422
Merge 94e2a94b75 into b83ca24eb7 2024-11-10 14:42:01 +01:00
sepro
b83ca24eb7
[core] Catch broken Cryptodome installations (#11486)
Authored by: seproDev
2024-11-10 00:53:49 +01:00
bashonly
240a7d43c8
[build] Pin websockets version to >=13.0,<14 (#11488)
websockets 14.0 causes CI test failures (a lot more of them)

Authored by: bashonly
2024-11-09 23:46:47 +00:00
bashonly
f13df591d4
[build] Enable attestations for trusted publishing (#11420)
Reverts 428ffb75aa

Authored by: bashonly
2024-11-09 23:26:02 +00:00
zer0-delta
94e2a94b75 Merged master 2024-11-04 23:10:49 +00:00
zer0-delta
2f66504571 Ran hatch fmt 2024-06-12 05:49:31 +01:00
zer0-delta
c0edd1c194 Added BBC Maestro extractor 2024-06-12 05:37:34 +01:00
8 changed files with 175 additions and 8 deletions

View File

@@ -504,7 +504,8 @@ jobs:
- windows32 - windows32
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/download-artifact@v4 - name: Download artifacts
uses: actions/download-artifact@v4
with: with:
path: artifact path: artifact
pattern: build-bin-* pattern: build-bin-*

View File

@@ -28,3 +28,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.MASTER_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -41,3 +41,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.NIGHTLY_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -2,10 +2,6 @@ name: Release
on: on:
workflow_call: workflow_call:
inputs: inputs:
prerelease:
required: false
default: true
type: boolean
source: source:
required: false required: false
default: '' default: ''
@@ -18,6 +14,10 @@ on:
required: false required: false
default: '' default: ''
type: string type: string
prerelease:
required: false
default: true
type: boolean
workflow_dispatch: workflow_dispatch:
inputs: inputs:
source: source:
@@ -278,11 +278,20 @@ jobs:
make clean-cache make clean-cache
python -m build --no-isolation . python -m build --no-isolation .
- name: Upload artifacts
if: github.event_name != 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: build-pypi
path: |
dist/*
compression-level: 0
- name: Publish to PyPI - name: Publish to PyPI
if: github.event_name == 'workflow_dispatch'
uses: pypa/gh-action-pypi-publish@release/v1 uses: pypa/gh-action-pypi-publish@release/v1
with: with:
verbose: true verbose: true
attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)
publish: publish:
needs: [prepare, build] needs: [prepare, build]

View File

@@ -52,7 +52,7 @@ default = [
"pycryptodomex", "pycryptodomex",
"requests>=2.32.2,<3", "requests>=2.32.2,<3",
"urllib3>=1.26.17,<3", "urllib3>=1.26.17,<3",
"websockets>=13.0", "websockets>=13.0,<14",
] ]
curl-cffi = [ curl-cffi = [
"curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'", "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

View File

@@ -24,7 +24,7 @@ try:
from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401 from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401
from Crypto.Hash import CMAC, SHA1 # noqa: F401 from Crypto.Hash import CMAC, SHA1 # noqa: F401
from Crypto.PublicKey import RSA # noqa: F401 from Crypto.PublicKey import RSA # noqa: F401
except ImportError: except (ImportError, OSError):
__version__ = f'broken {__version__}'.strip() __version__ = f'broken {__version__}'.strip()

View File

@@ -217,6 +217,7 @@ from .bbc import (
BBCCoUkIPlayerGroupIE, BBCCoUkIPlayerGroupIE,
BBCCoUkPlaylistIE, BBCCoUkPlaylistIE,
) )
from .bbcmaestro import BBCMaestroComIE
from .beacon import BeaconTvIE from .beacon import BeaconTvIE
from .beatbump import ( from .beatbump import (
BeatBumpPlaylistIE, BeatBumpPlaylistIE,

View File

@@ -0,0 +1,122 @@
import re
from .common import InfoExtractor
from ..utils import orderedSet, smuggle_url, unsmuggle_url
class BBCMaestroComIE(InfoExtractor):
    """Extractor for bbcmaestro.com course trailers and (cookie-gated) lessons."""

    _VALID_URL = r'https?://(?:www\.)?bbcmaestro\.com/courses/(?P<id>[^?]+)'
    _TESTS = [{
        'url': 'https://www.bbcmaestro.com/courses/julia-donaldson/writing-children-s-picture-books/trailer',
        'info_dict': {
            'id': 'julia-donaldson/writing-children-s-picture-books/trailer',
            'ext': 'mp4',
            'title': 'Course trailer',
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _do_extract_video(self, url, webpage, video_id):
        """Extract a single video (trailer or lesson) from its page."""
        # Trailer pages have a fixed title; lesson pages carry theirs in an <h1>.
        if '/lessons/' in url:
            title = self._html_search_regex(
                r'<h1\b[^>]*lesson[^>]*title[^>]*>\s*(.+?)\s*</h1>',
                webpage, name='title', flags=re.S)
        else:
            title = 'Course trailer'

        m3u8_url = self._html_search_regex(
            r'<source[^>]+src="?\'?(\S+\.m3u8)', webpage, 'video URL')
        fmts = []
        if m3u8_url:
            fmts = self._extract_m3u8_formats(
                m3u8_url=m3u8_url, video_id=video_id, ext='mp4',
                m3u8_id='hls', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'formats': fmts,
        }

    def _do_extract_playlist(self, url, webpage):
        """Build a playlist of all lessons linked from a course page."""
        # Twitter Title usually: <Lesson Title> - <Author> | <Course Title>
        twitter_title = self._html_search_meta(
            ['twitter:title'], webpage, fatal=True)
        playlist_title = twitter_title.split('-', maxsplit=1)[-1].replace('|', '-')

        stripped_url = url.split('?', maxsplit=1)[0]
        self.write_debug(f'url_without_query_parameters: {stripped_url}')
        playlist_id = self._search_regex(
            pattern=r'.*/courses/([^/]+/[^/]+).*',
            string=stripped_url,
            name='Playlist ID (from URL)',
            fatal=True)
        self.write_debug(f'playlist_id: {playlist_id}')

        lesson_ids = orderedSet(re.findall(
            r'href=[^>]*/courses/' + re.escape(playlist_id) + r'/lessons/([^?]+)',
            webpage))
        entries = []
        for lesson_id in lesson_ids:
            # Smuggle 'forcevideo' so re-entry extracts the lesson, not the playlist.
            lesson_url = smuggle_url(
                f'https://www.bbcmaestro.com/courses/{playlist_id}/lessons/{lesson_id}',
                {'forcevideo': True})
            entries.append(self.url_result(lesson_url, ie=BBCMaestroComIE.ie_key()))

        return self.playlist_result(
            entries=entries,
            playlist_id=playlist_id,
            playlist_title=playlist_title)

    def _check_login_provided(self):
        # Cookies are the only supported authentication mechanism here.
        if not self._cookies_passed:
            self.raise_login_required('Login details are needed to download this content', method='cookies')

    def _real_extract(self, url):
        url, smuggled_data = unsmuggle_url(url, {})
        self.write_debug(f'Extracting from: {url}')
        video_id = self._match_id(url)
        self.write_debug(f'Video ID: {video_id}')
        webpage = self._download_webpage(url, video_id)

        # Note: only the course trailer is downloadable without login.
        needs_login = '/lessons/' in url
        if needs_login:
            self._check_login_provided()

        treat_as_playlist = (
            needs_login
            and not smuggled_data.get('forcevideo')
            and not self.get_param('noplaylist'))
        if treat_as_playlist:
            return self._do_extract_playlist(url, webpage=webpage)
        return self._do_extract_video(url, webpage=webpage, video_id=video_id)