Compare commits

...

6 Commits

Author  SHA1  Message  Date

thuttinpasseron  2557107731
Merge 10003ccacf into b83ca24eb7
2024-11-10 09:19:38 +05:30

sepro  b83ca24eb7
[core] Catch broken Cryptodome installations (#11486)
Authored by: seproDev
2024-11-10 00:53:49 +01:00

bashonly  240a7d43c8
[build] Pin websockets version to >=13.0,<14 (#11488)
websockets 14.0 causes CI test failures (a lot more of them)
Authored by: bashonly
2024-11-09 23:46:47 +00:00

bashonly  f13df591d4
[build] Enable attestations for trusted publishing (#11420)
Reverts 428ffb75aa
Authored by: bashonly
2024-11-09 23:26:02 +00:00

Thibault HUTTIN-PASSERON  10003ccacf
Fix: fixed _VALID_URL regexp
2024-07-05 00:13:03 +02:00

Thibault HUTTIN-PASSERON  876b2f3c75
Feat: implemented complete season/show download from FranceTV
2024-07-05 00:07:46 +02:00
8 changed files with 142 additions and 8 deletions

View File

@@ -504,7 +504,8 @@ jobs:
       - windows32
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v4
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
         with:
           path: artifact
           pattern: build-bin-*

View File

@@ -28,3 +28,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.MASTER_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

View File

@@ -41,3 +41,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.NIGHTLY_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

View File

@@ -2,10 +2,6 @@ name: Release
 on:
   workflow_call:
     inputs:
-      prerelease:
-        required: false
-        default: true
-        type: boolean
       source:
         required: false
         default: ''
@@ -18,6 +14,10 @@ on:
         required: false
         default: ''
         type: string
+      prerelease:
+        required: false
+        default: true
+        type: boolean
   workflow_dispatch:
     inputs:
       source:
@@ -278,11 +278,20 @@ jobs:
           make clean-cache
           python -m build --no-isolation .

+      - name: Upload artifacts
+        if: github.event_name != 'workflow_dispatch'
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-pypi
+          path: |
+            dist/*
+          compression-level: 0
+
       - name: Publish to PyPI
+        if: github.event_name == 'workflow_dispatch'
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           verbose: true
-          attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)

   publish:
     needs: [prepare, build]

View File

@@ -52,7 +52,7 @@ default = [
     "pycryptodomex",
     "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
-    "websockets>=13.0",
+    "websockets>=13.0,<14",
 ]
 curl-cffi = [
     "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

View File

@@ -24,7 +24,7 @@ try:
     from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
     from Crypto.Hash import CMAC, SHA1  # noqa: F401
     from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
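
The pattern here: importing a package whose native extensions are broken can raise OSError (for example, a missing or incompatible shared library) rather than ImportError, so both are caught when probing the optional dependency. A minimal self-contained sketch of the same pattern:

# Probe an optional native dependency; a broken installation may raise
# OSError (e.g. an unloadable shared library) instead of ImportError.
try:
    from Crypto.Cipher import AES  # noqa: F401
except (ImportError, OSError):
    AES = None  # treat the dependency as absent or broken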

View File

@@ -682,6 +682,7 @@ from .francetv import (
     FranceTVIE,
     FranceTVInfoIE,
     FranceTVSiteIE,
+    FranceTVSiteShowIE,
 )
 from .freesound import FreesoundIE
 from .freespeech import FreespeechIE

View File

@@ -449,3 +449,92 @@ class FranceTVInfoIE(FranceTVBaseInfoExtractor):
             webpage, 'video id')

         return self._make_url_result(video_id, url=url)
+
+
+class FranceTVSiteShowIE(FranceTVBaseInfoExtractor):
+    IE_NAME = 'FranceTVSite:playlist'
+    _VALID_URL = r'''(?x)
+        https?://(?:(?:www\.)?france\.tv|mobile\.france\.tv)/
+        (?:[^/]+)/(?P<id>[^/]+)/
+        (?:toutes-les-videos|saison-(?P<season_number>\d+))/?$'''
+    _TESTS = [{
+        'url': 'https://www.france.tv/france-3/wakfu/saison-1/',
+        'info_dict': {
+            'id': 'Wakfu Season 1',
+            'title': 'Wakfu',
+            'description': 'md5:7d34b24a7fdde32da265bf01580803c3',
+        },
+        'playlist_count': 26,
+    }, {
+        'url': 'https://www.france.tv/france-2/dix-pour-cent/toutes-les-videos/',
+        'info_dict': {
+            'id': 'Dix pour Cent All available videos',
+            'title': 'Dix pour Cent',
+        },
+        'playlist_mincount': 24,
+    }, {
+        'url': 'https://www.france.tv/france-2/drag-race-france/toutes-les-videos/',
+        'info_dict': {
+            'id': 'Drag Race France All available videos',
+            'title': 'Drag Race France',
+        },
+        'playlist_mincount': 32,
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        url_groups = self._match_valid_url(url).groupdict()
+        is_seasons_url = url_groups.get('season_number') is not None
+
+        info_dict = {}
+        if is_seasons_url:
+            display_title = self._search_regex(
+                r'<div\s*class=(["\'])c-hero-banner-program__data-description\1\s*><h1\s*class=\1c-headlines\1\s*>(?P<display_title>[^<]+)</h1\s*></div\s*>',
+                webpage, name='display_title', group='display_title')
+            display_title = display_title.split('-')[0].strip()
+            internal_id = f'{display_title} Season {url_groups["season_number"]}'
+            info_dict['playlist_description'] = self._search_regex(
+                r'<div\s*class=(["\'])c-hero-banner-program__data-synopsis\1\s*>(?P<series_description>[^<]+)</div\s*>',
+                webpage, name='series_description', group='series_description', default=None).strip()
+        else:
+            display_title = self._search_regex(
+                r'<h1\s*class=(["\'])c-headlines\s*c-page-title\s*c-headlines--title-1\1\s*>(?P<display_title>[^<]+)</h1\s*>',
+                webpage, name='display_title', group='display_title').strip()
+            internal_id = f'{display_title} All available videos'
+
+        info_dict['playlist_id'] = internal_id
+        info_dict['playlist_title'] = display_title
+
+        def entries():
+            # The page contains only the first videos; further requests must be made to get the complete video list
+            (more_base_url, current_page, more_page_number) = self._search_regex(
+                r'''(?x)
+                    (?:data-current-page\s*=)\s*(["\'])(?P<current_page>(?:(?!\1).)+)\1\s*
+                    (?:data-url\s*=)\s*\1(?P<more_base_url>(?:(?!\1).)+)\1\s*
+                    (?:data-max-page\s*=)\s*\1(?P<more_page_number>(?:(?!\1).)+)\1''',
+                webpage, 'more_base_url', group=['more_base_url', 'current_page', 'more_page_number'])
+
+            parsed_url = urllib.parse.urlparse(url)
+            (scheme, netloc, _, _, _, _) = parsed_url
+            videos_list = ''.join([
+                self._download_webpage(f'{scheme}://{netloc}/{more_base_url}?page={page}',
+                                       f'{display_id} page {page + 1}')
+                for page in range(int(current_page), int(more_page_number) + 1)
+            ])
+            video_ids = [
+                match.group('id') for match in re.finditer(
+                    r'(?:data-main-video\s*=|(?:video_factory_id|videoId)["\']?\s*[:=])\s*(["\'])(?P<id>(?:(?!\1).)+)\1',
+                    videos_list)
+            ]
+
+            for video_id in video_ids:
+                yield self.url_result(f'francetv:{video_id}', ie=FranceTVIE, video_id=video_id,
+                                      **FranceTVIE._extract_video(self, video_id, hostname=parsed_url.hostname))
+
+        return self.playlist_result(entries(), **info_dict)
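
For context, a hypothetical usage sketch (assuming a build with this branch installed; the option and method names follow the public yt-dlp API): a saison-N or toutes-les-videos URL would be extracted as a playlist whose entries are francetv: video results.

from yt_dlp import YoutubeDL

# Flat extraction lists the playlist entries without downloading them.
opts = {'extract_flat': 'in_playlist', 'quiet': True}
with YoutubeDL(opts) as ydl:
    info = ydl.extract_info('https://www.france.tv/france-3/wakfu/saison-1/', download=False)
    print(info['id'], info['title'], len(list(info['entries'])))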