Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-26 17:21:23 +01:00)

Comparing commits c99d9040cd...008e70948d (7 commits)
Commits:
008e70948d
b83ca24eb7
240a7d43c8
f13df591d4
2eaf303b63
367ec929f4
b901e4fb8a
.github/workflows/build.yml (vendored): 3 changes

@@ -504,7 +504,8 @@ jobs:
       - windows32
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v4
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
         with:
           path: artifact
           pattern: build-bin-*
.github/workflows/release-master.yml (vendored): 17 changes

@@ -28,3 +28,20 @@ jobs:
       actions: write  # For cleaning up cache
       id-token: write  # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.MASTER_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write  # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true
.github/workflows/release-nightly.yml (vendored): 17 changes

@@ -41,3 +41,20 @@ jobs:
       actions: write  # For cleaning up cache
       id-token: write  # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.NIGHTLY_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write  # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true
.github/workflows/release.yml (vendored): 19 changes

@@ -2,10 +2,6 @@ name: Release
 on:
   workflow_call:
     inputs:
-      prerelease:
-        required: false
-        default: true
-        type: boolean
       source:
         required: false
         default: ''

@@ -18,6 +14,10 @@ on:
         required: false
         default: ''
         type: string
+      prerelease:
+        required: false
+        default: true
+        type: boolean
   workflow_dispatch:
     inputs:
       source:

@@ -278,11 +278,20 @@ jobs:
           make clean-cache
           python -m build --no-isolation .

+      - name: Upload artifacts
+        if: github.event_name != 'workflow_dispatch'
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-pypi
+          path: |
+            dist/*
+          compression-level: 0
+
       - name: Publish to PyPI
+        if: github.event_name == 'workflow_dispatch'
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           verbose: true
-          attestations: false  # Currently doesn't work w/ reusable workflows (breaks nightly)

   publish:
     needs: [prepare, build]
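For reference, the `python -m build --no-isolation .` step shown in the last hunk is what produces the sdist and wheel under dist/, which the new "Upload artifacts" step then ships as the build-pypi artifact consumed by the publish_pypi jobs above. A rough local sketch of that build-and-inspect step (illustrative only, not part of the workflow; it assumes the `build` package is installed):

# Illustrative sketch: run the same build command as the workflow, then list
# the files that would be uploaded as the `build-pypi` artifact.
import pathlib
import subprocess

subprocess.run(['python', '-m', 'build', '--no-isolation', '.'], check=True)
for artifact in sorted(pathlib.Path('dist').glob('*')):
    print(artifact.name)  # e.g. an sdist (.tar.gz) and a wheel (.whl)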
@@ -52,7 +52,7 @@ default = [
     "pycryptodomex",
     "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
-    "websockets>=13.0",
+    "websockets>=13.0,<14",
 ]
 curl-cffi = [
     "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",
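The hunk above (from the project's dependency metadata, presumably pyproject.toml) caps the default websockets requirement below major version 14, presumably to avoid untested breaking changes in newer releases. A minimal sketch of checking that constraint at runtime, using only the standard library; the helper name is invented for illustration:

# Hypothetical helper: check that the installed websockets package satisfies
# the declared ">=13.0,<14" range (major-version check only).
import importlib.metadata

def websockets_in_declared_range() -> bool:
    try:
        version = importlib.metadata.version('websockets')
    except importlib.metadata.PackageNotFoundError:
        return False  # optional dependency not installed at all
    major = int(version.split('.')[0])
    return 13 <= major < 14

print('websockets within >=13.0,<14:', websockets_in_declared_range())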
@@ -24,7 +24,7 @@ try:
         from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
         from Crypto.Hash import CMAC, SHA1  # noqa: F401
         from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
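The change above widens the import guard around the pycryptodome modules: loading a compiled extension can fail with OSError (for example, a broken DLL or shared object), not only ImportError, and both cases should mark the dependency as broken instead of crashing at import time. A minimal standalone sketch of the same pattern, independent of yt-dlp's module layout:

# Sketch of the widened guard: treat both ImportError and OSError as
# "pycryptodome is unusable" rather than letting a broken native extension
# abort the program when it is imported.
try:
    from Crypto.Cipher import AES  # noqa: F401
    HAVE_PYCRYPTODOME = True
except (ImportError, OSError):  # OSError: e.g. a DLL/shared-object load failure
    HAVE_PYCRYPTODOME = False

print('pycryptodome usable:', HAVE_PYCRYPTODOME)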
@@ -7,6 +7,7 @@ from ..utils import (
     parse_resolution,
     str_or_none,
     traverse_obj,
+    update_url,
     url_basename,
     urlencode_postdata,
     urljoin,

@@ -34,6 +35,7 @@ class ZoomIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'Prépa AF2023 - Séance 5 du 11 avril - R20/VM/GO',
         },
+        'skip': 'This recording has expired',
     }, {
         # share URL
         'url': 'https://us02web.zoom.us/rec/share/hkUk5Zxcga0nkyNGhVCRfzkA2gX_mzgS3LpTxEEWJz9Y_QpIQ4mZFOUx7KZRZDQA.9LGQBdqmDAYgiZ_8',
@@ -61,41 +63,59 @@ class ZoomIE(InfoExtractor):
         return self._search_json(
             r'window\.__data__\s*=', webpage, 'data', video_id, transform_source=js_to_json)

-    def _get_real_webpage(self, url, base_url, video_id, url_type):
-        webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage')
-        try:
-            form = self._form_hidden_inputs('password_form', webpage)
-        except ExtractorError:
-            return webpage
+    def _try_login(self, url, base_url, video_id, form):
+        # This will most likely only work for password-protected meetings

         password = self.get_param('videopassword')
         if not password:
             raise ExtractorError(
                 'This video is protected by a passcode, use the --video-password option', expected=True)

         is_meeting = form.get('useWhichPasswd') == 'meeting'
         validation = self._download_json(
-            base_url + 'rec/validate%s_passwd' % ('_meet' if is_meeting else ''),
+            base_url + 'nws/recording/1.0/validate%s-passwd' % ('-meeting' if is_meeting else ''),
             video_id, 'Validating passcode', 'Wrong passcode', data=urlencode_postdata({
-                'id': form[('meet' if is_meeting else 'file') + 'Id'],
+                'id': form[('meeting' if is_meeting else 'file') + '_id'],
                 'passwd': password,
                 'action': form.get('action'),
             }))

         if not validation.get('status'):
             raise ExtractorError(validation['errorMessage'], expected=True)
-        return self._download_webpage(url, video_id, note=f'Re-downloading {url_type} webpage')
+
+    def _get_real_webpage(self, url, base_url, video_id, url_type):
+        webpage = self._download_webpage(url, video_id, note=f'Downloading {url_type} webpage')
+
+        data = self._get_page_data(webpage, video_id)
+        if data.get('componentName') != 'need-password':  # not password protected
+            return webpage
+
+        # Password-protected:
+        self._try_login(url, base_url, video_id, form=data)
+        # Return the new HTML document
+        new_url = f"{base_url}rec/share/{data['meeting_id']}"
+        return self._download_webpage(new_url, video_id, note=f'Re-downloading {url_type} webpage')
+
+    def _get_share_redirect_url(self, url, base_url, video_id):
+        """Converts a `/rec/share` url to the corresponding `/rec/play` url, performs login if necessary"""
+        webpage = self._get_real_webpage(url, base_url, video_id, 'share')
+        meeting_id = self._get_page_data(webpage, video_id)['meetingId']
+        redirect_dict = self._download_json(
+            f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}',
+            video_id, note='Downloading share info JSON')['result']
+        redirect_path = redirect_dict.pop('redirectUrl')
+        url = update_url(urljoin(base_url, redirect_path), query_update=redirect_dict)
+
+        if redirect_dict.get('componentName') == 'need-password':
+            # First login, then return redirection URL
+            return self._get_share_redirect_url(url, base_url, video_id)
+
+        return url

     def _real_extract(self, url):
         base_url, url_type, video_id = self._match_valid_url(url).group('base_url', 'type', 'id')
-        query = {}
+
         if url_type == 'share':
-            webpage = self._get_real_webpage(url, base_url, video_id, 'share')
-            meeting_id = self._get_page_data(webpage, video_id)['meetingId']
-            redirect_path = self._download_json(
-                f'{base_url}nws/recording/1.0/play/share-info/{meeting_id}',
-                video_id, note='Downloading share info JSON')['result']['redirectUrl']
-            url = urljoin(base_url, redirect_path)
-            query['continueMode'] = 'true'
+            url = self._get_share_redirect_url(url, base_url, video_id)
+
         webpage = self._get_real_webpage(url, base_url, video_id, 'play')
         file_id = self._get_page_data(webpage, video_id)['fileId']
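In the hunk above, the new `_get_share_redirect_url` helper resolves a `/rec/share` URL to its `/rec/play` counterpart: it fetches the share-info JSON, pops `redirectUrl`, and merges the remaining fields into the query string via `update_url`. A rough standard-library approximation of that URL-composition step (field names follow the diff; the sample values are invented):

# Standard-library approximation of the redirect-URL composition done with
# yt-dlp's update_url()/urljoin(): join the relative redirectUrl onto the base
# URL, then merge the remaining share-info fields into its query string.
from urllib.parse import parse_qs, urlencode, urljoin, urlsplit, urlunsplit

def build_play_url(base_url: str, share_info: dict) -> str:
    redirect_path = share_info.pop('redirectUrl')
    parts = urlsplit(urljoin(base_url, redirect_path))
    query = parse_qs(parts.query)
    query.update({key: [str(value)] for key, value in share_info.items()})
    return urlunsplit(parts._replace(query=urlencode(query, doseq=True)))

# Invented sample values, for illustration only:
print(build_play_url(
    'https://us02web.zoom.us/',
    {'redirectUrl': 'rec/play/abc123', 'componentName': 'play'}))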
@@ -104,10 +124,12 @@ class ZoomIE(InfoExtractor):
             raise ExtractorError('Unable to extract file ID')

         data = self._download_json(
-            f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id, query=query,
+            f'{base_url}nws/recording/1.0/play/info/{file_id}', video_id, query={
+                'continueMode': 'true',  # Makes this return value include interpreter audio information
+            },
             note='Downloading play info JSON')['result']

         subtitles = {}
+        # XXX: Would be more appropriate to parse chapters separate from subtitles
         for _type in ('transcript', 'cc', 'chapter'):
             if data.get(f'{_type}Url'):
                 subtitles[_type] = [{
@@ -117,6 +139,19 @@

         formats = []

+        if data.get('interpreterAudioList'):
+            for audio in data.get('interpreterAudioList'):
+                formats.append({
+                    'format_note': f'Intepreter: {audio["languageText"]}',
+                    'url': audio['audioUrl'],
+                    'format_id': f'interpreter-{audio["icon"].lower()}',
+                    'ext': 'm4a',
+                    # There doesn't seem to be an explicit field for a standardized language code,
+                    # sometimes the `language` field may be more accurate than `icon`
+                    'language': audio['icon'].lower(),
+                    'vcodec': 'none',
+                })
+
         if data.get('viewMp4Url'):
             formats.append({
                 'format_note': 'Camera stream',
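The last hunk exposes each interpreter audio track as its own audio-only format, with a `format_id` derived from the `icon` field (e.g. `interpreter-en`). A hypothetical usage sketch for selecting such a track once this change is in place; the recording URL is a placeholder:

# Hypothetical usage: prefer an interpreter audio track if one exists,
# otherwise fall back to the best available audio. The URL is a placeholder.
import yt_dlp

ydl_opts = {'format': 'interpreter-en/bestaudio/best'}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://us02web.zoom.us/rec/play/PLACEHOLDER'])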