Compare commits

...

5 Commits

Author SHA1 Message Date
Dong Heon Hee
96232aac23
Merge d73bce4928 into b83ca24eb7 2024-11-10 14:42:00 +01:00
sepro
b83ca24eb7
[core] Catch broken Cryptodome installations (#11486)
Authored by: seproDev
2024-11-10 00:53:49 +01:00
bashonly
240a7d43c8
[build] Pin websockets version to >=13.0,<14 (#11488)
websockets 14.0 causes CI test failures (a lot more of them)

Authored by: bashonly
2024-11-09 23:46:47 +00:00
bashonly
f13df591d4
[build] Enable attestations for trusted publishing (#11420)
Reverts 428ffb75aa

Authored by: bashonly
2024-11-09 23:26:02 +00:00
hui1601
d73bce4928
[soopglobal] Add extractor 2024-06-08 23:48:01 +09:00
8 changed files with 181 additions and 8 deletions

View File

@@ -504,7 +504,8 @@ jobs:
- windows32 - windows32
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/download-artifact@v4 - name: Download artifacts
uses: actions/download-artifact@v4
with: with:
path: artifact path: artifact
pattern: build-bin-* pattern: build-bin-*

View File

@@ -28,3 +28,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.MASTER_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -41,3 +41,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.NIGHTLY_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -2,10 +2,6 @@ name: Release
on: on:
workflow_call: workflow_call:
inputs: inputs:
prerelease:
required: false
default: true
type: boolean
source: source:
required: false required: false
default: '' default: ''
@@ -18,6 +14,10 @@ on:
required: false required: false
default: '' default: ''
type: string type: string
prerelease:
required: false
default: true
type: boolean
workflow_dispatch: workflow_dispatch:
inputs: inputs:
source: source:
@@ -278,11 +278,20 @@ jobs:
make clean-cache make clean-cache
python -m build --no-isolation . python -m build --no-isolation .
- name: Upload artifacts
if: github.event_name != 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: build-pypi
path: |
dist/*
compression-level: 0
- name: Publish to PyPI - name: Publish to PyPI
if: github.event_name == 'workflow_dispatch'
uses: pypa/gh-action-pypi-publish@release/v1 uses: pypa/gh-action-pypi-publish@release/v1
with: with:
verbose: true verbose: true
attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)
publish: publish:
needs: [prepare, build] needs: [prepare, build]

View File

@@ -52,7 +52,7 @@ default = [
"pycryptodomex", "pycryptodomex",
"requests>=2.32.2,<3", "requests>=2.32.2,<3",
"urllib3>=1.26.17,<3", "urllib3>=1.26.17,<3",
"websockets>=13.0", "websockets>=13.0,<14",
] ]
curl-cffi = [ curl-cffi = [
"curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'", "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

View File

@@ -24,7 +24,7 @@ try:
from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401 from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401
from Crypto.Hash import CMAC, SHA1 # noqa: F401 from Crypto.Hash import CMAC, SHA1 # noqa: F401
from Crypto.PublicKey import RSA # noqa: F401 from Crypto.PublicKey import RSA # noqa: F401
except ImportError: except (ImportError, OSError):
__version__ = f'broken {__version__}'.strip() __version__ = f'broken {__version__}'.strip()

View File

@@ -1894,6 +1894,10 @@ from .sonyliv import (
SonyLIVIE, SonyLIVIE,
SonyLIVSeriesIE, SonyLIVSeriesIE,
) )
from .soopglobal import (
SoopGlobalLiveIE,
SoopGlobalVodIE,
)
from .soundcloud import ( from .soundcloud import (
SoundcloudEmbedIE, SoundcloudEmbedIE,
SoundcloudIE, SoundcloudIE,

View File

@@ -0,0 +1,125 @@
import uuid
from yt_dlp import int_or_none, traverse_obj
from yt_dlp.compat import functools
from yt_dlp.extractor.common import InfoExtractor
from yt_dlp.utils import UserNotLive, bool_or_none, parse_iso8601
class SoopGlobalLiveIE(InfoExtractor):
    """Extract live streams from the global SOOP service (sooplive.com).

    Calls the public sooplive API with a random per-session ``client-id``
    header, then resolves an HLS master playlist for the live stream.
    """
    IE_NAME = 'soopglobal:live'
    _VALID_URL = r'https?://www\.sooplive\.com/(?P<id>[\w]+$)'
    _TESTS = [{
        'url': 'https://www.sooplive.com/soopbowl',
        'info_dict': {
            'id': 'soopbowl',
            'ext': 'mp4',
            'title': str,
            'thumbnail': r're:^https?://.*\.jpg$',
            'channel': 'SoopBowl',
            'channel_id': 'soopbowl',
            'concurrent_view_count': int,
            'channel_follower_count': int,
            'timestamp': 1717852526,
            'upload_date': '20240608',
            'live_status': 'is_live',
            'view_count': int,
            # The extractor emits an int age limit (0 or 19), not a bool
            'age_limit': 0,
        },
    }]

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        # The API requires a client-id header; any fresh UUID is accepted
        client_id = str(uuid.uuid4())
        headers = {'client-id': client_id}

        live_detail = self._download_json(
            f'https://api.sooplive.com/stream/info/{channel_id}', channel_id,
            headers=headers,
            note='Downloading live info', errnote='Unable to download live info')
        if not live_detail.get('isStream'):
            raise UserNotLive(video_id=channel_id)

        # Korean age rating: adult-flagged streams are 19+
        age_limit = 19 if traverse_obj(live_detail, ('data', 'isAdult', {bool_or_none})) else 0

        live_statistic = self._download_json(
            f'https://api.sooplive.com/stream/info/{channel_id}/live', channel_id,
            headers=headers,
            note='Downloading live statistics', errnote='Unable to download live statistics')
        channel_info = self._download_json(
            f'https://api.sooplive.com/channel/info/{channel_id}', channel_id,
            headers=headers,
            note='Downloading channel information', errnote='Unable to download channel information')
        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://api.sooplive.com/media/live/{channel_id}/master.m3u8', channel_id,
            headers=headers,
            note='Downloading live stream', errnote='Unable to download live stream')

        return {
            'id': channel_id,
            'channel_id': channel_id,
            'is_live': True,
            'formats': formats,
            'subtitles': subtitles,
            'view_count': int_or_none(live_statistic.get('viewer')),
            'age_limit': age_limit,
            **traverse_obj(channel_info.get('streamerChannelInfo'), {
                'channel': ('nickname', {str}),
                'channel_id': ('channelId', {str}),
                'channel_follower_count': ('totalFollowerCount', {int_or_none}),
            }),
            **traverse_obj(live_detail.get('data'), {
                'title': ('title', {str}),
                # parse_iso8601 can be used directly; no functools.partial needed
                'timestamp': ('streamStartDate', {parse_iso8601}),
                'concurrent_view_count': ('totalStreamCumulativeViewer', {int_or_none}),
                'thumbnail': ('thumbnailUrl', {str}),
            }),
        }
class SoopGlobalVodIE(InfoExtractor):
    """Extract VODs (recorded videos) from the global SOOP service (sooplive.com).

    Fetches video metadata from the public sooplive API using a random
    per-session ``client-id`` header, then resolves an HLS master playlist.
    """
    IE_NAME = 'soopglobal:vod'
    _VALID_URL = r'https?://www\.sooplive\.com/video/(?P<id>[\d]+)'
    _TESTS = [{
        'url': 'https://www.sooplive.com/video/607',
        'info_dict': {
            'id': '607',
            'ext': 'mp4',
            'title': str,
            'thumbnail': r're:^https?://.*\.jpg$',
            'channel': '샤미요',
            'channel_id': 'shamiyo',
            'timestamp': 1717051284,
            'upload_date': '20240530',
            'view_count': int,
            # The extractor emits an int age limit (0 or 19), not a bool
            'age_limit': 0,
        },
        'params': {'skip_download': 'm3u8'},
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # The API requires a client-id header; any fresh UUID is accepted
        client_id = str(uuid.uuid4())
        headers = {'client-id': client_id}

        video_info = self._download_json(
            f'https://api.sooplive.com/vod/info/{video_id}', video_id,
            headers=headers,
            note='Downloading video info', errnote='Unable to download video info')
        channel_id = video_info.get('channelId')

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            f'https://api.sooplive.com/media/vod/{channel_id}/{video_id}/master.m3u8', video_id,
            headers=headers,
            note='Downloading video stream', errnote='Unable to download video stream')

        return {
            'id': video_id,
            'channel': video_info.get('nickName'),
            'channel_id': channel_id,
            'title': video_info.get('titleName'),
            'thumbnail': video_info.get('thumb'),
            'timestamp': parse_iso8601(video_info.get('createDate')),
            # Coerce defensively: the API may serialize counters as strings
            'view_count': int_or_none(video_info.get('readCnt')),
            # Korean age rating: adult-flagged VODs are 19+
            'age_limit': 19 if video_info.get('isAdult') else 0,
            'formats': formats,
            'subtitles': subtitles,
        }