Compare commits

...

7 Commits

Author    SHA1        Message                                                       Date

Mozi      77198411dc  Merge 2761d7f271 into b83ca24eb7                              2024-11-10 09:28:03 +05:30

sepro     b83ca24eb7  [core] Catch broken Cryptodome installations (#11486)         2024-11-10 00:53:49 +01:00
                      Authored by: seproDev

bashonly  240a7d43c8  [build] Pin websockets version to >=13.0,<14 (#11488)         2024-11-09 23:46:47 +00:00
                      websockets 14.0 causes CI test failures (a lot more of them)
                      Authored by: bashonly

bashonly  f13df591d4  [build] Enable attestations for trusted publishing (#11420)   2024-11-09 23:26:02 +00:00
                      Reverts 428ffb75aa
                      Authored by: bashonly

Mozi      2761d7f271  Apply suggestions from code review                            2024-09-18 05:26:47 +00:00
                      Co-authored-by: N/Ame <173015200+grqz@users.noreply.github.com>

Mozi      eb98d9c020  no matching quickies                                          2024-09-17 18:09:08 +00:00

Mozi      6e3d0e5810  [ie/XVideosUser] Add extractor                                2024-09-17 17:52:10 +00:00
8 changed files with 174 additions and 11 deletions

.github/workflows/build.yml

@@ -504,7 +504,8 @@ jobs:
       - windows32
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v4
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
         with:
           path: artifact
           pattern: build-bin-*

.github/workflows/release-master.yml

@@ -28,3 +28,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.MASTER_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

.github/workflows/release-nightly.yml

@@ -41,3 +41,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.NIGHTLY_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

.github/workflows/release.yml

@@ -2,10 +2,6 @@ name: Release
 on:
   workflow_call:
     inputs:
-      prerelease:
-        required: false
-        default: true
-        type: boolean
       source:
         required: false
         default: ''
@@ -18,6 +14,10 @@
         required: false
         default: ''
         type: string
+      prerelease:
+        required: false
+        default: true
+        type: boolean
   workflow_dispatch:
     inputs:
       source:
@@ -278,11 +278,20 @@ jobs:
           make clean-cache
           python -m build --no-isolation .

+      - name: Upload artifacts
+        if: github.event_name != 'workflow_dispatch'
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-pypi
+          path: |
+            dist/*
+          compression-level: 0
+
       - name: Publish to PyPI
+        if: github.event_name == 'workflow_dispatch'
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           verbose: true
-          attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)

   publish:
     needs: [prepare, build]

pyproject.toml

@@ -52,7 +52,7 @@ default = [
     "pycryptodomex",
     "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
-    "websockets>=13.0",
+    "websockets>=13.0,<14",
 ]
 curl-cffi = [
     "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

yt_dlp/dependencies/Cryptodome.py

@@ -24,7 +24,7 @@ try:
     from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
     from Crypto.Hash import CMAC, SHA1  # noqa: F401
     from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
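
Context for the one-line change above: a broken PyCryptodome installation can fail while loading its compiled extension modules, which surfaces as OSError rather than ImportError, so catching only ImportError still crashes at import time. A minimal sketch of the same guarded-import pattern outside yt-dlp (module choice is illustrative):

# A damaged native extension can raise OSError from the loader, not ImportError
try:
    from Crypto.Cipher import AES  # the compiled backend may fail to load
except (ImportError, OSError):
    AES = None  # mark the dependency unavailable instead of crashing

if AES is None:
    print('Cryptodome unavailable; AES-dependent features disabled')
else:
    cipher = AES.new(b'0123456789abcdef', AES.MODE_ECB)
    print(cipher.encrypt(b'sixteen byte msg').hex())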

yt_dlp/extractor/_extractors.py

@@ -2494,6 +2494,7 @@ from .xstream import XstreamIE
 from .xvideos import (
     XVideosIE,
     XVideosQuickiesIE,
+    XVideosUserIE,
 )
 from .xxxymovies import XXXYMoviesIE
 from .yahoo import (
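
The added import is what registers XVideosUserIE with yt-dlp's extractor machinery. One way to confirm the registration on this branch, assuming yt-dlp's gen_extractor_classes helper:

from yt_dlp.extractor import gen_extractor_classes

# ie_key() defaults to the class name minus its IE suffix, i.e. 'XVideosUser'
print(any(ie.ie_key() == 'XVideosUser' for ie in gen_extractor_classes()))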

yt_dlp/extractor/xvideos.py

@@ -1,14 +1,22 @@
+import functools
 import re
 import urllib.parse

 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    OnDemandPagedList,
     clean_html,
     determine_ext,
+    get_element_by_class,
+    get_element_by_id,
     int_or_none,
+    js_to_json,
     parse_duration,
+    remove_end,
+    str_or_none,
 )
+from ..utils.traversal import traverse_obj


 class XVideosIE(InfoExtractor):
@@ -108,9 +116,8 @@ class XVideosIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

-        mobj = re.search(r'<h1 class="inlineError">(.+?)</h1>', webpage)
-        if mobj:
-            raise ExtractorError(f'{self.IE_NAME} said: {clean_html(mobj.group(1))}', expected=True)
+        if inline_error := get_element_by_class('inlineError', webpage):
+            raise ExtractorError(f'{self.IE_NAME} said: {clean_html(inline_error)}', expected=True)

         title = self._html_search_regex(
             (r'<title>(?P<title>.+?)\s+-\s+XVID',
@@ -223,3 +230,114 @@ class XVideosQuickiesIE(InfoExtractor):
     def _real_extract(self, url):
         domain, id_ = self._match_valid_url(url).group('domain', 'id')
         return self.url_result(f'https://{domain}/video{"" if id_.isdecimal() else "."}{id_}/_', XVideosIE, id_)
+
+
+class XVideosUserIE(InfoExtractor):
+    _VALID_URL = r'''(?x)
+        https?://(?:.+?\.)?xvideos\.(?:com|es)/
+        (?P<page_path>(?:channels|amateur-channels|model-channels|pornstar-channels|profiles)/
+        (?P<id>[^/?#&]+))(?:(?:(?!\#quickies).)+)?$'''
+    _TESTS = [{
+        # channel; "Most viewed"
+        'url': 'https://www.xvideos.com/channels/college_girls_gone_bad#_tabVideos,rating',
+        'info_dict': {
+            'id': '70472676',
+            'display_id': 'college_girls_gone_bad',
+            'title': 'College Girls Gone Bad',
+            'description': 'Hot college girls in real sorority hazing acts!',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 99,
+    }, {
+        # channel; "New"
+        'url': 'https://www.xvideos.com/model-channels/shonariver#_tabVideos,new',
+        'info_dict': {
+            'id': '407014987',
+            'display_id': 'shonariver',
+            'title': 'Shona River',
+            'description': 'md5:ad6654037aee13535b0d15a020eb82d0',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 9,
+    }, {
+        # channel; "Most commented"
+        'url': 'https://www.xvideos.com/amateur-channels/queanfuckingcucking#_tabVideos,comments',
+        'info_dict': {
+            'id': '227800369',
+            'display_id': 'queanfuckingcucking',
+            'title': 'Queanfuckingcucking',
+            'description': 'md5:265a602186d4e811082782cd6a97b064',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 8,
+    }, {
+        # channel; "Watched recently" (default)
+        'url': 'https://www.xvideos.com/channels/girlfriendsfilmsofficial#_tabVideos',
+        'info_dict': {
+            'id': '244972019',
+            'display_id': 'girlfriendsfilmsofficial',
+            'title': 'Girlfriend\'s Films Official',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 500,
+    }, {
+        # /profiles/***
+        'url': 'https://www.xvideos.com/profiles/jacobsy',
+        'info_dict': {
+            'id': '962189',
+            'display_id': 'jacobsy',
+            'title': 'Jacobsy',
+            'description': 'fetishist and bdsm lover...',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 63,
+    }, {
+        # no description, no videos
+        'url': 'https://www.xvideos.com/profiles/espoder',
+        'info_dict': {
+            'id': '581228107',
+            'display_id': 'espoder',
+            'title': 'Espoder',
+            'thumbnails': 'count:2',
+        },
+        'playlist_count': 0,
+    }, {
+        # no description
+        'url': 'https://www.xvideos.com/profiles/alfsun',
+        'info_dict': {
+            'id': '551066909',
+            'display_id': 'alfsun',
+            'title': 'Alfsun',
+            'thumbnails': 'count:2',
+        },
+        'playlist_mincount': 3,
+    }]
+    _PAGE_SIZE = 36
+
+    def _real_extract(self, url):
+        page_path, display_id = self._match_valid_url(url).groups()
+        webpage = self._download_webpage(url, display_id)
+
+        fragment = urllib.parse.urlparse(url).fragment
+        sort_order = traverse_obj(
+            ['new', 'rating', 'comments'], (lambda _, v: v in fragment), default='best', get_all=False)
+        page_base_url = f'https://www.xvideos.com/{page_path}/videos/{sort_order}'
+
+        user_info = traverse_obj(self._search_json(
+            r'<script>.*?window\.xv\.conf\s*=', webpage, 'xv.conf',
+            display_id, transform_source=js_to_json, fatal=False), ('data', 'user'))
+        user_id = traverse_obj(user_info, ('id_user', {str_or_none})) or display_id
+
+        return self.playlist_result(
+            OnDemandPagedList(functools.partial(self._get_page, page_base_url, user_id), self._PAGE_SIZE),
+            user_id, traverse_obj(user_info, ('display', {str_or_none})),
+            remove_end(clean_html(get_element_by_id('header-about-me', webpage)), '+'),
+            display_id=(traverse_obj(user_info, ('username', {str_or_none})) or display_id),
+            thumbnails=traverse_obj(user_info, (['profile_picture_small', 'profile_picture'], {lambda x: {'url': x}})))
+
+    def _get_page(self, page_base_url, user_id, page_num):
+        page_info = self._download_json(
+            f'{page_base_url}/{page_num}', user_id, f'Downloading page {page_num + 1}')
+        yield from [self.url_result(
+            f'https://www.xvideos.com/video{video["id"]}/{video["eid"]}', ie=XVideosIE.ie_key())
+            for video in page_info['videos']]
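
For readers unfamiliar with the helper used above: OnDemandPagedList wraps a page callback plus a fixed page size and invokes the callback with a zero-based page number only when entries are consumed, so each playlist page costs one JSON request. A toy sketch of the same contract against list data instead of HTTP:

import functools

from yt_dlp.utils import OnDemandPagedList

PAGE_SIZE = 36  # mirrors XVideosUserIE._PAGE_SIZE

def get_page(items, page_num):
    # page_num is zero-based, matching _get_page above
    start = page_num * PAGE_SIZE
    yield from items[start:start + PAGE_SIZE]

items = [f'video{i}' for i in range(100)]
pages = OnDemandPagedList(functools.partial(get_page, items), PAGE_SIZE)
print(pages.getslice(0, 5))  # only the first page is actually fetched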