Compare commits

5 Commits

Author SHA1 Message Date
pj47x
1d0bb00e0a
Merge 637ccf3523 into b83ca24eb7
2024-11-10 09:28:03 +05:30
sepro
b83ca24eb7
[core] Catch broken Cryptodome installations (#11486)
Authored by: seproDev
2024-11-10 00:53:49 +01:00
bashonly
240a7d43c8
[build] Pin websockets version to >=13.0,<14 (#11488)
websockets 14.0 causes CI test failures (a lot more of them)

Authored by: bashonly
2024-11-09 23:46:47 +00:00
bashonly
f13df591d4
[build] Enable attestations for trusted publishing (#11420)
Reverts 428ffb75aa

Authored by: bashonly
2024-11-09 23:26:02 +00:00
pj47x
637ccf3523
[ie/manyvids] Fix ManyVids extractor after website update
2024-10-13 21:22:35 +11:00
7 changed files with 139 additions and 120 deletions


@@ -504,7 +504,8 @@ jobs:
       - windows32
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v4
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
         with:
           path: artifact
           pattern: build-bin-*


@@ -28,3 +28,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.MASTER_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true


@@ -41,3 +41,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.NIGHTLY_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true


@@ -2,10 +2,6 @@ name: Release
 on:
   workflow_call:
     inputs:
-      prerelease:
-        required: false
-        default: true
-        type: boolean
       source:
         required: false
         default: ''
@@ -18,6 +14,10 @@
         required: false
         default: ''
         type: string
+      prerelease:
+        required: false
+        default: true
+        type: boolean
   workflow_dispatch:
     inputs:
       source:
@@ -278,11 +278,20 @@ jobs:
           make clean-cache
           python -m build --no-isolation .

+      - name: Upload artifacts
+        if: github.event_name != 'workflow_dispatch'
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-pypi
+          path: |
+            dist/*
+          compression-level: 0
+
       - name: Publish to PyPI
+        if: github.event_name == 'workflow_dispatch'
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           verbose: true
-          attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)

   publish:
     needs: [prepare, build]
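
Taken together, these workflow changes route PyPI publishing through the calling workflow: a `workflow_dispatch` release still publishes directly from the reusable Release workflow, while the master/nightly wrappers now receive a `build-pypi` artifact and publish it from their own `publish_pypi` job when the corresponding `*_PYPI_PROJECT` variable is set (which, per the commit message, is what allows attestations to be enabled again). A small Python sketch of that gating, purely illustrative and not part of the change:

```python
def pypi_publish_route(event_name: str, pypi_project_var: str) -> str:
    """Mirror the gating added to the Release workflow and its master/nightly wrappers."""
    if event_name == 'workflow_dispatch':
        # The reusable workflow still publishes directly, as before
        return 'publish directly'
    if pypi_project_var:  # vars.MASTER_PYPI_PROJECT / vars.NIGHTLY_PYPI_PROJECT
        # A build-pypi artifact is uploaded, then the wrapper's publish_pypi job publishes it
        return 'upload build-pypi artifact; publish via publish_pypi'
    return 'upload build-pypi artifact; skip PyPI publish'


assert pypi_publish_route('workflow_dispatch', '') == 'publish directly'
assert pypi_publish_route('schedule', 'yt-dlp') == 'upload build-pypi artifact; publish via publish_pypi'
```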


@@ -52,7 +52,7 @@ default = [
     "pycryptodomex",
     "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
-    "websockets>=13.0",
+    "websockets>=13.0,<14",
 ]
 curl-cffi = [
     "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",


@@ -24,7 +24,7 @@ try:
     from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
     from Crypto.Hash import CMAC, SHA1  # noqa: F401
     from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
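
This widens yt-dlp's guarded optional import so that a broken PyCryptodome install, which can raise `OSError` from a damaged compiled extension at import time rather than `ImportError`, is flagged as broken instead of crashing startup. A standalone sketch of the same pattern (illustrative only, not the actual `yt_dlp` dependency module):

```python
__version__ = ''

try:
    # A broken install can raise OSError (e.g. a corrupted compiled extension),
    # not only ImportError, so both are treated the same way.
    from Cryptodome import __version__
    from Cryptodome.Cipher import AES  # noqa: F401
except (ImportError, OSError):
    # Missing or broken: mark it instead of letting the program crash
    __version__ = f'broken {__version__}'.strip()

print(f'Cryptodome version: {__version__!r}')  # e.g. '3.21.0', or 'broken'/'broken 3.21.0'
```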


@@ -1,113 +1,100 @@
-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    extract_attributes,
-    int_or_none,
-    str_to_int,
-    url_or_none,
-    urlencode_postdata,
-)
+from .. import traverse_obj
+from ..utils import determine_ext, int_or_none, parse_count, parse_duration, parse_iso8601, url_or_none


 class ManyVidsIE(InfoExtractor):
-    _WORKING = False
+    _WORKING = True
     _VALID_URL = r'(?i)https?://(?:www\.)?manyvids\.com/video/(?P<id>\d+)'
-    _TESTS = [{
-        # preview video
-        'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/',
-        'md5': '03f11bb21c52dd12a05be21a5c7dcc97',
-        'info_dict': {
-            'id': '133957',
-            'ext': 'mp4',
-            'title': 'everthing about me (Preview)',
-            'uploader': 'ellyxxix',
-            'view_count': int,
-            'like_count': int,
-        },
-    }, {
-        # full video
-        'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/',
-        'md5': 'bb47bab0e0802c2a60c24ef079dfe60f',
-        'info_dict': {
-            'id': '935718',
-            'ext': 'mp4',
-            'title': 'MY FACE REVEAL',
-            'description': 'md5:ec5901d41808b3746fed90face161612',
-            'uploader': 'Sarah Calanthe',
-            'view_count': int,
-            'like_count': int,
-        },
-    }]
+    _TESTS = [
+        {
+            # Dead preview video
+            'skip': True,
+            'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/',
+            'md5': '03f11bb21c52dd12a05be21a5c7dcc97',
+            'info_dict': {
+                'id': '133957',
+                'ext': 'mp4',
+                'title': 'everthing about me (Preview)',
+                'uploader': 'ellyxxix',
+                'view_count': int,
+                'like_count': int,
+            },
+        },
+        {
+            # preview video
+            'url': 'https://www.manyvids.com/Video/530341/mv-tips-tricks',
+            'md5': '738dc723f7735ee9602f7ea352a6d058',
+            'info_dict': {
+                'id': '530341',
+                'ext': 'mp4',
+                'title': 'MV Tips &amp; Tricks (Preview)',
+                'description': 'md5:c3bae98c0f9453237c28b0f8795d9f83',
+                'thumbnail': 'https://cdn5.manyvids.com/php_uploads/video_images/DestinyDiaz/thumbs/thumb_Hs26ATOO7fcZaI9sx3XT_screenshot_001.jpg',
+                'uploader': 'DestinyDiaz',
+                'view_count': int,
+                'like_count': int,
+                'release_timestamp': 1508419904,
+                'tags': ['AdultSchool', 'BBW', 'SFW', 'TeacherFetish'],
+                'release_date': '20171019',
+                'duration': 3167.0,
+            },
+        },
+        {
+            # full video
+            'url': 'https://www.manyvids.com/Video/935718/MY-FACE-REVEAL/',
+            'md5': 'bb47bab0e0802c2a60c24ef079dfe60f',
+            'info_dict': {
+                'id': '935718',
+                'ext': 'mp4',
+                'title': 'MY FACE REVEAL',
+                'description': 'md5:ec5901d41808b3746fed90face161612',
+                'thumbnail': 'https://ods.manyvids.com/1001061960/3aa5397f2a723ec4597e344df66ab845/screenshots/thumbs/custom_1_180_5be09c1dcce03.jpg',
+                'uploader': 'Sarah Calanthe',
+                'view_count': int,
+                'like_count': int,
+                'release_date': '20181110',
+                'tags': ['EyeContact', 'Interviews', 'MaskFetish', 'MouthFetish', 'Redhead'],
+                'release_timestamp': 1541851200,
+                'duration': 224.0,
+            },
+        },
+    ]

     def _real_extract(self, url):
         video_id = self._match_id(url)

-        real_url = f'https://www.manyvids.com/video/{video_id}/gtm.js'
-        try:
-            webpage = self._download_webpage(real_url, video_id)
-        except Exception:
-            # probably useless fallback
-            webpage = self._download_webpage(url, video_id)
-
-        info = self._search_regex(
-            r'''(<div\b[^>]*\bid\s*=\s*(['"])pageMetaDetails\2[^>]*>)''',
-            webpage, 'meta details', default='')
-        info = extract_attributes(info)
-
-        player = self._search_regex(
-            r'''(<div\b[^>]*\bid\s*=\s*(['"])rmpPlayerStream\2[^>]*>)''',
-            webpage, 'player details', default='')
-        player = extract_attributes(player)
+        info = traverse_obj(
+            self._download_json(f'https://www.manyvids.com/bff/store/video/{video_id}', video_id),
+            ('data', {dict})) or {}
+
+        video_urls = traverse_obj(
+            self._download_json(f'https://www.manyvids.com/bff/store/video/{video_id}/private', video_id),
+            ('data', {dict})) or {}

         video_urls_and_ids = (
-            (info.get('data-meta-video'), 'video'),
-            (player.get('data-video-transcoded'), 'transcoded'),
-            (player.get('data-video-filepath'), 'filepath'),
-            (self._og_search_video_url(webpage, secure=False, default=None), 'og_video'),
+            (traverse_obj(video_urls, ('teaser', 'filepath')), 'preview'),
+            (video_urls.get('transcodedFilepath'), 'transcoded'),
+            (video_urls.get('filepath'), 'filepath'),
         )

-        def txt_or_none(s, default=None):
-            return (s.strip() or default) if isinstance(s, str) else default
-
-        uploader = txt_or_none(info.get('data-meta-author'))
-
-        def mung_title(s):
-            if uploader:
-                s = re.sub(rf'^\s*{re.escape(uploader)}\s+[|-]', '', s)
-            return txt_or_none(s)
-
-        title = (
-            mung_title(info.get('data-meta-title'))
-            or self._html_search_regex(
-                (r'<span[^>]+class=["\']item-title[^>]+>([^<]+)',
-                 r'<h2[^>]+class=["\']h2 m-0["\'][^>]*>([^<]+)'),
-                webpage, 'title', default=None)
-            or self._html_search_meta(
-                'twitter:title', webpage, 'title', fatal=True))
-
-        title = re.sub(r'\s*[|-]\s+ManyVids\s*$', '', title) or title
-
-        if any(p in webpage for p in ('preview_videos', '_preview.mp4')):
+        title = info.get('title')
+        uploader = traverse_obj(info, ('model', 'displayName'))
+        description = info.get('description')
+        likes = parse_count(info.get('likes'))
+        views = parse_count(info.get('views'))
+        thumbnail = url_or_none(info.get('screenshot')) or url_or_none(info.get('thumbnail'))
+        release_timestamp = parse_iso8601(info.get('launchDate'))
+        duration = parse_duration(info.get('videoDuration'))
+        tags = [t.get('label') for t in info.get('tagList')]
+
+        # If the video formats JSON only contains a teaser object, then it is a preview
+        if video_urls.get('teaser') and not video_urls.get('filepath'):
             title += ' (Preview)'
+            self.report_warning(
+                f'Only extracting preview. Video may be paid or subscription only. {self._login_hint()}')

-        mv_token = self._search_regex(
-            r'data-mvtoken=(["\'])(?P<value>(?:(?!\1).)+)\1', webpage,
-            'mv token', default=None, group='value')
-
-        if mv_token:
-            # Sets some cookies
-            self._download_webpage(
-                'https://www.manyvids.com/includes/ajax_repository/you_had_me_at_hello.php',
-                video_id, note='Setting format cookies', fatal=False,
-                data=urlencode_postdata({
-                    'mvtoken': mv_token,
-                    'vid': video_id,
-                }), headers={
-                    'Referer': url,
-                    'X-Requested-With': 'XMLHttpRequest',
-                })
-
         formats = []
         for v_url, fmt in video_urls_and_ids:
@@ -130,33 +117,21 @@ class ManyVidsIE(InfoExtractor):
             if f.get('height') is None:
                 f['height'] = int_or_none(
                     self._search_regex(r'_(\d{2,3}[02468])_', f['url'], 'video height', default=None))
-            if '/preview/' in f['url']:
-                f['format_id'] = '_'.join(filter(None, (f.get('format_id'), 'preview')))
+            if 'preview' in f['format_id']:
                 f['preference'] = -10
             if 'transcoded' in f['format_id']:
                 f['preference'] = f.get('preference', -1) - 1

-        def get_likes():
-            likes = self._search_regex(
-                rf'''(<a\b[^>]*\bdata-id\s*=\s*(['"]){video_id}\2[^>]*>)''',
-                webpage, 'likes', default='')
-            likes = extract_attributes(likes)
-            return int_or_none(likes.get('data-likes'))
-
-        def get_views():
-            return str_to_int(self._html_search_regex(
-                r'''(?s)<span\b[^>]*\bclass\s*=["']views-wrapper\b[^>]+>.+?<span\b[^>]+>\s*(\d[\d,.]*)\s*</span>''',
-                webpage, 'view count', default=None))
-
         return {
             'id': video_id,
             'title': title,
             'formats': formats,
-            'description': txt_or_none(info.get('data-meta-description')),
-            'uploader': txt_or_none(info.get('data-meta-author')),
-            'thumbnail': (
-                url_or_none(info.get('data-meta-image'))
-                or url_or_none(player.get('data-video-screenshot'))),
-            'view_count': get_views(),
-            'like_count': get_likes(),
+            'description': description,
+            'uploader': uploader,
+            'thumbnail': thumbnail,
+            'view_count': views,
+            'like_count': likes,
+            'release_timestamp': release_timestamp,
+            'duration': duration,
+            'tags': tags,
         }
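
The rewritten extractor drops the HTML/regex scraping entirely and instead reads two JSON endpoints: `/bff/store/video/<id>` for metadata and `/bff/store/video/<id>/private` for the file paths, treating a response that only carries a `teaser` entry as a preview. A rough standalone sketch of that flow outside yt-dlp (endpoint paths and field names are taken from the diff above; the site may additionally require cookies, headers, or login for full videos, and none of yt-dlp's parsing helpers are used here):

```python
import json
import urllib.request


def fetch_manyvids_info(video_id: str) -> dict:
    """Pull the same two JSON documents the updated extractor uses."""
    def get_data(url):
        req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        with urllib.request.urlopen(req) as resp:
            return json.load(resp).get('data') or {}

    base = f'https://www.manyvids.com/bff/store/video/{video_id}'
    info = get_data(base)                     # title, description, model, likes, views, ...
    video_urls = get_data(f'{base}/private')  # teaser / transcoded / full file paths

    is_preview = bool(video_urls.get('teaser')) and not video_urls.get('filepath')
    return {
        'id': video_id,
        'title': (info.get('title') or '') + (' (Preview)' if is_preview else ''),
        'uploader': (info.get('model') or {}).get('displayName'),
        'description': info.get('description'),
        'thumbnail': info.get('screenshot') or info.get('thumbnail'),
        'launch_date': info.get('launchDate'),  # ISO 8601 string per the diff
        'duration': info.get('videoDuration'),  # duration string, parsed by yt-dlp's parse_duration
        'tags': [t.get('label') for t in info.get('tagList') or []],
        'formats': {
            'preview': (video_urls.get('teaser') or {}).get('filepath'),
            'transcoded': video_urls.get('transcodedFilepath'),
            'full': video_urls.get('filepath'),
        },
    }


if __name__ == '__main__':
    print(json.dumps(fetch_manyvids_info('530341'), indent=2))
```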