Compare commits

...

8 Commits

Author SHA1 Message Date
NiChrosia
c732246470
Merge 34bf80c62d into b83ca24eb7 2024-11-10 21:09:04 +01:00
sepro
b83ca24eb7
[core] Catch broken Cryptodome installations (#11486)
Authored by: seproDev
2024-11-10 00:53:49 +01:00
bashonly
240a7d43c8
[build] Pin websockets version to >=13.0,<14 (#11488)
websockets 14.0 causes CI test failures (a lot more of them)

Authored by: bashonly
2024-11-09 23:46:47 +00:00
bashonly
f13df591d4
[build] Enable attestations for trusted publishing (#11420)
Reverts 428ffb75aa

Authored by: bashonly
2024-11-09 23:26:02 +00:00
NiChrosia
34bf80c62d fix: keep invalid Windows characters if --no-windows-filenames is specified 2023-10-28 16:49:40 -05:00
NiChrosia
0ed18251d2 fix: default to False for params["windowsfilenames"] when absent 2023-10-28 14:13:03 -05:00
NiChrosia
773b554bf5 style: fix invalid styling 2023-10-28 14:05:41 -05:00
NiChrosia
5982686f86 fix: don't replace characters invalid on Windows when --no-windows-filenames is passed 2023-10-28 13:45:27 -05:00
12 changed files with 96 additions and 24 deletions

View File

@@ -504,7 +504,8 @@ jobs:
- windows32 - windows32
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/download-artifact@v4 - name: Download artifacts
uses: actions/download-artifact@v4
with: with:
path: artifact path: artifact
pattern: build-bin-* pattern: build-bin-*

View File

@@ -28,3 +28,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.MASTER_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -41,3 +41,20 @@ jobs:
actions: write # For cleaning up cache actions: write # For cleaning up cache
id-token: write # mandatory for trusted publishing id-token: write # mandatory for trusted publishing
secrets: inherit secrets: inherit
publish_pypi:
needs: [release]
if: vars.NIGHTLY_PYPI_PROJECT != ''
runs-on: ubuntu-latest
permissions:
id-token: write # mandatory for trusted publishing
steps:
- name: Download artifacts
uses: actions/download-artifact@v4
with:
path: dist
name: build-pypi
- name: Publish to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
verbose: true

View File

@@ -2,10 +2,6 @@ name: Release
on: on:
workflow_call: workflow_call:
inputs: inputs:
prerelease:
required: false
default: true
type: boolean
source: source:
required: false required: false
default: '' default: ''
@@ -18,6 +14,10 @@ on:
required: false required: false
default: '' default: ''
type: string type: string
prerelease:
required: false
default: true
type: boolean
workflow_dispatch: workflow_dispatch:
inputs: inputs:
source: source:
@@ -278,11 +278,20 @@ jobs:
make clean-cache make clean-cache
python -m build --no-isolation . python -m build --no-isolation .
- name: Upload artifacts
if: github.event_name != 'workflow_dispatch'
uses: actions/upload-artifact@v4
with:
name: build-pypi
path: |
dist/*
compression-level: 0
- name: Publish to PyPI - name: Publish to PyPI
if: github.event_name == 'workflow_dispatch'
uses: pypa/gh-action-pypi-publish@release/v1 uses: pypa/gh-action-pypi-publish@release/v1
with: with:
verbose: true verbose: true
attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)
publish: publish:
needs: [prepare, build] needs: [prepare, build]

View File

@@ -52,7 +52,7 @@ default = [
"pycryptodomex", "pycryptodomex",
"requests>=2.32.2,<3", "requests>=2.32.2,<3",
"urllib3>=1.26.17,<3", "urllib3>=1.26.17,<3",
"websockets>=13.0", "websockets>=13.0,<14",
] ]
curl-cffi = [ curl-cffi = [
"curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'", "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

View File

@@ -161,6 +161,11 @@ class TestUtil(unittest.TestCase):
self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False)) self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False))
self.assertEqual('this - that', sanitize_filename('this: that', is_id=False)) self.assertEqual('this - that', sanitize_filename('this: that', is_id=False))
self.assertEqual('abc_<>\\*|de', sanitize_filename('abc/<>\\*|de', keep_bad_win_chars=True, is_id=False))
self.assertEqual('xxx_<>\\*|', sanitize_filename('xxx/<>\\*|', keep_bad_win_chars=True, is_id=False))
self.assertEqual('yes? no', sanitize_filename('yes? no', keep_bad_win_chars=True, is_id=False))
self.assertEqual('this: that', sanitize_filename('this: that', keep_bad_win_chars=True, is_id=False))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T') self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä' aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut) self.assertEqual(sanitize_filename(aumlaut), aumlaut)
@@ -171,6 +176,10 @@ class TestUtil(unittest.TestCase):
sanitize_filename('New World record at 0:12:34'), sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34') 'New World record at 0_12_34')
self.assertEqual(
sanitize_filename('New World record at 0:12:34', keep_bad_win_chars=True),
'New World record at 0:12:34')
self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf') self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf') self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf') self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf')
@@ -222,6 +231,10 @@ class TestUtil(unittest.TestCase):
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw') self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI') self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
self.assertEqual(sanitize_filename('_n_cd26wFpw', keep_bad_win_chars=True, is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', keep_bad_win_chars=True, is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', keep_bad_win_chars=True, is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self): def test_sanitize_path(self):
with unittest.mock.patch('sys.platform', 'win32'): with unittest.mock.patch('sys.platform', 'win32'):
self._test_sanitize_path() self._test_sanitize_path()

View File

@@ -1311,7 +1311,9 @@ class YoutubeDL:
na = self.params.get('outtmpl_na_placeholder', 'NA') na = self.params.get('outtmpl_na_placeholder', 'NA')
def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')): def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
return sanitize_filename(str(value), restricted=restricted, is_id=( return sanitize_filename(
str(value), self.params.get('keep_bad_win_chars', False), restricted=restricted,
is_id=(
bool(re.search(r'(^|[_.])id(\.|$)', key)) bool(re.search(r'(^|[_.])id(\.|$)', key))
if 'filename-sanitization' in self.params['compat_opts'] if 'filename-sanitization' in self.params['compat_opts']
else NO_DEFAULT)) else NO_DEFAULT))

View File

@@ -833,6 +833,7 @@ def parse_options(argv=None):
'autonumber_start': opts.autonumber_start, 'autonumber_start': opts.autonumber_start,
'restrictfilenames': opts.restrictfilenames, 'restrictfilenames': opts.restrictfilenames,
'windowsfilenames': opts.windowsfilenames, 'windowsfilenames': opts.windowsfilenames,
'keep_bad_win_chars': opts.keep_bad_win_chars,
'ignoreerrors': opts.ignoreerrors, 'ignoreerrors': opts.ignoreerrors,
'force_generic_extractor': opts.force_generic_extractor, 'force_generic_extractor': opts.force_generic_extractor,
'allowed_extractors': opts.allowed_extractors or ['default'], 'allowed_extractors': opts.allowed_extractors or ['default'],

View File

@@ -24,7 +24,7 @@ try:
from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401 from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5 # noqa: F401
from Crypto.Hash import CMAC, SHA1 # noqa: F401 from Crypto.Hash import CMAC, SHA1 # noqa: F401
from Crypto.PublicKey import RSA # noqa: F401 from Crypto.PublicKey import RSA # noqa: F401
except ImportError: except (ImportError, OSError):
__version__ = f'broken {__version__}'.strip() __version__ = f'broken {__version__}'.strip()

View File

@@ -1025,7 +1025,7 @@ class InfoExtractor:
if len(basen) > trim_length: if len(basen) > trim_length:
h = '___' + hashlib.md5(basen.encode()).hexdigest() h = '___' + hashlib.md5(basen.encode()).hexdigest()
basen = basen[:trim_length - len(h)] + h basen = basen[:trim_length - len(h)] + h
filename = sanitize_filename(f'{basen}.dump', restricted=True) filename = sanitize_filename(f'{basen}.dump', self.get_param('keep_bad_win_chars', False), restricted=True)
# Working around MAX_PATH limitation on Windows (see # Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt': if compat_os_name == 'nt':

View File

@@ -267,6 +267,10 @@ def create_parser():
out_dict[key] = [*out_dict.get(key, []), val] if append else val out_dict[key] = [*out_dict.get(key, []), val] if append else val
setattr(parser.values, option.dest, out_dict) setattr(parser.values, option.dest, out_dict)
def _store_multiple_callback(option, opt_str, value, parser, values):
for key, value in values.items():
setattr(parser.values, key, value)
def when_prefix(default): def when_prefix(default):
return { return {
'default': {}, 'default': {},
@@ -1372,7 +1376,13 @@ def create_parser():
help='Force filenames to be Windows-compatible') help='Force filenames to be Windows-compatible')
filesystem.add_option( filesystem.add_option(
'--no-windows-filenames', '--no-windows-filenames',
action='store_false', dest='windowsfilenames', action='callback', dest='keep_bad_win_chars', default=False, callback=_store_multiple_callback,
callback_kwargs={
'values': {
'windowsfilenames': False,
'keep_bad_win_chars': True
}
},
help='Make filenames Windows-compatible only if using Windows (default)') help='Make filenames Windows-compatible only if using Windows (default)')
filesystem.add_option( filesystem.add_option(
'--trim-filenames', '--trim-file-names', metavar='LENGTH', '--trim-filenames', '--trim-file-names', metavar='LENGTH',

View File

@@ -626,8 +626,9 @@ def timeconvert(timestr):
return timestamp return timestamp
def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT): def sanitize_filename(s, keep_bad_win_chars=False, restricted=False, is_id=NO_DEFAULT):
"""Sanitizes a string so it could be used as part of a filename. """Sanitizes a string so it could be used as part of a filename.
@param keep_bad_win_chars Whether to keep characters invalid on Windows
@param restricted Use a stricter subset of allowed characters @param restricted Use a stricter subset of allowed characters
@param is_id Whether this is an ID that should be kept unchanged if possible. @param is_id Whether this is an ID that should be kept unchanged if possible.
If unset, yt-dlp's new sanitization rules are in effect If unset, yt-dlp's new sanitization rules are in effect
@@ -640,16 +641,16 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
return ACCENT_CHARS[char] return ACCENT_CHARS[char]
elif not restricted and char == '\n': elif not restricted and char == '\n':
return '\0 ' return '\0 '
elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\': elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\' and not keep_bad_win_chars:
# Replace with their full-width unicode counterparts # Replace with their full-width unicode counterparts
return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0)) return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
elif char == '?' or ord(char) < 32 or ord(char) == 127: elif (not keep_bad_win_chars and char == '?') or ord(char) < 32 or ord(char) == 127:
return '' return ''
elif char == '"': elif not keep_bad_win_chars and char == '"':
return '' if restricted else '\'' return '' if restricted else '\''
elif char == ':': elif not keep_bad_win_chars and char == ':':
return '\0_\0-' if restricted else '\0 \0-' return '\0_\0-' if restricted else '\0 \0-'
elif char in '\\/|*<>': elif (not keep_bad_win_chars and char in '\\|*<>') or char == '/':
return '\0_' return '\0_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127): if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
return '' if unicodedata.category(char)[0] in 'CM' else '\0_' return '' if unicodedata.category(char)[0] in 'CM' else '\0_'
@@ -658,7 +659,8 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
# Replace look-alike Unicode glyphs # Replace look-alike Unicode glyphs
if restricted and (is_id is NO_DEFAULT or not is_id): if restricted and (is_id is NO_DEFAULT or not is_id):
s = unicodedata.normalize('NFKC', s) s = unicodedata.normalize('NFKC', s)
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) # Handle timestamps s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0) if keep_bad_win_chars
else m.group(0).replace(':', '_'), s) # Handle timestamps
result = ''.join(map(replace_insane, s)) result = ''.join(map(replace_insane, s))
if is_id is NO_DEFAULT: if is_id is NO_DEFAULT:
result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars