Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-26 17:21:23 +01:00)

Compare commits: 8 commits, 7a048f788b ... c732246470
Author | SHA1 | Date
---|---|---
| c732246470 |
| b83ca24eb7 |
| 240a7d43c8 |
| f13df591d4 |
| 34bf80c62d |
| 0ed18251d2 |
| 773b554bf5 |
| 5982686f86 |
.github/workflows/build.yml (vendored): 3 changed lines

@@ -504,7 +504,8 @@ jobs:
       - windows32
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/download-artifact@v4
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
         with:
           path: artifact
           pattern: build-bin-*

.github/workflows/release-master.yml (vendored): 17 changed lines

@@ -28,3 +28,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.MASTER_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

.github/workflows/release-nightly.yml (vendored): 17 changed lines

@@ -41,3 +41,20 @@ jobs:
       actions: write # For cleaning up cache
       id-token: write # mandatory for trusted publishing
     secrets: inherit
+
+  publish_pypi:
+    needs: [release]
+    if: vars.NIGHTLY_PYPI_PROJECT != ''
+    runs-on: ubuntu-latest
+    permissions:
+      id-token: write # mandatory for trusted publishing
+    steps:
+      - name: Download artifacts
+        uses: actions/download-artifact@v4
+        with:
+          path: dist
+          name: build-pypi
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          verbose: true

.github/workflows/release.yml (vendored): 19 changed lines

@@ -2,10 +2,6 @@ name: Release
 on:
   workflow_call:
     inputs:
-      prerelease:
-        required: false
-        default: true
-        type: boolean
       source:
         required: false
         default: ''
@@ -18,6 +14,10 @@ on:
         required: false
         default: ''
         type: string
+      prerelease:
+        required: false
+        default: true
+        type: boolean
   workflow_dispatch:
     inputs:
       source:
@@ -278,11 +278,20 @@ jobs:
           make clean-cache
           python -m build --no-isolation .
 
+      - name: Upload artifacts
+        if: github.event_name != 'workflow_dispatch'
+        uses: actions/upload-artifact@v4
+        with:
+          name: build-pypi
+          path: |
+            dist/*
+          compression-level: 0
+
       - name: Publish to PyPI
+        if: github.event_name == 'workflow_dispatch'
         uses: pypa/gh-action-pypi-publish@release/v1
         with:
           verbose: true
-          attestations: false # Currently doesn't work w/ reusable workflows (breaks nightly)
 
   publish:
     needs: [prepare, build]

@@ -52,7 +52,7 @@ default = [
     "pycryptodomex",
     "requests>=2.32.2,<3",
     "urllib3>=1.26.17,<3",
-    "websockets>=13.0",
+    "websockets>=13.0,<14",
 ]
 curl-cffi = [
     "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'",

@@ -161,6 +161,11 @@ class TestUtil(unittest.TestCase):
         self.assertEqual('yes no', sanitize_filename('yes? no', is_id=False))
         self.assertEqual('this - that', sanitize_filename('this: that', is_id=False))
 
+        self.assertEqual('abc_<>\\*|de', sanitize_filename('abc/<>\\*|de', keep_bad_win_chars=True, is_id=False))
+        self.assertEqual('xxx_<>\\*|', sanitize_filename('xxx/<>\\*|', keep_bad_win_chars=True, is_id=False))
+        self.assertEqual('yes? no', sanitize_filename('yes? no', keep_bad_win_chars=True, is_id=False))
+        self.assertEqual('this: that', sanitize_filename('this: that', keep_bad_win_chars=True, is_id=False))
+
         self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
         aumlaut = 'ä'
         self.assertEqual(sanitize_filename(aumlaut), aumlaut)
@@ -171,6 +176,10 @@ class TestUtil(unittest.TestCase):
             sanitize_filename('New World record at 0:12:34'),
             'New World record at 0_12_34')
 
+        self.assertEqual(
+            sanitize_filename('New World record at 0:12:34', keep_bad_win_chars=True),
+            'New World record at 0:12:34')
+
         self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf')
         self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
         self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf')
@@ -222,6 +231,10 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
         self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
 
+        self.assertEqual(sanitize_filename('_n_cd26wFpw', keep_bad_win_chars=True, is_id=True), '_n_cd26wFpw')
+        self.assertEqual(sanitize_filename('_BD_eEpuzXw', keep_bad_win_chars=True, is_id=True), '_BD_eEpuzXw')
+        self.assertEqual(sanitize_filename('N0Y__7-UOdI', keep_bad_win_chars=True, is_id=True), 'N0Y__7-UOdI')
+
     def test_sanitize_path(self):
         with unittest.mock.patch('sys.platform', 'win32'):
             self._test_sanitize_path()

@@ -1311,10 +1311,12 @@ class YoutubeDL:
         na = self.params.get('outtmpl_na_placeholder', 'NA')
 
         def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
-            return sanitize_filename(str(value), restricted=restricted, is_id=(
-                bool(re.search(r'(^|[_.])id(\.|$)', key))
-                if 'filename-sanitization' in self.params['compat_opts']
-                else NO_DEFAULT))
+            return sanitize_filename(
+                str(value), self.params.get('keep_bad_win_chars', False), restricted=restricted,
+                is_id=(
+                    bool(re.search(r'(^|[_.])id(\.|$)', key))
+                    if 'filename-sanitization' in self.params['compat_opts']
+                    else NO_DEFAULT))
 
         sanitizer = sanitize if callable(sanitize) else filename_sanitizer
         sanitize = bool(sanitize)

@@ -833,6 +833,7 @@ def parse_options(argv=None):
         'autonumber_start': opts.autonumber_start,
         'restrictfilenames': opts.restrictfilenames,
         'windowsfilenames': opts.windowsfilenames,
+        'keep_bad_win_chars': opts.keep_bad_win_chars,
         'ignoreerrors': opts.ignoreerrors,
         'force_generic_extractor': opts.force_generic_extractor,
         'allowed_extractors': opts.allowed_extractors or ['default'],

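Taken together, the YoutubeDL and parse_options hunks above thread the new keep_bad_win_chars setting from the CLI into the output-template sanitizer. A minimal usage sketch, assuming this branch is installed; the option does not exist in upstream yt-dlp, and the URL is a placeholder:

from yt_dlp import YoutubeDL

# 'keep_bad_win_chars' is the params key added above; it is read in filename_sanitizer
# via self.params.get('keep_bad_win_chars', False). Branch-specific, not upstream yt-dlp.
opts = {
    'windowsfilenames': False,                # do not force Windows-compatible names
    'keep_bad_win_chars': True,               # keep characters like ':', '?', '|' in filenames
    'outtmpl': '%(title)s [%(id)s].%(ext)s',
}
with YoutubeDL(opts) as ydl:
    ydl.download(['https://example.com/video'])  # placeholder URL
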
@@ -24,7 +24,7 @@ try:
     from Crypto.Cipher import AES, PKCS1_OAEP, Blowfish, PKCS1_v1_5  # noqa: F401
     from Crypto.Hash import CMAC, SHA1  # noqa: F401
     from Crypto.PublicKey import RSA  # noqa: F401
-except ImportError:
+except (ImportError, OSError):
     __version__ = f'broken {__version__}'.strip()
 
 

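The broadened except clause above guards against native-extension import failures that surface as OSError (for example a broken or mismatched shared library) rather than ImportError. A standalone sketch of the same pattern, not the actual dependencies module:

# Sketch only: importing a compiled extension such as PyCryptodome's AES module can
# raise OSError (broken shared library) instead of ImportError, so both are caught.
try:
    from Crypto.Cipher import AES  # noqa: F401
except (ImportError, OSError):
    AES = None  # mark the dependency as unavailable/broken
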
@@ -1025,7 +1025,7 @@ class InfoExtractor:
         if len(basen) > trim_length:
             h = '___' + hashlib.md5(basen.encode()).hexdigest()
             basen = basen[:trim_length - len(h)] + h
-        filename = sanitize_filename(f'{basen}.dump', restricted=True)
+        filename = sanitize_filename(f'{basen}.dump', self.get_param('keep_bad_win_chars', False), restricted=True)
         # Working around MAX_PATH limitation on Windows (see
         # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
         if compat_os_name == 'nt':

@@ -267,6 +267,10 @@ def create_parser():
             out_dict[key] = [*out_dict.get(key, []), val] if append else val
         setattr(parser.values, option.dest, out_dict)
 
+    def _store_multiple_callback(option, opt_str, value, parser, values):
+        for key, value in values.items():
+            setattr(parser.values, key, value)
+
     def when_prefix(default):
         return {
             'default': {},
@@ -1372,7 +1376,13 @@ def create_parser():
         help='Force filenames to be Windows-compatible')
     filesystem.add_option(
         '--no-windows-filenames',
-        action='store_false', dest='windowsfilenames',
+        action='callback', dest='keep_bad_win_chars', default=False, callback=_store_multiple_callback,
+        callback_kwargs={
+            'values': {
+                'windowsfilenames': False,
+                'keep_bad_win_chars': True
+            }
+        },
         help='Make filenames Windows-compatible only if using Windows (default)')
     filesystem.add_option(
         '--trim-filenames', '--trim-file-names', metavar='LENGTH',

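The --no-windows-filenames change above relies on an optparse callback that writes several destinations at once, since the flag must now both clear windowsfilenames and set keep_bad_win_chars. A standalone sketch of the pattern using plain optparse (names mirror the diff but are illustrative):

import optparse


def store_multiple_callback(option, opt_str, value, parser, values):
    # `values` arrives via callback_kwargs and maps dest names to the values to store
    for key, val in values.items():
        setattr(parser.values, key, val)


parser = optparse.OptionParser()
parser.add_option(
    '--no-windows-filenames',
    action='callback', dest='keep_bad_win_chars', default=False,
    callback=store_multiple_callback,
    callback_kwargs={'values': {'windowsfilenames': False, 'keep_bad_win_chars': True}},
    help='Make filenames Windows-compatible only if using Windows (default)')

opts, _ = parser.parse_args(['--no-windows-filenames'])
print(opts.windowsfilenames, opts.keep_bad_win_chars)  # False True

Note that in this sketch opts.windowsfilenames only exists when the flag is actually passed; in the full parser the paired --windows-filenames option defines that destination and its default.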
@@ -626,11 +626,12 @@ def timeconvert(timestr):
     return timestamp
 
 
-def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
+def sanitize_filename(s, keep_bad_win_chars=False, restricted=False, is_id=NO_DEFAULT):
     """Sanitizes a string so it could be used as part of a filename.
+    @param keep_bad_win_chars  Whether to keep characters invalid on Windows
     @param restricted   Use a stricter subset of allowed characters
     @param is_id        Whether this is an ID that should be kept unchanged if possible.
                         If unset, yt-dlp's new sanitization rules are in effect
     """
     if s == '':
         return ''
@@ -640,16 +641,16 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
             return ACCENT_CHARS[char]
         elif not restricted and char == '\n':
             return '\0 '
-        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
+        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\' and not keep_bad_win_chars:
             # Replace with their full-width unicode counterparts
             return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
-        elif char == '?' or ord(char) < 32 or ord(char) == 127:
+        elif (not keep_bad_win_chars and char == '?') or ord(char) < 32 or ord(char) == 127:
             return ''
-        elif char == '"':
+        elif not keep_bad_win_chars and char == '"':
             return '' if restricted else '\''
-        elif char == ':':
+        elif not keep_bad_win_chars and char == ':':
             return '\0_\0-' if restricted else '\0 \0-'
-        elif char in '\\/|*<>':
+        elif (not keep_bad_win_chars and char in '\\|*<>') or char == '/':
             return '\0_'
         if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace() or ord(char) > 127):
             return '' if unicodedata.category(char)[0] in 'CM' else '\0_'
@@ -658,7 +659,8 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
     # Replace look-alike Unicode glyphs
     if restricted and (is_id is NO_DEFAULT or not is_id):
         s = unicodedata.normalize('NFKC', s)
-    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
+    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0) if keep_bad_win_chars
+               else m.group(0).replace(':', '_'), s)  # Handle timestamps
     result = ''.join(map(replace_insane, s))
     if is_id is NO_DEFAULT:
         result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars

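Based on the tests earlier in this compare, the net effect of the sanitize_filename changes is roughly the following. A behavior sketch, assuming this branch; upstream sanitize_filename has no keep_bad_win_chars parameter:

from yt_dlp.utils import sanitize_filename

# Default behavior: Windows-unsafe characters are replaced
print(sanitize_filename('this: that', is_id=False))                             # 'this - that'
# With keep_bad_win_chars=True they are preserved; only '/' is still replaced
print(sanitize_filename('this: that', keep_bad_win_chars=True, is_id=False))    # 'this: that'
print(sanitize_filename('abc/<>\\*|de', keep_bad_win_chars=True, is_id=False))  # 'abc_<>\*|de'
# Timestamp colons are also left alone
print(sanitize_filename('New World record at 0:12:34', keep_bad_win_chars=True))
# -> 'New World record at 0:12:34'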