Mirror of https://github.com/yt-dlp/yt-dlp.git (synced 2024-11-07 16:01:27 +01:00)

Compare commits: ea40489ae6...13a6f663c7

No commits in common. "ea40489ae64bef49cef0e793fc0bc11faf5f7982" and "13a6f663c7e4f165efd1e3be0a2b50b9de14bdde" have entirely different histories.
@@ -109,10 +109,5 @@
         "action": "add",
         "when": "1d03633c5a1621b9f3a756f0a4f9dc61fab3aeaa",
         "short": "[priority] **The release channels have been adjusted!**\n\t* [`master`](https://github.com/yt-dlp/yt-dlp-master-builds) builds are made after each push, containing the latest fixes (but also possibly bugs). This was previously the `nightly` channel.\n\t* [`nightly`](https://github.com/yt-dlp/yt-dlp-nightly-builds) builds are now made once a day, if there were any changes."
-    },
-    {
-        "action": "add",
-        "when": "f04b5bedad7b281bee9814686bba1762bae092eb",
-        "short": "[priority] Security: [[CVE-2023-46121](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-46121)] Patch [Generic Extractor MITM Vulnerability via Arbitrary Proxy Injection](https://github.com/yt-dlp/yt-dlp/security/advisories/GHSA-3ch3-jhc6-5r8x)\n\t- Disallow smuggling of arbitrary `http_headers`; extractors now only use specific headers"
     }
 ]
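The second changelog entry above (likely from devscripts/changelog_override.json) summarizes the CVE-2023-46121 fix: URLs handed to the generic extractor could smuggle arbitrary `http_headers`, so smuggled headers are now limited to specific ones. Below is a minimal stdlib sketch of that allowlisting idea; the smuggle format, the helper names and the allowlist contents are assumptions for illustration, not yt-dlp's actual implementation.

import json
import urllib.parse

# Hypothetical allowlist for illustration; the real fix limits extractors to
# specific headers rather than trusting everything found in smuggled data.
ALLOWED_SMUGGLED_HEADERS = {'Referer', 'Origin'}


def smuggle_url(url, data):
    # Concept sketch: carry extractor hints in the URL fragment as JSON.
    return url + '#__smuggle=' + urllib.parse.quote(json.dumps(data))


def unsmuggle_url(url, default=None):
    if '#__smuggle=' not in url:
        return url, default
    clean_url, _, payload = url.partition('#__smuggle=')
    return clean_url, json.loads(urllib.parse.unquote(payload))


def safe_http_headers(smuggled_data):
    # Keep only allowlisted headers instead of forwarding arbitrary ones.
    headers = (smuggled_data or {}).get('http_headers') or {}
    return {name: value for name, value in headers.items()
            if name in ALLOWED_SMUGGLED_HEADERS}


url, data = unsmuggle_url(smuggle_url(
    'https://example.com/video',
    {'http_headers': {'Referer': 'https://example.com/', 'X-Evil-Proxy': '1.2.3.4'}}))
print(safe_http_headers(data))  # {'Referer': 'https://example.com/'}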
@@ -1,9 +1,8 @@
-import base64
-import json
 import re
+import json
+import base64
 import time
 import urllib.parse
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import (
@@ -388,7 +387,7 @@ class CBCGemIE(InfoExtractor):
         url = re.sub(r'(Manifest\(.*?),format=[\w-]+(.*?\))', r'\1\2', base_url)
 
         secret_xml = self._download_xml(url, video_id, note='Downloading secret XML', fatal=False)
-        if not isinstance(secret_xml, xml.etree.ElementTree.Element):
+        if secret_xml is None:
             return
 
         for child in secret_xml:
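The CBCGemIE hunk above (apparently yt_dlp/extractor/cbc.py) contrasts the two guards: `secret_xml is None` lets non-Element results (for example `False` from a non-fatal download, or a parse failure) fall through to the `for child in secret_xml` loop, while the `isinstance` check only proceeds with a real XML tree. A small self-contained sketch of the difference; `fetch_xml` is an invented stand-in, not yt-dlp's `_download_xml`.

import xml.etree.ElementTree as ET


def fetch_xml(payload, fatal=False):
    # Stand-in for a non-fatal downloader: returns an Element on success
    # and False (not None) when the payload cannot be parsed.
    try:
        return ET.fromstring(payload)
    except ET.ParseError:
        if fatal:
            raise
        return False


for payload in ('<root><child/></root>', 'not xml at all'):
    doc = fetch_xml(payload)
    # A `doc is None` check would let False slip through and crash on iteration;
    # the isinstance check rejects every non-Element result.
    if not isinstance(doc, ET.Element):
        print('skipping unparsable payload')
        continue
    for child in doc:
        print('found child:', child.tag)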
@@ -2225,9 +2225,7 @@ class InfoExtractor:
             mpd_url, video_id,
             note='Downloading MPD VOD manifest' if note is None else note,
             errnote='Failed to download VOD manifest' if errnote is None else errnote,
-            fatal=False, data=data, headers=headers, query=query)
-        if not isinstance(mpd_doc, xml.etree.ElementTree.Element):
-            return None
+            fatal=False, data=data, headers=headers, query=query) or {}
         return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
 
     @staticmethod
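The `InfoExtractor` hunk above is the MPD VOD duration helper in yt_dlp/extractor/common.py: it reads the `mediaPresentationDuration` attribute from the MPD root, an ISO-8601 duration such as `PT1H2M30.5S`, and converts it to seconds. A rough stdlib sketch of that lookup and conversion follows; yt-dlp uses its own `parse_duration` and `int_or_none` helpers, and the simplified regex here (hours, minutes and seconds only) is an assumption.

import re
import xml.etree.ElementTree as ET

MPD_SAMPLE = '''<MPD xmlns="urn:mpeg:dash:schema:mpd:2011"
     mediaPresentationDuration="PT1H2M30.5S" type="static"/>'''


def parse_iso8601_duration(value):
    # Convert 'PTxHxMxS' into seconds; returns None when it does not match.
    match = re.fullmatch(
        r'PT(?:(?P<h>\d+)H)?(?:(?P<m>\d+)M)?(?:(?P<s>\d+(?:\.\d+)?)S)?', value or '')
    if not match or not any(match.groups()):
        return None
    hours, minutes, seconds = (float(x or 0) for x in match.groups())
    return hours * 3600 + minutes * 60 + seconds


mpd_doc = ET.fromstring(MPD_SAMPLE)
# Attributes carry no namespace prefix, so .get() works directly on the root element.
print(int(parse_iso8601_duration(mpd_doc.get('mediaPresentationDuration'))))  # 3750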
@@ -1,5 +1,4 @@
 import re
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..compat import compat_str
@@ -138,7 +137,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
         mediagen_doc = self._download_xml(
             mediagen_url, video_id, 'Downloading video urls', fatal=False)
 
-        if not isinstance(mediagen_doc, xml.etree.ElementTree.Element):
+        if mediagen_doc is None:
             return None
 
         item = mediagen_doc.find('./video/item')
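In the MTVServicesInfoExtractor hunk, the same guard pattern applies to the mediagen document, and even with a valid Element, `find('./video/item')` returns `None` when the path is absent, which callers also need to handle. A tiny sketch; the sample XML layout is a guess modelled on the `./video/item` path used above.

import xml.etree.ElementTree as ET

mediagen_doc = ET.fromstring(
    '<package><video><item src="rtmp://example.com/clip"/></video></package>')

item = mediagen_doc.find('./video/item')  # returns None when the path is missing
if item is None:
    print('no <item> element in the mediagen document')
else:
    print('media source:', item.get('src'))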
@@ -1,7 +1,6 @@
 import base64
 import json
 import re
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from .theplatform import ThePlatformIE, default_ns
@@ -804,8 +803,6 @@ class NBCStationsIE(InfoExtractor):
         smil = self._download_xml(
             f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
             note='Downloading SMIL data', query=query, fatal=is_live)
-        if not isinstance(smil, xml.etree.ElementTree.Element):
-            smil = None
         subtitles = self._parse_smil_subtitles(smil, default_ns) if smil is not None else {}
         for video in smil.findall(self._xpath_ns('.//video', default_ns)) if smil is not None else []:
             info['duration'] = float_or_none(remove_end(video.get('dur'), 'ms'), 1000)
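The NBCStationsIE hunk (apparently yt_dlp/extractor/nbc.py) walks a SMIL document from thePlatform: `_xpath_ns('.//video', default_ns)` expands each path segment with the SMIL namespace in Clark notation, and `dur` values such as `"123456ms"` are stripped of the suffix and divided by 1000. A hedged stdlib sketch of both steps; the namespace URI and the sample document are assumptions, since the real `default_ns` comes from yt-dlp's theplatform module.

import xml.etree.ElementTree as ET

# Assumed SMIL namespace for the example only.
SMIL_NS = 'http://www.w3.org/2005/SMIL21/Language'

smil = ET.fromstring(
    f'<smil xmlns="{SMIL_NS}"><body><seq>'
    '<video src="https://example.com/clip.mp4" dur="123456ms"/>'
    '</seq></body></smil>')


def xpath_ns(path, ns):
    # Prefix every real path segment with the Clark-notation namespace.
    return '/'.join(part if part in ('.', '..', '') else f'{{{ns}}}{part}'
                    for part in path.split('/'))


for video in smil.findall(xpath_ns('.//video', SMIL_NS)):
    dur = video.get('dur')  # e.g. '123456ms'
    seconds = float(dur.removesuffix('ms')) / 1000 if dur else None
    print(video.get('src'), seconds)  # https://example.com/clip.mp4 123.456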
@@ -1,6 +1,5 @@
 import re
 import urllib.parse
-import xml.etree.ElementTree
 
 from .common import InfoExtractor
 from ..utils import (
@@ -470,12 +469,11 @@ class SlidesLiveIE(InfoExtractor):
         slides = self._download_xml(
             player_info['slides_xml_url'], video_id, fatal=False,
             note='Downloading slides XML', errnote='Failed to download slides info')
-        if isinstance(slides, xml.etree.ElementTree.Element):
-            slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
-            for slide_id, slide in enumerate(slides.findall('./slide')):
-                slides_info.append((
-                    slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
-                    int_or_none(xpath_text(slide, './timeSec', 'time'))))
+        slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
+        for slide_id, slide in enumerate(slides.findall('./slide') if slides is not None else [], 1):
+            slides_info.append((
+                slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
+                int_or_none(xpath_text(slide, './timeSec', 'time'))))
 
         chapters, thumbnails = [], []
         if url_or_none(player_info.get('thumbnail')):
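In the SlidesLiveIE hunk, each `<slide>` element yields a `(slide_id, name, extension, time)` tuple; the name is later interpolated into `slide_url_template` and `timeSec` becomes a chapter boundary. A short sketch of that enumeration; the sample XML and the presentation id are invented to match the `./slide`, `./slideName` and `./timeSec` paths used above.

import xml.etree.ElementTree as ET

SLIDE_URL_TEMPLATE = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'

slides = ET.fromstring(
    '<slides>'
    '<slide><slideName>slide_001</slideName><timeSec>0</timeSec></slide>'
    '<slide><slideName>slide_002</slideName><timeSec>42</timeSec></slide>'
    '</slides>')

presentation_id = '12345'  # placeholder presentation id
slides_info = []
# enumerate(..., 1) numbers slides from 1, matching the second loop in the hunk.
for slide_id, slide in enumerate(slides.findall('./slide'), 1):
    name = slide.findtext('./slideName')
    time_sec = int(slide.findtext('./timeSec') or 0)
    slides_info.append((slide_id, name, '.jpg', time_sec))

for slide_id, name, ext, time_sec in slides_info:
    print(time_sec, SLIDE_URL_TEMPLATE % (presentation_id, name, ext))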