Compare commits

...

3 Commits

Author SHA1 Message Date
bashonly
e135a1f50a
make it make sense 2024-01-20 20:22:42 +00:00
bashonly
dcef8d3fe5
remove my debug output 2024-01-20 20:13:10 +00:00
bashonly
645c52121d
refactor 2024-01-20 19:43:44 +00:00

View File

@@ -20,6 +20,7 @@ from ..utils import (
get_element_by_id, get_element_by_id,
get_first, get_first,
int_or_none, int_or_none,
join_nonempty,
js_to_json, js_to_json,
merge_dicts, merge_dicts,
parse_count, parse_count,
@@ -902,7 +903,7 @@ class FacebookAdsIE(InfoExtractor):
'ext': 'mp4', 'ext': 'mp4',
'title': 'video by Kandao', 'title': 'video by Kandao',
'uploader': 'Kandao', 'uploader': 'Kandao',
'uploader_id': 774114102743284, 'uploader_id': '774114102743284',
'uploader_url': r're:^https?://.*', 'uploader_url': r're:^https?://.*',
'timestamp': 1702548330, 'timestamp': 1702548330,
'thumbnail': r're:^https?://.*', 'thumbnail': r're:^https?://.*',
@@ -915,7 +916,7 @@ class FacebookAdsIE(InfoExtractor):
'id': '893637265423481', 'id': '893637265423481',
'title': 'Jusqu\u2019\u00e0 -25% sur une s\u00e9lection de vins p\u00e9tillants italiens ', 'title': 'Jusqu\u2019\u00e0 -25% sur une s\u00e9lection de vins p\u00e9tillants italiens ',
'uploader': 'Eataly Paris Marais', 'uploader': 'Eataly Paris Marais',
'uploader_id': 2086668958314152, 'uploader_id': '2086668958314152',
'uploader_url': r're:^https?://.*', 'uploader_url': r're:^https?://.*',
'timestamp': 1703571529, 'timestamp': 1703571529,
'upload_date': '20231226', 'upload_date': '20231226',
@@ -924,71 +925,73 @@ class FacebookAdsIE(InfoExtractor):
'playlist_count': 3, 'playlist_count': 3,
}] }]
def _extract_from_url(self, url, video_id): _FORMATS_MAP = {
webpage = self._download_webpage(url, video_id) 'watermarked_video_sd_url': ('sd-wmk', 'SD, watermarked'),
'video_sd_url': ('sd', None),
'watermarked_video_hd_url': ('hd-wmk', 'HD, watermarked'),
'video_hd_url': ('hd', None),
}
def _extract_formats(self, video_dict):
    """Build mp4 format dicts from a snapshot video/card dict.

    Keeps only the keys listed in _FORMATS_MAP whose values are valid
    URLs; each surviving (key, url) pair becomes one format entry.
    """
    formats = []
    # {dict.items} turns the dict into (key, value) pairs; the filter
    # keeps pairs whose key is a known format key and whose value
    # passes url_or_none.
    for format_key, format_url in traverse_obj(video_dict, (
        {dict.items}, lambda _, v: v[0] in self._FORMATS_MAP and url_or_none(v[1])
    )):
        formats.append({
            'format_id': self._FORMATS_MAP[format_key][0],
            'format_note': self._FORMATS_MAP[format_key][1],
            'url': format_url,
            'ext': 'mp4',
            # Rank by position in _FORMATS_MAP (later entries are better)
            'quality': qualities(tuple(self._FORMATS_MAP))(format_key),
        })
    return formats
def _real_extract(self, url):
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)

    # The ad metadata is embedded in inline JS bootstrap calls; collect
    # every `s.handle({...});requireLazy(` payload and parse each as JSON.
    post_data = [self._parse_json(j, video_id, fatal=False)
                 for j in re.findall(r's\.handle\(({.*})\);requireLazy\(', webpage)]
    data = traverse_obj(post_data, (
        ..., 'require', ..., ..., ..., 'props', 'deeplinkAdCard', 'snapshot', {dict}), get_all=False)
    if not data:
        raise ExtractorError('Unable to extract ad data')

    title = data.get('title')
    if not title or title == '{{product.name}}':
        # '{{product.name}}' is an unfilled template placeholder; fall
        # back to e.g. "VIDEO by PageName" built from the snapshot fields
        title = join_nonempty('display_format', 'page_name', delim=' by ', from_dict=data)

    info_dict = traverse_obj(data, {
        # '{{product.description}}' is likewise a placeholder -> drop it
        'description': ('link_description', {str}, {lambda x: x if x != '{{product.description}}' else None}),
        'uploader': ('page_name', {str}),
        'uploader_id': ('page_id', {str_or_none}),
        'uploader_url': ('page_profile_uri', {url_or_none}),
        'timestamp': ('creation_time', {int_or_none}),
        'like_count': ('page_like_count', {int_or_none}),
    })

    entries = []
    # Both 'videos' and 'cards' may carry playable media; keep only
    # entries that expose at least one valid format URL
    for idx, entry in enumerate(traverse_obj(
        data, (('videos', 'cards'), lambda _, v: any([url_or_none(v[f]) for f in self._FORMATS_MAP]))), 1
    ):
        entries.append({
            'id': f'{video_id}_{idx}',
            'title': entry.get('title') or title,
            'description': entry.get('link_description') or info_dict.get('description'),
            'thumbnail': url_or_none(entry.get('video_preview_image_url')),
            'formats': self._extract_formats(entry),
        })

    if len(entries) == 1:
        # Single video: flatten into the top-level info dict
        info_dict.update(entries[0])
    elif len(entries) > 1:
        info_dict.update({
            'title': entries[0]['title'],
            'entries': entries,
            '_type': 'playlist',
        })
    info_dict['id'] = video_id

    return info_dict