Compare commits

...

6 Commits

Author SHA1 Message Date
dirkf
aa0c48f8e7
Merge 7d07e6bf6b7578f35ad09ee37da83d2b235adc39 into 3eb8d22ddb8982ca4fb56bb7a8d6517538bf14c6
2025-04-01 09:36:07 +02:00

dirkf
3eb8d22ddb
[JSInterp] Temporary fix for #33102
2025-03-31 04:21:09 +01:00

dirkf
4e714f9df1
[Misc] Correct [_]IE_DESC/NAME in a few IEs
* thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
* also add documenting comment in `InfoExtractor`
2025-03-26 12:47:19 +00:00

dirkf
c1ea7f5a24
[ITV] Mark ITVX not working
* update old shim
* correct [_]IE_DESC
2025-03-26 12:17:49 +00:00

df
7d07e6bf6b
Adapt yt-dlp#187 to improve NBC metadata
2021-08-28 05:53:34 +01:00

df
23bd0b20d2
Support more URL formats and AMP embedded video for NBCNews
2021-08-28 05:53:33 +01:00
7 changed files with 91 additions and 32 deletions

View File

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
 class BokeCCIE(BokeCCBaseIE):
-    _IE_DESC = 'CC视频'
+    IE_DESC = 'CC视频'
     _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
     _TESTS = [{

View File

@@ -9,7 +9,7 @@ from ..utils import (
 class CloudyIE(InfoExtractor):
-    _IE_DESC = 'cloudy.ec'
+    IE_DESC = 'cloudy.ec'
     _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'https://www.cloudy.ec/v/af511e2527aac',

View File

@@ -422,6 +422,8 @@ class InfoExtractor(object):
     _GEO_COUNTRIES = None
     _GEO_IP_BLOCKS = None
     _WORKING = True
+    # supply this in public subclasses: used in supported sites list, etc
+    # IE_DESC = 'short description of IE'
     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""

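Reviewer note (illustrative, not part of the diff): the comment added above documents the convention that the [_]IE_DESC/NAME fixes in this compare enforce -- public extractors expose IE_DESC (and IE_NAME), while underscore-prefixed attributes stay internal. A minimal sketch of a hypothetical public subclass supplying these attributes; the extractor name and URL are made up:

    # illustrative only: hypothetical extractor following the documented convention
    from youtube_dl.extractor.common import InfoExtractor

    class ExampleIE(InfoExtractor):
        IE_NAME = 'example'                # public: shown as the extractor key
        IE_DESC = 'example.com videos'     # public: used in the supported sites list
        _VALID_URL = r'https?://(?:www\.)?example\.com/v/(?P<id>\w+)'  # internal
        _WORKING = True                    # internal: set False to mark the IE broken

        def _real_extract(self, url):
            video_id = self._match_id(url)
            # a real extractor would return formats and richer metadata
            return {'id': video_id, 'title': video_id, 'url': url}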
View File

@@ -35,15 +35,6 @@ from ..utils import (
 class ITVBaseIE(InfoExtractor):
-    def _search_nextjs_data(self, webpage, video_id, **kw):
-        transform_source = kw.pop('transform_source', None)
-        fatal = kw.pop('fatal', True)
-        return self._parse_json(
-            self._search_regex(
-                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
-                webpage, 'next.js data', group='js', fatal=fatal, **kw),
-            video_id, transform_source=transform_source, fatal=fatal)
     def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
         if errnote is False:
             return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
 class ITVIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
-    _IE_DESC = 'ITVX'
+    IE_DESC = 'ITVX'
+    _WORKING = False
     _TESTS = [{
         'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@
                 'ext': determine_ext(href, 'vtt'),
             })
-        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
         video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
         title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
         info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@
 class ITVBTCCIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
-    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    IE_DESC = 'ITV articles: News, British Touring Car Championship'
     _TESTS = [{
         'note': 'British Touring Car Championship',
         'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',

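Reviewer note (illustrative, not part of the diff): the override removed above duplicated a shared __NEXT_DATA__ lookup; the call site now passes default={} (an already parsed object) instead of default='{}' (a JSON string). A self-contained sketch of the same extraction pattern -- the helper name and sample page are made up, this is not youtube-dl's actual helper:

    import json
    import re

    def search_nextjs_data(webpage, default=None):
        """Return the parsed __NEXT_DATA__ payload, or `default` when it is absent."""
        m = re.search(
            r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
            webpage)
        return json.loads(m.group('js')) if m else default

    page = '<script id="__NEXT_DATA__" type="application/json">{"props": {}}</script>'
    print(search_nextjs_data(page, default={}))  # {'props': {}}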
View File

@@ -7,9 +7,14 @@ import re
 from .common import InfoExtractor
 from .theplatform import ThePlatformIE
 from .adobepass import AdobePassIE
-from ..compat import compat_urllib_parse_unquote
+from ..compat import (
+    compat_str,
+    compat_urllib_parse_unquote
+)
 from ..utils import (
+    ExtractorError,
     int_or_none,
+    parse_age_limit,
     parse_duration,
     smuggle_url,
     try_get,
@@ -18,7 +23,7 @@ from ..utils import (
 )
-class NBCIE(AdobePassIE):
+class NBCIE(ThePlatformIE):
     _VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>n?\d+))'
     _TESTS = [
@@ -131,8 +136,13 @@ class NBCIE(AdobePassIE):
             'mbr': 'true',
             'manifest': 'm3u',
         }
-        video_id = video_data['mpxGuid']
-        title = video_data['secondaryTitle']
+        video_id = try_get(video_data, lambda x: x['mpxGuid'])
+        if not video_id:
+            raise ExtractorError('Empty or no metadata from NBC GraphQL API', expected=True)
+        tp_path = ('NnzsPC/media/guid/%s/%s' %
+                   (video_data.get('mpxAccountId', '2410887629'), video_id))
+        tpm = self._download_theplatform_metadata(tp_path, video_id)
+        title = tpm.get('title') or video_data['secondaryTitle']
         if video_data.get('locked'):
             resource = self._get_mvpd_resource(
                 video_data.get('resourceId') or 'nbcentertainment',
@@ -142,17 +152,27 @@ class NBCIE(AdobePassIE):
         theplatform_url = smuggle_url(update_url_query(
             'http://link.theplatform.com/s/NnzsPC/media/guid/%s/%s' % (video_data.get('mpxAccountId') or '2410887629', video_id),
             query), {'force_smil_url': True})
+        episode_number = int_or_none(
+            video_data.get('episodeNumber'),
+            default=int_or_none(tpm.get('nbcu$airOrder')))
+        rating = video_data.get('rating',
+                                try_get(tpm, lambda x: x['ratings'][0]['rating']))
+        season_number = int_or_none(
+            video_data.get('seasonNumber'),
+            default=int_or_none(tpm.get('nbcu$seasonNumber')))
+        series = video_data.get('seriesShortTitle', tpm.get('nbcu$seriesShortTitle'))
         return {
             '_type': 'url_transparent',
             'id': video_id,
             'title': title,
             'url': theplatform_url,
-            'description': video_data.get('description'),
-            'tags': video_data.get('keywords'),
-            'season_number': int_or_none(video_data.get('seasonNumber')),
-            'episode_number': int_or_none(video_data.get('episodeNumber')),
+            'description': video_data.get('description') or tpm.get('description'),
+            'tags': video_data.get('keywords') or tpm.get('keywords'),
+            'season_number': season_number,
+            'episode_number': episode_number,
             'episode': title,
-            'series': video_data.get('seriesShortTitle'),
+            'series': series,
+            'age_limit': parse_age_limit(rating),
             'ie_key': 'ThePlatform',
         }
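Reviewer note (illustrative, not part of the diff): the metadata fields above prefer the value from NBC's GraphQL payload and fall back to the equivalent ThePlatform key (episodeNumber vs nbcu$airOrder, rating vs ratings[0].rating, and so on). A short sketch of that fallback pattern; the sample dicts are made up:

    from youtube_dl.utils import int_or_none, try_get

    video_data = {}  # hypothetical GraphQL payload with the fields missing
    tpm = {'nbcu$airOrder': '7', 'ratings': [{'rating': 'TV-14'}]}  # hypothetical ThePlatform metadata

    episode_number = int_or_none(
        video_data.get('episodeNumber'),
        default=int_or_none(tpm.get('nbcu$airOrder')))
    rating = video_data.get('rating',
                            try_get(tpm, lambda x: x['ratings'][0]['rating']))
    print(episode_number, rating)  # 7 TV-14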
@@ -287,7 +307,7 @@ class NBCSportsStreamIE(AdobePassIE):
 class NBCNewsIE(ThePlatformIE):
-    _VALID_URL = r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)'
+    _VALID_URL = r'(?x)https?://(?:www[0-9]?\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?#]+)'
     _TESTS = [
         {
@@ -370,20 +390,40 @@ class NBCNewsIE(ThePlatformIE):
             # From http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html
             'url': 'http://www.nbcnews.com/widget/video-embed/701714499682',
             'only_matching': True,
+        }, {
+            'url': 'https://www.nbcnews.com/news/amp/ncna1276021',
+            'md5': '948bf2f3b0a8b0ea595c424e0850e7a2',
+            'info_dict': {
+                'id': 'ncna1276021',
+                'ext': 'mp4',
+                'title': 'Devastating Dixie Fire consumes Californian town',
+                'description': 'The town of Greenville was destroyed in around three hours by California\'s largest active wildfire.',
+                'thumbnail': r're:^https?://.*\.jpg$',
+                'timestamp': 1628152660,
+                'upload_date': '20210805',
+            },
         },
     ]
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-        webpage = self._download_webpage(url, video_id)
-        data = self._parse_json(self._search_regex(
+    def _old_real_extract(self, url, video_id, webpage=None):
+        if not webpage:
+            webpage = self._download_webpage(url, video_id)
+        data = self._search_regex(
             r'<script[^>]+id="__NEXT_DATA__"[^>]*>({.+?})</script>',
-            webpage, 'bootstrap json'), video_id)['props']['initialState']
+            webpage, 'bootstrap json', fatal=False)
+        if data:
+            data = self._parse_json(data, video_id, fatal=False)
+        if data:
+            data = try_get(data, lambda x: x['props']['initialState'], dict)
+        if not data:
+            return
         video_data = try_get(data, lambda x: x['video']['current'], dict)
         if not video_data:
-            video_data = data['article']['content'][0]['primaryMedia']['video']
-        title = video_data['headline']['primary']
+            video_data = try_get(data, lambda x: x['article']['content'][0]['primaryMedia']['video'], dict)
+        title = try_get(video_data, lambda x: x['headline']['primary'], compat_str)
+        if not title:
+            return
         formats = []
         for va in video_data.get('videoAssets', []):
@@ -432,6 +472,28 @@
             'subtitles': subtitles,
         }
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+        entries = []
+        for mobj in re.finditer(
+                r'<amp-iframe\s[^>]+?\bsrc\s*=\s*(\'|")(?P<link>[^>]+?)\1(\s[^>]*)?>',
+                webpage):
+            link_url = mobj.group('link')
+            if link_url:
+                if '/embedded-video/' not in link_url:
+                    continue
+                entry = self._old_real_extract(link_url, video_id)
+                if entry:
+                    entries.append(entry)
+        if entries:
+            if len(entries) == 1:
+                return entries[0]
+            return self.playlist_result(entries, video_id)
+        else:
+            return self._old_real_extract(url, video_id, webpage)
 class NBCOlympicsIE(InfoExtractor):
     IE_NAME = 'nbcolympics'

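Reviewer note (illustrative, not part of the diff): the new NBCNewsIE._real_extract above scans AMP pages for <amp-iframe> embeds, keeps only links containing /embedded-video/, routes each through the old extraction path, and falls back to the page itself when nothing matches. An isolated, runnable sketch of just the scanning step, using the same regex against made-up sample HTML:

    import re

    AMP_IFRAME_RE = r'<amp-iframe\s[^>]+?\bsrc\s*=\s*(\'|")(?P<link>[^>]+?)\1(\s[^>]*)?>'

    sample_amp_page = '''
    <amp-iframe width="560" height="315"
        src="https://www.nbcnews.com/widget/embedded-video/abc123" layout="responsive"></amp-iframe>
    <amp-iframe layout="fill" src="https://example.com/other-widget"></amp-iframe>
    '''

    links = [
        m.group('link')
        for m in re.finditer(AMP_IFRAME_RE, sample_amp_page)
        if '/embedded-video/' in m.group('link')
    ]
    print(links)  # ['https://www.nbcnews.com/widget/embedded-video/abc123']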
View File

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
         ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
         ['arch', '', 'http://ussenate-f.akamaihd.net/']
     ]
-    _IE_NAME = 'senate.gov'
+    IE_NAME = 'senate.gov'
     _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
     _TESTS = [{
         'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',

View File

@@ -686,6 +686,8 @@ class JSInterpreter(object):
             raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
     def _dump(self, obj, namespace):
+        if obj is JS_Undefined:
+            return 'undefined'
         try:
             return json.dumps(obj)
         except TypeError:
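Reviewer note (illustrative, not part of the diff): JS_Undefined is the interpreter's sentinel for JavaScript undefined, and json.dumps() raises TypeError on it, so _dump() now special-cases it to the literal 'undefined'. A stand-alone sketch of the failure mode and the guard, with a stand-in sentinel class (the real singleton lives in youtube_dl/jsinterp.py) and an illustrative repr() fallback:

    import json

    class JSUndefinedSentinel(object):
        """Stand-in for the JS_Undefined singleton used by the interpreter."""

    JS_UNDEFINED = JSUndefinedSentinel()

    def dump(obj):
        if obj is JS_UNDEFINED:
            # without this guard, json.dumps() would raise TypeError below
            return 'undefined'
        try:
            return json.dumps(obj)
        except TypeError:
            return repr(obj)  # illustrative fallback; not necessarily the real _dump() behaviour

    print(dump(JS_UNDEFINED))  # undefined
    print(dump({'a': 1}))      # {"a": 1}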