Compare commits

..

1 Commits

Author SHA1 Message Date
dirkf
fd2d9b11da
Merge ab5617be9e into c5098961b0 2024-08-16 17:10:51 +02:00

View File

@@ -13,7 +13,6 @@ from ..utils import ( merge_dicts, merge_dicts,
merge_dicts, merge_dicts,
parse_duration, parse_duration,
parse_iso8601, parse_iso8601,
T,
traverse_obj, traverse_obj,
update_url_query, update_url_query,
url_or_none, url_or_none,
@@ -36,7 +35,7 @@ class SBSIE(InfoExtractor):
<meta\s+property="og:video"\s+content=| <meta\s+property="og:video"\s+content=|
<iframe[^>]+?src= <iframe[^>]+?src=
) )
("|\')(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1'''] (["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''']
_TESTS = [{ _TESTS = [{
# Exceptional unrestricted show for testing, thanks SBS, # Exceptional unrestricted show for testing, thanks SBS,
@@ -55,8 +54,8 @@ class SBSIE(InfoExtractor):
'timestamp': 1408613220, 'timestamp': 1408613220,
'upload_date': '20140821', 'upload_date': '20140821',
'uploader': 'SBSC', 'uploader': 'SBSC',
'tags': 'mincount:10', 'tags': None,
'categories': 'count:2', 'categories': None,
}, },
'expected_warnings': ['Unable to download JSON metadata'], 'expected_warnings': ['Unable to download JSON metadata'],
}, { }, {
@@ -95,14 +94,18 @@ class SBSIE(InfoExtractor):
'only_matching': True, 'only_matching': True,
}] }]
# change default entry_protocol kwarg for _extract_smil_formats()
# TODO: ..._and_subtitles()
def _extract_m3u8_formats(self, m3u8_url, video_id, *args, **kwargs): def _extract_m3u8_formats(self, m3u8_url, video_id, *args, **kwargs):
# ext, entry_protocol, ... # ext, entry_protocol, preference, m3u8_id, note, errnote, fatal,
entry_protocol = kwargs.get('entry_protocol') # live, data, headers, query
if not entry_protocol and len(args) <= 1: entry_protocol = args[1] if len(args) > 1 else kwargs.get('entry_protocol')
kwargs['entry_protocol'] = 'm3u8_native' if not entry_protocol:
kwargs = compat_kwargs(kwargs) entry_protocol = 'm3u8_native'
if len(args) > 1:
args = list(args)
args[1] = entry_protocol
else:
kwargs['entry_protocol'] = entry_protocol
kwargs = compat_kwargs(kwargs)
return super(SBSIE, self)._extract_m3u8_formats(m3u8_url, video_id, *args, **kwargs) return super(SBSIE, self)._extract_m3u8_formats(m3u8_url, video_id, *args, **kwargs)
@@ -141,8 +144,8 @@ class SBSIE(InfoExtractor):
def _real_extract(self, url): def _real_extract(self, url):
video_id = self._match_id(url) video_id = self._match_id(url)
# get media links directly though later metadata may contain contentUrl # get media links directly though later metadata may contain contentUrl
formats, subtitles = self._extract_smil_formats( # self._extract_smil_formats_and_subtitles( smil_url = self._get_smil_url(video_id)
self._get_smil_url(video_id), video_id, fatal=False), {} formats = self._extract_smil_formats(smil_url, video_id, fatal=False) or []
if not formats: if not formats:
urlh = self._request_webpage( urlh = self._request_webpage(
@@ -157,16 +160,16 @@ class SBSIE(InfoExtractor):
# try for metadata from the same source # try for metadata from the same source
player_data = self._get_player_data(video_id, fatal=False) player_data = self._get_player_data(video_id, fatal=False)
media = traverse_obj(player_data, 'video_object', T(dict)) or {} media = traverse_obj(player_data, 'video_object', expected_type=dict) or {}
# get, or add, metadata from catalogue # get, or add, metadata from catalogue
media.update(self._call_api(video_id, 'mpx-media/' + video_id, fatal=not media)) media.update(self._call_api(video_id, 'mpx-media/' + video_id, fatal=not media))
# utils candidate for use with traverse_obj()
def txt_or_none(s): def txt_or_none(s):
return (s.strip() or None) if isinstance(s, compat_str) else None return (s.strip() or None) if isinstance(s, compat_str) else None
# expected_type fn for thumbs # expected_type fn for thumbs
def mk_thumb(t): def xlate_thumb(t):
u = url_or_none(t.get('contentUrl')) u = url_or_none(t.get('contentUrl'))
return u and { return u and {
'id': t.get('name'), 'id': t.get('name'),
@@ -182,41 +185,51 @@ class SBSIE(InfoExtractor):
result = parse_duration(d) result = parse_duration(d)
return result return result
# For named episodes, use the catalogue's title to set episode, rather than generic 'Episode N'. def traverse_media(*args, **kwargs):
if traverse_obj(media, ('partOfSeries', T(dict))): nkwargs = None
media['epName'] = traverse_obj(media, 'title') if 'expected_type' not in kwargs:
kwargs['expected_type'] = txt_or_none
nkwargs = kwargs
if 'get_all' not in kwargs:
kwargs['get_all'] = False
nkwargs = kwargs
if nkwargs:
kwargs = compat_kwargs(nkwargs)
return traverse_obj(media, *args, **kwargs)
str = txt_or_none # instant compat # For named episodes, use the catalogue's title to set episode, rather than generic 'Episode N'.
return merge_dicts({ if traverse_media('partOfSeries', expected_type=dict):
media['epName'] = traverse_media('title')
return merge_dicts(*reversed(({
'id': video_id, 'id': video_id,
}, traverse_obj(media, { }, dict((k, traverse_media(v)) for k, v in {
'title': ('name', T(str)), 'title': 'name',
'description': ('description', T(str)), 'description': 'description',
'channel': ('taxonomy', 'channel', 'name', T(str)), 'channel': ('taxonomy', 'channel', 'name'),
'series': ((('partOfSeries', 'name'), 'seriesTitle'), T(str)), 'series': ((('partOfSeries', 'name'), 'seriesTitle')),
'series_id': ((('partOfSeries', 'uuid'), 'seriesID'), T(str)), 'series_id': ((('partOfSeries', 'uuid'), 'seriesID')),
'season_number': (('partOfSeries', None), 'seasonNumber', T(int_or_none)), 'episode': 'epName',
'episode': ('epName', T(str)), }.items()), {
'episode_number': ('episodeNumber', T(int_or_none)), 'season_number': traverse_media((('partOfSeries', None), 'seasonNumber'), expected_type=int_or_none),
'timestamp': (('datePublished', ('publication', 'startDate')), T(parse_iso8601)), 'episode_number': traverse_media('episodeNumber', expected_type=int_or_none),
'release_year': ('releaseYear', T(int_or_none)), 'timestamp': traverse_media('datePublished', ('publication', 'startDate'),
'duration': ('duration', T(really_parse_duration)), expected_type=parse_iso8601),
'is_live': ('liveStream', T(bool)), 'release_year': traverse_media('releaseYear', expected_type=int_or_none),
'age_limit': ('classificationID', 'contentRating', 'duration': traverse_media('duration', expected_type=really_parse_duration),
T(lambda x: self.AUS_TV_PARENTAL_GUIDELINES.get(x, '').upper() or None)), # dict.get is unhashable in py3.7 'is_live': traverse_media('liveStream', expected_type=bool),
}, get_all=False), traverse_obj(media, { 'age_limit': self.AUS_TV_PARENTAL_GUIDELINES.get(traverse_media(
'categories': ((('genres', Ellipsis), 'classificationID', 'contentRating', default='').upper()),
('taxonomy', ((('genre', 'subgenre'), Ellipsis, 'name'), 'useType'))), 'categories': traverse_media(
T(str)), ('genres', Ellipsis), ('taxonomy', ('genre', 'subgenre'), 'name'),
'tags': ((((('keywords',), get_all=True) or None,
('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice'))), 'tags': traverse_media(
Ellipsis), (('consumerAdviceTexts', ('sbsSubCertification', 'consumerAdvice')), Ellipsis),
('taxonomy', ('era', 'location', 'section', 'subject', 'theme'), get_all=True) or None,
Ellipsis, 'name')), 'thumbnails': traverse_media(('thumbnails', Ellipsis),
T(str)), expected_type=xlate_thumb, get_all=True),
'thumbnails': ('thumbnails', lambda _, v: v['contentUrl'], T(mk_thumb)),
}), {
'formats': formats, 'formats': formats,
'subtitles': subtitles, # TODO: _extract_smil_formats_and_subtitles()
# 'subtitles': subtitles,
'uploader': 'SBSC', 'uploader': 'SBSC',
}, rev=True) })))