Compare commits


7 Commits

Author  SHA1        Date                        Message

dirkf   680f050026  2025-04-01 07:49:55 +02:00  Merge 5c1e5a1160a3e65fd2673b6437f3882d348991cb into 3eb8d22ddb8982ca4fb56bb7a8d6517538bf14c6

dirkf   3eb8d22ddb  2025-03-31 04:21:09 +01:00  [JSInterp] Temporary fix for #33102

dirkf   4e714f9df1  2025-03-26 12:47:19 +00:00  [Misc] Correct [_]IE_DESC/NAME in a few IEs
                                                * thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
                                                * also add documenting comment in `InfoExtractor`

dirkf   c1ea7f5a24  2025-03-26 12:17:49 +00:00  [ITV] Mark ITVX not working
                                                * update old shim
                                                * correct [_]IE_DESC

dirkf   5c1e5a1160  2024-10-08 16:18:39 +01:00  [TubiTv] Add TubiTvShow series/season extractor based on yt-dlp

dirkf   76067fbdb1  2024-10-08 16:14:01 +01:00  [TubiTv] Update TubiTv extractor
                                                * back-port login and extraction from yt-dlp
                                                * further extract uploader, age_limit, cast, categories, series

dirkf   229f59e7c3  2024-10-07 15:52:33 +01:00  [core] Let Git ignore __pycache__, .pytest_cache
9 changed files with 219 additions and 54 deletions

.gitignore

@@ -1,3 +1,4 @@
+__pycache__/
 *.pyc
 *.pyo
 *.class
@@ -5,6 +6,7 @@
 *.DS_Store
 wine-py2exe/
 py2exe.log
+.pytest_cache/
 *.kate-swp
 build/
 dist/

youtube_dl/extractor/bokecc.py

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
 class BokeCCIE(BokeCCBaseIE):
-    _IE_DESC = 'CC视频'
+    IE_DESC = 'CC视频'
     _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
     _TESTS = [{

youtube_dl/extractor/cloudy.py

@@ -9,7 +9,7 @@ from ..utils import (
 class CloudyIE(InfoExtractor):
-    _IE_DESC = 'cloudy.ec'
+    IE_DESC = 'cloudy.ec'
     _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'https://www.cloudy.ec/v/af511e2527aac',

youtube_dl/extractor/common.py

@@ -422,6 +422,8 @@ class InfoExtractor(object):
     _GEO_COUNTRIES = None
     _GEO_IP_BLOCKS = None
     _WORKING = True
+    # supply this in public subclasses: used in supported sites list, etc
+    # IE_DESC = 'short description of IE'
     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""

youtube_dl/extractor/extractors.py

@@ -1328,7 +1328,10 @@ from .trovo import (
 from .trunews import TruNewsIE
 from .trutv import TruTVIE
 from .tube8 import Tube8IE
-from .tubitv import TubiTvIE
+from .tubitv import (
+    TubiTvIE,
+    TubiTvShowIE,
+)
 from .tumblr import TumblrIE
 from .tunein import (
     TuneInClipIE,
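
Registering the new class here is what makes it reachable at all; a small sketch (assuming a youtube-dl checkout with this change applied) of checking which extractors claim a Tubi series URL:

from youtube_dl.extractor import gen_extractors

series_url = 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross'
# gen_extractors() instantiates every registered extractor; only names imported
# into extractors.py are part of the registry, hence the added TubiTvShowIE import.
names = [ie.IE_NAME for ie in gen_extractors() if ie.suitable(series_url)]
print(names)  # expected to include 'tubitv:series' (plus the catch-all 'generic')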

youtube_dl/extractor/itv.py

@@ -35,15 +35,6 @@ from ..utils import (
 class ITVBaseIE(InfoExtractor):
-    def _search_nextjs_data(self, webpage, video_id, **kw):
-        transform_source = kw.pop('transform_source', None)
-        fatal = kw.pop('fatal', True)
-        return self._parse_json(
-            self._search_regex(
-                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
-                webpage, 'next.js data', group='js', fatal=fatal, **kw),
-            video_id, transform_source=transform_source, fatal=fatal)
     def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
         if errnote is False:
             return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
 class ITVIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
-    _IE_DESC = 'ITVX'
+    IE_DESC = 'ITVX'
+    _WORKING = False
     _TESTS = [{
         'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
                 'ext': determine_ext(href, 'vtt'),
             })
-        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
         video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
         title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
         info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
 class ITVBTCCIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
-    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    IE_DESC = 'ITV articles: News, British Touring Car Championship'
     _TESTS = [{
         'note': 'British Touring Car Championship',
         'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',
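
Two things change in this file: the local copy of _search_nextjs_data is dropped in favour of the InfoExtractor implementation, and the fallback becomes default={}, apparently because the base-class helper treats default as the already-parsed value rather than as raw JSON text. As a standalone illustration of the underlying __NEXT_DATA__ pattern that the removed shim implemented (the HTML below is invented, not ITV markup):

import json
import re

webpage = ('<script id="__NEXT_DATA__" type="application/json">'
           '{"props": {"pageProps": {"title": {"episodeTitle": "Demo"}}}}'
           '</script>')

# Next.js pages embed their state as JSON in a <script id="__NEXT_DATA__"> tag,
# so extraction is a regex capture followed by json.loads().
m = re.search(
    r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
    webpage)
next_data = json.loads(m.group('js')) if m else {}
print(next_data['props']['pageProps']['title']['episodeTitle'])  # Demo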

youtube_dl/extractor/senateisvp.py

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
         ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
         ['arch', '', 'http://ussenate-f.akamaihd.net/']
     ]
-    _IE_NAME = 'senate.gov'
+    IE_NAME = 'senate.gov'
     _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
     _TESTS = [{
         'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',

youtube_dl/extractor/tubitv.py

@@ -1,23 +1,62 @@
 # coding: utf-8
 from __future__ import unicode_literals
-import re
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    get_element_by_id,
     int_or_none,
+    join_nonempty,
+    js_to_json,
+    merge_dicts,
+    parse_age_limit,
     sanitized_Request,
+    strip_or_none,
+    T,
+    traverse_obj,
+    url_or_none,
     urlencode_postdata,
 )
 class TubiTvIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?:video|movies|tv-shows)/(?P<id>[0-9]+)'
+    IE_NAME = 'tubitv'
+    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?P<type>video|movies|tv-shows)/(?P<id>\d+)'
     _LOGIN_URL = 'http://tubitv.com/login'
     _NETRC_MACHINE = 'tubitv'
     _GEO_COUNTRIES = ['US']
     _TESTS = [{
+        'url': 'https://tubitv.com/movies/100004539/the-39-steps',
+        'info_dict': {
+            'id': '100004539',
+            'ext': 'mp4',
+            'title': 'The 39 Steps',
+            'description': 'md5:bb2f2dd337f0dc58c06cb509943f54c8',
+            'uploader_id': 'abc2558d54505d4f0f32be94f2e7108c',
+            'release_year': 1935,
+            'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
+            'duration': 5187,
+        },
+        'params': {'skip_download': 'm3u8'},
+        'skip': 'This content is currently unavailable',
+    }, {
+        'url': 'https://tubitv.com/tv-shows/554628/s01-e01-rise-of-the-snakes',
+        'info_dict': {
+            'id': '554628',
+            'ext': 'mp4',
+            'title': 'S01:E01 - Rise of the Snakes',
+            'description': 'md5:ba136f586de53af0372811e783a3f57d',
+            'episode': 'Rise of the Snakes',
+            'episode_number': 1,
+            'season': 'Season 1',
+            'season_number': 1,
+            'uploader_id': '2a9273e728c510d22aa5c57d0646810b',
+            'release_year': 2011,
+            'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
+            'duration': 1376,
+        },
+        'params': {'skip_download': 'm3u8'},
+        'skip': 'This content is currently unavailable',
+    }, {
         'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
         'md5': '43ac06be9326f41912dc64ccf7a80320',
         'info_dict': {
@@ -27,6 +66,7 @@ class TubiTvIE(InfoExtractor):
             'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
             'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
         },
+        'skip': 'Content Unavailable',
     }, {
         'url': 'http://tubitv.com/tv-shows/321886/s01_e01_on_nom_stories',
         'only_matching': True,
@@ -34,24 +74,42 @@
         'url': 'http://tubitv.com/movies/383676/tracker',
         'only_matching': True,
     }, {
-        'url': 'https://tubitv.com/movies/560057/penitentiary?start=true',
+        'url': 'https://tubitv.com/tv-shows/200141623/s01-e01-episode-1',
         'info_dict': {
-            'id': '560057',
+            'id': '200141623',
             'ext': 'mp4',
-            'title': 'Penitentiary',
-            'description': 'md5:8d2fc793a93cc1575ff426fdcb8dd3f9',
-            'uploader_id': 'd8fed30d4f24fcb22ec294421b9defc2',
-            'release_year': 1979,
+            'title': 'Shameless S01:E01 - Episode 1',
+            'description': 'Having her handbag stolen proves to be a blessing in disguise for Fiona when handsome stranger Steve comes to her rescue.',
+            'timestamp': 1725148800,
+            'upload_date': '20240901',
+            'uploader': 'all3-media',
+            'uploader_id': '9b8e3a8d789b1c843f4b680c025a1853',
+            'release_year': 2004,
+            'episode': 'Episode 1',
+            'episode_number': 1,
+            'season': 'Season 1',
+            'season_number': 1,
+            'series': 'Shameless',
+            'cast': list,
+            'age_limit': 17,
         },
         'params': {
-            'skip_download': True,
-            'format': 'best/bestvideo',
+            'skip_download': 'm3u8'
         },
     }]
+    # DRM formats are included only to raise appropriate error
+    _UNPLAYABLE_FORMATS = ('hlsv6_widevine', 'hlsv6_widevine_nonclearlead', 'hlsv6_playready_psshv0',
+                           'hlsv6_fairplay', 'dash_widevine', 'dash_widevine_nonclearlead')
     def _login(self):
         username, password = self._get_login_info()
         if username is None:
             return
+        self._perform_login(username, password)
+    def _perform_login(self, username, password):
         self.report_login()
         form_data = {
             'username': username,
@@ -62,7 +120,7 @@ class TubiTvIE(InfoExtractor):
         request.add_header('Content-Type', 'application/x-www-form-urlencoded')
         login_page = self._download_webpage(
             request, None, False, 'Wrong login info')
-        if not re.search(r'id="tubi-logout"', login_page):
+        if get_element_by_id('tubi-logout', login_page) is None:
             raise ExtractorError(
                 'Login failed (invalid username/password)', expected=True)
@@ -70,41 +128,146 @@
         self._login()
     def _real_extract(self, url):
-        video_id = self._match_id(url)
-        video_data = self._download_json(
-            'http://tubitv.com/oz/videos/%s/content' % video_id, video_id)
-        title = video_data['title']
+        video_id, video_type = self._match_valid_url(url).group('id', 'type')
+        webpage = self._download_webpage('https://tubitv.com/{0}/{1}/'.format(video_type, video_id), video_id)
+        data = self._search_json(
+            r'window\.__data\s*=', webpage, 'data', video_id,
+            transform_source=js_to_json)['video']['byId']
+        video_data = data[video_id]
+        info = self._search_json_ld(webpage, video_id, expected_type='VideoObject', default={})
+        title = strip_or_none(info.get('title'))
+        info['title'] = title or strip_or_none(video_data['title'])
-        formats = self._extract_m3u8_formats(
-            self._proto_relative_url(video_data['url']),
-            video_id, 'mp4', 'm3u8_native')
+        formats = []
+        drm_formats = 0
+        for resource in traverse_obj(video_data, ('video_resources', lambda _, v: v['type'] and v['manifest']['url'])):
+            manifest_url = url_or_none(resource['manifest']['url'])
+            if not manifest_url:
+                continue
+            resource_type = resource['type']
+            if resource_type == 'dash':
+                formats.extend(self._extract_mpd_formats(manifest_url, video_id, mpd_id=resource_type, fatal=False))
+            elif resource_type in ('hlsv3', 'hlsv6'):
+                formats.extend(self._extract_m3u8_formats(manifest_url, video_id, 'mp4', m3u8_id=resource_type, fatal=False))
+            elif resource_type in self._UNPLAYABLE_FORMATS:
+                drm_formats += 1
+            else:
+                self.report_warning('Skipping unknown resource type "{0}"'.format(resource_type))
+        if not formats and drm_formats > 0:
+            self.report_drm(video_id)
+        elif not formats and not video_data.get('policy_match'): # policy_match is False if content was removed
+            raise ExtractorError('This content is currently unavailable', expected=True)
         self._sort_formats(formats)
-        thumbnails = []
-        for thumbnail_url in video_data.get('thumbnails', []):
-            if not thumbnail_url:
-                continue
-            thumbnails.append({
-                'url': self._proto_relative_url(thumbnail_url),
-            })
         subtitles = {}
-        for sub in video_data.get('subtitles', []):
-            sub_url = sub.get('url')
+        for sub in traverse_obj(video_data, ('subtitles', lambda _, v: v['url'])):
+            sub_url = self._proto_relative_url(sub['url'])
             if not sub_url:
                 continue
             subtitles.setdefault(sub.get('lang', 'English'), []).append({
-                'url': self._proto_relative_url(sub_url),
+                'url': sub_url,
             })
-        return {
+        season_number, episode_number, episode_title = self._search_regex(
+            r'\bS(\d+):E(\d+) - (.+)', info['title'], 'episode info', fatal=False, group=(1, 2, 3), default=(None, None, None))
+        return merge_dicts({
             'id': video_id,
-            'title': title,
             'formats': formats,
             'subtitles': subtitles,
-            'thumbnails': thumbnails,
-            'description': video_data.get('description'),
-            'duration': int_or_none(video_data.get('duration')),
-            'uploader_id': video_data.get('publisher_id'),
-            'release_year': int_or_none(video_data.get('year')),
-        }
+            'season_number': int_or_none(season_number),
+            'episode_number': int_or_none(episode_number),
+            'episode': episode_title
+        }, traverse_obj(video_data, {
+            'description': ('description', T(strip_or_none)),
+            'duration': ('duration', T(int_or_none)),
+            'uploader': ('import_id', T(strip_or_none)),
+            'uploader_id': ('publisher_id', T(strip_or_none)),
+            'release_year': ('year', T(int_or_none)),
+            'thumbnails': ('thumbnails', Ellipsis, T(self._proto_relative_url), {'url': T(url_or_none)}),
+            'cast': ('actors', Ellipsis, T(strip_or_none)),
+            'categories': ('tags', Ellipsis, T(strip_or_none)),
+            'age_limit': ('ratings', 0, 'value', T(parse_age_limit)),
+        }), traverse_obj(data, (lambda _, v: v['type'] == 's', {
+            'series': ('title', T(strip_or_none)),
+            # 'series_id': ('id', T(compat_str)),
+        }), get_all=False), info)
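
A reduced, self-contained sketch of the format handling in the rewritten _real_extract above (the sample video_resources entry below is invented): DASH and clear HLS manifests are extracted, known DRM-only resource types are merely counted so report_drm() can explain an otherwise empty format list, and an empty list combined with a falsy policy_match signals removed content.

_UNPLAYABLE_FORMATS = ('hlsv6_widevine', 'hlsv6_widevine_nonclearlead', 'hlsv6_playready_psshv0',
                       'hlsv6_fairplay', 'dash_widevine', 'dash_widevine_nonclearlead')


def classify(video_resources):
    playable, drm_formats = [], 0
    for resource in video_resources:
        kind = resource.get('type')
        manifest_url = (resource.get('manifest') or {}).get('url')
        if not (kind and manifest_url):
            continue
        if kind in ('dash', 'hlsv3', 'hlsv6'):
            # in the extractor these feed _extract_mpd_formats/_extract_m3u8_formats
            playable.append((kind, manifest_url))
        elif kind in _UNPLAYABLE_FORMATS:
            drm_formats += 1  # counted only so the DRM error can be raised later
    return playable, drm_formats


print(classify([{'type': 'hlsv6_widevine', 'manifest': {'url': 'https://example.invalid/drm.m3u8'}}]))
# ([], 1): DRM seen but nothing playable, the case where report_drm() fires
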
+class TubiTvShowIE(InfoExtractor):
+    IE_NAME = 'tubitv:series'
+    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/series/\d+/(?P<show_name>[^/?#]+)(?:/season-(?P<season>\d+))?'
+    _TESTS = [{
+        'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross?start=true',
+        'playlist_mincount': 390,
+        'info_dict': {
+            'id': 'the-joy-of-painting-with-bob-ross',
+        },
+    }, {
+        'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross/season-1',
+        'playlist_count': 13,
+        'info_dict': {
+            'id': 'the-joy-of-painting-with-bob-ross-season-1',
+        },
+    }, {
+        'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross/season-3',
+        'playlist_count': 13,
+        'info_dict': {
+            'id': 'the-joy-of-painting-with-bob-ross-season-3',
+        },
+    }]
+    def _real_extract(self, url):
+        playlist_id, selected_season = self._match_valid_url(url).group(
+            'show_name', 'season')
+        def entries(s_url, playlist_id, selected_season_num):
+            def get_season_data(s_num, fatal=False):
+                if s_num is None:
+                    url, s_id = s_url, playlist_id
+                else:
+                    url = '%s/season-%d' % (s_url, s_num)
+                    s_id = '%s-season-%d' % (playlist_id, s_num)
+                webpage = self._download_webpage(url, s_id, fatal=fatal)
+                data = self._search_json(
+                    r'window\s*\.\s*__data\s*=', webpage or '', 'data', s_id,
+                    transform_source=js_to_json, default={})
+                return data['video'] if fatal else data.get('video', {})
+            data = get_season_data(None, fatal=True)
+            # The {series_id}.seasons JSON may lack some episodes that are available
+            # Iterate over the season numbers instead [1]
+            # 1. https://github.com/yt-dlp/yt-dlp/issues/11170#issuecomment-2399918777
+            seasons = (
+                traverse_obj(data, (
+                    'byId', lambda _, v: v['type'] == 's', 'seasons', Ellipsis,
+                    'number', T(int_or_none)))
+                if selected_season is None
+                else [selected_season])
+            unavail_cnt = 0
+            select_episodes = lambda _, v: v['type'] == 'v'
+            for season_number in seasons:
+                if not data:
+                    data = get_season_data(season_number)
+                unavail_cnt += len(traverse_obj(data, ('byId', select_episodes, 'policy_match', T(lambda m: (not m) or None))))
+                for episode_id, episode in traverse_obj(data, ('byId', select_episodes, T(lambda e: (e['id'], e)))):
+                    yield merge_dicts(self.url_result(
+                        'https://tubitv.com/tv-shows/{0}/'.format(episode_id), TubiTvIE.ie_key(), episode_id), {
+                        'season_number': season_number,
+                        'episode_number': int_or_none(episode.get('num')),
+                    })
+                data = None
+            if unavail_cnt > 0:
+                self.report_warning('%d items were marked as unavailable: check that the desired content is available or provide login parameters if needed' % unavail_cnt)
+        return self.playlist_result(
+            entries(url, playlist_id, int_or_none(selected_season)),
+            join_nonempty(playlist_id, selected_season, delim='-season-'))
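
The playlist id built on the last line mirrors the ids asserted in the tests above; a quick sketch of how join_nonempty() (imported from youtube_dl.utils in this patch) behaves with and without a captured season:

from youtube_dl.utils import join_nonempty

# series page: no season captured, the empty part is dropped
print(join_nonempty('the-joy-of-painting-with-bob-ross', None, delim='-season-'))
# -> the-joy-of-painting-with-bob-ross

# season page: the captured season number is appended with the delimiter
print(join_nonempty('the-joy-of-painting-with-bob-ross', '1', delim='-season-'))
# -> the-joy-of-painting-with-bob-ross-season-1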

youtube_dl/jsinterp.py

@@ -686,6 +686,8 @@ class JSInterpreter(object):
             raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
     def _dump(self, obj, namespace):
+        if obj is JS_Undefined:
+            return 'undefined'
         try:
             return json.dumps(obj)
         except TypeError:
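
The hunk ends here, but the added branch is self-contained: json.dumps() cannot serialise the interpreter's JS_Undefined sentinel, so _dump() now returns the literal 'undefined' before reaching the TypeError path. A toy illustration with a stand-in sentinel (not the real jsinterp object):

import json


class _JSUndefinedType(object):
    def __repr__(self):
        return 'undefined'


JS_Undefined = _JSUndefinedType()  # stand-in for youtube_dl.jsinterp.JS_Undefined


def dump(obj):
    if obj is JS_Undefined:
        return 'undefined'  # the new short-circuit from the diff above
    try:
        return json.dumps(obj)
    except TypeError:
        return repr(obj)  # illustrative fallback; the real one is cut off above


print(dump({'a': 1}))      # {"a": 1}
print(dump(JS_Undefined))  # undefined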