Compare commits

...

6 Commits

Author SHA1 Message Date

dirkf  0f8155b84c  Merge fed9c8d17b52c722d6e1fc3321f9083ad66ba97d into 3eb8d22ddb8982ca4fb56bb7a8d6517538bf14c6  2025-04-01 08:50:13 +02:00

dirkf  3eb8d22ddb  [JSInterp] Temporary fix for #33102  2025-03-31 04:21:09 +01:00

dirkf  4e714f9df1  [Misc] Correct [_]IE_DESC/NAME in a few IEs  2025-03-26 12:47:19 +00:00
       * thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
       * also add documenting comment in `InfoExtractor`

dirkf  c1ea7f5a24  [ITV] Mark ITVX not working  2025-03-26 12:17:49 +00:00
       * update old shim
       * correct [_]IE_DESC

dirkf  fed9c8d17b  Add IE_DESC  2022-04-28 11:19:55 +01:00

dirkf  876d9b8f49  [TalkTV] Add extractors for TalkTV (UK) shows and series  2022-04-28 01:55:11 +01:00
8 changed files with 194 additions and 15 deletions

View File

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
class BokeCCIE(BokeCCBaseIE):
    _IE_DESC = 'CC视频'
    IE_DESC = 'CC视频'
    _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
    _TESTS = [{

View File

@@ -9,7 +9,7 @@ from ..utils import (
class CloudyIE(InfoExtractor):
    _IE_DESC = 'cloudy.ec'
    IE_DESC = 'cloudy.ec'
    _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
    _TESTS = [{
        'url': 'https://www.cloudy.ec/v/af511e2527aac',

View File

@@ -422,6 +422,8 @@ class InfoExtractor(object):
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _WORKING = True
    # supply this in public subclasses: used in supported sites list, etc
    # IE_DESC = 'short description of IE'

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""

View File

@@ -1242,6 +1242,10 @@ from .tagesschau import (
    TagesschauPlayerIE,
    TagesschauIE,
)
from .talktv import (
    TalkTVIE,
    TalkTVSeriesIE,
)
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE

View File

@@ -35,15 +35,6 @@ from ..utils import (

class ITVBaseIE(InfoExtractor):

    def _search_nextjs_data(self, webpage, video_id, **kw):
        transform_source = kw.pop('transform_source', None)
        fatal = kw.pop('fatal', True)
        return self._parse_json(
            self._search_regex(
                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
                webpage, 'next.js data', group='js', fatal=fatal, **kw),
            video_id, transform_source=transform_source, fatal=fatal)

    def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
        if errnote is False:
            return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
class ITVIE(ITVBaseIE):
    _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
    _IE_DESC = 'ITVX'
    IE_DESC = 'ITVX'
    _WORKING = False
    _TESTS = [{
        'note': 'Hub URLs redirect to ITVX',
        'url': 'https://www.itv.com/hub/liar/2a4547a0012',
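
As a quick check of what `_WORKING = False` does, the sketch below assumes this branch of youtube-dl is importable; the extractor is then reported as broken, e.g. with "(CURRENTLY BROKEN)" in --list-extractors output:

from youtube_dl.extractor.itv import ITVIE

print(ITVIE.working())  # False after this change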
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
                    'ext': determine_ext(href, 'vtt'),
                })
        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
        video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
        title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
        info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
class ITVBTCCIE(ITVBaseIE):
    _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
    IE_DESC = 'ITV articles: News, British Touring Car Championship'
    _TESTS = [{
        'note': 'British Touring Car Championship',
        'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',

View File

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
        ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
        ['arch', '', 'http://ussenate-f.akamaihd.net/']
    ]
    _IE_NAME = 'senate.gov'
    IE_NAME = 'senate.gov'
    _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
    _TESTS = [{
        'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',

View File

@@ -0,0 +1,178 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import calendar
from datetime import datetime
import time

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    determine_ext,
    extract_attributes,
    ExtractorError,
    get_elements_by_class,
    HEADRequest,
    parse_duration,
    try_get,
    unified_timestamp,
    url_or_none,
    urljoin,
)


class TalkTVIE(InfoExtractor):
    IE_NAME = 'talk.tv'
    IE_DESC = 'TalkTV UK catch-up and live shows'
    _VALID_URL = r'https?://watch\.talk\.tv/(?P<id>watch/(?:vod|replay)/\d+|live)'

    _TESTS = [{
        'url': 'https://watch.talk.tv/watch/replay/12216792',
        'md5': 'dc9071f7d26f48ce4057a98425894eb3',
        'info_dict': {
            'id': '12216792',
            'ext': 'mp4',
            'title': 'Piers Morgan Uncensored',
            'description': 'The host interviews former US President Donald Trump',
            'timestamp': 1650917390,
            'upload_date': '20220425',
            'duration': float,
        },
        'params': {
            'skip_download': True,  # adaptive download
        },
    }, {
        'url': 'https://watch.talk.tv/live',
        'info_dict': {
            'id': 'live',
            'ext': 'mp4',
            'title': 'Piers Morgan Uncensored',
            'description': compat_str,
            'timestamp': int,
            # needs core fix to force compat_str type
            'upload_date': r're:\d{8}',
            'duration': float,
        },
        'params': {
            'skip_download': True,
        },
    },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url).rsplit('/', 1)[-1]
        is_live = (video_id == 'live')
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'(?is)<h1\b[^>]+>\s*(.+?)\s*</h1', webpage, 'title')
        player = self._search_regex(r'''(<[dD][iI][vV]\b[^>]+?\bid\s*=\s*(?P<q>"|')player(?P=q)[^>]*>)''', webpage, video_id)
        player = extract_attributes(player)

        expiry = player.get('expiry')
        if expiry is not None and expiry < time.time():
            raise ExtractorError('Video has expired', expected=True)

        api_info = self._download_json(
            'https://mm-v2.simplestream.com/ssmp/api.php?id=%(data-id)s&env=%(data-env)s' % player,
            video_id, note='Downloading API info', fatal=False)
        player['api_url'] = (
            url_or_none(try_get(api_info, lambda x: x['response']['api_hostname']))
            or 'https://v2-streams-elb.simplestreamcdn.com')

        headers = {'Referer': url, }
        for item in ('uvid', 'token', ('expiry', 'Token-Expiry')):
            if isinstance(item, compat_str):
                name = item.capitalize()
            else:
                item, name = item
            val = player.get('data-' + item)
            if val is not None:
                headers[name] = val

        stream_info = self._download_json(
            '%(api_url)s/api/%(data-type)s/stream/%(data-uvid)s?key=%(data-key)s&platform=firefox&cc=%(data-country)s' % player,
            video_id, headers=headers)
        error = try_get(stream_info, lambda x: x['response']['error'])
        if error:
            raise ExtractorError('Streaming API reported: ' + error, expected=True)

        fmt_url = (stream_info['response'].get('drm') in (None, False)) and stream_info['response']['stream']
        formats = []
        duration = None
        description = None
        timestamp = None
        if fmt_url:
            ext = determine_ext(fmt_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    fmt_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', live=is_live, fatal=False))
            elif ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    fmt_url, video_id, mpd_id='dash', live=is_live, fatal=False))
            else:
                formats.append({
                    'url': fmt_url,
                })
            if not is_live:
                res = self._request_webpage(HEADRequest(fmt_url), video_id, note='Checking date', fatal=False)
                if res is not False:
                    timestamp = unified_timestamp(res.info().getheader('last-modified'))

        self._sort_formats(formats)

        text_fields = get_elements_by_class('text-start', webpage)
        for text in text_fields:
            text = clean_html(text)
            if text.startswith('EPISODE'):
                duration = parse_duration(
                    self._html_search_regex(r'^EPISODE\b\W*(\w[\w\s]*?)\s*$', text, 'duration', default=None))
            elif text.startswith('Live'):
                duration = self._html_search_regex(r'^Live\b(?:<[^>]+>|\W)*([0-2]?\d:\d{2}\s*-\s*[0-2]?\d:\d{2})\s*$', text, 'duration', default=None)
                duration = list(map(lambda x: datetime.strptime(x, '%H:%M'), re.split(r'\s*-\s*', duration)))
                if None not in duration and len(duration) == 2:
                    timestamp = datetime.now().replace(hour=duration[0].hour, minute=duration[0].minute, second=0, microsecond=0)
                    timestamp = calendar.timegm(timestamp.timetuple())
                    duration = duration[1] - duration[0]
                    try:
                        duration = duration.total_seconds()
                    except AttributeError:
                        # Py 2.6
                        duration = duration.td_seconds
                    if duration is not None and duration < 0:
                        duration += 24 * 3600
            else:
                description = text

        return {
            # ensure live has a fixed ID
            'id': player['data-uvid'] if not is_live else video_id,
            'title': title,
            'display_id': video_id if not is_live else player['data-uvid'],
            'formats': formats,
            'thumbnail': player.get('data-poster'),
            'duration': duration,
            'timestamp': timestamp,
            'description': description,
            'is_live': is_live,
        }
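
The Simplestream API URLs above are built with Python %-formatting against the dict of attributes scraped from the player <div>; a small standalone sketch of that pattern (attribute values invented for illustration):

player = {'data-id': '1234', 'data-env': 'production'}
api_url = 'https://mm-v2.simplestream.com/ssmp/api.php?id=%(data-id)s&env=%(data-env)s' % player
print(api_url)  # https://mm-v2.simplestream.com/ssmp/api.php?id=1234&env=production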

class TalkTVSeriesIE(InfoExtractor):
    IE_NAME = 'talk.tv:series'
    IE_DESC = 'TalkTV UK series catch-up'
    _VALID_URL = r'https?://(?:watch\.|www\.)?talk\.tv/shows/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'

    _TESTS = [{
        'url': 'https://watch.talk.tv/shows/86dadc3e-c4d2-11ec-b4c6-0af62ebc70d1',
        'info_dict': {
            'id': '86dadc3e-c4d2-11ec-b4c6-0af62ebc70d1',
        },
        'playlist_mincount': 4,
    },
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        episodes = re.finditer(
            r'''(?i)<a\b[^>]+?\bhref\s*=\s*(?P<q>"|')(?P<href>/watch/(?:(?!(?P=q)).)+)(?P=q)''',
            webpage)

        return self.playlist_from_matches(
            episodes, playlist_id, getter=lambda x: urljoin(url, x.group('href')), ie='TalkTV')
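
A minimal usage sketch for the new extractors via the Python API, assuming this branch of youtube-dl is installed; the URL is the one from the test data above:

import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('https://watch.talk.tv/watch/replay/12216792', download=False)
    print(info.get('id'), info.get('title'), info.get('duration'))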

View File

@@ -686,6 +686,8 @@ class JSInterpreter(object):
            raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)

    def _dump(self, obj, namespace):
        if obj is JS_Undefined:
            return 'undefined'
        try:
            return json.dumps(obj)
        except TypeError:
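
For reference, a self-contained sketch of the behaviour the added lines give _dump(): json.dumps() raises TypeError for the JS_Undefined sentinel, and the new check reports it as the literal 'undefined' instead of leaving it to the fallback branch. The sentinel class and the repr() fallback below are stand-ins for illustration only:

import json


class _Undefined(object):  # stand-in for youtube_dl.jsinterp.JS_Undefined
    pass


JS_UNDEFINED = _Undefined()


def dump(obj):
    if obj is JS_UNDEFINED:
        return 'undefined'  # mirrors the added special case
    try:
        return json.dumps(obj)
    except TypeError:
        return repr(obj)  # illustrative fallback; the real except branch is not shown above


print(dump(JS_UNDEFINED))  # undefined
print(dump({'a': 1}))      # {"a": 1}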