Compare commits


6 Commits

Author      SHA1        Message                                                        Date

dirkf       23da1c4267  Merge fed9c8d17b into c5098961b0                               2024-08-21 22:32:43 -04:00

dirkf       c5098961b0  [Youtube] Rework n function extraction pattern                 2024-08-06 20:59:09 +01:00
                        Now also succeeds with player b12cc44b

dirkf       dbc08fba83  [jsinterp] Improve slice implementation for player b12cc44b   2024-08-06 20:51:38 +01:00
                        Partly taken from yt-dlp/yt-dlp#10664, thx seproDev
                        Fixes #32896

Aiur Adept  71223bff39  [Youtube] Fix nsig extraction for player 20dfca59 (#32891)    2024-08-01 19:18:34 +01:00
                        * dirkf's patch for nsig extraction
                        * add generic search per yt-dlp/yt-dlp/pull/10611 - thx bashonly
                        Co-authored-by: dirkf <fieldhouse@gmx.net>

dirkf       fed9c8d17b  Add IE_DESC                                                    2022-04-28 11:19:55 +01:00

dirkf       876d9b8f49  [TalkTV] Add extractors for TalkTV (UK) shows and series      2022-04-28 01:55:11 +01:00
6 changed files with 267 additions and 13 deletions

test/test_jsinterp.py

@@ -425,6 +425,34 @@ class TestJSInterpreter(unittest.TestCase):
        self._test(jsi, [''], args=['', '-'])
        self._test(jsi, [], args=['', ''])

    def test_slice(self):
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7])
        self._test('function f(){return "012345678".slice()}', '012345678')
        self._test('function f(){return "012345678".slice(0)}', '012345678')
        self._test('function f(){return "012345678".slice(5)}', '5678')
        self._test('function f(){return "012345678".slice(99)}', '')
        self._test('function f(){return "012345678".slice(-2)}', '78')
        self._test('function f(){return "012345678".slice(-99)}', '012345678')
        self._test('function f(){return "012345678".slice(0, 0)}', '')
        self._test('function f(){return "012345678".slice(1, 0)}', '')
        self._test('function f(){return "012345678".slice(0, 1)}', '0')
        self._test('function f(){return "012345678".slice(3, 6)}', '345')
        self._test('function f(){return "012345678".slice(1, -1)}', '1234567')
        self._test('function f(){return "012345678".slice(-1, 1)}', '')
        self._test('function f(){return "012345678".slice(-3, -1)}', '67')


if __name__ == '__main__':
    unittest.main()

test/test_youtube_signature.py

@@ -174,6 +174,14 @@ _NSIG_TESTS = [
        'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js',
        '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA',
    ),
    (
        'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js',
        '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw',
    ),
    (
        'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js',
        'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw',
    ),
]

youtube_dl/extractor/extractors.py

@@ -1242,6 +1242,10 @@ from .tagesschau import (
    TagesschauPlayerIE,
    TagesschauIE,
)
from .talktv import (
    TalkTVIE,
    TalkTVSeriesIE,
)
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE

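Registering the module in this import list is all that is needed for the new extractors to show up in the normal listing. A quick check from a Python prompt might look like the following (a sketch only, using the existing public helpers in youtube_dl.extractor, nothing added by this branch):

# Sketch: confirm the TalkTV extractors are registered after this change.
from youtube_dl.extractor import get_info_extractor, list_extractors

print(get_info_extractor('TalkTV'))        # the TalkTVIE class
print(get_info_extractor('TalkTVSeries'))  # the TalkTVSeriesIE class

# list_extractors() backs --list-extractors; both new IE_NAMEs should appear
for ie in list_extractors(age_limit=None):
    if ie.IE_NAME.startswith('talk.tv'):
        print(ie.IE_NAME, '-', ie.IE_DESC)
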
youtube_dl/extractor/talktv.py

@@ -0,0 +1,178 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import calendar
from datetime import datetime
import time

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    clean_html,
    determine_ext,
    extract_attributes,
    ExtractorError,
    get_elements_by_class,
    HEADRequest,
    parse_duration,
    try_get,
    unified_timestamp,
    url_or_none,
    urljoin,
)


class TalkTVIE(InfoExtractor):
    IE_NAME = 'talk.tv'
    IE_DESC = 'TalkTV UK catch-up and live shows'
    _VALID_URL = r'https?://watch\.talk\.tv/(?P<id>watch/(?:vod|replay)/\d+|live)'
    _TESTS = [{
        'url': 'https://watch.talk.tv/watch/replay/12216792',
        'md5': 'dc9071f7d26f48ce4057a98425894eb3',
        'info_dict': {
            'id': '12216792',
            'ext': 'mp4',
            'title': 'Piers Morgan Uncensored',
            'description': 'The host interviews former US President Donald Trump',
            'timestamp': 1650917390,
            'upload_date': '20220425',
            'duration': float,
        },
        'params': {
            'skip_download': True,  # adaptive download
        },
    }, {
        'url': 'https://watch.talk.tv/live',
        'info_dict': {
            'id': 'live',
            'ext': 'mp4',
            'title': 'Piers Morgan Uncensored',
            'description': compat_str,
            'timestamp': int,
            # needs core fix to force compat_str type
            'upload_date': r're:\d{8}',
            'duration': float,
        },
        'params': {
            'skip_download': True,
        },
    },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url).rsplit('/', 1)[-1]
        is_live = (video_id == 'live')
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'(?is)<h1\b[^>]+>\s*(.+?)\s*</h1', webpage, 'title')

        player = self._search_regex(r'''(<[dD][iI][vV]\b[^>]+?\bid\s*=\s*(?P<q>"|')player(?P=q)[^>]*>)''', webpage, video_id)
        player = extract_attributes(player)

        expiry = player.get('expiry')
        if expiry is not None and expiry < time.time():
            raise ExtractorError('Video has expired', expected=True)

        api_info = self._download_json(
            'https://mm-v2.simplestream.com/ssmp/api.php?id=%(data-id)s&env=%(data-env)s' % player,
            video_id, note='Downloading API info', fatal=False)
        player['api_url'] = (
            url_or_none(try_get(api_info, lambda x: x['response']['api_hostname']))
            or 'https://v2-streams-elb.simplestreamcdn.com')

        headers = {'Referer': url, }
        for item in ('uvid', 'token', ('expiry', 'Token-Expiry')):
            if isinstance(item, compat_str):
                name = item.capitalize()
            else:
                item, name = item
            val = player.get('data-' + item)
            if val is not None:
                headers[name] = val

        stream_info = self._download_json(
            '%(api_url)s/api/%(data-type)s/stream/%(data-uvid)s?key=%(data-key)s&platform=firefox&cc=%(data-country)s' % player,
            video_id, headers=headers)
        error = try_get(stream_info, lambda x: x['response']['error'])
        if error:
            raise ExtractorError('Streaming API reported: ' + error, expected=True)
        fmt_url = (stream_info['response'].get('drm') in (None, False)) and stream_info['response']['stream']

        formats = []
        duration = None
        description = None
        timestamp = None
        if fmt_url:
            ext = determine_ext(fmt_url)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    fmt_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', live=is_live, fatal=False))
            elif ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    fmt_url, video_id, mpd_id='dash', live=is_live, fatal=False))
            else:
                formats.append({
                    'url': fmt_url,
                })
            if not is_live:
                res = self._request_webpage(HEADRequest(fmt_url), video_id, note='Checking date', fatal=False)
                if res is not False:
                    timestamp = unified_timestamp(res.info().getheader('last-modified'))
        self._sort_formats(formats)

        text_fields = get_elements_by_class('text-start', webpage)
        for text in text_fields:
            text = clean_html(text)
            if text.startswith('EPISODE'):
                duration = parse_duration(
                    self._html_search_regex(r'^EPISODE\b\W*(\w[\w\s]*?)\s*$', text, 'duration', default=None))
            elif text.startswith('Live'):
                duration = self._html_search_regex(r'^Live\b(?:<[^>]+>|\W)*([0-2]?\d:\d{2}\s*-\s*[0-2]?\d:\d{2})\s*$', text, 'duration', default=None)
                duration = list(map(lambda x: datetime.strptime(x, '%H:%M'), re.split(r'\s*-\s*', duration)))
                if None not in duration and len(duration) == 2:
                    timestamp = datetime.now().replace(hour=duration[0].hour, minute=duration[0].minute, second=0, microsecond=0)
                    timestamp = calendar.timegm(timestamp.timetuple())
                    duration = duration[1] - duration[0]
                    try:
                        duration = duration.total_seconds()
                    except AttributeError:
                        # Py 2.6: timedelta has no total_seconds()
                        duration = duration.days * 24 * 3600 + duration.seconds
                    if duration is not None and duration < 0:
                        duration += 24 * 3600
            else:
                description = text

        return {
            # ensure live has a fixed ID
            'id': player['data-uvid'] if not is_live else video_id,
            'title': title,
            'display_id': video_id if not is_live else player['data-uvid'],
            'formats': formats,
            'thumbnail': player.get('data-poster'),
            'duration': duration,
            'timestamp': timestamp,
            'description': description,
            'is_live': is_live,
        }


class TalkTVSeriesIE(InfoExtractor):
    IE_NAME = 'talk.tv:series'
    IE_DESC = 'TalkTV UK series catch-up'
    _VALID_URL = r'https?://(?:watch\.|www\.)?talk\.tv/shows/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
    _TESTS = [{
        'url': 'https://watch.talk.tv/shows/86dadc3e-c4d2-11ec-b4c6-0af62ebc70d1',
        'info_dict': {
            'id': '86dadc3e-c4d2-11ec-b4c6-0af62ebc70d1',
        },
        'playlist_mincount': 4,
    },
    ]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        webpage = self._download_webpage(url, playlist_id)

        episodes = re.finditer(
            r'''(?i)<a\b[^>]+?\bhref\s*=\s*(?P<q>"|')(?P<href>/watch/(?:(?!(?P=q)).)+)(?P=q)''',
            webpage)

        return self.playlist_from_matches(
            episodes, playlist_id, getter=lambda x: urljoin(url, x.group('href')), ie='TalkTV')
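
For reference, the new extractor can be exercised end to end through the normal YoutubeDL entry point. A minimal sketch, using the replay URL from the extractor's own _TESTS and skipping the actual download:

# Sketch: run the TalkTV extractor via the public YoutubeDL API from a
# checkout of this branch; the options shown are standard YoutubeDL parameters.
from youtube_dl import YoutubeDL

with YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info('https://watch.talk.tv/watch/replay/12216792', download=False)
    print(info.get('id'), info.get('title'), info.get('duration'))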

youtube_dl/extractor/youtube.py

@@ -1659,17 +1659,46 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def _extract_n_function_name(self, jscode):
         func_name, idx = self._search_regex(
             # new: (b=String.fromCharCode(110),c=a.get(b))&&c=nfunc[idx](c)
-            # or:  (b="nn"[+a.D],c=a.get(b))&&(c=nfunc[idx](c)
-            # old: .get("n"))&&(b=nfunc[idx](b)
-            # older: .get("n"))&&(b=nfunc(b)
+            # or:  (b="nn"[+a.D],c=a.get(b))&&(c=nfunc[idx](c)
+            # or:  (PL(a),b=a.j.n||null)&&(b=nfunc[idx](b)
+            # or:  (b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
+            # old: (b=a.get("n"))&&(b=nfunc[idx](b)
+            # older: (b=a.get("n"))&&(b=nfunc(b)
             r'''(?x)
-                (?:\(\s*(?P<b>[a-z])\s*=\s*(?:
-                    String\s*\.\s*fromCharCode\s*\(\s*110\s*\)|
-                    "n+"\[\s*\+?s*[\w$.]+\s*]
-                )\s*,(?P<c>[a-z])\s*=\s*[a-z]\s*)?
-                \.\s*get\s*\(\s*(?(b)(?P=b)|"n{1,2}")(?:\s*\)){2}\s*&&\s*\(\s*(?(c)(?P=c)|b)\s*=\s*
-                (?P<nfunc>[a-zA-Z_$][\w$]*)(?:\s*\[(?P<idx>\d+)\])?\s*\(\s*[\w$]+\s*\)
-            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
+                \((?:[\w$()\s]+,)*?\s*      # (
+                (?P<b>[a-z])\s*=\s*         # b=
+                (?:
+                    (?:                     # expect ,c=a.get(b) (etc)
+                        String\s*\.\s*fromCharCode\s*\(\s*110\s*\)|
+                        "n+"\[\s*\+?s*[\w$.]+\s*]
+                    )\s*(?:,[\w$()\s]+(?=,))*|
+                    (?P<old>[\w$]+)         # a (old[er])
+                )\s*
+                (?(old)
+                    # b.get("n")
+                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
+                    (?:\.\s*n|\[\s*"n"\s*]|\.\s*get\s*\(\s*"n"\s*\))
+                |   # ,c=a.get(b)
+                    ,\s*(?P<c>[a-z])\s*=\s*[a-z]\s*
+                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
+                    (?:\[\s*(?P=b)\s*]|\.\s*get\s*\(\s*(?P=b)\s*\))
+                )
+                # interstitial junk
+                \s*(?:\|\|\s*null\s*)?(?:\)\s*)?&&\s*(?:\(\s*)?
+                (?(c)(?P=c)|(?P=b))\s*=\s*  # [c|b]=
+                # nfunc|nfunc[idx]
+                (?P<nfunc>[a-zA-Z_$][\w$]*)(?:\s*\[(?P<idx>\d+)\])?\s*\(\s*[\w$]+\s*\)
+            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'),
+            default=(None, None))
+        # thx bashonly: yt-dlp/yt-dlp/pull/10611
+        if not func_name:
+            self.report_warning('Falling back to generic n function search')
+            return self._search_regex(
+                r'''(?xs)
+                    (?:(?<=[^\w$])|^)       # instead of \b, which ignores $
+                    (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
+                    \s*\{(?:(?!};).)+?["']enhanced_except_
+                ''', jscode, 'Initial JS player n function name', group='name')
         if not idx:
             return func_name

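The generic fallback does not care how the function is called; it keys on the "enhanced_except_" marker that current players embed in the n-transform routine's error-handling string. A rough illustration of what it picks up, where the JS line is synthetic rather than taken from a real player:

# Sketch: apply just the fallback pattern to a made-up player fragment.
import re

jscode = 'var Wka=function(a){var b=a.split("");b.push("enhanced_except_Mq3pG7a");return b.join("")};'

m = re.search(
    r'''(?xs)
        (?:(?<=[^\w$])|^)       # instead of \b, which ignores $
        (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
        \s*\{(?:(?!};).)+?["']enhanced_except_
    ''', jscode)
print(m.group('name'))  # -> Wka
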
youtube_dl/jsinterp.py

@@ -925,9 +925,16 @@ class JSInterpreter(object):
                 obj.reverse()
                 return obj
             elif member == 'slice':
-                assertion(isinstance(obj, list), 'must be applied on a list')
-                assertion(len(argvals) == 1, 'takes exactly one argument')
-                return obj[argvals[0]:]
+                assertion(isinstance(obj, (list, compat_str)), 'must be applied on a list or string')
+                # From [1]:
+                #  .slice()     - like [:]
+                #  .slice(n)    - like [n:] (not [slice(n)])
+                #  .slice(m, n) - like [m:n] or [slice(m, n)]
+                # [1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/slice
+                assertion(len(argvals) <= 2, 'takes between 0 and 2 arguments')
+                if len(argvals) < 2:
+                    argvals += (None,)
+                return obj[slice(*argvals)]
             elif member == 'splice':
                 assertion(isinstance(obj, list), 'must be applied on a list')
                 assertion(argvals, 'takes one or more arguments')
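
The trick here is that padding the argument tuple with a single None makes Python's built-in slice() reproduce the JavaScript semantics for zero, one or two arguments. A standalone sketch of the same mapping, not the interpreter code itself:

# Sketch: mirror the patch's argument handling outside the interpreter.
def js_slice(obj, *argvals):
    # slice() then sees (stop=None) for no arguments, (start, stop) otherwise
    if len(argvals) < 2:
        argvals += (None,)
    return obj[slice(*argvals)]

print(js_slice([0, 1, 2, 3, 4, 5, 6, 7, 8], 1, -1))  # [1, 2, 3, 4, 5, 6, 7]
print(js_slice('012345678', -2))                     # '78'
print(js_slice('012345678'))                         # '012345678'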