Compare commits


13 Commits

Author SHA1 Message Date
dirkf
656c58c82a
Merge 5c1e5a1160a3e65fd2673b6437f3882d348991cb into 2b4fbfce25902d557b86b003cf48f738129efce4 2025-03-26 06:50:10 +00:00
dirkf
2b4fbfce25 [YouTube] Support player 4fcd6e4a
thx seproDev, bashonly: yt-dlp/yt-dlp#12748
2025-03-26 02:27:25 +00:00
dirkf
1bc45b8b6c [JSInterp] Use , for join() with null/undefined argument
Eg: [1,2,3].join(null) -> '1,2,3'
2025-03-25 22:35:06 +00:00
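A minimal sketch of the behaviour this commit describes, run through youtube_dl's JSInterpreter; the JS function is an illustrative stand-in (not player code) and assumes a checkout that includes this change:

from youtube_dl.jsinterp import JSInterpreter

# With the fix, a null/undefined separator falls back to ',' as real JS does.
jsi = JSInterpreter('function f(){return [1,2,3].join(null);}')
print(jsi.call_function('f'))  # expected: '1,2,3'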
dirkf
b982d77d0b [YouTube] Align signature tests with yt-dlp
thx bashonly, yt-dlp/yt-dlp#12725
2025-03-25 22:35:06 +00:00
dirkf
c55dbf4838 [YouTube] Update signature extraction for players 643afba4, 363db69b 2025-03-25 22:35:06 +00:00
dirkf
087d865230 [YouTube] Support new player URL patterns 2025-03-25 22:35:06 +00:00
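For illustration, the updated patterns are meant to recognise player IDs in URL shapes such as the tv-player and *_tce variants. This sketch feeds two URLs taken from the test changes in this comparison to the extractor's own _extract_player_info() (assumes a checkout including these commits; no network access is needed):

from youtube_dl import YoutubeDL
from youtube_dl.extractor.youtube import YoutubeIE

ie = YoutubeIE(YoutubeDL({'cachedir': False}))
for player_url in (
        'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
        'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
):
    print(ie._extract_player_info(player_url))  # expected: 643afba4, then e7567ecf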
dirkf
a4fc1151f1 [JSInterp] Improve indexing
* catch invalid list index with `ValueError` (eg [1, 2]['ab'] -> undefined)
* allow assignment outside existing list (eg var l = [1,2]; l[9] = 0;)
2025-03-25 22:35:05 +00:00
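Both behaviours in this commit can be sketched with youtube_dl's JSInterpreter (illustrative snippets only, assuming a checkout that includes this change):

from youtube_dl.jsinterp import JSInterpreter

# An invalid (non-numeric) list index now yields undefined instead of raising.
jsi = JSInterpreter('function f(){return [1, 2]["ab"];}')
print(jsi.call_function('f'))  # expected: JS undefined

# Assignment past the end of a list pads it with undefined, as a JS Array would.
jsi = JSInterpreter('function g(){var l = [1,2]; l[9] = 0; return l.length;}')
print(jsi.call_function('g'))  # expected: 10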
dirkf
a464c159e6 [YouTube] Make _extract_player_info() use _search_regex() 2025-03-25 22:35:05 +00:00
dirkf
7dca08eff0 [YouTube] Also get original of translated automatic captions 2025-03-25 22:35:05 +00:00
dirkf
2239ee7965 [YouTube] Get subtitles/automatic captions from both web and API responses 2025-03-25 22:35:05 +00:00
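The key point of this commit is that the caption renderer is now collected from both the web page's player response and the API player response. A toy illustration of the traverse_obj() pattern the diff uses (the two dicts below are made-up stand-ins for the real responses):

from youtube_dl.utils import T, traverse_obj

web_response = {'captions': {'playerCaptionsTracklistRenderer': {'captionTracks': ['web']}}}
api_response = {'captions': {'playerCaptionsTracklistRenderer': {'captionTracks': ['api']}}}

pctr = traverse_obj(
    (web_response, api_response),
    (Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict)))
print(pctr)  # expected: a list holding both renderer dicts, web response first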
dirkf
5c1e5a1160 [TubiTv] Add TubiTvShow series/season extractor based on yt-dlp 2024-10-08 16:18:39 +01:00
dirkf
76067fbdb1 [TubiTv] Update TubiTv extractor
* back-port login and extraction from yt-dlp
* further extract uploader, age_limit, cast, categories, series
2024-10-08 16:14:01 +01:00
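A short usage sketch of the reworked extractor via youtube-dl's Python API, printing some of the newly extracted fields; the URL is one of the test URLs from this diff, and content availability (and hence the exact output) is not guaranteed:

import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info('https://tubitv.com/tv-shows/200141623/s01-e01-episode-1', download=False)
    print(info.get('series'), info.get('season_number'), info.get('episode_number'))
    print(info.get('uploader'), info.get('age_limit'), info.get('cast'))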
dirkf
229f59e7c3 [core] Let Git ignore __pycache__, .pytest_cache 2024-10-07 15:52:33 +01:00
6 changed files with 311 additions and 87 deletions

.gitignore

@@ -1,3 +1,4 @@
__pycache__/
*.pyc
*.pyo
*.class
@@ -5,6 +6,7 @@
*.DS_Store
wine-py2exe/
py2exe.log
.pytest_cache/
*.kate-swp
build/
dist/

test/test_youtube_signature.py

@@ -84,6 +84,21 @@ _SIG_TESTS = [
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xxAj7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJ2OySqa0q',
),
(
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'AAOAOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7vgpDL0QwbdV06sCIEzpWqMGkFR20CFOS21Tp-7vj_EMu-m37KtXJoOy1',
),
(
'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
),
]
_NSIG_TESTS = [
@@ -153,7 +168,7 @@ _NSIG_TESTS = [
),
(
'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js',
'-Txvy6bT5R6LqgnQNx', 'dcklJCnRUHbgSg',
'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA',
),
(
'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js',
@@ -173,7 +188,7 @@ _NSIG_TESTS = [
),
(
'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
'aCi3iElgd2kq0bxVbQ', 'QX1y8jGb2IbZ0w',
),
(
'https://www.youtube.com/s/player/8c7583ff/player_ias.vflset/en_US/base.js',
@@ -231,10 +246,6 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
),
(
'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
@@ -259,6 +270,22 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
),
(
'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
'eWYu5d5YeY_4LyEDc', 'XJQqf-N7Xra3gg',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js',
'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
),
]
@@ -271,6 +298,8 @@ class TestPlayerInfo(unittest.TestCase):
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'e7567ecf'),
('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '643afba4'),
# obsolete
('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
@@ -280,8 +309,9 @@ class TestPlayerInfo(unittest.TestCase):
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
)
ie = YoutubeIE(FakeYDL({'cachedir': False}))
for player_url, expected_player_id in PLAYER_URLS:
player_id = YoutubeIE._extract_player_info(player_url)
player_id = ie._extract_player_info(player_url)
self.assertEqual(player_id, expected_player_id)
@@ -301,8 +331,8 @@ class TestSignature(unittest.TestCase):
def t_factory(name, sig_func, url_pattern):
def make_tfunc(url, sig_input, expected_sig):
m = url_pattern.match(url)
assert m, '%r should follow URL format' % url
test_id = m.group('id')
assert m, '{0!r} should follow URL format'.format(url)
test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id'))
def test_func(self):
basename = 'player-{0}-{1}.js'.format(name, test_id)
@@ -335,12 +365,16 @@ def n_sig(jscode, sig_input):
make_sig_test = t_factory(
'signature', signature, re.compile(r'.*(?:-|/player/)(?P<id>[a-zA-Z0-9_-]+)(?:/.+\.js|(?:/watch_as3|/html5player)?\.[a-z]+)$'))
'signature', signature,
re.compile(r'''(?x)
.+/(?P<h5>html5)?player(?(h5)(?:-en_US)?-|/)(?P<id>[a-zA-Z0-9/._-]+)
(?(h5)/(?:watch_as3|html5player))?\.js$
'''))
for test_spec in _SIG_TESTS:
make_sig_test(*test_spec)
make_nsig_test = t_factory(
'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$'))
for test_spec in _NSIG_TESTS:
make_nsig_test(*test_spec)

youtube_dl/extractor/extractors.py

@@ -1328,7 +1328,10 @@ from .trovo import (
from .trunews import TruNewsIE
from .trutv import TruTVIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tubitv import (
TubiTvIE,
TubiTvShowIE,
)
from .tumblr import TumblrIE
from .tunein import (
TuneInClipIE,

youtube_dl/extractor/tubitv.py

@@ -1,23 +1,62 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
get_element_by_id,
int_or_none,
join_nonempty,
js_to_json,
merge_dicts,
parse_age_limit,
sanitized_Request,
strip_or_none,
T,
traverse_obj,
url_or_none,
urlencode_postdata,
)
class TubiTvIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?:video|movies|tv-shows)/(?P<id>[0-9]+)'
IE_NAME = 'tubitv'
_VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?P<type>video|movies|tv-shows)/(?P<id>\d+)'
_LOGIN_URL = 'http://tubitv.com/login'
_NETRC_MACHINE = 'tubitv'
_GEO_COUNTRIES = ['US']
_TESTS = [{
'url': 'https://tubitv.com/movies/100004539/the-39-steps',
'info_dict': {
'id': '100004539',
'ext': 'mp4',
'title': 'The 39 Steps',
'description': 'md5:bb2f2dd337f0dc58c06cb509943f54c8',
'uploader_id': 'abc2558d54505d4f0f32be94f2e7108c',
'release_year': 1935,
'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
'duration': 5187,
},
'params': {'skip_download': 'm3u8'},
'skip': 'This content is currently unavailable',
}, {
'url': 'https://tubitv.com/tv-shows/554628/s01-e01-rise-of-the-snakes',
'info_dict': {
'id': '554628',
'ext': 'mp4',
'title': 'S01:E01 - Rise of the Snakes',
'description': 'md5:ba136f586de53af0372811e783a3f57d',
'episode': 'Rise of the Snakes',
'episode_number': 1,
'season': 'Season 1',
'season_number': 1,
'uploader_id': '2a9273e728c510d22aa5c57d0646810b',
'release_year': 2011,
'thumbnail': r're:^https?://.+\.(jpe?g|png)$',
'duration': 1376,
},
'params': {'skip_download': 'm3u8'},
'skip': 'This content is currently unavailable',
}, {
'url': 'http://tubitv.com/video/283829/the_comedian_at_the_friday',
'md5': '43ac06be9326f41912dc64ccf7a80320',
'info_dict': {
@@ -27,6 +66,7 @@ class TubiTvIE(InfoExtractor):
'description': 'A stand up comedian is forced to look at the decisions in his life while on a one week trip to the west coast.',
'uploader_id': 'bc168bee0d18dd1cb3b86c68706ab434',
},
'skip': 'Content Unavailable',
}, {
'url': 'http://tubitv.com/tv-shows/321886/s01_e01_on_nom_stories',
'only_matching': True,
@@ -34,24 +74,42 @@ class TubiTvIE(InfoExtractor):
'url': 'http://tubitv.com/movies/383676/tracker',
'only_matching': True,
}, {
'url': 'https://tubitv.com/movies/560057/penitentiary?start=true',
'url': 'https://tubitv.com/tv-shows/200141623/s01-e01-episode-1',
'info_dict': {
'id': '560057',
'id': '200141623',
'ext': 'mp4',
'title': 'Penitentiary',
'description': 'md5:8d2fc793a93cc1575ff426fdcb8dd3f9',
'uploader_id': 'd8fed30d4f24fcb22ec294421b9defc2',
'release_year': 1979,
'title': 'Shameless S01:E01 - Episode 1',
'description': 'Having her handbag stolen proves to be a blessing in disguise for Fiona when handsome stranger Steve comes to her rescue.',
'timestamp': 1725148800,
'upload_date': '20240901',
'uploader': 'all3-media',
'uploader_id': '9b8e3a8d789b1c843f4b680c025a1853',
'release_year': 2004,
'episode': 'Episode 1',
'episode_number': 1,
'season': 'Season 1',
'season_number': 1,
'series': 'Shameless',
'cast': list,
'age_limit': 17,
},
'params': {
'skip_download': True,
'format': 'best/bestvideo',
'skip_download': 'm3u8'
},
}]
# DRM formats are included only to raise appropriate error
_UNPLAYABLE_FORMATS = ('hlsv6_widevine', 'hlsv6_widevine_nonclearlead', 'hlsv6_playready_psshv0',
'hlsv6_fairplay', 'dash_widevine', 'dash_widevine_nonclearlead')
def _login(self):
username, password = self._get_login_info()
if username is None:
return
self._perform_login(username, password)
def _perform_login(self, username, password):
self.report_login()
form_data = {
'username': username,
@@ -62,7 +120,7 @@ class TubiTvIE(InfoExtractor):
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_page = self._download_webpage(
request, None, False, 'Wrong login info')
if not re.search(r'id="tubi-logout"', login_page):
if get_element_by_id('tubi-logout', login_page) is None:
raise ExtractorError(
'Login failed (invalid username/password)', expected=True)
@@ -70,41 +128,146 @@ class TubiTvIE(InfoExtractor):
self._login()
def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'http://tubitv.com/oz/videos/%s/content' % video_id, video_id)
title = video_data['title']
video_id, video_type = self._match_valid_url(url).group('id', 'type')
webpage = self._download_webpage('https://tubitv.com/{0}/{1}/'.format(video_type, video_id), video_id)
data = self._search_json(
r'window\.__data\s*=', webpage, 'data', video_id,
transform_source=js_to_json)['video']['byId']
video_data = data[video_id]
info = self._search_json_ld(webpage, video_id, expected_type='VideoObject', default={})
title = strip_or_none(info.get('title'))
info['title'] = title or strip_or_none(video_data['title'])
formats = self._extract_m3u8_formats(
self._proto_relative_url(video_data['url']),
video_id, 'mp4', 'm3u8_native')
formats = []
drm_formats = 0
for resource in traverse_obj(video_data, ('video_resources', lambda _, v: v['type'] and v['manifest']['url'])):
manifest_url = url_or_none(resource['manifest']['url'])
if not manifest_url:
continue
resource_type = resource['type']
if resource_type == 'dash':
formats.extend(self._extract_mpd_formats(manifest_url, video_id, mpd_id=resource_type, fatal=False))
elif resource_type in ('hlsv3', 'hlsv6'):
formats.extend(self._extract_m3u8_formats(manifest_url, video_id, 'mp4', m3u8_id=resource_type, fatal=False))
elif resource_type in self._UNPLAYABLE_FORMATS:
drm_formats += 1
else:
self.report_warning('Skipping unknown resource type "{0}"'.format(resource_type))
if not formats and drm_formats > 0:
self.report_drm(video_id)
elif not formats and not video_data.get('policy_match'): # policy_match is False if content was removed
raise ExtractorError('This content is currently unavailable', expected=True)
self._sort_formats(formats)
thumbnails = []
for thumbnail_url in video_data.get('thumbnails', []):
if not thumbnail_url:
continue
thumbnails.append({
'url': self._proto_relative_url(thumbnail_url),
})
subtitles = {}
for sub in video_data.get('subtitles', []):
sub_url = sub.get('url')
for sub in traverse_obj(video_data, ('subtitles', lambda _, v: v['url'])):
sub_url = self._proto_relative_url(sub['url'])
if not sub_url:
continue
subtitles.setdefault(sub.get('lang', 'English'), []).append({
'url': self._proto_relative_url(sub_url),
'url': sub_url,
})
return {
season_number, episode_number, episode_title = self._search_regex(
r'\bS(\d+):E(\d+) - (.+)', info['title'], 'episode info', fatal=False, group=(1, 2, 3), default=(None, None, None))
return merge_dicts({
'id': video_id,
'title': title,
'formats': formats,
'subtitles': subtitles,
'thumbnails': thumbnails,
'description': video_data.get('description'),
'duration': int_or_none(video_data.get('duration')),
'uploader_id': video_data.get('publisher_id'),
'release_year': int_or_none(video_data.get('year')),
}
'season_number': int_or_none(season_number),
'episode_number': int_or_none(episode_number),
'episode': episode_title
}, traverse_obj(video_data, {
'description': ('description', T(strip_or_none)),
'duration': ('duration', T(int_or_none)),
'uploader': ('import_id', T(strip_or_none)),
'uploader_id': ('publisher_id', T(strip_or_none)),
'release_year': ('year', T(int_or_none)),
'thumbnails': ('thumbnails', Ellipsis, T(self._proto_relative_url), {'url': T(url_or_none)}),
'cast': ('actors', Ellipsis, T(strip_or_none)),
'categories': ('tags', Ellipsis, T(strip_or_none)),
'age_limit': ('ratings', 0, 'value', T(parse_age_limit)),
}), traverse_obj(data, (lambda _, v: v['type'] == 's', {
'series': ('title', T(strip_or_none)),
# 'series_id': ('id', T(compat_str)),
}), get_all=False), info)
class TubiTvShowIE(InfoExtractor):
IE_NAME = 'tubitv:series'
_VALID_URL = r'https?://(?:www\.)?tubitv\.com/series/\d+/(?P<show_name>[^/?#]+)(?:/season-(?P<season>\d+))?'
_TESTS = [{
'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross?start=true',
'playlist_mincount': 390,
'info_dict': {
'id': 'the-joy-of-painting-with-bob-ross',
},
}, {
'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross/season-1',
'playlist_count': 13,
'info_dict': {
'id': 'the-joy-of-painting-with-bob-ross-season-1',
},
}, {
'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross/season-3',
'playlist_count': 13,
'info_dict': {
'id': 'the-joy-of-painting-with-bob-ross-season-3',
},
}]
def _real_extract(self, url):
playlist_id, selected_season = self._match_valid_url(url).group(
'show_name', 'season')
def entries(s_url, playlist_id, selected_season_num):
def get_season_data(s_num, fatal=False):
if s_num is None:
url, s_id = s_url, playlist_id
else:
url = '%s/season-%d' % (s_url, s_num)
s_id = '%s-season-%d' % (playlist_id, s_num)
webpage = self._download_webpage(url, s_id, fatal=fatal)
data = self._search_json(
r'window\s*\.\s*__data\s*=', webpage or '', 'data', s_id,
transform_source=js_to_json, default={})
return data['video'] if fatal else data.get('video', {})
data = get_season_data(None, fatal=True)
# The {series_id}.seasons JSON may lack some episodes that are available
# Iterate over the season numbers instead [1]
# 1. https://github.com/yt-dlp/yt-dlp/issues/11170#issuecomment-2399918777
seasons = (
traverse_obj(data, (
'byId', lambda _, v: v['type'] == 's', 'seasons', Ellipsis,
'number', T(int_or_none)))
if selected_season is None
else [selected_season])
unavail_cnt = 0
select_episodes = lambda _, v: v['type'] == 'v'
for season_number in seasons:
if not data:
data = get_season_data(season_number)
unavail_cnt += len(traverse_obj(data, ('byId', select_episodes, 'policy_match', T(lambda m: (not m) or None))))
for episode_id, episode in traverse_obj(data, ('byId', select_episodes, T(lambda e: (e['id'], e)))):
yield merge_dicts(self.url_result(
'https://tubitv.com/tv-shows/{0}/'.format(episode_id), TubiTvIE.ie_key(), episode_id), {
'season_number': season_number,
'episode_number': int_or_none(episode.get('num')),
})
data = None
if unavail_cnt > 0:
self.report_warning('%d items were marked as unavailable: check that the desired content is available or provide login parameters if needed' % unavail_cnt)
return self.playlist_result(
entries(url, playlist_id, int_or_none(selected_season)),
join_nonempty(playlist_id, selected_season, delim='-season-'))

youtube_dl/extractor/youtube.py

@@ -692,9 +692,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})//(?:tv-)?player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$',
)
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
@@ -1626,15 +1626,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
@classmethod
def _extract_player_info(cls, player_url):
for player_re in cls._PLAYER_INFO_RE:
id_m = re.search(player_re, player_url)
if id_m:
break
else:
raise ExtractorError('Cannot identify player %r' % player_url)
return id_m.group('id')
def _extract_player_info(self, player_url):
try:
return self._search_regex(
self._PLAYER_INFO_RE, player_url, 'player info', group='id')
except ExtractorError as e:
raise ExtractorError(
'Cannot identify player %r' % (player_url,), cause=e)
def _load_player(self, video_id, player_url, fatal=True, player_id=None):
if not player_id:
@@ -1711,6 +1709,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _extract_sig_fn(self, jsi, funcname):
var_ay = self._search_regex(
r'''(?x)
(?:\*/|\{|\n|^)\s*(?:'[^']+'\s*;\s*)
(var\s*[\w$]+\s*=\s*(?:
('|")(?:\\\2|(?!\2).)+\2\s*\.\s*split\(\s*('|")\W+\3\s*\)|
\[\s*(?:('|")(?:\\\4|(?!\4).)*\4\s*(?:(?=\])|,\s*))+\]
))(?=\s*[,;])
''', jsi.code, 'useful values', default='')
sig_fn = jsi.extract_function_code(funcname)
if var_ay:
sig_fn = (sig_fn[0], ';\n'.join((var_ay, sig_fn[1])))
return sig_fn
def _parse_sig_js(self, jscode):
# Examples where `sig` is funcname:
# sig=function(a){a=a.split(""); ... ;return a.join("")};
@@ -1736,8 +1751,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
initial_function = jsi.extract_function(funcname)
return lambda s: initial_function([s])
initial_function = self._extract_sig_fn(jsi, funcname)
func = jsi.extract_function_from_code(*initial_function)
return lambda s: func([s])
def _cached(self, func, *cache_id):
def inner(*args, **kwargs):
@@ -1856,15 +1875,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
var_ay = self._search_regex(
r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
jsi.code, 'useful values', default='')
func_name = self._extract_n_function_name(jsi.code)
func_code = jsi.extract_function_code(func_name)
if var_ay:
func_code = (func_code[0], ';\n'.join((var_ay, func_code[1])))
func_code = self._extract_sig_fn(jsi, func_name)
if player_id:
self.cache.store('youtube-nsig', player_id, func_code)
@@ -2136,7 +2149,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
video_details = merge_dicts(*traverse_obj(
(player_response, api_player_response),
(Ellipsis, 'videoDetails', T(dict))))
player_response.update(api_player_response or {})
player_response.update(filter_dict(
api_player_response or {}, cndn=lambda k, _: k != 'captions'))
player_response['videoDetails'] = video_details
def is_agegated(playability):
@@ -2566,8 +2580,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
pctr = traverse_obj(
player_response,
('captions', 'playerCaptionsTracklistRenderer', T(dict)))
(player_response, api_player_response),
(Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict)))
if pctr:
def process_language(container, base_url, lang_code, query):
lang_subs = []
@@ -2584,20 +2598,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def process_subtitles():
subtitles = {}
for caption_track in traverse_obj(pctr, (
'captionTracks', lambda _, v: v.get('baseUrl'))):
Ellipsis, 'captionTracks', lambda _, v: (
v.get('baseUrl') and v.get('languageCode')))):
base_url = self._yt_urljoin(caption_track['baseUrl'])
if not base_url:
continue
lang_code = caption_track['languageCode']
if caption_track.get('kind') != 'asr':
lang_code = caption_track.get('languageCode')
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, {})
continue
automatic_captions = {}
process_language(
automatic_captions, base_url, lang_code, {})
for translation_language in traverse_obj(pctr, (
'translationLanguages', lambda _, v: v.get('languageCode'))):
Ellipsis, 'translationLanguages', lambda _, v: v.get('languageCode'))):
translation_language_code = translation_language['languageCode']
process_language(
automatic_captions, base_url, translation_language_code,

youtube_dl/jsinterp.py

@@ -678,7 +678,7 @@ class JSInterpreter(object):
return len(obj)
try:
return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)]
except (TypeError, KeyError, IndexError) as e:
except (TypeError, KeyError, IndexError, ValueError) as e:
# allow_undefined is None gives correct behaviour
if allow_undefined or (
allow_undefined is None and not isinstance(e, TypeError)):
@@ -1038,6 +1038,10 @@ class JSInterpreter(object):
left_val = self._index(left_val, idx)
if isinstance(idx, float):
idx = int(idx)
if isinstance(left_val, list) and len(left_val) <= int_or_none(idx, default=-1):
# JS Array is a sparsely assignable list
# TODO: handle extreme sparsity without memory bloat, eg using auxiliary dict
left_val.extend((idx - len(left_val) + 1) * [JS_Undefined])
left_val[idx] = self._operator(
m.group('op'), self._index(left_val, idx) if m.group('op') else None,
m.group('expr'), expr, local_vars, allow_recursion)
@@ -1204,9 +1208,10 @@ class JSInterpreter(object):
elif member == 'join':
assertion(isinstance(obj, list), 'must be applied on a list')
assertion(len(argvals) <= 1, 'takes at most one argument')
return (',' if len(argvals) == 0 else argvals[0]).join(
('' if x in (None, JS_Undefined) else _js_toString(x))
for x in obj)
return (',' if len(argvals) == 0 or argvals[0] in (None, JS_Undefined)
else argvals[0]).join(
('' if x in (None, JS_Undefined) else _js_toString(x))
for x in obj)
elif member == 'reverse':
assertion(not argvals, 'does not take any arguments')
obj.reverse()
@@ -1364,19 +1369,21 @@ class JSInterpreter(object):
code, _ = self._separate_at_paren(func_m.group('code')) # refine the match
return self.build_arglist(func_m.group('args')), code
def extract_function(self, funcname):
def extract_function(self, funcname, *global_stack):
return function_with_repr(
self.extract_function_from_code(*self.extract_function_code(funcname)),
self.extract_function_from_code(*itertools.chain(
self.extract_function_code(funcname), global_stack)),
'F<%s>' % (funcname,))
def extract_function_from_code(self, argnames, code, *global_stack):
local_vars = {}
start = None
while True:
mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code)
mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code[start:])
if mobj is None:
break
start, body_start = mobj.span()
start, body_start = ((start or 0) + x for x in mobj.span())
body, remaining = self._separate_at_paren(code[body_start - 1:])
name = self._named_object(local_vars, self.extract_function_from_code(
[x.strip() for x in mobj.group('args').split(',')],