Compare commits

4 Commits

Author / SHA1 / Date / Message

dirkf  1dc036186d  2025-03-22 07:19:43 +08:00
    Merge fad516e22e58763ce0feab1034cc0c7aa2da184c into da7223d4aa42ff9fc680b0951d043dd03cec2d30

dirkf  da7223d4aa  2025-03-21 16:26:25 +00:00
    [YouTube] Improve support for tce-style player JS
    * improve extraction of global "useful data" Array from player JS
    * also handle tv-player and add tests: thx seproDev (yt-dlp/yt-dlp#12684)

    Co-Authored-By: sepro <sepro@sepr0.com>

dirkf  37c2440d6a  2025-03-21 16:13:24 +00:00
    [YouTube] Update player client data
    thx seproDev (yt-dlp/yt-dlp#12603)

    Co-authored-by: sepro <sepro@sepr0.com>

Lesmiscore  fad516e22e  2022-04-04 11:04:18 +01:00
    [Tver] Fix extractor
    yt-dlp PR https://github.com/yt-dlp/yt-dlp/pull/3268

    Back-port to yt-dl
    Fix tests

    Co-authored-by: dirkf

3 changed files with 238 additions and 43 deletions

test/test_youtube_signature.py

@@ -232,8 +232,32 @@ _NSIG_TESTS = [
         'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
     ),
     (
-        'https://www.youtube.com/s/player/91201489/player_ias_tce.vflset/en_US/base.js',
-        'W9HJZKktxuYoDTqW', 'U48vOZHaeYS6vO',
+        'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
+        'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
+    ),
+    (
+        'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
+        'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
+    ),
+    (
+        'https://www.youtube.com/s/player/d50f54ef/player_ias_tce.vflset/en_US/base.js',
+        'Ha7507LzRmH3Utygtj', 'XFTb2HoeOE5MHg',
+    ),
+    (
+        'https://www.youtube.com/s/player/074a8365/player_ias_tce.vflset/en_US/base.js',
+        'Ha7507LzRmH3Utygtj', 'ufTsrE0IVYrkl8v',
+    ),
+    (
+        'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
+        'N5uAlLqm0eg1GyHO', 'dCBQOejdq5s-ww',
+    ),
+    (
+        'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js',
+        '-qIP447rVlTTwaZjY', 'KNcGOksBAvwqQg',
+    ),
+    (
+        'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
+        'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
     ),
 ]
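
Each _NSIG_TESTS entry is a triple of player JS URL, n-parameter challenge and expected transformed value; the new rows cover tce-style player_ias_tce builds and the tv-player-ias variants. A rough, hand-run sketch of what one entry asserts is shown below. It is not the project's test harness (that lives in test/test_youtube_signature.py), and the return shape assumed for _extract_n_function_code_jsi as well as the JSInterpreter.extract_function_from_code call are assumptions based on the surrounding code, not a documented API.

# Rough sketch only; youtube-dl's real checks live in test/test_youtube_signature.py.
import youtube_dl
from youtube_dl.extractor.youtube import YoutubeIE
from youtube_dl.jsinterp import JSInterpreter

PLAYER_URL = 'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js'
N_IN, N_OUT = 'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA'

ie = YoutubeIE(youtube_dl.YoutubeDL({'quiet': True}))
jscode = ie._download_webpage(PLAYER_URL, None, note='Downloading player JS')
jsi = JSInterpreter(jscode)
# Assumed: _extract_n_function_code_jsi returns (argnames, code) for the n-function.
func_code = ie._extract_n_function_code_jsi(None, jsi)
n_func = jsi.extract_function_from_code(*func_code)  # assumed JSInterpreter API
assert n_func([N_IN]) == N_OUT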

youtube_dl/extractor/tver.py

@@ -4,58 +4,229 @@ from __future__ import unicode_literals
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_str
-from ..utils import (
-    int_or_none,
-    remove_start,
-    smuggle_url,
-    try_get,
+from ..compat import (
+    compat_kwargs,
+    compat_str,
 )
+from ..utils import (
+    ExtractorError,
+    int_or_none,
+    smuggle_url,
+    str_or_none,
+)
+
+try:
+    from ..utils import traverse_obj
+except ImportError:
+    from ..compat import compat_collections_abc
+
+    def traverse_obj(obj, *path_list, **kw):
+        ''' Traverse nested list/dict/tuple'''
+        # parameter defaults
+        default = kw.get('default')
+        expected_type = kw.get('expected_type')
+        get_all = kw.get('get_all', True)
+        casesense = kw.get('casesense', True)
+        is_user_input = kw.get('is_user_input', False)
+        traverse_string = kw.get('traverse_string', False)
+
+        def variadic(x, allowed_types=(compat_str, bytes)):
+            return x if isinstance(x, compat_collections_abc.Iterable) and not isinstance(x, allowed_types) else (x,)
+
+        def listish(l):
+            return isinstance(l, (list, tuple))
+
+        def from_iterable(iterables):
+            # chain.from_iterable(['ABC', 'DEF']) --> A B C D E F
+            for it in iterables:
+                for element in it:
+                    yield element
+
+        class Nonlocal:
+            pass
+
+        nl = Nonlocal()
+
+        if not casesense:
+            _lower = lambda k: (k.lower() if isinstance(k, compat_str) else k)
+            path_list = (map(_lower, variadic(path)) for path in path_list)
+
+        def _traverse_obj(obj, path, _current_depth=0):
+            path = tuple(variadic(path))
+            for i, key in enumerate(path):
+                if obj is None:
+                    return None
+                if listish(key):
+                    obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
+                    key = Ellipsis
+                if key is Ellipsis:
+                    obj = (obj.values() if isinstance(obj, dict)
+                           else obj if listish(obj)
+                           else compat_str(obj) if traverse_string else [])
+                    _current_depth += 1
+                    nl.depth = max(nl.depth, _current_depth)
+                    return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
+                elif callable(key):
+                    if listish(obj):
+                        obj = enumerate(obj)
+                    elif isinstance(obj, dict):
+                        obj = obj.items()
+                    else:
+                        if not traverse_string:
+                            return None
+                        obj = str(obj)
+                    _current_depth += 1
+                    nl.depth = max(nl.depth, _current_depth)
+                    return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
+                elif isinstance(obj, dict) and not (is_user_input and key == ':'):
+                    obj = (obj.get(key) if casesense or (key in obj)
+                           else next((v for k, v in obj.items() if _lower(k) == key), None))
+                else:
+                    if is_user_input:
+                        key = (int_or_none(key) if ':' not in key
+                               else slice(*map(int_or_none, key.split(':'))))
+                        if key == slice(None):
+                            return _traverse_obj(obj, tuple([Ellipsis] + list(path[i + 1:])), _current_depth)
+                    if not isinstance(key, (int, slice)):
+                        return None
+                    if not listish(obj):
+                        if not traverse_string:
+                            return None
+                        obj = compat_str(obj)
+                    try:
+                        obj = obj[key]
+                    except IndexError:
+                        return None
+            return obj
+
+        if isinstance(expected_type, type):
+            type_test = lambda val: val if isinstance(val, expected_type) else None
+        elif expected_type is not None:
+            type_test = expected_type
+        else:
+            type_test = lambda val: val
+
+        for path in path_list:
+            nl.depth = 0
+            val = _traverse_obj(obj, path)
+            if val is not None:
+                if nl.depth:
+                    for _ in range(nl.depth - 1):
+                        val = from_iterable(v for v in val if v is not None)
+                    val = [v for v in map(type_test, val) if v is not None]
+                    if val:
+                        return val if get_all else val[0]
+                else:
+                    val = type_test(val)
+                    if val is not None:
+                        return val
+        return default
 
 
 class TVerIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?P<path>(?:corner|episode|feature)/(?P<id>f?\d+))'
+    _VALID_URL = r'https?://(?:www\.)?tver\.jp/(?:(?P<type>lp|corner|series|episodes?|feature|tokyo2020/video)/)+(?P<id>[a-zA-Z0-9]+)'
     # videos are only available for 7 days
     _TESTS = [{
-        'url': 'https://tver.jp/corner/f0062178',
+        'skip': 'videos are only available for 7 days',
+        'url': 'https://tver.jp/episodes/ephss8yveb',
+        'info_dict': {
+            'id': 'ref:af03de03-21ac-4b98-a53b-9ffd9b102e92',
+            'ext': 'mp4',
+            'title': '#44 料理と値段と店主にびっくり オモてなしすぎウマい店 2時間SP',
+            'description': 'md5:66985373a66fed8ad3cd595a3cfebb13',
+            'upload_date': '20220329',
+            'uploader_id': '4394098882001',
+            'timestamp': 1648527032,
+        },
+        'add_ie': ['BrightcoveNew'],
+    }, {
+        'skip': 'videos are only available for 7 days',
+        'url': 'https://tver.jp/lp/episodes/ep6f16g26p',
+        'info_dict': {
+            'id': '6302378806001',
+            'ext': 'mp4',
+            # "April 11 (Mon) 23:06-Broadcast scheduled": sorry but this is "correct"
+            'title': '4月11日(月)23時06分 ~ 放送予定',
+            'description': 'md5:4029cc5f4b1e8090dfc5b7bd2bc5cd0b',
+            'upload_date': '20220331',
+            'uploader_id': '3971130137001',
+            'timestamp': 1648696456,
+        },
+        'add_ie': ['BrightcoveNew'],
+    }, {
+        'url': 'https://tver.jp/corner/f0103888',
         'only_matching': True,
     }, {
-        'url': 'https://tver.jp/feature/f0062413',
-        'only_matching': True,
-    }, {
-        'url': 'https://tver.jp/episode/79622438',
-        'only_matching': True,
-    }, {
-        # subtitle = ' '
-        'url': 'https://tver.jp/corner/f0068870',
+        'url': 'https://tver.jp/lp/f0033031',
         'only_matching': True,
     }]
-    _TOKEN = None
     BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/default_default/index.html?videoId=%s'
+    _PLATFORM_UID = None
+    _PLATFORM_TOKEN = None
+
+    def _download_json(self, url_or_request, video_id, **kwargs):
+        headers = {
+            'Origin': 'https://s.tver.jp',
+            'Referer': 'https://s.tver.jp/',
+        }
+        headers.update(kwargs.get('headers', {}))
+        kwargs.update(compat_kwargs({'headers': headers, }))
+        return super(TVerIE, self)._download_json(url_or_request, video_id, **kwargs)
 
     def _real_initialize(self):
-        self._TOKEN = self._download_json(
-            'https://tver.jp/api/access_token.php', None)['token']
+        create_response = self._download_json(
+            'https://platform-api.tver.jp/v2/api/platform_users/browser/create', None,
+            note='Creating session', data=b'device_type=pc', headers={
+                'Content-Type': 'application/x-www-form-urlencoded',
+            })
+        self._PLATFORM_UID = traverse_obj(create_response, ('result', 'platform_uid'), expected_type=compat_str)
+        self._PLATFORM_TOKEN = traverse_obj(create_response, ('result', 'platform_token'), expected_type=compat_str)
 
     def _real_extract(self, url):
-        path, video_id = re.match(self._VALID_URL, url).groups()
-        main = self._download_json(
-            'https://api.tver.jp/v4/' + path, video_id,
-            query={'token': self._TOKEN})['main']
-        p_id = main['publisher_id']
-        service = remove_start(main['service'], 'ts_')
-        r_id = main['reference_id']
-        if service not in ('tx', 'russia2018', 'sebare2018live', 'gorin'):
+        video_id = self._match_id(url)
+        video_id, video_type = re.match(self._VALID_URL, url).group('id', 'type')
+        if video_type not in ('episode', 'episodes', 'series'):
+            webpage = self._download_webpage(url, video_id, note='Resolving to new URL')
+            video_id = self._match_id(self._search_regex(
+                (r'''canonical['"]\s+href\s*=\s*(?P<q>'|")(?P<url>https?://tver\.jp/(?!(?P=q)).+?)(?P=q)''',
+                 r'&link=(?P<url>https?://tver\.jp/(?!(?P=q)).+?)[?&]'),
+                webpage, 'url regex', group='url'))
+        video_info = self._download_json(
+            'https://statics.tver.jp/content/episode/{0}.json'.format(video_id), video_id,
+            query={'v': '5'})
+        p_id = traverse_obj(video_info, ('video', 'accountID'), expected_type=compat_str)
+        r_id = traverse_obj(video_info, ('video', ('videoRefID', 'videoID')), get_all=False, expected_type=compat_str)
+        if None in (p_id, r_id):
+            raise ExtractorError(
+                'Failed to extract '
+                + ', '.join(
+                    (x[0] for x in (('accountID', p_id), ('videoRefID', r_id), )
+                     if x[1] is None)),
+                expected=False)
+        if not r_id.isdigit():
             r_id = 'ref:' + r_id
-        bc_url = smuggle_url(
-            self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id),
-            {'geo_countries': ['JP']})
+
+        additional_info = self._download_json(
+            'https://platform-api.tver.jp/service/api/v1/callEpisode/{0}?require_data=mylist,later[epefy106ur],good[epefy106ur],resume[epefy106ur]'.format(video_id),
+            video_id, fatal=False,
+            query={
+                'platform_uid': self._PLATFORM_UID,
+                'platform_token': self._PLATFORM_TOKEN,
+            }, headers={
+                'x-tver-platform-type': 'web'
+            })
 
         return {
             '_type': 'url_transparent',
-            'description': try_get(main, lambda x: x['note'][0]['text'], compat_str),
-            'episode_number': int_or_none(try_get(main, lambda x: x['ext']['episode_number'])),
-            'url': bc_url,
+            'title': str_or_none(video_info.get('title')),
+            'description': str_or_none(video_info.get('description')),
+            'url': smuggle_url(
+                self.BRIGHTCOVE_URL_TEMPLATE % (p_id, r_id), {'geo_countries': ['JP']}),
+            'series': traverse_obj(
+                additional_info, ('result', ('episode', 'series'), 'content', ('seriesTitle', 'title')),
+                get_all=False, expected_type=compat_str),
             'ie_key': 'BrightcoveNew',
         }
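
The inlined traverse_obj shim mirrors yt-dlp's helper so the back-port also works on a yt-dl tree whose utils module does not yet provide it: a path element can be a key, a tuple of alternative keys, Ellipsis, or a callable filter, while get_all, expected_type and default shape the result. A small illustration of the calls the extractor makes, using invented sample data:

# Illustrative only; run against a tree carrying this patch.
from youtube_dl.compat import compat_str
from youtube_dl.extractor.tver import traverse_obj  # imported from ..utils or the shim above

create_response = {'result': {'platform_uid': 'u-123', 'platform_token': 't-456'}}
video_info = {'video': {'videoRefID': 'af03de03-21ac', 'accountID': '4394098882001'}}

print(traverse_obj(create_response, ('result', 'platform_uid')))
# u-123

# A tuple path element tries alternatives; get_all=False returns the first non-None hit.
print(traverse_obj(video_info, ('video', ('videoRefID', 'videoID')), get_all=False, expected_type=compat_str))
# af03de03-21ac

# Missing keys fall back to `default` (None unless given).
print(traverse_obj(video_info, ('video', 'seasonID'), default='n/a'))
# n/a

End to end, the rewritten extractor creates a platform session, reads the episode JSON from statics.tver.jp, and hands the resulting Brightcove reference to BrightcoveNew, so a plain "youtube-dl https://tver.jp/episodes/ephss8yveb" works again while an episode is within its 7-day window.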

youtube_dl/extractor/youtube.py

@@ -91,12 +91,12 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'INNERTUBE_CONTEXT': {
                 'client': {
                     'clientName': 'IOS',
-                    'clientVersion': '19.45.4',
+                    'clientVersion': '20.10.4',
                     'deviceMake': 'Apple',
                     'deviceModel': 'iPhone16,2',
-                    'userAgent': 'com.google.ios.youtube/19.45.4 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
+                    'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)',
                     'osName': 'iPhone',
-                    'osVersion': '18.1.0.22B83',
+                    'osVersion': '18.3.2.22D82',
                 },
             },
             'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
@@ -109,7 +109,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'INNERTUBE_CONTEXT': {
                 'client': {
                     'clientName': 'MWEB',
-                    'clientVersion': '2.20241202.07.00',
+                    'clientVersion': '2.20250311.03.00',
                     # mweb previously did not require PO Token with this UA
                     'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
                 },
@@ -122,7 +122,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'INNERTUBE_CONTEXT': {
                 'client': {
                     'clientName': 'TVHTML5',
-                    'clientVersion': '7.20250120.19.00',
+                    'clientVersion': '7.20250312.16.00',
                     'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
                 },
             },
@@ -133,7 +133,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'INNERTUBE_CONTEXT': {
                 'client': {
                     'clientName': 'WEB',
-                    'clientVersion': '2.20241126.01.00',
+                    'clientVersion': '2.20250312.04.00',
                 },
             },
             'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
@@ -692,7 +692,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         'invidious': '|'.join(_INVIDIOUS_SITES),
     }
     _PLAYER_INFO_RE = (
-        r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
+        r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
         r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
         r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
     )
@@ -1857,7 +1857,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
         var_ay = self._search_regex(
-            r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"[^"]+"\s*\.\s*split\("\{"\))(?=\s*[,;])',
+            r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
             jsi.code, 'useful values', default='')
         func_name = self._extract_n_function_name(jsi.code)
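
Both regex changes in youtube.py can be checked in isolation: the first _PLAYER_INFO_RE alternative now also accepts the tv-player-ias paths added to the tests above, and the "useful values" pattern accepts any non-word separator passed to split() rather than only "{", while tolerating escaped quotes inside the global string. A standalone sketch, not part of the patch, with an invented player snippet:

import re

# First alternative of the updated _PLAYER_INFO_RE (as in the hunk above).
PLAYER_INFO_RE = r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player'
for js_url in (
        'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
        'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js'):
    print(re.search(PLAYER_INFO_RE, js_url).group('id'))
# 643afba4
# 69f581a5

# Updated "useful values" pattern; the sample global Array source is invented.
USEFUL_RE = r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])'
sample = 'var h8x="1969\\";innertube;pop".split(";");'
print(re.search(USEFUL_RE, sample).group(1))
# var h8x="1969\";innertube;pop".split(";")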