Compare commits

...

12 Commits

Author  SHA1        Date / Message

dirkf   188e17d185  2025-03-26 07:33:58 +00:00
        Merge b663bba81d3a1dde53161d0aec1939fa8d6b909c into 2b4fbfce25902d557b86b003cf48f738129efce4

dirkf   2b4fbfce25  2025-03-26 02:27:25 +00:00
        [YouTube] Support player 4fcd6e4a
        thx seproDev, bashonly: yt-dlp/yt-dlp#12748

dirkf   1bc45b8b6c  2025-03-25 22:35:06 +00:00
        [JSInterp] Use `,` for join() with null/undefined argument
        Eg: [1,2,3].join(null) -> '1,2,3'

dirkf   b982d77d0b  2025-03-25 22:35:06 +00:00
        [YouTube] Align signature tests with yt-dlp
        thx bashonly, yt-dlp/yt-dlp#12725

dirkf   c55dbf4838  2025-03-25 22:35:06 +00:00
        [YouTube] Update signature extraction for players 643afba4, 363db69b

dirkf   087d865230  2025-03-25 22:35:06 +00:00
        [YouTube] Support new player URL patterns

dirkf   a4fc1151f1  2025-03-25 22:35:05 +00:00
        [JSInterp] Improve indexing
        * catch invalid list index with `ValueError` (eg [1, 2]['ab'] -> undefined)
        * allow assignment outside existing list (eg var l = [1,2]; l[9] = 0;)

dirkf   a464c159e6  2025-03-25 22:35:05 +00:00
        [YouTube] Make _extract_player_info() use _search_regex()

dirkf   7dca08eff0  2025-03-25 22:35:05 +00:00
        [YouTube] Also get original of translated automatic captions

dirkf   2239ee7965  2025-03-25 22:35:05 +00:00
        [YouTube] Get subtitles/automatic captions from both web and API responses

dirkf   b663bba81d  2022-01-01 23:39:53 +00:00
        Back-port from yt-dlp PR #2149
        Include MediasetShowIE, DRM work-around, etc; add MediasetClipIE

dirkf   3efdb2758d  2021-12-30 21:33:51 +00:00
        Fix OnDemandPagedList underflow on slice end
        Also migrate towards yt-dlp structure
6 changed files with 379 additions and 102 deletions

test/test_youtube_signature.py

@@ -84,6 +84,21 @@ _SIG_TESTS = [
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xxAj7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJ2OySqa0q',
),
(
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'AAOAOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7vgpDL0QwbdV06sCIEzpWqMGkFR20CFOS21Tp-7vj_EMu-m37KtXJoOy1',
),
(
'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
'2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
),
]
_NSIG_TESTS = [
@@ -153,7 +168,7 @@ _NSIG_TESTS = [
),
(
'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js',
- '-Txvy6bT5R6LqgnQNx', 'dcklJCnRUHbgSg',
+ 'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA',
),
(
'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js',
@@ -173,7 +188,7 @@ _NSIG_TESTS = [
),
(
'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
- 'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
+ 'aCi3iElgd2kq0bxVbQ', 'QX1y8jGb2IbZ0w',
),
(
'https://www.youtube.com/s/player/8c7583ff/player_ias.vflset/en_US/base.js',
@@ -231,10 +246,6 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
- (
- 'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
- 'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
- ),
(
'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
@@ -259,6 +270,22 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
),
(
'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
'eWYu5d5YeY_4LyEDc', 'XJQqf-N7Xra3gg',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
),
(
'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js',
'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
),
]
@@ -271,6 +298,8 @@ class TestPlayerInfo(unittest.TestCase):
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'e7567ecf'),
('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '643afba4'),
# obsolete
('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
@@ -280,8 +309,9 @@ class TestPlayerInfo(unittest.TestCase):
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
)
ie = YoutubeIE(FakeYDL({'cachedir': False}))
for player_url, expected_player_id in PLAYER_URLS:
- player_id = YoutubeIE._extract_player_info(player_url)
+ player_id = ie._extract_player_info(player_url)
self.assertEqual(player_id, expected_player_id)
@@ -301,8 +331,8 @@ class TestSignature(unittest.TestCase):
def t_factory(name, sig_func, url_pattern):
def make_tfunc(url, sig_input, expected_sig):
m = url_pattern.match(url)
- assert m, '%r should follow URL format' % url
- test_id = m.group('id')
+ assert m, '{0!r} should follow URL format'.format(url)
+ test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id'))
def test_func(self):
basename = 'player-{0}-{1}.js'.format(name, test_id)
@@ -335,12 +365,16 @@ def n_sig(jscode, sig_input):
make_sig_test = t_factory(
- 'signature', signature, re.compile(r'.*(?:-|/player/)(?P<id>[a-zA-Z0-9_-]+)(?:/.+\.js|(?:/watch_as3|/html5player)?\.[a-z]+)$'))
+ 'signature', signature,
re.compile(r'''(?x)
.+/(?P<h5>html5)?player(?(h5)(?:-en_US)?-|/)(?P<id>[a-zA-Z0-9/._-]+)
(?(h5)/(?:watch_as3|html5player))?\.js$
'''))
for test_spec in _SIG_TESTS:
make_sig_test(*test_spec)
make_nsig_test = t_factory(
- 'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
+ 'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$'))
for test_spec in _NSIG_TESTS:
make_nsig_test(*test_spec)
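Note on the reworked test-id handling above: the new URL patterns let the captured id span path segments (for the tv-player URLs), and make_tfunc() normalises it before using it in test and player-file names. A standalone sketch of that normalisation, using only the nsig regex shown above:

```python
import re

# nsig URL pattern from the hunk above; the id may now contain '/', '.' and '-'.
nsig_url_re = re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$')

url = 'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js'
m = nsig_url_re.match(url)

# Same normalisation as the updated make_tfunc(): path separators, dots and
# dashes become underscores so the id is safe in test and file names.
test_id = re.sub(r'[/.-]', '_', m.group('id'))
print(test_id)  # 643afba4_tv_player_ias_vflset_tv_player_ias
```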

youtube_dl/extractor/extractors.py

@@ -672,7 +672,11 @@ from .massengeschmacktv import MassengeschmackTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
- from .mediaset import MediasetIE
+ from .mediaset import (
MediasetIE,
MediasetClipIE,
MediasetShowIE,
)
from .mediasite import (
MediasiteIE,
MediasiteCatalogIE,

youtube_dl/extractor/mediaset.py

@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals
import functools
import re
from .theplatform import ThePlatformBaseIE
@@ -10,7 +11,11 @@ from ..compat import (
)
from ..utils import (
ExtractorError,
GeoRestrictedError,
int_or_none,
OnDemandPagedList,
try_get,
urljoin,
update_url_query,
)
@@ -30,37 +35,110 @@ class MediasetIE(ThePlatformBaseIE):
'''
_TESTS = [{
# full episode
- 'url': 'https://www.mediasetplay.mediaset.it/video/hellogoodbye/quarta-puntata_FAFU000000661824',
- 'md5': '9b75534d42c44ecef7bf1ffeacb7f85d',
+ 'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
+ 'md5': 'a7e75c6384871f322adb781d3bd72c26',
'info_dict': {
- 'id': 'FAFU000000661824',
+ 'id': 'F310575103000102',
'ext': 'mp4',
- 'title': 'Quarta puntata',
- 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+ 'title': 'Episodio 1',
+ 'description': 'md5:e8017b7d7194e9bfb75299c2b8d81e02',
'thumbnail': r're:^https?://.*\.jpg$',
- 'duration': 1414.26,
- 'upload_date': '20161107',
- 'series': 'Hello Goodbye',
- 'timestamp': 1478532900,
- 'uploader': 'Rete 4',
- 'uploader_id': 'R4',
+ 'duration': 2682.0,
+ 'upload_date': '20210530',
+ 'series': 'Mr Wrong - Lezioni d\'amore',
+ 'timestamp': 1622413946,
+ 'uploader': 'Canale 5',
+ 'uploader_id': 'C5',
'season': 'Season 1',
'episode': 'Episode 1',
'season_number': 1,
'episode_number': 1,
'chapters': [{'start_time': 0.0, 'end_time': 439.88}, {'start_time': 439.88, 'end_time': 1685.84}, {'start_time': 1685.84, 'end_time': 2682.0}],
},
'skip': 'Geo restricted',
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501',
- 'md5': '288532f0ad18307705b01e581304cd7b',
+ 'md5': '1276f966ac423d16ba255ce867de073e',
'info_dict': {
'id': 'F309013801000501',
'ext': 'mp4',
'title': 'Puntata del 25 maggio',
- 'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
+ 'description': 'md5:ee2e456e3eb1dba5e814596655bb5296',
'thumbnail': r're:^https?://.*\.jpg$',
- 'duration': 6565.007,
- 'upload_date': '20180526',
+ 'duration': 6565.008,
+ 'upload_date': '20200903',
'series': 'Matrix',
- 'timestamp': 1527326245,
+ 'timestamp': 1599172492,
'uploader': 'Canale 5',
'uploader_id': 'C5',
'season': 'Season 5',
'episode': 'Episode 5',
'season_number': 5,
'episode_number': 5,
'chapters': [{'start_time': 0.0, 'end_time': 3409.08}, {'start_time': 3409.08, 'end_time': 6565.008}],
},
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-69-pezzo-di-luna_F303843101017801',
'md5': 'd1650ac9ff944f185556126a736df148',
'info_dict': {
'id': 'F303843101017801',
'ext': 'mp4',
'title': 'Episodio 69 - Pezzo di luna',
'description': 'md5:7c32c8ec4118b72588b9412f11353f73',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 263.008,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599064700,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 178',
'season_number': 5,
'episode_number': 178,
'chapters': [{'start_time': 0.0, 'end_time': 261.88}, {'start_time': 261.88, 'end_time': 263.008}],
},
'skip': 'Geo restricted',
}, {
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-51-tu-chi-sei_F303843107000601',
'md5': '567e9ad375b7a27a0e370650f572a1e3',
'info_dict': {
'id': 'F303843107000601',
'ext': 'mp4',
'title': 'Episodio 51 - Tu chi sei?',
'description': 'md5:42ef006e56824cc31787a547590923f4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 367.021,
'upload_date': '20200902',
'series': 'Camera Café 5',
'timestamp': 1599069817,
'uploader': 'Italia 1',
'uploader_id': 'I1',
'season': 'Season 5',
'episode': 'Episode 6',
'season_number': 5,
'episode_number': 6,
'chapters': [{'start_time': 0.0, 'end_time': 358.68}, {'start_time': 358.68, 'end_time': 367.021}],
},
'skip': 'Geo restricted',
}, {
# movie
'url': 'https://www.mediasetplay.mediaset.it/movie/selvaggi/selvaggi_F006474501000101',
'md5': '720440187a2ae26af8148eb9e6b901ed',
'info_dict': {
'id': 'F006474501000101',
'ext': 'mp4',
'title': 'Selvaggi',
'description': 'md5:cfdedbbfdd12d4d0e5dcf1fa1b75284f',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 5233.01,
'upload_date': '20210729',
'timestamp': 1627594716,
'uploader': 'Cine34',
'uploader_id': 'B6',
'chapters': [{'start_time': 0.0, 'end_time': 1938.56}, {'start_time': 1938.56, 'end_time': 5233.01}],
},
'skip': 'Geo restricted',
}, {
# clip
'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680',
@@ -131,6 +209,22 @@ class MediasetIE(ThePlatformBaseIE):
video.attrib['src'] = re.sub(r'(https?://vod05)t(-mediaset-it\.akamaized\.net/.+?.mpd)\?.+', r'\1\2', video.attrib['src'])
return super(MediasetIE, self)._parse_smil_formats(smil, smil_url, video_id, namespace, f4m_params, transform_rtmp_url)
def _check_drm_formats(self, tp_formats, video_id):
has_nondrm, drm_manifest = False, ''
for f in tp_formats:
if '_sampleaes/' in (f.get('manifest_url') or ''):
drm_manifest = drm_manifest or f['manifest_url']
f['has_drm'] = True
if not has_nondrm and not f.get('has_drm') and f.get('manifest_url'):
has_nondrm = True
nodrm_manifest = re.sub(r'_sampleaes/(\w+)_fp_', r'/\1_no_', drm_manifest)
if has_nondrm or nodrm_manifest == drm_manifest:
return
tp_formats.extend(self._extract_m3u8_formats(
nodrm_manifest, video_id, m3u8_id='hls', fatal=False) or [])
def _real_extract(self, url):
guid = self._match_id(url)
tp_path = 'PR1GhC/media/guid/2702976343/' + guid
@@ -138,49 +232,164 @@ class MediasetIE(ThePlatformBaseIE):
formats = []
subtitles = {}
- first_e = None
- for asset_type in ('SD', 'HD'):
+ first_e = geo_e = None
+ asset_type = 'geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo|HD|SD'
# TODO: fixup ISM+none manifest URLs
- for f in ('MPEG4', 'MPEG-DASH+none', 'M3U+none'):
+ for f in ('MPEG4', 'M3U'):
try:
tp_formats, tp_subtitles = self._extract_theplatform_smil(
update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), {
'mbr': 'true',
'formats': f,
'assetTypes': asset_type,
- }), guid, 'Downloading %s %s SMIL data' % (f.split('+')[0], asset_type))
+ }), guid, 'Downloading %s SMIL data' % (f.split('+')[0]))
except ExtractorError as e:
if not first_e:
first_e = e
- break
- for tp_f in tp_formats:
- tp_f['quality'] = 1 if asset_type == 'HD' else 0
- formats.extend(tp_formats)
- subtitles = self._merge_subtitles(subtitles, tp_subtitles)
- if first_e and not formats:
+ if not geo_e and isinstance(e, GeoRestrictedError):
+ geo_e = e
+ continue
+ self._check_drm_formats(tp_formats, guid)
+ formats.extend(tp_formats)
+ subtitles = self._merge_subtitles(subtitles, tp_subtitles)
# check for errors and report them
if (first_e or geo_e) and not formats:
if geo_e:
raise geo_e
if 'None of the available releases match' in first_e.message:
raise ExtractorError('No non-DRM formats available', cause=first_e)
raise first_e
self._sort_formats(formats)
- fields = []
- for templ, repls in (('tvSeason%sNumber', ('', 'Episode')), ('mediasetprogram$%s', ('brandTitle', 'numberOfViews', 'publishInfo'))):
- fields.extend(templ % repl for repl in repls)
feed_data = self._download_json(
- 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs/guid/-/' + guid,
- guid, fatal=False, query={'fields': ','.join(fields)})
+ 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/' + guid,
+ guid, fatal=False)
if feed_data:
publish_info = feed_data.get('mediasetprogram$publishInfo') or {}
thumbnails = feed_data.get('thumbnails') or {}
thumbnail = None
for key, value in thumbnails.items():
if key.startswith('image_keyframe_poster-'):
thumbnail = value.get('url')
break
info.update({
- 'episode_number': int_or_none(feed_data.get('tvSeasonEpisodeNumber')),
- 'season_number': int_or_none(feed_data.get('tvSeasonNumber')),
- 'series': feed_data.get('mediasetprogram$brandTitle'),
+ 'description': info.get('description') or feed_data.get('description') or feed_data.get('longDescription'),
'uploader': publish_info.get('description'),
'uploader_id': publish_info.get('channel'),
'view_count': int_or_none(feed_data.get('mediasetprogram$numberOfViews')),
+ 'thumbnail': thumbnail,
})
if feed_data.get('programType') == 'episode':
info.update({
'episode_number': int_or_none(
feed_data.get('tvSeasonEpisodeNumber')),
'season_number': int_or_none(
feed_data.get('tvSeasonNumber')),
'series': feed_data.get('mediasetprogram$brandTitle'),
})
info.update({
'id': guid,
'formats': formats,
'subtitles': subtitles,
})
return info
class MediasetClipIE(MediasetIE):
_VALID_URL = r'https?://(?:www\.)?\w+\.mediaset\.it/video/(?:[^/]+/)*[\w-]+_(?P<id>\d+)\.s?html?'
_TESTS = [{
'url': 'https://www.grandefratello.mediaset.it/video/ventinovesima-puntata_27071.shtml',
'info_dict': {
'id': 'F310293901002901',
'ext': 'mp4',
},
'skip': 'Geo restricted, DRM content',
}]
def _real_extract(self, url):
clip_id = self._match_id(url)
webpage = self._download_webpage(url, clip_id)
guid = self._search_regex(
(r'''var\s*_onplay_guid\s*=\s*(?P<q>'|"|\b)(?P<guid>[\dA-Z]{16,})(?P=q)\s*;''',
r'\bGUID\s+(?P<guid>[\dA-Z]{16,})\b', ),
webpage, 'clip GUID', group='guid')
return self.url_result('mediaset:%s' % guid, ie='Mediaset', video_id=clip_id)
class MediasetShowIE(MediasetIE):
_VALID_URL = r'''(?x)
(?:
https?://
(?:(?:www|static3)\.)?mediasetplay\.mediaset\.it/
(?:
(?:fiction|programmi-tv|serie-tv)/(?:.+?/)?
(?:[a-z-]+)_SE(?P<id>\d{12})
(?:,ST(?P<st>\d{12}))?
(?:,sb(?P<sb>\d{9}))?$
)
)
'''
_TESTS = [{
# TV Show webpage (general webpage)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061',
'info_dict': {
'id': '000000000061',
'title': 'Le Iene',
},
'playlist_mincount': 7,
}, {
# TV Show webpage (specific season)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763',
'info_dict': {
'id': '000000002763',
'title': 'Le Iene',
},
'playlist_mincount': 7,
}, {
# TV Show specific playlist (with multiple pages)
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375',
'info_dict': {
'id': '100013375',
'title': 'I servizi',
},
'playlist_mincount': 50,
}]
_BY_SUBBRAND = 'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2?byCustomValue={subBrandId}{%s}&sort=:publishInfo_lastPublished|desc,tvSeasonEpisodeNumber|desc&range=%d-%d'
_PAGE_SIZE = 25
_match_valid_url = lambda s, u: re.match(s._VALID_URL, u)
def _fetch_page(self, sb, page):
lower_limit = page * self._PAGE_SIZE + 1
upper_limit = lower_limit + self._PAGE_SIZE - 1
content = self._download_json(
self._BY_SUBBRAND % (sb, lower_limit, upper_limit), sb)
for entry in content.get('entries') or []:
res = self.url_result('mediaset:' + entry['guid'])
if res:
res['playlist_title'] = entry['mediasetprogram$subBrandDescription']
yield res
def _real_extract(self, url):
playlist_id, st, sb = self._match_valid_url(url).group('id', 'st', 'sb')
if not sb:
page = self._download_webpage(url, st or playlist_id)
entries = [self.url_result(urljoin('https://www.mediasetplay.mediaset.it', url))
for url in re.findall(r'href="([^<>=]+SE\d{12},ST\d{12},sb\d{9})">[^<]+<', page)]
title = (self._html_search_regex(r'(?s)<h1[^>]*>(.+?)</h1>', page, 'title', default=None)
or self._og_search_title(page))
return self.playlist_result(entries, st or playlist_id, title)
entries = OnDemandPagedList(
functools.partial(self._fetch_page, sb),
self._PAGE_SIZE)
# slice explicitly, as no __getitem__ in OnDemandPagedList yet
title = try_get(entries, lambda x: x.getslice(0, 1)[0]['playlist_title'])
return self.playlist_result(entries, sb, title)
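The DRM work-around above (_check_drm_formats()) marks SAMPLE-AES HLS manifests as DRM and, when no DRM-free format was found, derives a plain manifest URL from the protected one. A minimal sketch of that URL rewrite; the manifest URL below is made up for illustration:

```python
import re

# Hypothetical SAMPLE-AES manifest URL, shaped like the ones the extractor
# flags as DRM; only the '_sampleaes/<name>_fp_' segment matters here.
drm_manifest = 'https://example.invalid/hls_sampleaes/episode_fp_1080.m3u8'

# Same substitution as in _check_drm_formats() above.
nodrm_manifest = re.sub(r'_sampleaes/(\w+)_fp_', r'/\1_no_', drm_manifest)
print(nodrm_manifest)  # https://example.invalid/hls/episode_no_1080.m3u8
```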

youtube_dl/extractor/youtube.py

@@ -692,9 +692,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
- r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})//(?:tv-)?player',
+ r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
- r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
+ r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
- r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
+ r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$',
)
_SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
@@ -1626,15 +1626,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
""" Return a string representation of a signature """
return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))
- @classmethod
- def _extract_player_info(cls, player_url):
- for player_re in cls._PLAYER_INFO_RE:
- id_m = re.search(player_re, player_url)
- if id_m:
- break
- else:
- raise ExtractorError('Cannot identify player %r' % player_url)
- return id_m.group('id')
+ def _extract_player_info(self, player_url):
+ try:
+ return self._search_regex(
+ self._PLAYER_INFO_RE, player_url, 'player info', group='id')
+ except ExtractorError as e:
+ raise ExtractorError(
+ 'Cannot identify player %r' % (player_url,), cause=e)
def _load_player(self, video_id, player_url, fatal=True, player_id=None):
if not player_id:
@@ -1711,6 +1709,23 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
' return %s\n') % (signature_id_tuple, expr_code)
self.to_screen('Extracted signature function:\n' + code)
def _extract_sig_fn(self, jsi, funcname):
var_ay = self._search_regex(
r'''(?x)
(?:\*/|\{|\n|^)\s*(?:'[^']+'\s*;\s*)
(var\s*[\w$]+\s*=\s*(?:
('|")(?:\\\2|(?!\2).)+\2\s*\.\s*split\(\s*('|")\W+\3\s*\)|
\[\s*(?:('|")(?:\\\4|(?!\4).)*\4\s*(?:(?=\])|,\s*))+\]
))(?=\s*[,;])
''', jsi.code, 'useful values', default='')
sig_fn = jsi.extract_function_code(funcname)
if var_ay:
sig_fn = (sig_fn[0], ';\n'.join((var_ay, sig_fn[1])))
return sig_fn
def _parse_sig_js(self, jscode):
# Examples where `sig` is funcname:
# sig=function(a){a=a.split(""); ... ;return a.join("")};
@@ -1736,8 +1751,12 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
jscode, 'Initial JS player signature function name', group='sig')
jsi = JSInterpreter(jscode)
- initial_function = jsi.extract_function(funcname)
- return lambda s: initial_function([s])
+ initial_function = self._extract_sig_fn(jsi, funcname)
+ func = jsi.extract_function_from_code(*initial_function)
+ return lambda s: func([s])
def _cached(self, func, *cache_id):
def inner(*args, **kwargs):
@@ -1856,15 +1875,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
- var_ay = self._search_regex(
- r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
- jsi.code, 'useful values', default='')
func_name = self._extract_n_function_name(jsi.code)
- func_code = jsi.extract_function_code(func_name)
+ func_code = self._extract_sig_fn(jsi, func_name)
- if var_ay:
- func_code = (func_code[0], ';\n'.join((var_ay, func_code[1])))
if player_id:
self.cache.store('youtube-nsig', player_id, func_code)
@@ -2136,7 +2149,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
video_details = merge_dicts(*traverse_obj(
(player_response, api_player_response),
(Ellipsis, 'videoDetails', T(dict))))
- player_response.update(api_player_response or {})
+ player_response.update(filter_dict(
+ api_player_response or {}, cndn=lambda k, _: k != 'captions'))
player_response['videoDetails'] = video_details
def is_agegated(playability):
@@ -2566,8 +2580,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
}
pctr = traverse_obj(
- player_response,
- ('captions', 'playerCaptionsTracklistRenderer', T(dict)))
+ (player_response, api_player_response),
+ (Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict)))
if pctr:
def process_language(container, base_url, lang_code, query):
lang_subs = []
@@ -2584,20 +2598,21 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def process_subtitles():
subtitles = {}
for caption_track in traverse_obj(pctr, (
- 'captionTracks', lambda _, v: v.get('baseUrl'))):
+ Ellipsis, 'captionTracks', lambda _, v: (
+ v.get('baseUrl') and v.get('languageCode')))):
base_url = self._yt_urljoin(caption_track['baseUrl'])
if not base_url:
continue
+ lang_code = caption_track['languageCode']
if caption_track.get('kind') != 'asr':
- lang_code = caption_track.get('languageCode')
- if not lang_code:
- continue
process_language(
subtitles, base_url, lang_code, {})
continue
automatic_captions = {}
+ process_language(
+ automatic_captions, base_url, lang_code, {})
for translation_language in traverse_obj(pctr, (
- 'translationLanguages', lambda _, v: v.get('languageCode'))):
+ Ellipsis, 'translationLanguages', lambda _, v: v.get('languageCode'))):
translation_language_code = translation_language['languageCode']
process_language(
automatic_captions, base_url, translation_language_code,
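The new _extract_sig_fn() above generalises the old nsig-only "useful values" lookup: a global string/array declaration from the player JS is prepended to the extracted function so that lookups into that table still resolve. A rough sketch of the idea with a made-up player snippet, driven through JSInterpreter the same way the patch does:

```python
from youtube_dl.jsinterp import JSInterpreter

# Toy stand-in for obfuscated player JS: a global lookup table plus a function
# that reads from it (real players follow the same general shape).
jscode = 'var glob = "hello,there,world".split(","); function pick(a){return glob[a]}'
glob_decl = 'var glob = "hello,there,world".split(",")'

jsi = JSInterpreter(jscode)
argnames, code = jsi.extract_function_code('pick')

# Prepend the global declaration, as _extract_sig_fn() does, so glob[...]
# resolves when only the function body is interpreted.
func = jsi.extract_function_from_code(argnames, ';\n'.join((glob_decl, code)))
print(func([1]))  # there
```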

youtube_dl/jsinterp.py

@@ -678,7 +678,7 @@ class JSInterpreter(object):
return len(obj)
try:
return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)]
- except (TypeError, KeyError, IndexError) as e:
+ except (TypeError, KeyError, IndexError, ValueError) as e:
# allow_undefined is None gives correct behaviour
if allow_undefined or (
allow_undefined is None and not isinstance(e, TypeError)):
@@ -1038,6 +1038,10 @@ class JSInterpreter(object):
left_val = self._index(left_val, idx)
if isinstance(idx, float):
idx = int(idx)
if isinstance(left_val, list) and len(left_val) <= int_or_none(idx, default=-1):
# JS Array is a sparsely assignable list
# TODO: handle extreme sparsity without memory bloat, eg using auxiliary dict
left_val.extend((idx - len(left_val) + 1) * [JS_Undefined])
left_val[idx] = self._operator(
m.group('op'), self._index(left_val, idx) if m.group('op') else None,
m.group('expr'), expr, local_vars, allow_recursion)
@@ -1204,9 +1208,10 @@ class JSInterpreter(object):
elif member == 'join':
assertion(isinstance(obj, list), 'must be applied on a list')
assertion(len(argvals) <= 1, 'takes at most one argument')
- return (',' if len(argvals) == 0 else argvals[0]).join(
- ('' if x in (None, JS_Undefined) else _js_toString(x))
- for x in obj)
+ return (',' if len(argvals) == 0 or argvals[0] in (None, JS_Undefined)
+ else argvals[0]).join(
+ ('' if x in (None, JS_Undefined) else _js_toString(x))
+ for x in obj)
elif member == 'reverse':
assertion(not argvals, 'does not take any arguments')
obj.reverse()
@@ -1364,19 +1369,21 @@ class JSInterpreter(object):
code, _ = self._separate_at_paren(func_m.group('code'))  # refine the match
return self.build_arglist(func_m.group('args')), code
- def extract_function(self, funcname):
+ def extract_function(self, funcname, *global_stack):
return function_with_repr(
- self.extract_function_from_code(*self.extract_function_code(funcname)),
+ self.extract_function_from_code(*itertools.chain(
+ self.extract_function_code(funcname), global_stack)),
'F<%s>' % (funcname,))
def extract_function_from_code(self, argnames, code, *global_stack):
local_vars = {}
+ start = None
while True:
- mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code)
+ mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code[start:])
if mobj is None:
break
- start, body_start = mobj.span()
+ start, body_start = ((start or 0) + x for x in mobj.span())
body, remaining = self._separate_at_paren(code[body_start - 1:])
name = self._named_object(local_vars, self.extract_function_from_code(
[x.strip() for x in mobj.group('args').split(',')],
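For reference, the two interpreter behaviours covered by the [JSInterp] commits in this compare (join() with a null/undefined separator, and assignment past the end of an array) can be exercised directly; a small standalone sketch:

```python
from youtube_dl.jsinterp import JSInterpreter

# join(null) now falls back to ',' (commit 1bc45b8b6c).
jsi = JSInterpreter('function f(){return [1,2,3].join(null)}')
print(jsi.call_function('f'))  # 1,2,3

# Assignment beyond the end of a list grows it sparsely instead of raising
# (commit a4fc1151f1).
jsi = JSInterpreter('function g(){var l = [1,2]; l[9] = 0; return l.length}')
print(jsi.call_function('g'))  # 10
```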

youtube_dl/utils.py

@@ -4106,6 +4106,12 @@ class PagedList(object):
# This is only useful for tests
return len(self.getslice())
def _getslice(self, start, end):
raise NotImplementedError('This method must be implemented by subclasses')
def getslice(self, start=0, end=None):
return list(self._getslice(start, end))
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize, use_cache=True):
@@ -4115,11 +4121,12 @@ class OnDemandPagedList(PagedList):
if use_cache:
self._cache = {}
- def getslice(self, start=0, end=None):
- res = []
- for pagenum in itertools.count(start // self._pagesize):
- firstid = pagenum * self._pagesize
- nextfirstid = pagenum * self._pagesize + self._pagesize
+ def _getslice(self, start=0, end=None):
+ firstpage = start // self._pagesize
+ nextfirstid = firstpage * self._pagesize
+ for pagenum in itertools.count(firstpage):
+ firstid = nextfirstid
+ nextfirstid += self._pagesize
if start >= nextfirstid:
continue
@@ -4132,18 +4139,19 @@ class OnDemandPagedList(PagedList):
self._cache[pagenum] = page_results
startv = (
- start % self._pagesize
+ start - firstid
if firstid <= start < nextfirstid
else 0)
endv = (
- ((end - 1) % self._pagesize) + 1
+ end - firstid
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
- res.extend(page_results)
+ for item in page_results:
+ yield item
# A little optimization - if current page is not "full", ie. does
# not contain page_size videos then we can assume that this page
@@ -4156,7 +4164,7 @@ class OnDemandPagedList(PagedList):
# break out early as well
if end == nextfirstid:
break
- return res
+ return
class InAdvancePagedList(PagedList):
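The OnDemandPagedList changes above feed MediasetShowIE, which peeks at the first entry via getslice(0, 1) for the playlist title. A minimal sketch of that usage with a made-up page function:

```python
from youtube_dl.utils import OnDemandPagedList

def fetch_page(pagenum):
    # Made-up page function: 5 items per page, 12 items in total.
    start = pagenum * 5
    for i in range(start, min(start + 5, 12)):
        yield {'id': i}

entries = OnDemandPagedList(fetch_page, 5)

# MediasetShowIE uses getslice(0, 1) to read the first entry for the playlist
# title; this fetches a single page and returns exactly one item.
print(entries.getslice(0, 1))   # [{'id': 0}]
print(len(entries.getslice()))  # 12
```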