Compare commits


15 Commits

Author SHA1 Message Date
dirkf
751f9e65cd
Merge e5bfed6c77b47bdf07713d8d88c8b21247999ae7 into 2b4fbfce25902d557b86b003cf48f738129efce4 2025-03-26 06:50:10 +00:00
dirkf
2b4fbfce25 [YouTube] Support player 4fcd6e4a
thx seproDev, bashonly: yt-dlp/yt-dlp#12748
2025-03-26 02:27:25 +00:00
dirkf
1bc45b8b6c [JSInterp] Use , for join() with null/undefined argument
Eg: [1,2,3].join(null) -> '1,2,3'
2025-03-25 22:35:06 +00:00
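A minimal sketch (not the project's JSInterpreter code) of the join() behaviour this commit message describes, with a stand-in for the interpreter's JS_Undefined sentinel and str() standing in for its _js_toString():

    _UNDEFINED = object()  # stand-in for JSInterpreter's JS_Undefined

    def js_join(lst, separator=_UNDEFINED):
        # a null/undefined separator falls back to ','; null/undefined elements stringify to ''
        sep = ',' if separator in (None, _UNDEFINED) else separator
        return sep.join('' if x in (None, _UNDEFINED) else str(x) for x in lst)

    assert js_join([1, 2, 3], None) == '1,2,3'
    assert js_join([1, None, 3]) == '1,,3'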
dirkf
b982d77d0b [YouTube] Align signature tests with yt-dlp
thx bashonly, yt-dlp/yt-dlp#12725
2025-03-25 22:35:06 +00:00
dirkf
c55dbf4838 [YouTube] Update signature extraction for players 643afba4, 363db69b 2025-03-25 22:35:06 +00:00
dirkf
087d865230 [YouTube] Support new player URL patterns 2025-03-25 22:35:06 +00:00
dirkf
a4fc1151f1 [JSInterp] Improve indexing
* catch invalid list index with `ValueError` (eg [1, 2]['ab'] -> undefined)
* allow assignment outside existing list (eg var l = [1,2]; l[9] = 0;)
2025-03-25 22:35:05 +00:00
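A minimal sketch (not the project's JSInterpreter code) of the two behaviours this commit message describes; the helper names below are illustrative only:

    _UNDEFINED = object()  # stand-in for JSInterpreter's JS_Undefined

    def js_get_index(lst, idx):
        try:
            return lst[int(idx)]
        except (TypeError, IndexError, ValueError):
            return _UNDEFINED  # eg [1, 2]['ab'] -> undefined

    def js_set_index(lst, idx, value):
        if idx >= len(lst):
            # pad the gap with undefined, as a sparse JS array would
            lst.extend([_UNDEFINED] * (idx - len(lst) + 1))
        lst[idx] = value

    assert js_get_index([1, 2], 'ab') is _UNDEFINED
    l = [1, 2]
    js_set_index(l, 9, 0)
    assert len(l) == 10 and l[9] == 0 and l[2] is _UNDEFINED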
dirkf
a464c159e6 [YouTube] Make _extract_player_info() use _search_regex() 2025-03-25 22:35:05 +00:00
dirkf
7dca08eff0 [YouTube] Also get original of translated automatic captions 2025-03-25 22:35:05 +00:00
dirkf
2239ee7965 [YouTube] Get subtitles/automatic captions from both web and API responses 2025-03-25 22:35:05 +00:00
dirkf
e5bfed6c77
[PornHub] Remove extraneous modelhub login code 2024-10-15 21:11:28 +01:00
dirkf
38fce984f4
[PornHub] Fix typo in path regex 2024-10-14 14:23:46 +01:00
dirkf
49093c09c0
Merge pull request #32950 from ytdl-org/master
Merge from master
2024-10-14 14:09:51 +01:00
dirkf
f3cf092584
Integrate changes from yt-dlp and PR 31432
Thx:
* MrBigDig <mrbigdig2020@gmail.com>
* yt-dlp contributors.

Supersedes, closes #31432.
2024-10-14 12:39:50 +01:00
DarkFighterLuke
34e1010545 Fix view_count 2022-03-10 15:07:24 +01:00
4 changed files with 388 additions and 251 deletions

test/test_youtube_signature.py

@@ -84,6 +84,21 @@ _SIG_TESTS = [
         '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
         '0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xxAj7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJ2OySqa0q',
     ),
+    (
+        'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'AAOAOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7vgpDL0QwbdV06sCIEzpWqMGkFR20CFOS21Tp-7vj_EMu-m37KtXJoOy1',
+    ),
+    (
+        'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        '0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpz2ICs6EVdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+    ),
+    (
+        'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
+        '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
+        'wAOAOq0QJ8ARAIgXmPlOPSBkkUs1bYFYlJCfe29xx8q7v1pDL0QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
+    ),
 ]

 _NSIG_TESTS = [
@@ -153,7 +168,7 @@ _NSIG_TESTS = [
     ),
     (
         'https://www.youtube.com/s/player/c57c113c/player_ias.vflset/en_US/base.js',
-        '-Txvy6bT5R6LqgnQNx', 'dcklJCnRUHbgSg',
+        'M92UUMHa8PdvPd3wyM', '3hPqLJsiNZx7yA',
     ),
     (
         'https://www.youtube.com/s/player/5a3b6271/player_ias.vflset/en_US/base.js',
@@ -173,7 +188,7 @@ _NSIG_TESTS = [
     ),
     (
         'https://www.youtube.com/s/player/cfa9e7cb/player_ias.vflset/en_US/base.js',
-        'qO0NiMtYQ7TeJnfFG2', 'k9cuJDHNS5O7kQ',
+        'aCi3iElgd2kq0bxVbQ', 'QX1y8jGb2IbZ0w',
    ),
    (
        'https://www.youtube.com/s/player/8c7583ff/player_ias.vflset/en_US/base.js',
@@ -231,10 +246,6 @@ _NSIG_TESTS = [
         'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js',
         'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
     ),
-    (
-        'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
-        'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
-    ),
     (
         'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
         'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
@@ -259,6 +270,22 @@ _NSIG_TESTS = [
         'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
         'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
     ),
+    (
+        'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
+        'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
+    ),
+    (
+        'https://www.youtube.com/s/player/363db69b/player_ias.vflset/en_US/base.js',
+        'eWYu5d5YeY_4LyEDc', 'XJQqf-N7Xra3gg',
+    ),
+    (
+        'https://www.youtube.com/s/player/4fcd6e4a/player_ias.vflset/en_US/base.js',
+        'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
+    ),
+    (
+        'https://www.youtube.com/s/player/4fcd6e4a/tv-player-ias.vflset/tv-player-ias.js',
+        'o_L251jm8yhZkWtBW', 'lXoxI3XvToqn6A',
+    ),
 ]
@@ -271,6 +298,8 @@ class TestPlayerInfo(unittest.TestCase):
             ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-en_US.vflset/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-phone-de_DE.vflset/base.js', '64dddad9'),
             ('https://www.youtube.com/s/player/64dddad9/player-plasma-ias-tablet-en_US.vflset/base.js', '64dddad9'),
+            ('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js', 'e7567ecf'),
+            ('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js', '643afba4'),
             # obsolete
             ('https://www.youtube.com/yts/jsbin/player_ias-vfle4-e03/en_US/base.js', 'vfle4-e03'),
             ('https://www.youtube.com/yts/jsbin/player_ias-vfl49f_g4/en_US/base.js', 'vfl49f_g4'),
@@ -280,8 +309,9 @@ class TestPlayerInfo(unittest.TestCase):
             ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflXGBaUN.js', 'vflXGBaUN'),
             ('https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js', 'vflKjOTVq'),
         )
+        ie = YoutubeIE(FakeYDL({'cachedir': False}))
         for player_url, expected_player_id in PLAYER_URLS:
-            player_id = YoutubeIE._extract_player_info(player_url)
+            player_id = ie._extract_player_info(player_url)
             self.assertEqual(player_id, expected_player_id)
@@ -301,8 +331,8 @@ class TestSignature(unittest.TestCase):
 def t_factory(name, sig_func, url_pattern):
     def make_tfunc(url, sig_input, expected_sig):
         m = url_pattern.match(url)
-        assert m, '%r should follow URL format' % url
-        test_id = m.group('id')
+        assert m, '{0!r} should follow URL format'.format(url)
+        test_id = re.sub(r'[/.-]', '_', m.group('id') or m.group('compat_id'))

         def test_func(self):
             basename = 'player-{0}-{1}.js'.format(name, test_id)
@@ -335,12 +365,16 @@ def n_sig(jscode, sig_input):

 make_sig_test = t_factory(
-    'signature', signature, re.compile(r'.*(?:-|/player/)(?P<id>[a-zA-Z0-9_-]+)(?:/.+\.js|(?:/watch_as3|/html5player)?\.[a-z]+)$'))
+    'signature', signature,
+    re.compile(r'''(?x)
+        .+/(?P<h5>html5)?player(?(h5)(?:-en_US)?-|/)(?P<id>[a-zA-Z0-9/._-]+)
+        (?(h5)/(?:watch_as3|html5player))?\.js$
+    '''))
 for test_spec in _SIG_TESTS:
     make_sig_test(*test_spec)

 make_nsig_test = t_factory(
-    'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_-]+)/.+.js$'))
+    'nsig', n_sig, re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$'))
 for test_spec in _NSIG_TESTS:
     make_nsig_test(*test_spec)
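As a quick illustration of the updated test-id handling above (a sketch, simplified to the 'id' group only), the widened nsig URL pattern now accepts player paths containing slashes and dots, and the test id flattens them:

    import re

    nsig_url_re = re.compile(r'.+/player/(?P<id>[a-zA-Z0-9_/.-]+)\.js$')
    url = 'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js'
    test_id = re.sub(r'[/.-]', '_', nsig_url_re.match(url).group('id'))
    print(test_id)  # 643afba4_tv_player_ias_vflset_tv_player_ias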

youtube_dl/extractor/pornhub.py

@ -7,30 +7,35 @@ import operator
import re import re
from .common import InfoExtractor from .common import InfoExtractor
from ..compat import ( from ..compat import (
compat_HTTPError, compat_HTTPError,
compat_str,
compat_urllib_request,
) )
from .openload import PhantomJSwrapper from .openload import PhantomJSwrapper
from ..utils import ( from ..utils import (
clean_html,
determine_ext, determine_ext,
extract_attributes,
ExtractorError, ExtractorError,
get_element_by_class,
get_element_by_id,
int_or_none, int_or_none,
merge_dicts, merge_dicts,
NO_DEFAULT, parse_count,
orderedSet,
remove_quotes, remove_quotes,
str_to_int, remove_start,
T,
traverse_obj,
update_url_query, update_url_query,
urlencode_postdata,
url_or_none, url_or_none,
urlencode_postdata,
urljoin,
) )
class PornHubBaseIE(InfoExtractor): class PornHubBaseIE(InfoExtractor):
_NETRC_MACHINE = 'pornhub' _NETRC_MACHINE = 'pornhub'
_PORNHUB_HOST_RE = r'(?:(?P<host>pornhub(?:premium)?\.(?:com|net|org))|pornhubthbh7ap3u\.onion)' _PORNHUB_HOST_RE = r'(?:(?P<host>pornhub(?:premium)?\.(?:com|net|org))|pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd\.onion)'
def _download_webpage_handle(self, *args, **kwargs): def _download_webpage_handle(self, *args, **kwargs):
def dl(*args, **kwargs): def dl(*args, **kwargs):
@ -47,10 +52,7 @@ class PornHubBaseIE(InfoExtractor):
r'<body\b[^>]+\bonload=["\']go\(\)', r'<body\b[^>]+\bonload=["\']go\(\)',
r'document\.cookie\s*=\s*["\']RNKEY=', r'document\.cookie\s*=\s*["\']RNKEY=',
r'document\.location\.reload\(true\)')): r'document\.location\.reload\(true\)')):
url_or_request = args[0] url = urlh.geturl()
url = (url_or_request.get_full_url()
if isinstance(url_or_request, compat_urllib_request.Request)
else url_or_request)
phantom = PhantomJSwrapper(self, required_version='2.0') phantom = PhantomJSwrapper(self, required_version='2.0')
phantom.get(url, html=webpage) phantom.get(url, html=webpage)
webpage, urlh = dl(*args, **kwargs) webpage, urlh = dl(*args, **kwargs)
@ -60,11 +62,17 @@ class PornHubBaseIE(InfoExtractor):
def _real_initialize(self): def _real_initialize(self):
self._logged_in = False self._logged_in = False
def _set_age_cookies(self, host):
self._set_cookie(host, 'age_verified', '1')
self._set_cookie(host, 'accessAgeDisclaimerPH', '1')
self._set_cookie(host, 'accessAgeDisclaimerUK', '1')
self._set_cookie(host, 'accessPH', '1')
def _login(self, host): def _login(self, host):
if self._logged_in: if self._logged_in:
return return
site = host.split('.')[0] site = host.split('.', 1)[0]
# Both sites pornhub and pornhubpremium have separate accounts # Both sites pornhub and pornhubpremium have separate accounts
# so there should be an option to provide credentials for both. # so there should be an option to provide credentials for both.
@ -81,9 +89,9 @@ class PornHubBaseIE(InfoExtractor):
login_url, None, 'Downloading %s login page' % site) login_url, None, 'Downloading %s login page' % site)
def is_logged(webpage): def is_logged(webpage):
return any(re.search(p, webpage) for p in ( return bool(
r'class=["\']signOut', get_element_by_id('profileMenuDropdown', webpage)
r'>Sign\s+[Oo]ut\s*<')) or get_element_by_class('ph-icon-logout', webpage))
if is_logged(login_page): if is_logged(login_page):
self._logged_in = True self._logged_in = True
@ -92,12 +100,12 @@ class PornHubBaseIE(InfoExtractor):
login_form = self._hidden_inputs(login_page) login_form = self._hidden_inputs(login_page)
login_form.update({ login_form.update({
'username': username, 'email': username,
'password': password, 'password': password,
}) })
response = self._download_json( response = self._download_json(
'https://www.%s/front/authenticate' % host, None, 'https://www.%s/front/authenticate' % host, 'login',
'Logging in to %s' % site, 'Logging in to %s' % site,
data=urlencode_postdata(login_form), data=urlencode_postdata(login_form),
headers={ headers={
@ -119,17 +127,12 @@ class PornHubBaseIE(InfoExtractor):
class PornHubIE(PornHubBaseIE): class PornHubIE(PornHubBaseIE):
IE_DESC = 'PornHub and Thumbzilla' IE_DESC = 'PornHub' # Thumbzilla -> Redtube.com, Modelhub -> uviu.com
_VALID_URL = r'''(?x) _PORNHUB_PATH_RE = r'/(?:(?:view_video\.php%s)\?(?:.+&)?viewkey=%s)(?P<id>[\da-z]+)'
https?:// _VALID_URL = r'https?://(?:[^/]+\.)?%s%s' % (
(?: PornHubBaseIE._PORNHUB_HOST_RE, _PORNHUB_PATH_RE % ('|video/show', '|embed/'))
(?:[^/]+\.)? _PORNHUB_PATH_RE = _PORNHUB_PATH_RE % ('', '')
%s _EMBED_REGEX = [r'<iframe\s[^>]*?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)']
/(?:(?:view_video\.php|video/show)\?viewkey=|embed/)|
(?:www\.)?thumbzilla\.com/video/
)
(?P<id>[\da-z]+)
''' % PornHubBaseIE._PORNHUB_HOST_RE
_TESTS = [{ _TESTS = [{
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015', 'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
'md5': 'a6391306d050e4547f62b3f485dd9ba9', 'md5': 'a6391306d050e4547f62b3f485dd9ba9',
@ -138,6 +141,7 @@ class PornHubIE(PornHubBaseIE):
'ext': 'mp4', 'ext': 'mp4',
'title': 'Seductive Indian beauty strips down and fingers her pink pussy', 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
'uploader': 'Babes', 'uploader': 'Babes',
'uploader_id': '/users/babes-com',
'upload_date': '20130628', 'upload_date': '20130628',
'timestamp': 1372447216, 'timestamp': 1372447216,
'duration': 361, 'duration': 361,
@ -148,6 +152,10 @@ class PornHubIE(PornHubBaseIE):
'age_limit': 18, 'age_limit': 18,
'tags': list, 'tags': list,
'categories': list, 'categories': list,
'cast': list,
},
'params': {
'format': '[format_id!^=hls]',
}, },
}, { }, {
# non-ASCII title # non-ASCII title
@ -189,14 +197,27 @@ class PornHubIE(PornHubBaseIE):
'categories': list, 'categories': list,
'subtitles': { 'subtitles': {
'en': [{ 'en': [{
"ext": 'srt' 'ext': 'srt',
}] }],
}, },
}, },
'params': { 'params': {
'skip_download': True, 'skip_download': True,
}, },
'skip': 'This video has been disabled', 'skip': 'This video has been disabled',
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph601dc30bae19a',
'info_dict': {
'id': 'ph601dc30bae19a',
'ext': 'mp4',
'timestamp': 1612564932,
'age_limit': 18,
'uploader': 'Projekt Melody',
'uploader_id': 'projekt-melody',
'upload_date': '20210205',
'title': '"Welcome to My Pussy Mansion" - CB Stream (02/03/21)',
'thumbnail': r're:https?://.+',
},
}, { }, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d', 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
'only_matching': True, 'only_matching': True,
@ -216,9 +237,6 @@ class PornHubIE(PornHubBaseIE):
# private video # private video
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7', 'url': 'http://www.pornhub.com/view_video.php?viewkey=ph56fd731fce6b7',
'only_matching': True, 'only_matching': True,
}, {
'url': 'https://www.thumbzilla.com/video/ph56c6114abd99a/horny-girlfriend-sex',
'only_matching': True,
}, { }, {
'url': 'http://www.pornhub.com/video/show?viewkey=648719015', 'url': 'http://www.pornhub.com/video/show?viewkey=648719015',
'only_matching': True, 'only_matching': True,
@ -244,28 +262,36 @@ class PornHubIE(PornHubBaseIE):
'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5a9813bfa7156', 'url': 'https://www.pornhub.com/view_video.php?viewkey=ph5a9813bfa7156',
'only_matching': True, 'only_matching': True,
}, { }, {
'url': 'http://pornhubthbh7ap3u.onion/view_video.php?viewkey=ph5a9813bfa7156', 'url': 'http://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/view_video.php?viewkey=ph5a9813bfa7156',
'only_matching': True, 'only_matching': True,
}] }]
@staticmethod @classmethod
def _extract_urls(webpage): def _extract_urls(cls, webpage):
return re.findall( def yield_urls():
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?pornhub(?:premium)?\.(?:com|net|org)/embed/[\da-z]+)', for p in cls._EMBED_REGEX:
webpage) for from_ in re.finditer(p, webpage):
yield from_.group('url')
return list(yield_urls())
def _extract_count(self, pattern, webpage, name): def _extract_count(self, pattern, webpage, name):
return str_to_int(self._search_regex( return parse_count(self._search_regex(
pattern, webpage, '%s count' % name, fatal=False)) pattern, webpage, '%s count' % name, fatal=False))
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) for _ in range(2):
mobj = self._match_valid_url(url)
video_id = mobj.group('id') if mobj else self._generic_id(url)
_, urlh = self._download_webpage_handle(url, video_id)
if url == urlh.geturl():
break
url = urlh.geturl()
host = mobj.group('host') or 'pornhub.com' host = mobj.group('host') or 'pornhub.com'
video_id = mobj.group('id')
self._login(host) self._login(host)
self._set_age_cookies(host)
self._set_cookie(host, 'age_verified', '1')
def dl_webpage(platform): def dl_webpage(platform):
self._set_cookie(host, 'platform', platform) self._set_cookie(host, 'platform', platform)
@ -276,7 +302,7 @@ class PornHubIE(PornHubBaseIE):
webpage = dl_webpage('pc') webpage = dl_webpage('pc')
error_msg = self._html_search_regex( error_msg = self._html_search_regex(
(r'(?s)<div[^>]+class=(["\'])(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>', (r'(?s)<div[^>]+class=("|\')(?:(?!\1).)*\b(?:removed|userMessageSection)\b(?:(?!\1).)*\1[^>]*>(?P<error>.+?)</div>',
r'(?s)<section[^>]+class=["\']noVideo["\'][^>]*>(?P<error>.+?)</section>'), r'(?s)<section[^>]+class=["\']noVideo["\'][^>]*>(?P<error>.+?)</section>'),
webpage, 'error message', default=None, group='error') webpage, 'error message', default=None, group='error')
if error_msg: if error_msg:
@ -285,9 +311,9 @@ class PornHubIE(PornHubBaseIE):
'PornHub said: %s' % error_msg, 'PornHub said: %s' % error_msg,
expected=True, video_id=video_id) expected=True, video_id=video_id)
if any(re.search(p, webpage) for p in ( if bool(get_element_by_class('geoBlocked', webpage)
r'class=["\']geoBlocked["\']', or self._search_regex(
r'>\s*This content is unavailable in your country')): r'>\s*This content is (unavailable) in your country', webpage, 'geo-restriction', default=False)):
self.raise_geo_restricted() self.raise_geo_restricted()
# video_title from flashvars contains whitespace instead of non-ASCII (see # video_title from flashvars contains whitespace instead of non-ASCII (see
@ -304,36 +330,34 @@ class PornHubIE(PornHubBaseIE):
video_urls_set = set() video_urls_set = set()
subtitles = {} subtitles = {}
flashvars = self._parse_json( def add_video_url(video_url, quality=None):
self._search_regex( v_url = url_or_none(video_url)
r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'), if not v_url:
video_id) return
if flashvars: if v_url in video_urls_set:
subtitle_url = url_or_none(flashvars.get('closedCaptionsFile')) return
if subtitle_url: video_urls.append((v_url, quality))
subtitles.setdefault('en', []).append({ video_urls_set.add(v_url)
'url': subtitle_url,
'ext': 'srt',
})
thumbnail = flashvars.get('image_url')
duration = int_or_none(flashvars.get('video_duration'))
media_definitions = flashvars.get('mediaDefinitions')
if isinstance(media_definitions, list):
for definition in media_definitions:
if not isinstance(definition, dict):
continue
video_url = definition.get('videoUrl')
if not video_url or not isinstance(video_url, compat_str):
continue
if video_url in video_urls_set:
continue
video_urls_set.add(video_url)
video_urls.append(
(video_url, int_or_none(definition.get('quality'))))
else:
thumbnail, duration = [None] * 2
def extract_js_vars(webpage, pattern, default=NO_DEFAULT): flashvars = self._search_json(r'var\s+flashvars_\d+\s*=', webpage, 'flashvars', video_id)
flashvars = traverse_obj(flashvars, {
'closedCaptionsFile': ('closedCaptionsFile', T(url_or_none)),
'image_url': ('image_url', T(url_or_none)),
'video_duration': ('video_duration', T(int_or_none)),
'mediaDefinitions': ('mediaDefinitions', lambda _, v: v['videoUrl']),
}) or {}
subtitle_url = flashvars.get('closedCaptionsFile')
if subtitle_url:
subtitles.setdefault('en', []).append({
'url': subtitle_url,
'ext': 'srt',
})
thumbnail = flashvars.get('image_url')
duration = flashvars.get('video_duration')
for definition in flashvars.get('mediaDefinitions') or []:
add_video_url(definition['videoUrl'], int_or_none(definition.get('quality')))
def extract_js_vars(webpage, pattern, default=None):
assignments = self._search_regex( assignments = self._search_regex(
pattern, webpage, 'encoded url', default=default) pattern, webpage, 'encoded url', default=default)
if not assignments: if not assignments:
@ -363,51 +387,33 @@ class PornHubIE(PornHubBaseIE):
js_vars[vname] = parse_js_value(value) js_vars[vname] = parse_js_value(value)
return js_vars return js_vars
def add_video_url(video_url):
v_url = url_or_none(video_url)
if not v_url:
return
if v_url in video_urls_set:
return
video_urls.append((v_url, None))
video_urls_set.add(v_url)
def parse_quality_items(quality_items): def parse_quality_items(quality_items):
q_items = self._parse_json(quality_items, video_id, fatal=False) q_items = self._parse_json(quality_items, video_id, fatal=False)
if not isinstance(q_items, list): for v_url in traverse_obj(q_items, (Ellipsis, 'url')):
return add_video_url(v_url)
for item in q_items:
if isinstance(item, dict):
add_video_url(item.get('url'))
if not video_urls: if not video_urls:
FORMAT_PREFIXES = ('media', 'quality', 'qualityItems') FORMAT_PREFIXES = ('media', 'quality', 'qualityItems')
js_vars = extract_js_vars( js_vars = extract_js_vars(
webpage, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES), webpage, r'(var\s+(?:%s)_.+)' % '|'.join(FORMAT_PREFIXES))
default=None) for key, format_url in js_vars.items():
if js_vars: if key.startswith(FORMAT_PREFIXES[-1]):
for key, format_url in js_vars.items(): parse_quality_items(format_url)
if key.startswith(FORMAT_PREFIXES[-1]): elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]):
parse_quality_items(format_url) add_video_url(format_url)
elif any(key.startswith(p) for p in FORMAT_PREFIXES[:2]): if not video_urls and get_element_by_id('lockedPlayer', webpage):
add_video_url(format_url)
if not video_urls and re.search(
r'<[^>]+\bid=["\']lockedPlayer', webpage):
raise ExtractorError( raise ExtractorError(
'Video %s is locked' % video_id, expected=True) 'Video %s is locked' % video_id, expected=True)
if not video_urls: if not video_urls:
js_vars = extract_js_vars( js_vars = extract_js_vars(
dl_webpage('tv'), r'(var.+?mediastring.+?)</script>') dl_webpage('tv'), r'(var.+?mediastring.+?)</script>')
add_video_url(js_vars['mediastring']) add_video_url(traverse_obj(js_vars, 'mediastring'))
for mobj in re.finditer( for mobj in re.finditer(
r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1', r'<a[^>]+\bclass=["\']downloadBtn\b[^>]+\bhref=(["\'])(?P<url>(?:(?!\1).)+)\1',
webpage): webpage):
video_url = mobj.group('url') add_video_url(mobj.group('url'))
if video_url not in video_urls_set:
video_urls.append((video_url, None))
video_urls_set.add(video_url)
upload_date = None upload_date = None
formats = [] formats = []
@ -433,6 +439,13 @@ class PornHubIE(PornHubBaseIE):
'height': height, 'height': height,
}) })
if not video_urls:
# import here to avoid mutually recursive dependency
from .generic import GenericIE
ret = GenericIE.generic_url_result(url, video_id=video_id, video_title=title, force_videoid=True)
ret['_type'] = 'url_transparent'
return ret
for video_url, height in video_urls: for video_url, height in video_urls:
if not upload_date: if not upload_date:
upload_date = self._search_regex( upload_date = self._search_regex(
@ -440,52 +453,55 @@ class PornHubIE(PornHubBaseIE):
if upload_date: if upload_date:
upload_date = upload_date.replace('/', '') upload_date = upload_date.replace('/', '')
if '/video/get_media' in video_url: if '/video/get_media' in video_url:
# self._set_cookie(host, 'platform', 'tv')
medias = self._download_json(video_url, video_id, fatal=False) medias = self._download_json(video_url, video_id, fatal=False)
if isinstance(medias, list): for media in traverse_obj(medias, lambda _, v: v['videoUrl']):
for media in medias: video_url = url_or_none(media['videoUrl'])
if not isinstance(media, dict): if not video_url:
continue continue
video_url = url_or_none(media.get('videoUrl')) height = int_or_none(media.get('quality'))
if not video_url: add_format(video_url, height)
continue
height = int_or_none(media.get('quality'))
add_format(video_url, height)
continue continue
add_format(video_url) add_format(video_url)
self._sort_formats( self._sort_formats(
formats, field_preference=('height', 'width', 'fps', 'format_id')) formats, field_preference=('height', 'width', 'fps', 'format_id'))
model_profile = self._search_json(
r'var\s+MODEL_PROFILE\s*=', webpage, 'model profile', video_id, fatal=False)
video_uploader = self._html_search_regex( video_uploader = self._html_search_regex(
r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<', r'(?s)From:&nbsp;.+?<(?:a\b[^>]+\bhref=["\']/(?:(?:user|channel)s|model|pornstar)/|span\b[^>]+\bclass=["\']username)[^>]+>(.+?)<',
webpage, 'uploader', default=None) webpage, 'uploader', default=None) or model_profile.get('username')
def extract_vote_count(kind, name): def extract_vote_count(kind, name):
return self._extract_count( return self._extract_count(
(r'<span[^>]+\bclass="votes%s"[^>]*>([\d,\.]+)</span>' % kind, (r'<span[^>]+\bclass="votes%s"[^>]*>(\d[\d,\.]*[kKmM]?)</span>' % kind,
r'<span[^>]+\bclass=["\']votes%s["\'][^>]*\bdata-rating=["\'](\d+)' % kind), r'<span[^>]+\bclass=["\']votes%s["\'][^>]*\bdata-rating=["\'](\d+)' % kind),
webpage, name) webpage, name)
view_count = self._extract_count( view_count = self._extract_count(
r'<span class="count">([\d,\.]+)</span> [Vv]iews', webpage, 'view') r'<span class="count">(\d[\d,\.]*[kKmM]?)</span> [Vv]iews', webpage, 'view')
like_count = extract_vote_count('Up', 'like') like_count = extract_vote_count('Up', 'like')
dislike_count = extract_vote_count('Down', 'dislike') dislike_count = extract_vote_count('Down', 'dislike')
comment_count = self._extract_count( comment_count = self._extract_count(
r'All Comments\s*<span>\(([\d,.]+)\)', webpage, 'comment') r'All Comments\s*<span>\((\d[\d,\.]*[kKmM]?)\)', webpage, 'comment')
def extract_list(meta_key): def extract_list(meta_key):
div = self._search_regex( div = self._search_regex(
r'(?s)<div[^>]+\bclass=["\'].*?\b%sWrapper[^>]*>(.+?)</div>' r'(?s)<div[^>]+\bclass=["\'].*?\b%sWrapper[^>]*>(.+?)</div>'
% meta_key, webpage, meta_key, default=None) % meta_key, webpage, meta_key, default=None)
if div: if div:
return re.findall(r'<a[^>]+\bhref=[^>]+>([^<]+)', div) return [clean_html(x) for x in re.findall(r'(?s)<a[^>]+\bhref=[^>]+>.+?</a>', div)]
info = self._search_json_ld(webpage, video_id, default={}) info = self._search_json_ld(webpage, video_id, default={})
# description provided in JSON-LD is irrelevant # description provided in JSON-LD is irrelevant
info['description'] = None for k in ('url', 'description'):
info.pop(k, None)
return merge_dicts({ return merge_dicts(info, {
'id': video_id, 'id': video_id,
'uploader': video_uploader, 'uploader': video_uploader,
'uploader_id': remove_start(model_profile.get('modelProfileLink'), '/model/'),
'upload_date': upload_date, 'upload_date': upload_date,
'title': title, 'title': title,
'thumbnail': thumbnail, 'thumbnail': thumbnail,
@ -498,8 +514,9 @@ class PornHubIE(PornHubBaseIE):
'age_limit': 18, 'age_limit': 18,
'tags': extract_list('tags'), 'tags': extract_list('tags'),
'categories': extract_list('categories'), 'categories': extract_list('categories'),
'cast': extract_list('pornstars'),
'subtitles': subtitles, 'subtitles': subtitles,
}, info) })
class PornHubPlaylistBaseIE(PornHubBaseIE): class PornHubPlaylistBaseIE(PornHubBaseIE):
@ -512,65 +529,28 @@ class PornHubPlaylistBaseIE(PornHubBaseIE):
# drop-down menu that uses similar pattern for videos (see # drop-down menu that uses similar pattern for videos (see
# https://github.com/ytdl-org/youtube-dl/issues/11594). # https://github.com/ytdl-org/youtube-dl/issues/11594).
container = self._search_regex( container = self._search_regex(
r'(?s)(<div[^>]+class=["\']container.+)', webpage, r'(?s)(<div\s[^>]*class=["\']container.+)', webpage,
'container', default=webpage) 'container', default=webpage)
def entries():
seen_ids = set()
for m in re.finditer(r'<\w+\s[^>]*(?<!-)\bhref\s*=\s*.("|\'|\b)%s\1[^>]*>' % (PornHubIE._PORNHUB_PATH_RE,), container):
video_id = m.group('id')
if video_id:
if video_id in seen_ids:
continue
seen_ids.add(video_id)
elt = extract_attributes(m.group(0))
video_url = urljoin(host, elt.get('href'))
yield video_url, video_id, elt.get('title')
return [ return [
self.url_result( self.url_result(
'http://www.%s/%s' % (host, video_url), video_url, PornHubIE.ie_key(), video_title=title, video_id=video_id)
PornHubIE.ie_key(), video_title=title) for video_url, video_id, title in entries()
for video_url, title in orderedSet(re.findall(
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"[^>]*\s+title="([^"]+)"',
container))
] ]
class PornHubUserIE(PornHubPlaylistBaseIE):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?%s/(?:(?:user|channel)s|model|pornstar)/(?P<id>[^/?#&]+))(?:[?#&]|/(?!videos)|$)' % PornHubBaseIE._PORNHUB_HOST_RE
_TESTS = [{
'url': 'https://www.pornhub.com/model/zoe_ph',
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/pornstar/liz-vicious',
'info_dict': {
'id': 'liz-vicious',
},
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/users/russianveet69',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/channels/povd',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/model/zoe_ph?abc=1',
'only_matching': True,
}, {
# Unavailable via /videos page, but available with direct pagination
# on pornstar page (see [1]), requires premium
# 1. https://github.com/ytdl-org/youtube-dl/issues/27853
'url': 'https://www.pornhubpremium.com/pornstar/sienna-west',
'only_matching': True,
}, {
# Same as before, multi page
'url': 'https://www.pornhubpremium.com/pornstar/lily-labeau',
'only_matching': True,
}, {
'url': 'https://pornhubthbh7ap3u.onion/model/zoe_ph',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user_id = mobj.group('id')
videos_url = '%s/videos' % mobj.group('url')
page = self._extract_page(url)
if page:
videos_url = update_url_query(videos_url, {'page': page})
return self.url_result(
videos_url, ie=PornHubPagedVideoListIE.ie_key(), video_id=user_id)
class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE): class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
@staticmethod @staticmethod
def _has_more(webpage): def _has_more(webpage):
@ -617,23 +597,77 @@ class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
page_entries = self._extract_entries(webpage, host) page_entries = self._extract_entries(webpage, host)
if not page_entries: if not page_entries:
break break
for e in page_entries: for from_ in page_entries:
yield e yield from_
if not self._has_more(webpage): if not self._has_more(webpage):
break break
def _real_extract(self, url): def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url) mobj = self._match_valid_url(url)
host = mobj.group('host') host = mobj.group('host')
item_id = mobj.group('id') item_id = mobj.group('id')
self._login(host) self._login(host)
self._set_age_cookies(host)
return self.playlist_result(self._entries(url, host, item_id), item_id) return self.playlist_result(self._entries(url, host, item_id), item_id)
class PornHubUserIE(PornHubPagedPlaylistBaseIE):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?%s/(?P<id>(?:(?:user|channel)s|model|pornstar)/[^/?#&]+))(?:[?#&]|/(?!videos)|$)' % PornHubBaseIE._PORNHUB_HOST_RE
_TESTS = [{
'url': 'https://www.pornhub.com/model/zoe_ph',
'info_dict': {
'id': 'zoe_ph',
},
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/pornstar/liz-vicious',
'info_dict': {
'id': 'liz-vicious',
},
'playlist_mincount': 118,
}, {
'url': 'https://www.pornhub.com/users/russianveet69',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/channels/povd',
'only_matching': True,
}, {
'url': 'https://www.pornhub.com/model/zoe_ph?abc=1',
'only_matching': True,
}, {
# Unavailable via /videos page, but available with direct pagination
# on pornstar page (see [1]), requires premium
# 1. https://github.com/ytdl-org/youtube-dl/issues/27853
'url': 'https://www.pornhubpremium.com/pornstar/sienna-west',
'only_matching': True,
}, {
# Same as before, multi page
'url': 'https://www.pornhubpremium.com/pornstar/lily-labeau',
'only_matching': True,
}, {
'url': 'https://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/model/zoe_ph',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
user_id, host = mobj.group('id', 'host')
videos_url = '%s/videos' % mobj.group('url')
page = self._extract_page(url)
if page:
videos_url = update_url_query(videos_url, {'page': page})
self._login(host)
return self.playlist_result(self._entries(videos_url, host, user_id), user_id.split('/')[-1])
# return self.url_result(
# videos_url, ie=PornHubPagedVideoListIE.ie_key(), video_id=user_id)
class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE): class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
_VALID_URL = r'https?://(?:[^/]+\.)?%s/(?P<id>(?:[^/]+/)*[^/?#&]+)' % PornHubBaseIE._PORNHUB_HOST_RE _VALID_URL = r'https?://(?:[^/]+\.)?%s/(?!playlist/|gif/)(?P<id>(?:[^/]+/)*[^/?#&]+)' % PornHubBaseIE._PORNHUB_HOST_RE
_TESTS = [{ _TESTS = [{
'url': 'https://www.pornhub.com/model/zoe_ph/videos', 'url': 'https://www.pornhub.com/model/zoe_ph/videos',
'only_matching': True, 'only_matching': True,
@ -642,16 +676,20 @@ class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
'only_matching': True, 'only_matching': True,
}, { }, {
'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos', 'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos',
'info_dict': { 'only_matching': True,
'id': 'pornstar/jenny-blighe/videos',
},
'playlist_mincount': 149,
}, { }, {
'url': 'https://www.pornhub.com/pornstar/jenny-blighe/videos?page=3', 'url': 'https://www.pornhub.com/pornstar/kylie-quinn/videos',
'info_dict': { 'info_dict': {
'id': 'pornstar/jenny-blighe/videos', 'id': 'pornstar/kylie-quinn/videos',
}, },
'playlist_mincount': 40, 'playlist_mincount': 80,
}, {
'url': 'https://www.pornhub.com/pornstar/kylie-quinn/videos?page=2',
'info_dict': {
'id': 'pornstar/kylie-quinn/videos',
},
# specific page: process just that page
'playlist_count': 40,
}, { }, {
# default sorting as Top Rated Videos # default sorting as Top Rated Videos
'url': 'https://www.pornhub.com/channels/povd/videos', 'url': 'https://www.pornhub.com/channels/povd/videos',
@ -727,27 +765,14 @@ class PornHubPagedVideoListIE(PornHubPagedPlaylistBaseIE):
'url': 'https://www.pornhub.com/video/incategories/60fps-1/hd-porn', 'url': 'https://www.pornhub.com/video/incategories/60fps-1/hd-porn',
'only_matching': True, 'only_matching': True,
}, { }, {
'url': 'https://www.pornhub.com/playlist/44121572', 'url': 'https://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/model/zoe_ph/videos',
'info_dict': {
'id': 'playlist/44121572',
},
'playlist_mincount': 132,
}, {
'url': 'https://www.pornhub.com/playlist/4667351',
'only_matching': True,
}, {
'url': 'https://de.pornhub.com/playlist/4667351',
'only_matching': True,
}, {
'url': 'https://pornhubthbh7ap3u.onion/model/zoe_ph/videos',
'only_matching': True, 'only_matching': True,
}] }]
@classmethod @classmethod
def suitable(cls, url): def suitable(cls, url):
return (False return (not any(ph.suitable(url) for ph in (PornHubIE, PornHubUserIE, PornHubUserVideosUploadIE))
if PornHubIE.suitable(url) or PornHubUserIE.suitable(url) or PornHubUserVideosUploadIE.suitable(url) and super(PornHubPagedVideoListIE, cls).suitable(url))
else super(PornHubPagedVideoListIE, cls).suitable(url))
class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE): class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
@ -762,6 +787,62 @@ class PornHubUserVideosUploadIE(PornHubPagedPlaylistBaseIE):
'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload', 'url': 'https://www.pornhub.com/model/zoe_ph/videos/upload',
'only_matching': True, 'only_matching': True,
}, { }, {
'url': 'http://pornhubthbh7ap3u.onion/pornstar/jenny-blighe/videos/upload', 'url': 'http://pornhubvybmsymdol4iibwgwtkpwmeyd6luq2gxajgjzfjvotyt5zhyd.onion/pornstar/jenny-blighe/videos/upload',
'only_matching': True, 'only_matching': True,
}] }]
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
_VALID_URL = r'(?P<url>https?://(?:[^/]+\.)?%s/playlist/(?P<id>[^/?#&]+))' % PornHubBaseIE._PORNHUB_HOST_RE
_TESTS = [{
'url': 'https://www.pornhub.com/playlist/44121572',
'info_dict': {
'id': '44121572',
},
'playlist_mincount': 55,
}, {
'url': 'https://www.pornhub.com/playlist/4667351',
'only_matching': True,
}, {
'url': 'https://de.pornhub.com/playlist/4667351',
'only_matching': True,
}, {
'url': 'https://de.pornhub.com/playlist/4667351?page=2',
'only_matching': True,
}]
def _entries(self, url, host, item_id):
webpage = self._download_webpage(url, item_id, 'Downloading page 1')
playlist_id = self._search_regex(r'var\s+playlistId\s*=\s*"([^"]+)"', webpage, 'playlist_id')
video_count = int_or_none(
self._search_regex(r'var\s+itemsCount\s*=\s*([0-9]+)\s*\|\|', webpage, 'video_count'))
token = self._search_regex(r'var\s+token\s*=\s*"([^"]+)"', webpage, 'token')
page_count = (video_count - 36 + 39) // 40 + 1
page_entries = self._extract_entries(webpage, host)
def download_page(page_num):
note = 'Downloading page {0}'.format(page_num)
page_url = 'https://www.{0}/playlist/viewChunked'.format(host)
return self._download_webpage(page_url, item_id, note, query={
'id': playlist_id,
'page': page_num,
'token': token,
})
for page_num in range(1, page_count + 1):
if page_num > 1:
webpage = download_page(page_num)
page_entries = self._extract_entries(webpage, host)
if not page_entries:
break
for from_ in page_entries:
yield from_
def _real_extract(self, url):
mobj = self._match_valid_url(url)
host, item_id = mobj.group('host', 'id')
self._login(host)
self._set_age_cookies(host)
return self.playlist_result(self._entries(mobj.group('url'), host, item_id), item_id)
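A worked example of the page-count arithmetic in PornHubPlaylistIE._entries() above, assuming (as the constants imply) that the initial playlist page lists 36 videos and each /playlist/viewChunked page lists 40:

    def playlist_page_count(video_count):
        # first page: 36 items; each chunked page: 40 items
        return (video_count - 36 + 39) // 40 + 1

    for n in (36, 37, 132):
        print(n, playlist_page_count(n))  # 1, 2 and 4 pages respectively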

youtube_dl/extractor/youtube.py

@@ -692,9 +692,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         'invidious': '|'.join(_INVIDIOUS_SITES),
     }
     _PLAYER_INFO_RE = (
-        r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})//(?:tv-)?player',
-        r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
-        r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
+        r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
+        r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
+        r'\b(?P<id>vfl[a-zA-Z0-9_-]{6,})\b.*?\.js$',
     )

     _SUBTITLE_FORMATS = ('json3', 'srv1', 'srv2', 'srv3', 'ttml', 'vtt')
@@ -1626,15 +1626,13 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         """ Return a string representation of a signature """
         return '.'.join(compat_str(len(part)) for part in example_sig.split('.'))

-    @classmethod
-    def _extract_player_info(cls, player_url):
-        for player_re in cls._PLAYER_INFO_RE:
-            id_m = re.search(player_re, player_url)
-            if id_m:
-                break
-        else:
-            raise ExtractorError('Cannot identify player %r' % player_url)
-        return id_m.group('id')
+    def _extract_player_info(self, player_url):
+        try:
+            return self._search_regex(
+                self._PLAYER_INFO_RE, player_url, 'player info', group='id')
+        except ExtractorError as e:
+            raise ExtractorError(
+                'Cannot identify player %r' % (player_url,), cause=e)

     def _load_player(self, video_id, player_url, fatal=True, player_id=None):
         if not player_id:
@@ -1711,6 +1709,23 @@
             ' return %s\n') % (signature_id_tuple, expr_code)
         self.to_screen('Extracted signature function:\n' + code)

+    def _extract_sig_fn(self, jsi, funcname):
+        var_ay = self._search_regex(
+            r'''(?x)
+                (?:\*/|\{|\n|^)\s*(?:'[^']+'\s*;\s*)
+                    (var\s*[\w$]+\s*=\s*(?:
+                        ('|")(?:\\\2|(?!\2).)+\2\s*\.\s*split\(\s*('|")\W+\3\s*\)|
+                        \[\s*(?:('|")(?:\\\4|(?!\4).)*\4\s*(?:(?=\])|,\s*))+\]
+                    ))(?=\s*[,;])
+            ''', jsi.code, 'useful values', default='')
+
+        sig_fn = jsi.extract_function_code(funcname)
+
+        if var_ay:
+            sig_fn = (sig_fn[0], ';\n'.join((var_ay, sig_fn[1])))
+
+        return sig_fn
+
     def _parse_sig_js(self, jscode):
         # Examples where `sig` is funcname:
         # sig=function(a){a=a.split(""); ... ;return a.join("")};
@@ -1736,8 +1751,12 @@
             jscode, 'Initial JS player signature function name', group='sig')

         jsi = JSInterpreter(jscode)
-        initial_function = jsi.extract_function(funcname)
-        return lambda s: initial_function([s])
+
+        initial_function = self._extract_sig_fn(jsi, funcname)
+
+        func = jsi.extract_function_from_code(*initial_function)
+
+        return lambda s: func([s])

     def _cached(self, func, *cache_id):
         def inner(*args, **kwargs):
@@ -1856,15 +1875,9 @@
     def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):

-        var_ay = self._search_regex(
-            r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
-            jsi.code, 'useful values', default='')

         func_name = self._extract_n_function_name(jsi.code)

-        func_code = jsi.extract_function_code(func_name)
-        if var_ay:
-            func_code = (func_code[0], ';\n'.join((var_ay, func_code[1])))
+        func_code = self._extract_sig_fn(jsi, func_name)

         if player_id:
             self.cache.store('youtube-nsig', player_id, func_code)
@@ -2136,7 +2149,8 @@
         video_details = merge_dicts(*traverse_obj(
             (player_response, api_player_response),
             (Ellipsis, 'videoDetails', T(dict))))
-        player_response.update(api_player_response or {})
+        player_response.update(filter_dict(
+            api_player_response or {}, cndn=lambda k, _: k != 'captions'))
         player_response['videoDetails'] = video_details

         def is_agegated(playability):
@@ -2566,8 +2580,8 @@
         }

         pctr = traverse_obj(
-            player_response,
-            ('captions', 'playerCaptionsTracklistRenderer', T(dict)))
+            (player_response, api_player_response),
+            (Ellipsis, 'captions', 'playerCaptionsTracklistRenderer', T(dict)))
         if pctr:
             def process_language(container, base_url, lang_code, query):
                 lang_subs = []
@@ -2584,20 +2598,21 @@
         def process_subtitles():
             subtitles = {}
             for caption_track in traverse_obj(pctr, (
-                    'captionTracks', lambda _, v: v.get('baseUrl'))):
+                    Ellipsis, 'captionTracks', lambda _, v: (
+                        v.get('baseUrl') and v.get('languageCode')))):
                 base_url = self._yt_urljoin(caption_track['baseUrl'])
                 if not base_url:
                     continue
+                lang_code = caption_track['languageCode']
                 if caption_track.get('kind') != 'asr':
-                    lang_code = caption_track.get('languageCode')
-                    if not lang_code:
-                        continue
                     process_language(
                         subtitles, base_url, lang_code, {})
                     continue
                 automatic_captions = {}
+                process_language(
+                    automatic_captions, base_url, lang_code, {})
                 for translation_language in traverse_obj(pctr, (
-                        'translationLanguages', lambda _, v: v.get('languageCode'))):
+                        Ellipsis, 'translationLanguages', lambda _, v: v.get('languageCode'))):
                     translation_language_code = translation_language['languageCode']
                     process_language(
                         automatic_captions, base_url, translation_language_code,
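A quick sanity check (a sketch, with the two updated patterns copied from the diff above) that the revised _PLAYER_INFO_RE matches the tce-style and TV player URLs added to the tests:

    import re

    PLAYER_INFO_RE = (
        r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
        r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias(?:_tce)?\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
    )

    def player_id(url):
        for pattern in PLAYER_INFO_RE:
            m = re.search(pattern, url)
            if m:
                return m.group('id')

    assert player_id('https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js') == 'e7567ecf'
    assert player_id('https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js') == '643afba4'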

youtube_dl/jsinterp.py

@@ -678,7 +678,7 @@ class JSInterpreter(object):
             return len(obj)
         try:
             return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)]
-        except (TypeError, KeyError, IndexError) as e:
+        except (TypeError, KeyError, IndexError, ValueError) as e:
             # allow_undefined is None gives correct behaviour
             if allow_undefined or (
                     allow_undefined is None and not isinstance(e, TypeError)):
@@ -1038,6 +1038,10 @@ class JSInterpreter(object):
                 left_val = self._index(left_val, idx)
             if isinstance(idx, float):
                 idx = int(idx)
+            if isinstance(left_val, list) and len(left_val) <= int_or_none(idx, default=-1):
+                # JS Array is a sparsely assignable list
+                # TODO: handle extreme sparsity without memory bloat, eg using auxiliary dict
+                left_val.extend((idx - len(left_val) + 1) * [JS_Undefined])
             left_val[idx] = self._operator(
                 m.group('op'), self._index(left_val, idx) if m.group('op') else None,
                 m.group('expr'), expr, local_vars, allow_recursion)
@@ -1204,9 +1208,10 @@ class JSInterpreter(object):
         elif member == 'join':
             assertion(isinstance(obj, list), 'must be applied on a list')
             assertion(len(argvals) <= 1, 'takes at most one argument')
-            return (',' if len(argvals) == 0 else argvals[0]).join(
-                ('' if x in (None, JS_Undefined) else _js_toString(x))
-                for x in obj)
+            return (',' if len(argvals) == 0 or argvals[0] in (None, JS_Undefined)
+                    else argvals[0]).join(
+                ('' if x in (None, JS_Undefined) else _js_toString(x))
+                for x in obj)
         elif member == 'reverse':
             assertion(not argvals, 'does not take any arguments')
             obj.reverse()
@@ -1364,19 +1369,21 @@ class JSInterpreter(object):
         code, _ = self._separate_at_paren(func_m.group('code'))  # refine the match
         return self.build_arglist(func_m.group('args')), code

-    def extract_function(self, funcname):
+    def extract_function(self, funcname, *global_stack):
         return function_with_repr(
-            self.extract_function_from_code(*self.extract_function_code(funcname)),
+            self.extract_function_from_code(*itertools.chain(
+                self.extract_function_code(funcname), global_stack)),
             'F<%s>' % (funcname,))

     def extract_function_from_code(self, argnames, code, *global_stack):
         local_vars = {}
+        start = None
         while True:
-            mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code)
+            mobj = re.search(r'function\((?P<args>[^)]*)\)\s*{', code[start:])
             if mobj is None:
                 break
-            start, body_start = mobj.span()
+            start, body_start = ((start or 0) + x for x in mobj.span())
             body, remaining = self._separate_at_paren(code[body_start - 1:])
             name = self._named_object(local_vars, self.extract_function_from_code(
                 [x.strip() for x in mobj.group('args').split(',')],