Compare commits


1 commit

Author  SHA1        Message                           Date
dirkf   771f0dd360  Merge 10f38086d4 into c5098961b0  2024-10-07 14:42:45 +00:00
5 changed files with 101 additions and 213 deletions

View File

@@ -1603,14 +1603,11 @@ Line 1
     def test_get_element_by_class(self):
         html = '''
-            <span class="foo bar baz-bam">nice</span>
+            <span class="foo bar">nice</span>
         '''
         self.assertEqual(get_element_by_class('foo', html), 'nice')
-        self.assertEqual(get_element_by_class('bar', html), 'nice')
         self.assertEqual(get_element_by_class('no-such-class', html), None)
-        self.assertEqual(get_element_by_class('baz', html), None)
-        self.assertEqual(get_element_by_class('bam', html), None)

     def test_get_element_by_attribute(self):
         html = '''
@@ -1629,13 +1626,10 @@ Line 1
     def test_get_elements_by_class(self):
         html = '''
-            <span class="foo bar baz-bam">nice</span><span class="foo bar">also nice</span>
+            <span class="foo bar">nice</span><span class="foo bar">also nice</span>
         '''
         self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
-        self.assertEqual(get_elements_by_class('bar', html), ['nice', 'also nice'])
-        self.assertEqual(get_elements_by_class('baz', html), [])
-        self.assertEqual(get_elements_by_class('bam', html), [])
         self.assertEqual(get_elements_by_class('no-such-class', html), [])

     def test_get_elements_by_attribute(self):

View File

@@ -31,14 +31,10 @@ from ..utils import (
     parse_resolution,
     sanitized_Request,
     smuggle_url,
-    strip_or_none,
-    T,
-    traverse_obj,
     unescapeHTML,
     unified_timestamp,
     unsmuggle_url,
     UnsupportedError,
-    update_url_query,
     url_or_none,
     urljoin,
     xpath_attr,
@@ -2241,7 +2237,6 @@ class GenericIE(InfoExtractor):
             'display_id': 'kelis-4th-of-july',
             'ext': 'mp4',
             'title': 'Kelis - 4th Of July',
-            'description': 'Kelis - 4th Of July',
             'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
         },
     }, {
@@ -2251,7 +2246,7 @@ class GenericIE(InfoExtractor):
             'id': '105',
             'display_id': 'kelis-4th-of-july',
             'ext': 'mp4',
-            'title': r're:Kelis - 4th Of July(?: / Embed Player)?$',
+            'title': 'Kelis - 4th Of July / Embed Player',
             'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
         },
         'params': {
@@ -2302,32 +2297,6 @@ class GenericIE(InfoExtractor):
             'title': 'Syren De Mer onlyfans_05-07-2020Have_a_happy_safe_holiday5f014e68a220979bdb8cd_source / Embed плеер',
             'thumbnail': r're:https?://www\.camhub\.world/contents/videos_screenshots/389000/389508/preview\.mp4\.jpg',
         },
-        'skip': 'needs Referer ?',
-    }, {
-        # KVS Player v10
-        'url': 'https://www.cambro.tv/588174/marleny-1/',
-        'md5': '759d2050590986c6fc341da0592c4d8e',
-        'info_dict': {
-            'id': '588174',
-            'display_id': 'marleny-1',
-            'ext': 'mp4',
-            'title': 'marleny 1',
-            'description': 'la maestra de tic toc',
-            'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/588000/588174/preview\.jpg',
-            'age_limit': 18,
-        },
-    }, {
-        # KVS Player v10 embed, NSFW
-        'url': 'https://www.cambro.tv/embed/436185',
-        'md5': '24338dc8b182900a2c9eda075a0a46c0',
-        'info_dict': {
-            'id': '436185',
-            'display_id': 'jaeandbailey-chaturbate-webcam-porn-videos',
-            'ext': 'mp4',
-            'title': 'jaeandbailey Chaturbate webcam porn videos',
-            'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/436000/436185/preview\.jpg',
-            'age_limit': 18,
-        },
     }, {
         'url': 'https://mrdeepfakes.com/video/5/selena-gomez-pov-deep-fakes',
         'md5': 'fec4ad5ec150f655e0c74c696a4a2ff4',
@@ -2340,16 +2309,14 @@ class GenericIE(InfoExtractor):
             'height': 720,
             'age_limit': 18,
         },
-        # 'skip': 'Geo-blocked in some mjurisdictions',
     }, {
-        # KVS Player v2
        'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/',
         'md5': 'e2f0a4c329f7986280b7328e24036d60',
         'info_dict': {
             'id': '284002',
             'display_id': 'just-out-of-the-shower-joi',
             'ext': 'mp4',
-            'title': r're:Just Out Of The Shower JOI(?: - Shooshtime)?$',
+            'title': 'Just Out Of The Shower JOI - Shooshtime',
             'height': 720,
             'age_limit': 18,
         },
@@ -2515,12 +2482,9 @@
             return '/'.join(urlparts) + '?' + url_query

         flashvars = self._search_regex(
-            r'''(?<![=!+*-])=\s*kt_player\s*\(\s*'kt_player'\s*,\s*[^)]+,\s*([\w$]+)\s*\)''',
-            webpage, 'flashvars name', default='flashvars')
-        flashvars = self._search_json(
-            r'<script(?:\s[^>]*)?>[\s\S]*?var\s+%s\s*=' % (flashvars,),
-            webpage, 'flashvars', video_id, end_pattern=r';[\s\S]*?</script>',
-            transform_source=js_to_json)
+            r'(?s)<script\b[^>]*>.*?var\s+flashvars\s*=\s*(\{.+?\});.*?</script>',
+            webpage, 'flashvars')
+        flashvars = self._parse_json(flashvars, video_id, transform_source=js_to_json)

         # extract the part after the last / as the display_id from the
         # canonical URL.
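The two sides of the hunk above differ in how they locate the player settings: the '+' side hard-codes a `var flashvars = {...}` literal, while the '-' side first reads the variable name out of the `kt_player(...)` call and then looks that variable up. A minimal sketch of the two-step lookup using plain `re` against a made-up page snippet (variable name, URLs and values are hypothetical):

    import re

    webpage = '''<script>
      var qwerty123 = {video_id: '105', video_url: '/get_file/1/abc/105/105.mp4/'};
      player = kt_player('kt_player', 'https://example.invalid/player.swf', qwerty123);
    </script>'''

    # step 1: find the name of the variable passed to kt_player()
    name = re.search(
        r"=\s*kt_player\s*\(\s*'kt_player'\s*,\s*[^)]+,\s*([\w$]+)\s*\)", webpage).group(1)
    # step 2: pull that variable's object literal (still JS, to be fed through js_to_json)
    obj = re.search(r'var\s+%s\s*=\s*(\{.+?\});' % re.escape(name), webpage, re.S).group(1)
    print(name, obj)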
@@ -2529,7 +2493,12 @@
             r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
             webpage, 'display_id', fatal=False
         )
-        title = flashvars.get('video_title') or self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
+        title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
+        thumbnail = flashvars['preview_url']
+        if thumbnail.startswith('//'):
+            protocol, _, _ = url.partition('/')
+            thumbnail = protocol + thumbnail

         url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys()))
         formats = []
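For reference, the `url.partition('/')` trick on the '+' side turns a protocol-relative `preview_url` into an absolute one: for an `https://...` page, `partition('/')` yields `'https:'` as its first element. A small sketch with hypothetical values:

    url = 'https://kvs-demo.com/videos/105/kelis-4th-of-july/'   # hypothetical page URL
    thumbnail = '//kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg'
    if thumbnail.startswith('//'):
        protocol, _, _ = url.partition('/')   # 'https:'
        thumbnail = protocol + thumbnail
    print(thumbnail)   # https://kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg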
@@ -2537,13 +2506,9 @@
             if '/get_file/' not in flashvars[key]:
                 continue
             format_id = flashvars.get(key + '_text', key)
-            f_url = urljoin(url, getrealurl(flashvars[key], flashvars['license_code']))
-            rnd = flashvars.get('rnd', key)
-            if rnd:
-                f_url = update_url_query(f_url, {'rnd': rnd})
             formats.append(merge_dicts(
                 parse_resolution(format_id) or parse_resolution(flashvars[key]), {
-                    'url': f_url,
+                    'url': urljoin(url, getrealurl(flashvars[key], flashvars['license_code'])),
                     'format_id': format_id,
                     'ext': 'mp4',
                     'http_headers': {'Referer': url},
@@ -2553,31 +2518,13 @@
         self._sort_formats(formats)

-        csv2list = (T(lambda s: s.split(',')), Ellipsis, T(strip_or_none))
-        info = traverse_obj(flashvars, {
-            'tags': ('video_tags',) + csv2list,
-            'categories': ('video_categories',) + csv2list,
-            'thumbnails': (
-                T(dict.items), lambda _, k_v: k_v[0].startswith('preview_url'), {
-                    'url': (1, T(lambda u: urljoin(url, u))),
-                    'preference': (0, T(lambda k: 100 - len(k))),
-                }),
-        })
-        info = merge_dicts(info, {
+        return {
             'id': flashvars['video_id'],
             'display_id': display_id,
             'title': title,
+            'thumbnail': thumbnail,
             'formats': formats,
-        })
-
-        # check-porn test for embed pages
-        if 'age_limit' not in info and traverse_obj(info, (
-                ('title', (('tags', 'categories'), Ellipsis) or []),
-                T(lambda t: bool(re.search(r'(?i)(?:^|\s+)porn(?:$|\s+)', t)) or None)),
-                get_all=False):
-            info['age_limit'] = 18
-
-        return info
+        }

     def _real_extract(self, url):
         if url.startswith('//'):
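The `traverse_obj` spec removed on the '-' side of the hunk above is dense; roughly, `csv2list` splits a comma-separated flashvars string and strips each entry, and every `preview_url*` key becomes a thumbnail whose preference favours shorter key names. A plain-Python sketch of what that traversal computes (sample flashvars values are hypothetical; the real code resolves URLs with `urljoin`):

    flashvars = {
        'video_id': '105',
        'video_tags': 'pop, summer ,official video',
        'video_categories': 'music',
        'preview_url': '//kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
        'preview_url1': '//kvs-demo.com/contents/videos_screenshots/0/105/preview_big.jpg',
    }

    def csv2list(value):
        # split on ',' and strip each entry, mapping empty strings to None
        return [item.strip() or None for item in value.split(',')]

    info = {
        'tags': csv2list(flashvars['video_tags']),
        'categories': csv2list(flashvars['video_categories']),
        'thumbnails': [{
            'url': 'https:' + v if v.startswith('//') else v,
            'preference': 100 - len(k),          # 'preview_url' outranks 'preview_url1'
        } for k, v in flashvars.items() if k.startswith('preview_url')],
    }
    print(info['tags'])        # ['pop', 'summer', 'official video']
    print(info['thumbnails'][0]['preference'], info['thumbnails'][1]['preference'])  # 89 88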
@@ -3651,7 +3598,7 @@
             ), webpage, 'KVS player', group='ver', default=False)
         if found:
             self.report_extraction('%s: KVS Player' % (video_id, ))
-            if found.split('.')[0] not in ('2', '4', '5', '6', '10'):
+            if found.split('.')[0] not in ('4', '5', '6'):
                 self.report_warning('Untested major version (%s) in player engine - download may fail.' % (found, ))
             return merge_dicts(
                 self._extract_kvs(url, webpage, video_id),

View File

@@ -12,9 +12,7 @@ from ..utils import (
     clean_html,
     decode_packed_codes,
     determine_ext,
-    extract_attributes,
     ExtractorError,
-    get_element_by_class,
     get_element_by_id,
     int_or_none,
     merge_dicts,
@@ -58,40 +56,39 @@ def aa_decode(aa_code):
 class XFileShareIE(InfoExtractor):
     _SITES = (
-        # status check 2024-10: site availability, G site: search
-        (r'aparat\.cam', 'Aparat'),  # Cloudflare says host error 522, apparently changed to wolfstream.tv
-        (r'filemoon\.(?:sx|to|in)', 'FileMoon'),
-        # (r'gounlimited\.to', 'GoUnlimited'),  # domain not found
+        # status check 2024-02: site availability, G site: search
+        (r'aparat\.cam', 'Aparat'),  # Cloudflare says host error 522, apparently changed to wolfstreeam.tv
+        (r'filemoon\.sx/.', 'FileMoon'),
+        (r'gounlimited\.to', 'GoUnlimited'),  # no media pages listed
         (r'govid\.me', 'GoVid'),  # no media pages listed
-        (r'highstream\.tv', 'HighStream'),  # Cloudflare says host error 522, clipwatching.com now dead
-        (r'holavid\.com', 'HolaVid'),  # hoster default home page
-        # (r'streamty\.com', 'Streamty'),  # spam parking domain
-        # (r'thevideobee\.to', 'TheVideoBee'),  # domain for sale
-        (r'uqload\.ws', 'Uqload'),  # .com, .co, .to redirect here
-        # (r'(vadbam.net', 'VadBam'),  # domain not found
-        (r'(?:vedbam\.xyz|vadbam\.net|vbn\d.\vdbtm\.shop)', 'V?dB?m'),  # vidbom.com redirects here, but no valid media pages listed
+        (r'highstream\.tv', 'HighStream'),  # clipwatching.com redirects here
+        (r'holavid\.com', 'HolaVid'),  # Cloudflare says host error 522
+        # (r'streamty\.com', 'Streamty'),  # no media pages listed, connection timeout
+        # (r'thevideobee\.to', 'TheVideoBee'),  # no pages listed, refuses connection
+        (r'uqload\.to', 'Uqload'),  # .com, .co redirect here
+        (r'(?:vedbam\.xyz|vadbam.net)', 'V?dB?m'),  # vidbom.com redirects here, but no valid media pages listed
         (r'vidlo\.us', 'vidlo'),  # no valid media pages listed
         (r'vidlocker\.xyz', 'VidLocker'),  # no media pages listed
-        (r'(?:w\d\.)?viidshar\.com', 'VidShare'),  # vidshare.tv parked
+        (r'(?:w\d\.)?viidshar\.com', 'VidShare'),  # vidshare.tv redirects here
         # (r'vup\.to', 'VUp'),  # domain not found
-        # (r'wolfstream\.tv', 'WolfStream'),  # domain not found
-        (r'xvideosharing\.com', 'XVideoSharing'),
+        (r'wolfstream\.tv', 'WolfStream'),
+        (r'xvideosharing\.com', 'XVideoSharing'),  # just started showing 'maintenance mode'
     )

     IE_DESC = 'XFileShare-based sites: %s' % ', '.join(list(zip(*_SITES))[1])

-    _VALID_URL = (r'https?://(?:www\.)?(?P<host>%s)/(?P<sub>[a-z]/|)(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
+    _VALID_URL = (r'https?://(?:www\.)?(?P<host>%s)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                   % '|'.join(site for site in list(zip(*_SITES))[0]))
     _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:%s)/embed-[0-9a-zA-Z]+.*?)\1' % '|'.join(site for site in list(zip(*_SITES))[0])]

     _FILE_NOT_FOUND_REGEXES = (
-        r'>\s*(?:404 - )?File Not Found\s*<',
-        r'>\s*The file was removed by administrator\s*<',
+        r'>(?:404 - )?File Not Found<',
+        r'>The file was removed by administrator<',
     )
     _TITLE_REGEXES = (
         r'style="z-index: [0-9]+;">([^<]+)</span>',
         r'<td nowrap>([^<]+)</td>',
         r'h4-fine[^>]*>([^<]+)<',
-        r'>Watch (.+?)(?: mp4)?(?: The Ultimate Free Video Hosting Solution for Webmasters and Bloggers)?<',
+        r'>Watch (.+)[ <]',
         r'<h2 class="video-page-head">([^<]+)</h2>',
         r'<h2 style="[^"]*color:#403f3d[^"]*"[^>]*>([^<]+)<',  # streamin.to (dead)
         r'title\s*:\s*"([^"]+)"',  # govid.me
@@ -109,41 +106,38 @@ class XFileShareIE(InfoExtractor):
     _TESTS = [{
         'note': 'link in `sources`',
-        'url': 'https://uqload.ws/4sah252totrk.html',
-        'md5': '1f11151b5044862fbc3c112732f9f7d8',
+        'url': 'https://uqload.to/dcsu06gdb45o',
+        'md5': '7f8db187b254379440bf4fcad094ae86',
         'info_dict': {
-            'id': '4sah252totrk',
+            'id': 'dcsu06gdb45o',
             'ext': 'mp4',
-            'title': 'JEONGHAN WONWOO Interview With Allure Korea Arabic Sub',
+            'title': 'f2e31015957e74c8c8427982e161c3fc mp4',
             'thumbnail': r're:https://.*\.jpg'
         },
         'params': {
             'nocheckcertificate': True,
         },
-        # 'expected_warnings': ['Unable to extract JWPlayer data'],
+        'expected_warnings': ['Unable to extract JWPlayer data'],
     }, {
-        'note': 'link in Playerjs',  # need test with 'link in decoded `sources`'
-        'url': 'https://xvideosharing.com/8cnupzc1z8xq.html',
-        'md5': '9725ca7229e8f3046f2417da3bd5eddc',
+        'note': 'link in decoded `sources`',
+        'url': 'https://xvideosharing.com/1tlg6agrrdgc',
+        'md5': '2608ce41932c1657ae56258a64e647d9',
         'info_dict': {
-            'id': '8cnupzc1z8xq',
+            'id': '1tlg6agrrdgc',
             'ext': 'mp4',
-            'title': 'HEVC X265 Big Buck Bunny 1080 10s 20MB',
+            'title': '0121',
            'thumbnail': r're:https?://.*\.jpg',
         },
+        'skip': 'This server is in maintenance mode.',
     }, {
-        'note': 'JWPlayer link in un-p,a,c,k,e,d JS, in player frame',
-        'url': 'https://filemoon.sx/d/fbsxidybremo',
-        'md5': '82007a71661630f60e866f0d6ed31b2a',
+        'note': 'JWPlayer link in un-p,a,c,k,e,d JS',
+        'url': 'https://filemoon.sx/e/dw40rxrzruqz',
+        'md5': '5a713742f57ac4aef29b74733e8dda01',
         'info_dict': {
-            'id': 'fbsxidybremo',
-            'title': 'Uchouten',
+            'id': 'dw40rxrzruqz',
+            'title': 'dw40rxrzruqz',
             'ext': 'mp4'
         },
-        'params': {
-            'skip_download': 'ffmpeg',
-        },
-        'expected_warnings': ['hlsnative has detected features it does not support'],
     }, {
         'note': 'JWPlayer link in un-p,a,c,k,e,d JS',
         'url': 'https://vadbam.net/6lnbkci96wly.html',
@@ -156,7 +150,7 @@ class XFileShareIE(InfoExtractor):
     }, {
         'note': 'JWPlayer link in clear',
         'url': 'https://w1.viidshar.com/nnibe0xf0h79.html',
-        'md5': 'b95b97978093bc287c322307c689bd94',
+        'md5': 'f0a580ce9df06cc61b4a5c979d672367',
         'info_dict': {
             'id': 'nnibe0xf0h79',
             'title': 'JaGa 68ar',
@@ -166,6 +160,15 @@ class XFileShareIE(InfoExtractor):
             'skip_download': 'ffmpeg',
         },
         'expected_warnings': ['hlsnative has detected features it does not support'],
+    }, {
+        'note': 'JWPlayer link in clear',
+        'url': 'https://wolfstream.tv/a3drtehyrg52.html',
+        'md5': '1901d86a79c5e0c6a51bdc9a4cfd3769',
+        'info_dict': {
+            'id': 'a3drtehyrg52',
+            'title': 'NFL 2023 W04 DET@GB',
+            'ext': 'mp4'
+        },
     }, {
         'url': 'https://aparat.cam/n4d6dh0wvlpr',
         'only_matching': True,
@@ -178,12 +181,6 @@ class XFileShareIE(InfoExtractor):
     }, {
         'url': 'https://vedbam.xyz/6lnbkci96wly.html',
         'only_matching': True,
-    }, {
-        'url': 'https://vbn2.vdbtm.shop/6lnbkci96wly.html',
-        'only_matching': True,
-    }, {
-        'url': 'https://filemoon.in/e/5abn1ze9jifb',
-        'only_matching': True,
     }]

     @classmethod
@@ -197,26 +194,17 @@ class XFileShareIE(InfoExtractor):
         return list(yield_urls())

     def _real_extract(self, url):
-        host, sub, video_id = self._match_valid_url(url).group('host', 'sub', 'id')
+        host, video_id = self._match_valid_url(url).group('host', 'id')

-        url = 'https://%s/%s%s' % (
-            host, sub,
+        url = 'https://%s/%s' % (
+            host,
             'embed-%s.html' % video_id if host in ('govid.me', 'vidlo.us') else video_id)
         webpage = self._download_webpage(url, video_id)
-        main = self._search_regex(
-            r'(?s)<main>(.+)</main>', webpage, 'main', default=webpage)
-        container_div = (
-            get_element_by_id('container', main)
-            or get_element_by_class('container', main)
-            or webpage)
+        container_div = get_element_by_id('container', webpage) or webpage
         if self._search_regex(
                 r'>This server is in maintenance mode\.', container_div,
                 'maint error', group=0, default=None):
             raise ExtractorError(clean_html(container_div), expected=True)
-        if self._search_regex(
-                'not available in your country', container_div,
-                'geo block', group=0, default=None):
-            self.raise_geo_restricted()
         if self._search_regex(
                 self._FILE_NOT_FOUND_REGEXES, container_div,
                 'missing video error', group=0, default=None):
@@ -240,41 +228,38 @@ class XFileShareIE(InfoExtractor):
         title = (
             self._search_regex(self._TITLE_REGEXES, webpage, 'title', default=None)
-            or self._og_search_title(webpage, default='')).strip()
+            or self._og_search_title(webpage, default=None)
+            or video_id).strip()

-        def deobfuscate(html):
+        obf_code = True
+        while obf_code:
             for regex, func in (
                     (r'(?s)(?<!-)\b(eval\(function\(p,a,c,k,e,d\)\{(?:(?!</script>).)+\)\))',
                      decode_packed_codes),
                     (r'(゚.+)', aa_decode)):
-                obf_code = self._search_regex(regex, html, 'obfuscated code', default=None)
+                obf_code = self._search_regex(regex, webpage, 'obfuscated code', default=None)
                 if obf_code:
-                    return html.replace(obf_code, func(obf_code))
+                    webpage = webpage.replace(obf_code, func(obf_code))
+                    break

-        def jw_extract(html):
-            jwplayer_data = self._find_jwplayer_data(
-                html.replace(r'\'', '\''), video_id)
-            result = self._parse_jwplayer_data(
-                jwplayer_data, video_id, require_title=False,
-                m3u8_id='hls', mpd_id='dash')
-            result = traverse_obj(result, (
-                (None, ('entries', 0)), T(lambda r: r if r['formats'] else None)),
-                get_all=False) or {}
-            if not result and jwplayer_data:
+        jwplayer_data = self._find_jwplayer_data(
+            webpage.replace(r'\'', '\''), video_id)
+        result = self._parse_jwplayer_data(
+            jwplayer_data, video_id, require_title=False,
+            m3u8_id='hls', mpd_id='dash')
+
+        if not traverse_obj(result, 'formats'):
+            if jwplayer_data:
                 self.report_warning(
                     'Failed to extract JWPlayer formats', video_id=video_id)
-            return result
-
-        def extract_from_links(html):
             urls = set()
             for regex in self._SOURCE_URL_REGEXES:
-                for mobj in re.finditer(regex, html):
+                for mobj in re.finditer(regex, webpage):
                     urls.add(mobj.group('url'))
-            sources = self._search_json(
-                r'\bsources\s*:', webpage, 'sources', video_id,
-                contains_pattern=r'\[(?!{)[^\]]+\]', default=[])
-            urls.update(sources)
+
+            sources = self._search_regex(
+                r'sources\s*:\s*(\[(?!{)[^\]]+\])', webpage, 'sources', default=None)
+            urls.update(traverse_obj(sources, (T(lambda s: self._parse_json(s, video_id)), Ellipsis)))

             formats = []
             for video_url in traverse_obj(urls, (Ellipsis, T(url_or_none))):
@@ -288,33 +273,7 @@ class XFileShareIE(InfoExtractor):
                     'url': video_url,
                     'format_id': 'sd',
                 })
-            return {'formats': formats}
-
-        def extract_info(html):
-            html = deobfuscate(html) or html
-            result = jw_extract(html)
-            if not result.get('formats'):
-                result = extract_from_links(html)
-            return result
-
-        def pages_to_extract(html):
-            yield html
-            # page with separate protected download page also has player link
-            player_iframe = self._search_regex(
-                r'(<iframe\s[^>]+>)',
-                get_element_by_id('iframe-holder', html) or '',
-                'player iframe', default='')
-            player_url = extract_attributes(player_iframe).get('src')
-            if player_url:
-                html = self._download_webpage(player_url, video_id, note='Downloading player page', fatal=False)
-                if html:
-                    yield html
-
-        result = {}
-        for html in pages_to_extract(webpage):
-            result = extract_info(html)
-            if result.get('formats'):
-                break
+            result = {'formats': formats}

         self._sort_formats(result['formats'])

View File

@@ -30,20 +30,17 @@ class YandexMusicBaseIE(InfoExtractor):
     @staticmethod
     def _raise_captcha():
         raise ExtractorError(
-            'YandexMusic has considered youtube-dl requests automated '
-            'and asks you to solve a CAPTCHA. You can wait for some time '
-            'until unblocked and optionally use --sleep-interval in future; '
-            'otherwise solve the CAPTCHA at https://music.yandex.ru/, '
-            'then export cookies and pass the cookie file to youtube-dl '
-            'with --cookies.',
+            'YandexMusic has considered youtube-dl requests automated and '
+            'asks you to solve a CAPTCHA. You can either wait for some '
+            'time until unblocked and optionally use --sleep-interval '
+            'in future or alternatively you can go to https://music.yandex.ru/ '
+            'solve CAPTCHA, then export cookies and pass cookie file to '
+            'youtube-dl with --cookies',
             expected=True)

     def _download_webpage_handle(self, *args, **kwargs):
         webpage = super(YandexMusicBaseIE, self)._download_webpage_handle(*args, **kwargs)
-        blocked_ip_msg = (
-            'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;'
-            'вашего IP-адреса, похожи на&nbsp;автоматические.')
-        if blocked_ip_msg in (webpage or [''])[0]:
+        if 'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;вашего IP-адреса, похожи на&nbsp;автоматические.' in webpage:
            self._raise_captcha()
         return webpage
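One point worth spelling out for this hunk: `_download_webpage_handle` returns a `(page, handle)` tuple (or `False` on a non-fatal failure), so the '-' side indexes into it before the substring test, whereas a bare `in` test against the tuple only compares whole elements. A small sketch of the difference (hypothetical return value):

    res = ('<html>... Нам очень жаль ...</html>', object())   # hypothetical (page, handle) tuple
    msg = 'Нам очень жаль'
    print(msg in res)               # False: tuple membership compares whole elements
    print(msg in (res or [''])[0])  # True: substring test against the page text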

View File

@@ -1960,7 +1960,7 @@ def get_element_by_attribute(attribute, value, html, escape_value=True):
 def get_elements_by_class(class_name, html):
     """Return the content of all tags with the specified class in the passed HTML document as a list"""
     return get_elements_by_attribute(
-        'class', r'[^\'"]*(?<!-)\b%s\b(?!-)[^\'"]*' % re.escape(class_name),
+        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
         html, escape_value=False)
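The only difference between the two patterns is the `(?<!-)`/`(?!-)` guards around the word boundary: with `\b` alone, a hyphenated class such as `baz-bam` also matches searches for `baz` or `bam`, which is what the changed tests at the top of this compare exercise. A quick sketch (matching against a raw attribute value here, purely for illustration):

    import re

    class_attr = 'foo bar baz-bam'
    plain = r'[^\'"]*\b%s\b[^\'"]*'
    guarded = r'[^\'"]*(?<!-)\b%s\b(?!-)[^\'"]*'

    for cls in ('foo', 'bar', 'baz', 'bam', 'baz-bam'):
        print(cls,
              bool(re.search(plain % re.escape(cls), class_attr)),
              bool(re.search(guarded % re.escape(cls), class_attr)))
    # 'baz' and 'bam' match only with the plain pattern; the rest match with both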
@@ -4498,7 +4498,7 @@ def strip_jsonp(code):
 def js_to_json(code, *args, **kwargs):
     # vars is a dict of (var, val) pairs to substitute
-    js_vars = args[0] if len(args) > 0 else kwargs.get('vars', {})
+    vars = args[0] if len(args) > 0 else kwargs.get('vars', {})
     strict = kwargs.get('strict', False)

     STRING_QUOTES = '\'"`'
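The hunks below only rename the local `vars` (which shadows the builtin) to `js_vars` on the '-' side and wrap the template-literal `json.loads` in a try/except; the substitution behaviour itself is unchanged and the keyword argument is still `vars=`. A minimal usage sketch (sample code string and mapping are made up):

    from youtube_dl.utils import js_to_json

    # substitute the free variable `vid` with a JSON value while converting JS to JSON
    print(js_to_json('{id: vid, n: 0x10}', vars={'vid': '"105"'}))
    # -> {"id": "105", "n": 16}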
@@ -4523,13 +4523,9 @@
                 else escape)

     def template_substitute(match):
-        evaluated = js_to_json(match.group(1), js_vars, strict=strict)
+        evaluated = js_to_json(match.group(1), vars, strict=strict)
         if evaluated[0] == '"':
-            try:
-                return json.loads(evaluated)
-            except JSONDecodeError:
-                if strict:
-                    raise
+            return json.loads(evaluated)
         return evaluated

     def fix_kv(m):
@@ -4563,14 +4559,14 @@
                 i = int(im.group(1), base)
                 return ('"%s":' if v.endswith(':') else '%s') % inv(i)

-        if v in js_vars:
+        if v in vars:
             try:
                 if not strict:
-                    json.loads(js_vars[v])
+                    json.loads(vars[v])
             except JSONDecodeError:
-                return inv(json.dumps(js_vars[v]))
+                return inv(json.dumps(vars[v]))
             else:
-                return inv(js_vars[v])
+                return inv(vars[v])

         if not strict:
             v = try_call(inv, args=(v,), default=v)
@@ -4581,7 +4577,7 @@
         raise ValueError('Unknown value: ' + v)

     def create_map(mobj):
-        return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=js_vars))))
+        return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))

     code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
     if not strict:
@@ -6719,8 +6715,3 @@ class _UnsafeExtensionError(Exception):
             raise cls(extension)

         return extension
-
-
-def json_stringify(json_data, **kwargs):
-    kwargs.setdefault('separators', (',', ':'))
-    return json.dumps(json_data, **kwargs).decode('utf-8')