diff --git a/test/test_utils.py b/test/test_utils.py index 2947cce7e..16523a0c9 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -1603,11 +1603,14 @@ Line 1 def test_get_element_by_class(self): html = ''' - nice + nice ''' self.assertEqual(get_element_by_class('foo', html), 'nice') + self.assertEqual(get_element_by_class('bar', html), 'nice') self.assertEqual(get_element_by_class('no-such-class', html), None) + self.assertEqual(get_element_by_class('baz', html), None) + self.assertEqual(get_element_by_class('bam', html), None) def test_get_element_by_attribute(self): html = ''' @@ -1626,10 +1629,13 @@ Line 1 def test_get_elements_by_class(self): html = ''' - nicealso nice + nicealso nice ''' self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice']) + self.assertEqual(get_elements_by_class('bar', html), ['nice', 'also nice']) + self.assertEqual(get_elements_by_class('baz', html), []) + self.assertEqual(get_elements_by_class('bam', html), []) self.assertEqual(get_elements_by_class('no-such-class', html), []) def test_get_elements_by_attribute(self): diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index 9b0016d07..c54406e7a 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -3128,7 +3128,8 @@ class InfoExtractor(object): continue urls.add(source_url) source_type = source.get('type') or '' - ext = mimetype2ext(source_type) or determine_ext(source_url) + # https://github.com/yt-dlp/yt-dlp/pull/10956 + ext = determine_ext(source_url, default_ext=mimetype2ext(source_type)) if source_type == 'hls' or ext == 'm3u8' or 'format=m3u8-aapl' in source_url: formats.extend(self._extract_m3u8_formats( source_url, video_id, 'mp4', entry_protocol='m3u8_native', diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index b01900afa..5b1cd453d 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -31,10 +31,14 @@ from ..utils import ( parse_resolution, sanitized_Request, smuggle_url, + strip_or_none, + T, + traverse_obj, unescapeHTML, unified_timestamp, unsmuggle_url, UnsupportedError, + update_url_query, url_or_none, urljoin, xpath_attr, @@ -2237,6 +2241,7 @@ class GenericIE(InfoExtractor): 'display_id': 'kelis-4th-of-july', 'ext': 'mp4', 'title': 'Kelis - 4th Of July', + 'description': 'Kelis - 4th Of July', 'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg', }, }, { @@ -2246,7 +2251,7 @@ class GenericIE(InfoExtractor): 'id': '105', 'display_id': 'kelis-4th-of-july', 'ext': 'mp4', - 'title': 'Kelis - 4th Of July / Embed Player', + 'title': r're:Kelis - 4th Of July(?: / Embed Player)?$', 'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg', }, 'params': { @@ -2297,6 +2302,32 @@ class GenericIE(InfoExtractor): 'title': 'Syren De Mer onlyfans_05-07-2020Have_a_happy_safe_holiday5f014e68a220979bdb8cd_source / Embed плеер', 'thumbnail': r're:https?://www\.camhub\.world/contents/videos_screenshots/389000/389508/preview\.mp4\.jpg', }, + 'skip': 'needs Referer ?', + }, { + # KVS Player v10 + 'url': 'https://www.cambro.tv/588174/marleny-1/', + 'md5': '759d2050590986c6fc341da0592c4d8e', + 'info_dict': { + 'id': '588174', + 'display_id': 'marleny-1', + 'ext': 'mp4', + 'title': 'marleny 1', + 'description': 'la maestra de tic toc', + 'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/588000/588174/preview\.jpg', + 'age_limit': 18, + }, + }, { + # KVS Player v10 embed, 
NSFW + 'url': 'https://www.cambro.tv/embed/436185', + 'md5': '24338dc8b182900a2c9eda075a0a46c0', + 'info_dict': { + 'id': '436185', + 'display_id': 'jaeandbailey-chaturbate-webcam-porn-videos', + 'ext': 'mp4', + 'title': 'jaeandbailey Chaturbate webcam porn videos', + 'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/436000/436185/preview\.jpg', + 'age_limit': 18, + }, }, { 'url': 'https://mrdeepfakes.com/video/5/selena-gomez-pov-deep-fakes', 'md5': 'fec4ad5ec150f655e0c74c696a4a2ff4', @@ -2309,14 +2340,16 @@ class GenericIE(InfoExtractor): 'height': 720, 'age_limit': 18, }, + # 'skip': 'Geo-blocked in some mjurisdictions', }, { + # KVS Player v2 'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/', 'md5': 'e2f0a4c329f7986280b7328e24036d60', 'info_dict': { 'id': '284002', 'display_id': 'just-out-of-the-shower-joi', 'ext': 'mp4', - 'title': 'Just Out Of The Shower JOI - Shooshtime', + 'title': r're:Just Out Of The Shower JOI(?: - Shooshtime)?$', 'height': 720, 'age_limit': 18, }, @@ -2482,9 +2515,12 @@ class GenericIE(InfoExtractor): return '/'.join(urlparts) + '?' + url_query flashvars = self._search_regex( - r'(?s)]*>.*?var\s+flashvars\s*=\s*(\{.+?\});.*?', - webpage, 'flashvars') - flashvars = self._parse_json(flashvars, video_id, transform_source=js_to_json) + r'''(?]*)?>[\s\S]*?var\s+%s\s*=' % (flashvars,), + webpage, 'flashvars', video_id, end_pattern=r';[\s\S]*?', + transform_source=js_to_json) # extract the part after the last / as the display_id from the # canonical URL. @@ -2493,12 +2529,7 @@ class GenericIE(InfoExtractor): r'|)', webpage, 'display_id', fatal=False ) - title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)', webpage, 'title') - - thumbnail = flashvars['preview_url'] - if thumbnail.startswith('//'): - protocol, _, _ = url.partition('/') - thumbnail = protocol + thumbnail + title = flashvars.get('video_title') or self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)', webpage, 'title') url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys())) formats = [] @@ -2506,9 +2537,13 @@ class GenericIE(InfoExtractor): if '/get_file/' not in flashvars[key]: continue format_id = flashvars.get(key + '_text', key) + f_url = urljoin(url, getrealurl(flashvars[key], flashvars['license_code'])) + rnd = flashvars.get('rnd', key) + if rnd: + f_url = update_url_query(f_url, {'rnd': rnd}) formats.append(merge_dicts( parse_resolution(format_id) or parse_resolution(flashvars[key]), { - 'url': urljoin(url, getrealurl(flashvars[key], flashvars['license_code'])), + 'url': f_url, 'format_id': format_id, 'ext': 'mp4', 'http_headers': {'Referer': url}, @@ -2518,13 +2553,31 @@ class GenericIE(InfoExtractor): self._sort_formats(formats) - return { + csv2list = (T(lambda s: s.split(',')), Ellipsis, T(strip_or_none)) + info = traverse_obj(flashvars, { + 'tags': ('video_tags',) + csv2list, + 'categories': ('video_categories',) + csv2list, + 'thumbnails': ( + T(dict.items), lambda _, k_v: k_v[0].startswith('preview_url'), { + 'url': (1, T(lambda u: urljoin(url, u))), + 'preference': (0, T(lambda k: 100 - len(k))), + }), + }) + info = merge_dicts(info, { 'id': flashvars['video_id'], 'display_id': display_id, 'title': title, - 'thumbnail': thumbnail, 'formats': formats, - } + }) + + # check-porn test for embed pages + if 'age_limit' not in info and traverse_obj(info, ( + ('title', (('tags', 'categories'), Ellipsis) or []), + T(lambda t: bool(re.search(r'(?i)(?:^|\s+)porn(?:$|\s+)', t)) or None)), + get_all=False): 
+ info['age_limit'] = 18 + + return info def _real_extract(self, url): if url.startswith('//'): @@ -3598,7 +3651,7 @@ class GenericIE(InfoExtractor): ), webpage, 'KVS player', group='ver', default=False) if found: self.report_extraction('%s: KVS Player' % (video_id, )) - if found.split('.')[0] not in ('4', '5', '6'): + if found.split('.')[0] not in ('2', '4', '5', '6', '10'): self.report_warning('Untested major version (%s) in player engine - download may fail.' % (found, )) return merge_dicts( self._extract_kvs(url, webpage, video_id), diff --git a/youtube_dl/extractor/hentaistigma.py b/youtube_dl/extractor/hentaistigma.py index 86a93de4d..c01fe05fd 100644 --- a/youtube_dl/extractor/hentaistigma.py +++ b/youtube_dl/extractor/hentaistigma.py @@ -1,6 +1,11 @@ +# coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + merge_dicts, + traverse_obj, +) class HentaiStigmaIE(InfoExtractor): @@ -24,16 +29,17 @@ class HentaiStigmaIE(InfoExtractor): title = self._html_search_regex( r']+class="posttitle"[^>]*>]*>([^<]+)', webpage, 'title') - wrap_url = self._html_search_regex( + + wrap_url = self._search_regex( r']+src="([^"]+mp4)"', webpage, 'wrapper url') - wrap_webpage = self._download_webpage(wrap_url, video_id) - video_url = self._html_search_regex( - r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url') + vid_page = self._download_webpage(wrap_url, video_id) - return { + entries = self._parse_html5_media_entries(wrap_url, vid_page, video_id) + self._sort_formats(traverse_obj(entries, (0, 'formats')) or []) + + return merge_dicts({ 'id': video_id, - 'url': video_url, 'title': title, 'age_limit': 18, - } + }, entries[0]) diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py index 6d4d93394..861b6952b 100644 --- a/youtube_dl/extractor/kaltura.py +++ b/youtube_dl/extractor/kaltura.py @@ -23,7 +23,7 @@ class KalturaIE(InfoExtractor): (?: kaltura:(?P\d+):(?P[0-9a-z_]+)| https?:// - (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/ + (?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/ (?: (?: # flash player diff --git a/youtube_dl/extractor/mgoon.py b/youtube_dl/extractor/mgoon.py index 7bb473900..56086f7b9 100644 --- a/youtube_dl/extractor/mgoon.py +++ b/youtube_dl/extractor/mgoon.py @@ -13,7 +13,7 @@ from ..utils import ( class MgoonIE(InfoExtractor): _VALID_URL = r'''(?x)https?://(?:www\.)? - (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)| + (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)| video\.mgoon\.com)/(?P[0-9]+)''' _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}' _TESTS = [ diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py index 1ee78edbc..2e1341f44 100644 --- a/youtube_dl/extractor/orf.py +++ b/youtube_dl/extractor/orf.py @@ -112,7 +112,7 @@ class ORFRadioIE(ORFRadioBase): _VALID_URL = ( r'https?://sound\.orf\.at/radio/(?P{0})/sendung/(?P\d+)(?:/(?P\w+))?'.format(_STATION_RE), - r'https?://(?P{0})\.orf\.at/player/(?P\d{{8}})/(?P\d+)'.format(_STATION_RE), + r'https?://(?P{0})\.orf\.at/(?:player|programm)/(?P\d{{8}})/(?P\d+)'.format(_STATION_RE), ) _TESTS = [{ @@ -150,6 +150,10 @@ class ORFRadioIE(ORFRadioBase): 'duration': 1500, }, 'skip': 'Shows from ORF Sound are only available for 30 days.' 
+ }, { + # yt-dlp/yt-dlp#11014 + 'url': 'https://oe1.orf.at/programm/20240916/769302/Playgrounds', + 'only_matching': True, }] def _real_extract(self, url): diff --git a/youtube_dl/extractor/xfileshare.py b/youtube_dl/extractor/xfileshare.py index 4dc3032e7..f1dae792e 100644 --- a/youtube_dl/extractor/xfileshare.py +++ b/youtube_dl/extractor/xfileshare.py @@ -12,7 +12,9 @@ from ..utils import ( clean_html, decode_packed_codes, determine_ext, + extract_attributes, ExtractorError, + get_element_by_class, get_element_by_id, int_or_none, merge_dicts, @@ -56,39 +58,40 @@ def aa_decode(aa_code): class XFileShareIE(InfoExtractor): _SITES = ( - # status check 2024-02: site availability, G site: search - (r'aparat\.cam', 'Aparat'), # Cloudflare says host error 522, apparently changed to wolfstreeam.tv - (r'filemoon\.sx/.', 'FileMoon'), - (r'gounlimited\.to', 'GoUnlimited'), # no media pages listed + # status check 2024-10: site availability, G site: search + (r'aparat\.cam', 'Aparat'), # Cloudflare says host error 522, apparently changed to wolfstream.tv + (r'filemoon\.(?:sx|to|in)', 'FileMoon'), + # (r'gounlimited\.to', 'GoUnlimited'), # domain not found (r'govid\.me', 'GoVid'), # no media pages listed - (r'highstream\.tv', 'HighStream'), # clipwatching.com redirects here - (r'holavid\.com', 'HolaVid'), # Cloudflare says host error 522 - # (r'streamty\.com', 'Streamty'), # no media pages listed, connection timeout - # (r'thevideobee\.to', 'TheVideoBee'), # no pages listed, refuses connection - (r'uqload\.to', 'Uqload'), # .com, .co redirect here - (r'(?:vedbam\.xyz|vadbam.net)', 'V?dB?m'), # vidbom.com redirects here, but no valid media pages listed + (r'highstream\.tv', 'HighStream'), # Cloudflare says host error 522, clipwatching.com now dead + (r'holavid\.com', 'HolaVid'), # hoster default home page + # (r'streamty\.com', 'Streamty'), # spam parking domain + # (r'thevideobee\.to', 'TheVideoBee'), # domain for sale + (r'uqload\.ws', 'Uqload'), # .com, .co, .to redirect here + # (r'(vadbam.net', 'VadBam'), # domain not found + (r'(?:vedbam\.xyz|vadbam\.net|vbn\d.\vdbtm\.shop)', 'V?dB?m'), # vidbom.com redirects here, but no valid media pages listed (r'vidlo\.us', 'vidlo'), # no valid media pages listed (r'vidlocker\.xyz', 'VidLocker'), # no media pages listed - (r'(?:w\d\.)?viidshar\.com', 'VidShare'), # vidshare.tv redirects here + (r'(?:w\d\.)?viidshar\.com', 'VidShare'), # vidshare.tv parked # (r'vup\.to', 'VUp'), # domain not found - (r'wolfstream\.tv', 'WolfStream'), - (r'xvideosharing\.com', 'XVideoSharing'), # just started showing 'maintenance mode' + # (r'wolfstream\.tv', 'WolfStream'), # domain not found + (r'xvideosharing\.com', 'XVideoSharing'), ) IE_DESC = 'XFileShare-based sites: %s' % ', '.join(list(zip(*_SITES))[1]) - _VALID_URL = (r'https?://(?:www\.)?(?P%s)/(?:embed-)?(?P[0-9a-zA-Z]+)' + _VALID_URL = (r'https?://(?:www\.)?(?P%s)/(?P[a-z]/|)(?:embed-)?(?P[0-9a-zA-Z]+)' % '|'.join(site for site in list(zip(*_SITES))[0])) _EMBED_REGEX = [r']+\bsrc=(["\'])(?P(?:https?:)?//(?:%s)/embed-[0-9a-zA-Z]+.*?)\1' % '|'.join(site for site in list(zip(*_SITES))[0])] _FILE_NOT_FOUND_REGEXES = ( - r'>(?:404 - )?File Not Found<', - r'>The file was removed by administrator<', + r'>\s*(?:404 - )?File Not Found\s*<', + r'>\s*The file was removed by administrator\s*<', ) _TITLE_REGEXES = ( r'style="z-index: [0-9]+;">([^<]+)', r'([^<]+)', r'h4-fine[^>]*>([^<]+)<', - r'>Watch (.+)[ <]', + r'>Watch (.+?)(?: mp4)?(?: The Ultimate Free Video Hosting Solution for Webmasters and Bloggers)?<', r'
<h2 class="video-page-head">([^<]+)</h2>',
         r'<h2 style="[^"]*color:#403f3d[^"]*"[^>
]*>([^<]+)<', # streamin.to (dead) r'title\s*:\s*"([^"]+)"', # govid.me @@ -106,38 +109,41 @@ class XFileShareIE(InfoExtractor): _TESTS = [{ 'note': 'link in `sources`', - 'url': 'https://uqload.to/dcsu06gdb45o', - 'md5': '7f8db187b254379440bf4fcad094ae86', + 'url': 'https://uqload.ws/4sah252totrk.html', + 'md5': '1f11151b5044862fbc3c112732f9f7d8', 'info_dict': { - 'id': 'dcsu06gdb45o', + 'id': '4sah252totrk', 'ext': 'mp4', - 'title': 'f2e31015957e74c8c8427982e161c3fc mp4', + 'title': 'JEONGHAN WONWOO Interview With Allure Korea Arabic Sub', 'thumbnail': r're:https://.*\.jpg' }, 'params': { 'nocheckcertificate': True, }, - 'expected_warnings': ['Unable to extract JWPlayer data'], + # 'expected_warnings': ['Unable to extract JWPlayer data'], }, { - 'note': 'link in decoded `sources`', - 'url': 'https://xvideosharing.com/1tlg6agrrdgc', - 'md5': '2608ce41932c1657ae56258a64e647d9', + 'note': 'link in Playerjs', # need test with 'link in decoded `sources`' + 'url': 'https://xvideosharing.com/8cnupzc1z8xq.html', + 'md5': '9725ca7229e8f3046f2417da3bd5eddc', 'info_dict': { - 'id': '1tlg6agrrdgc', + 'id': '8cnupzc1z8xq', 'ext': 'mp4', - 'title': '0121', + 'title': 'HEVC X265 Big Buck Bunny 1080 10s 20MB', 'thumbnail': r're:https?://.*\.jpg', }, - 'skip': 'This server is in maintenance mode.', }, { - 'note': 'JWPlayer link in un-p,a,c,k,e,d JS', - 'url': 'https://filemoon.sx/e/dw40rxrzruqz', - 'md5': '5a713742f57ac4aef29b74733e8dda01', + 'note': 'JWPlayer link in un-p,a,c,k,e,d JS, in player frame', + 'url': 'https://filemoon.sx/d/fbsxidybremo', + 'md5': '82007a71661630f60e866f0d6ed31b2a', 'info_dict': { - 'id': 'dw40rxrzruqz', - 'title': 'dw40rxrzruqz', + 'id': 'fbsxidybremo', + 'title': 'Uchouten', 'ext': 'mp4' }, + 'params': { + 'skip_download': 'ffmpeg', + }, + 'expected_warnings': ['hlsnative has detected features it does not support'], }, { 'note': 'JWPlayer link in un-p,a,c,k,e,d JS', 'url': 'https://vadbam.net/6lnbkci96wly.html', @@ -150,7 +156,7 @@ class XFileShareIE(InfoExtractor): }, { 'note': 'JWPlayer link in clear', 'url': 'https://w1.viidshar.com/nnibe0xf0h79.html', - 'md5': 'f0a580ce9df06cc61b4a5c979d672367', + 'md5': 'b95b97978093bc287c322307c689bd94', 'info_dict': { 'id': 'nnibe0xf0h79', 'title': 'JaGa 68ar', @@ -160,15 +166,6 @@ class XFileShareIE(InfoExtractor): 'skip_download': 'ffmpeg', }, 'expected_warnings': ['hlsnative has detected features it does not support'], - }, { - 'note': 'JWPlayer link in clear', - 'url': 'https://wolfstream.tv/a3drtehyrg52.html', - 'md5': '1901d86a79c5e0c6a51bdc9a4cfd3769', - 'info_dict': { - 'id': 'a3drtehyrg52', - 'title': 'NFL 2023 W04 DET@GB', - 'ext': 'mp4' - }, }, { 'url': 'https://aparat.cam/n4d6dh0wvlpr', 'only_matching': True, @@ -181,6 +178,12 @@ class XFileShareIE(InfoExtractor): }, { 'url': 'https://vedbam.xyz/6lnbkci96wly.html', 'only_matching': True, + }, { + 'url': 'https://vbn2.vdbtm.shop/6lnbkci96wly.html', + 'only_matching': True, + }, { + 'url': 'https://filemoon.in/e/5abn1ze9jifb', + 'only_matching': True, }] @classmethod @@ -194,17 +197,26 @@ class XFileShareIE(InfoExtractor): return list(yield_urls()) def _real_extract(self, url): - host, video_id = self._match_valid_url(url).group('host', 'id') + host, sub, video_id = self._match_valid_url(url).group('host', 'sub', 'id') - url = 'https://%s/%s' % ( - host, + url = 'https://%s/%s%s' % ( + host, sub, 'embed-%s.html' % video_id if host in ('govid.me', 'vidlo.us') else video_id) webpage = self._download_webpage(url, video_id) - container_div = get_element_by_id('container', 
webpage) or webpage
+        main = self._search_regex(
+            r'(?s)<main>(.+)</main>
', webpage, 'main', default=webpage) + container_div = ( + get_element_by_id('container', main) + or get_element_by_class('container', main) + or webpage) if self._search_regex( r'>This server is in maintenance mode\.', container_div, 'maint error', group=0, default=None): raise ExtractorError(clean_html(container_div), expected=True) + if self._search_regex( + 'not available in your country', container_div, + 'geo block', group=0, default=None): + self.raise_geo_restricted() if self._search_regex( self._FILE_NOT_FOUND_REGEXES, container_div, 'missing video error', group=0, default=None): @@ -228,38 +240,41 @@ class XFileShareIE(InfoExtractor): title = ( self._search_regex(self._TITLE_REGEXES, webpage, 'title', default=None) - or self._og_search_title(webpage, default=None) - or video_id).strip() + or self._og_search_title(webpage, default='')).strip() - obf_code = True - while obf_code: + def deobfuscate(html): for regex, func in ( (r'(?s)(?).)+\)\))', decode_packed_codes), (r'(゚.+)', aa_decode)): - obf_code = self._search_regex(regex, webpage, 'obfuscated code', default=None) + obf_code = self._search_regex(regex, html, 'obfuscated code', default=None) if obf_code: - webpage = webpage.replace(obf_code, func(obf_code)) - break + return html.replace(obf_code, func(obf_code)) - jwplayer_data = self._find_jwplayer_data( - webpage.replace(r'\'', '\''), video_id) - result = self._parse_jwplayer_data( - jwplayer_data, video_id, require_title=False, - m3u8_id='hls', mpd_id='dash') - - if not traverse_obj(result, 'formats'): - if jwplayer_data: + def jw_extract(html): + jwplayer_data = self._find_jwplayer_data( + html.replace(r'\'', '\''), video_id) + result = self._parse_jwplayer_data( + jwplayer_data, video_id, require_title=False, + m3u8_id='hls', mpd_id='dash') + result = traverse_obj(result, ( + (None, ('entries', 0)), T(lambda r: r if r['formats'] else None)), + get_all=False) or {} + if not result and jwplayer_data: self.report_warning( 'Failed to extract JWPlayer formats', video_id=video_id) + return result + + def extract_from_links(html): urls = set() for regex in self._SOURCE_URL_REGEXES: - for mobj in re.finditer(regex, webpage): + for mobj in re.finditer(regex, html): urls.add(mobj.group('url')) - sources = self._search_regex( - r'sources\s*:\s*(\[(?!{)[^\]]+\])', webpage, 'sources', default=None) - urls.update(traverse_obj(sources, (T(lambda s: self._parse_json(s, video_id)), Ellipsis))) + sources = self._search_json( + r'\bsources\s*:', webpage, 'sources', video_id, + contains_pattern=r'\[(?!{)[^\]]+\]', default=[]) + urls.update(sources) formats = [] for video_url in traverse_obj(urls, (Ellipsis, T(url_or_none))): @@ -273,7 +288,33 @@ class XFileShareIE(InfoExtractor): 'url': video_url, 'format_id': 'sd', }) - result = {'formats': formats} + return {'formats': formats} + + def extract_info(html): + html = deobfuscate(html) or html + result = jw_extract(html) + if not result.get('formats'): + result = extract_from_links(html) + return result + + def pages_to_extract(html): + yield html + # page with separate protected download page also has player link + player_iframe = self._search_regex( + r'(]+>)', + get_element_by_id('iframe-holder', html) or '', + 'player iframe', default='') + player_url = extract_attributes(player_iframe).get('src') + if player_url: + html = self._download_webpage(player_url, video_id, note='Downloading player page', fatal=False) + if html: + yield html + + result = {} + for html in pages_to_extract(webpage): + result = extract_info(html) + if 
result.get('formats'): + break self._sort_formats(result['formats']) diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py index 8da5b430f..91b731673 100644 --- a/youtube_dl/extractor/yandexmusic.py +++ b/youtube_dl/extractor/yandexmusic.py @@ -30,17 +30,20 @@ class YandexMusicBaseIE(InfoExtractor): @staticmethod def _raise_captcha(): raise ExtractorError( - 'YandexMusic has considered youtube-dl requests automated and ' - 'asks you to solve a CAPTCHA. You can either wait for some ' - 'time until unblocked and optionally use --sleep-interval ' - 'in future or alternatively you can go to https://music.yandex.ru/ ' - 'solve CAPTCHA, then export cookies and pass cookie file to ' - 'youtube-dl with --cookies', + 'YandexMusic has considered youtube-dl requests automated ' + 'and asks you to solve a CAPTCHA. You can wait for some time ' + 'until unblocked and optionally use --sleep-interval in future; ' + 'otherwise solve the CAPTCHA at https://music.yandex.ru/, ' + 'then export cookies and pass the cookie file to youtube-dl ' + 'with --cookies.', expected=True) def _download_webpage_handle(self, *args, **kwargs): webpage = super(YandexMusicBaseIE, self)._download_webpage_handle(*args, **kwargs) - if 'Нам очень жаль, но запросы, поступившие с вашего IP-адреса, похожи на автоматические.' in webpage: + blocked_ip_msg = ( + 'Нам очень жаль, но запросы, поступившие с ' + 'вашего IP-адреса, похожи на автоматические.') + if blocked_ip_msg in (webpage or [''])[0]: self._raise_captcha() return webpage diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py index ac1e78002..50a226c02 100644 --- a/youtube_dl/utils.py +++ b/youtube_dl/utils.py @@ -1960,7 +1960,7 @@ def get_element_by_attribute(attribute, value, html, escape_value=True): def get_elements_by_class(class_name, html): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( - 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), + 'class', r'[^\'"]*(? 
0 else kwargs.get('vars', {}) + js_vars = args[0] if len(args) > 0 else kwargs.get('vars', {}) strict = kwargs.get('strict', False) STRING_QUOTES = '\'"`' @@ -4523,9 +4523,13 @@ def js_to_json(code, *args, **kwargs): else escape) def template_substitute(match): - evaluated = js_to_json(match.group(1), vars, strict=strict) + evaluated = js_to_json(match.group(1), js_vars, strict=strict) if evaluated[0] == '"': - return json.loads(evaluated) + try: + return json.loads(evaluated) + except JSONDecodeError: + if strict: + raise return evaluated def fix_kv(m): @@ -4559,14 +4563,14 @@ def js_to_json(code, *args, **kwargs): i = int(im.group(1), base) return ('"%s":' if v.endswith(':') else '%s') % inv(i) - if v in vars: + if v in js_vars: try: if not strict: - json.loads(vars[v]) + json.loads(js_vars[v]) except JSONDecodeError: - return inv(json.dumps(vars[v])) + return inv(json.dumps(js_vars[v])) else: - return inv(vars[v]) + return inv(js_vars[v]) if not strict: v = try_call(inv, args=(v,), default=v) @@ -4577,7 +4581,7 @@ def js_to_json(code, *args, **kwargs): raise ValueError('Unknown value: ' + v) def create_map(mobj): - return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars)))) + return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=js_vars)))) code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code) if not strict: @@ -6715,3 +6719,8 @@ class _UnsafeExtensionError(Exception): raise cls(extension) return extension + + +def json_stringify(json_data, **kwargs): + kwargs.setdefault('separators', (',', ':')) + return json.dumps(json_data, **kwargs).decode('utf-8')