Compare commits


12 Commits

Author SHA1 Message Date
dirkf
aae565d565 [XFileShare] Fix typo 2024-10-21 11:04:07 +01:00
dirkf
cc5f6b1197 [XFileShare] Fix: remove redirected URL pattern 2024-10-21 02:37:05 +01:00
dirkf
cffda07f3c [Generic] Update KVS extraction for player v10
* support "renamed" flashvars variable
* use `_search_json()`
* support `rnd` query parameter
* extract tags, categories, age_limit, more thumbnails
* closes #31007
2024-10-21 00:14:58 +01:00
dirkf
6397648af6 [utils] Don't raise in js_to_json() template substitution when non-strict
* template expression should be evaluated with the same strictness
2024-10-21 00:06:00 +01:00
dirkf
26f0de9eab [utils] Correctly match class names in get_element[s]_by_class()
* reproduce CSS .classname behaviour ("bar" matches "bar", "foo bar baz", etc)
* add tests
2024-10-20 13:22:25 +01:00
dirkf
b80634ecf7 [XFileShare] Re-factor and fix tests
* update site list
* support page with player data in <iframe>
* use `_search_json()`
* improve "not found" detection
* improve title extraction
2024-10-20 12:28:08 +01:00
dirkf
0ddcc15fd1 [XFileShare] Add geo-block detection 2024-10-20 11:53:06 +01:00
dirkf
38b3a0980c [YandexMusic] Fix CAPTCHA check
* correct logic in _download_webpage() hook (yt-dlp/yt-dlp#4432)
* improve error message.
2024-10-19 17:01:24 +01:00
dirkf
10f38086d4 [core] Fix jwplayer format parsing
* thx yt-dlp/yt-dlp#10956
2024-10-07 15:17:04 +01:00
dirkf
fa7fdb263b [Mgoon,Kaltura] Fix regex typo (:?
* thx yt-dlp/yt-dlp#10807 (584d455)
2024-10-07 15:16:46 +01:00
dirkf
8388ee5f8d [HentaiStigma] Support new frame format with HTML5 video
* resolves #25019
2024-10-07 15:16:35 +01:00
dirkf
97516d5ed3 [ORFRadio] Support /programm/ URL format
* fixes yt-dlp/yt-dlp#11014
2024-10-07 15:16:25 +01:00
10 changed files with 235 additions and 115 deletions

View File: test/test_utils.py

@@ -1603,11 +1603,14 @@
     def test_get_element_by_class(self):
         html = '''
-            <span class="foo bar">nice</span>
+            <span class="foo bar baz-bam">nice</span>
         '''

         self.assertEqual(get_element_by_class('foo', html), 'nice')
+        self.assertEqual(get_element_by_class('bar', html), 'nice')
         self.assertEqual(get_element_by_class('no-such-class', html), None)
+        self.assertEqual(get_element_by_class('baz', html), None)
+        self.assertEqual(get_element_by_class('bam', html), None)

     def test_get_element_by_attribute(self):
         html = '''
@@ -1626,10 +1629,13 @@
     def test_get_elements_by_class(self):
         html = '''
-            <span class="foo bar">nice</span><span class="foo bar">also nice</span>
+            <span class="foo bar baz-bam">nice</span><span class="foo bar">also nice</span>
         '''

         self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
+        self.assertEqual(get_elements_by_class('bar', html), ['nice', 'also nice'])
+        self.assertEqual(get_elements_by_class('baz', html), [])
+        self.assertEqual(get_elements_by_class('bam', html), [])
         self.assertEqual(get_elements_by_class('no-such-class', html), [])

     def test_get_elements_by_attribute(self):

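The stricter matching above means a queried class only matches a whole, space-delimited class token, not a hyphenated fragment of one, mirroring CSS .classname behaviour. A quick sketch of the intended behaviour (interpreter-style, not part of the change):

    from youtube_dl.utils import get_element_by_class, get_elements_by_class

    html = '<span class="foo bar baz-bam">nice</span>'
    get_element_by_class('bar', html)    # 'nice' - "bar" is a whole class token
    get_element_by_class('baz', html)    # None   - "baz" only occurs inside "baz-bam"
    get_elements_by_class('bam', html)   # []     - same rule for the list variant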
View File: youtube_dl/extractor/common.py

@@ -3128,7 +3128,8 @@ class InfoExtractor(object):
                 continue
             urls.add(source_url)
             source_type = source.get('type') or ''
-            ext = mimetype2ext(source_type) or determine_ext(source_url)
+            # https://github.com/yt-dlp/yt-dlp/pull/10956
+            ext = determine_ext(source_url, default_ext=mimetype2ext(source_type))
             if source_type == 'hls' or ext == 'm3u8' or 'format=m3u8-aapl' in source_url:
                 formats.extend(self._extract_m3u8_formats(
                     source_url, video_id, 'mp4', entry_protocol='m3u8_native',

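For context on the one-line change above: the new order trusts an extension found in the source URL first and only falls back to the declared MIME type, instead of the other way round. A rough sketch with the youtube_dl.utils helpers (URL and type are invented):

    from youtube_dl.utils import determine_ext, mimetype2ext

    source_url = 'https://cdn.example.com/video/master.m3u8?token=xyz'   # hypothetical
    source_type = 'application/vnd.apple.mpegurl'

    mimetype2ext(source_type) or determine_ext(source_url)             # old: MIME type wins
    determine_ext(source_url, default_ext=mimetype2ext(source_type))   # new: URL extension wins

Both should evaluate to 'm3u8' here; the two orders only diverge when the declared type and the URL disagree.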
View File: youtube_dl/extractor/generic.py

@@ -31,10 +31,14 @@ from ..utils import (
     parse_resolution,
     sanitized_Request,
     smuggle_url,
+    strip_or_none,
+    T,
+    traverse_obj,
     unescapeHTML,
     unified_timestamp,
     unsmuggle_url,
     UnsupportedError,
+    update_url_query,
     url_or_none,
     urljoin,
     xpath_attr,
@@ -2237,6 +2241,7 @@ class GenericIE(InfoExtractor):
             'display_id': 'kelis-4th-of-july',
             'ext': 'mp4',
             'title': 'Kelis - 4th Of July',
+            'description': 'Kelis - 4th Of July',
             'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
         },
     }, {
@@ -2246,7 +2251,7 @@ class GenericIE(InfoExtractor):
             'id': '105',
             'display_id': 'kelis-4th-of-july',
             'ext': 'mp4',
-            'title': 'Kelis - 4th Of July / Embed Player',
+            'title': r're:Kelis - 4th Of July(?: / Embed Player)?$',
             'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
         },
         'params': {
@@ -2297,6 +2302,32 @@ class GenericIE(InfoExtractor):
             'title': 'Syren De Mer onlyfans_05-07-2020Have_a_happy_safe_holiday5f014e68a220979bdb8cd_source / Embed плеер',
             'thumbnail': r're:https?://www\.camhub\.world/contents/videos_screenshots/389000/389508/preview\.mp4\.jpg',
         },
+        'skip': 'needs Referer ?',
+    }, {
+        # KVS Player v10
+        'url': 'https://www.cambro.tv/588174/marleny-1/',
+        'md5': '759d2050590986c6fc341da0592c4d8e',
+        'info_dict': {
+            'id': '588174',
+            'display_id': 'marleny-1',
+            'ext': 'mp4',
+            'title': 'marleny 1',
+            'description': 'la maestra de tic toc',
+            'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/588000/588174/preview\.jpg',
+            'age_limit': 18,
+        },
+    }, {
+        # KVS Player v10 embed, NSFW
+        'url': 'https://www.cambro.tv/embed/436185',
+        'md5': '24338dc8b182900a2c9eda075a0a46c0',
+        'info_dict': {
+            'id': '436185',
+            'display_id': 'jaeandbailey-chaturbate-webcam-porn-videos',
+            'ext': 'mp4',
+            'title': 'jaeandbailey Chaturbate webcam porn videos',
+            'thumbnail': r're:https?://www\.cambro\.tv/contents/videos_screenshots/436000/436185/preview\.jpg',
+            'age_limit': 18,
+        },
     }, {
         'url': 'https://mrdeepfakes.com/video/5/selena-gomez-pov-deep-fakes',
         'md5': 'fec4ad5ec150f655e0c74c696a4a2ff4',
@@ -2309,14 +2340,16 @@ class GenericIE(InfoExtractor):
             'height': 720,
             'age_limit': 18,
         },
+        # 'skip': 'Geo-blocked in some mjurisdictions',
     }, {
+        # KVS Player v2
         'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/',
         'md5': 'e2f0a4c329f7986280b7328e24036d60',
         'info_dict': {
             'id': '284002',
             'display_id': 'just-out-of-the-shower-joi',
             'ext': 'mp4',
-            'title': 'Just Out Of The Shower JOI - Shooshtime',
+            'title': r're:Just Out Of The Shower JOI(?: - Shooshtime)?$',
             'height': 720,
             'age_limit': 18,
         },
@@ -2482,9 +2515,12 @@ class GenericIE(InfoExtractor):
             return '/'.join(urlparts) + '?' + url_query

         flashvars = self._search_regex(
-            r'(?s)<script\b[^>]*>.*?var\s+flashvars\s*=\s*(\{.+?\});.*?</script>',
-            webpage, 'flashvars')
-        flashvars = self._parse_json(flashvars, video_id, transform_source=js_to_json)
+            r'''(?<![=!+*-])=\s*kt_player\s*\(\s*'kt_player'\s*,\s*[^)]+,\s*([\w$]+)\s*\)''',
+            webpage, 'flashvars name', default='flashvars')
+        flashvars = self._search_json(
+            r'<script(?:\s[^>]*)?>[\s\S]*?var\s+%s\s*=' % (flashvars,),
+            webpage, 'flashvars', video_id, end_pattern=r';[\s\S]*?</script>',
+            transform_source=js_to_json)

         # extract the part after the last / as the display_id from the
         # canonical URL.
@@ -2493,12 +2529,7 @@ class GenericIE(InfoExtractor):
             r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
             webpage, 'display_id', fatal=False
         )
-        title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
-
-        thumbnail = flashvars['preview_url']
-        if thumbnail.startswith('//'):
-            protocol, _, _ = url.partition('/')
-            thumbnail = protocol + thumbnail
+        title = flashvars.get('video_title') or self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')

         url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys()))
         formats = []
@@ -2506,9 +2537,13 @@ class GenericIE(InfoExtractor):
             if '/get_file/' not in flashvars[key]:
                 continue
             format_id = flashvars.get(key + '_text', key)
+            f_url = urljoin(url, getrealurl(flashvars[key], flashvars['license_code']))
+            rnd = flashvars.get('rnd', key)
+            if rnd:
+                f_url = update_url_query(f_url, {'rnd': rnd})
             formats.append(merge_dicts(
                 parse_resolution(format_id) or parse_resolution(flashvars[key]), {
-                    'url': urljoin(url, getrealurl(flashvars[key], flashvars['license_code'])),
+                    'url': f_url,
                     'format_id': format_id,
                     'ext': 'mp4',
                     'http_headers': {'Referer': url},
@@ -2518,13 +2553,31 @@ class GenericIE(InfoExtractor):
         self._sort_formats(formats)

-        return {
+        csv2list = (T(lambda s: s.split(',')), Ellipsis, T(strip_or_none))
+        info = traverse_obj(flashvars, {
+            'tags': ('video_tags',) + csv2list,
+            'categories': ('video_categories',) + csv2list,
+            'thumbnails': (
+                T(dict.items), lambda _, k_v: k_v[0].startswith('preview_url'), {
+                    'url': (1, T(lambda u: urljoin(url, u))),
+                    'preference': (0, T(lambda k: 100 - len(k))),
+                }),
+        })
+        info = merge_dicts(info, {
             'id': flashvars['video_id'],
             'display_id': display_id,
             'title': title,
-            'thumbnail': thumbnail,
             'formats': formats,
-        }
+        })
+
+        # check-porn test for embed pages
+        if 'age_limit' not in info and traverse_obj(info, (
+                ('title', (('tags', 'categories'), Ellipsis) or []),
+                T(lambda t: bool(re.search(r'(?i)(?:^|\s+)porn(?:$|\s+)', t)) or None)),
+                get_all=False):
+            info['age_limit'] = 18
+
+        return info

     def _real_extract(self, url):
         if url.startswith('//'):
@@ -3598,7 +3651,7 @@ class GenericIE(InfoExtractor):
             ), webpage, 'KVS player', group='ver', default=False)
         if found:
             self.report_extraction('%s: KVS Player' % (video_id, ))
-            if found.split('.')[0] not in ('4', '5', '6'):
+            if found.split('.')[0] not in ('2', '4', '5', '6', '10'):
                 self.report_warning('Untested major version (%s) in player engine - download may fail.' % (found, ))
             return merge_dicts(
                 self._extract_kvs(url, webpage, video_id),

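The csv2list tuple above is a traverse_obj path suffix that turns a comma-separated flashvars field into a clean list. Roughly, with youtube-dl's traverse_obj/T helpers (the flashvars dict below is invented):

    from youtube_dl.utils import T, strip_or_none, traverse_obj

    flashvars = {'video_tags': 'music, webcam , hd'}   # hypothetical KVS flashvars
    csv2list = (T(lambda s: s.split(',')), Ellipsis, T(strip_or_none))
    traverse_obj(flashvars, ('video_tags',) + csv2list)
    # -> ['music', 'webcam', 'hd']

The thumbnails branch works the same way: it iterates flashvars.items(), keeps the preview_url* keys, and ranks shorter key names higher.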
View File: youtube_dl/extractor/hentaistigma.py

@@ -1,6 +1,11 @@
+# coding: utf-8
 from __future__ import unicode_literals

 from .common import InfoExtractor
+from ..utils import (
+    merge_dicts,
+    traverse_obj,
+)


 class HentaiStigmaIE(InfoExtractor):
@@ -24,16 +29,17 @@ class HentaiStigmaIE(InfoExtractor):
         title = self._html_search_regex(
             r'<h2[^>]+class="posttitle"[^>]*><a[^>]*>([^<]+)</a>',
             webpage, 'title')
-        wrap_url = self._html_search_regex(
+        wrap_url = self._search_regex(
             r'<iframe[^>]+src="([^"]+mp4)"', webpage, 'wrapper url')
-        wrap_webpage = self._download_webpage(wrap_url, video_id)
-        video_url = self._html_search_regex(
-            r'file\s*:\s*"([^"]+)"', wrap_webpage, 'video url')
+        vid_page = self._download_webpage(wrap_url, video_id)

-        return {
+        entries = self._parse_html5_media_entries(wrap_url, vid_page, video_id)
+        self._sort_formats(traverse_obj(entries, (0, 'formats')) or [])
+
+        return merge_dicts({
             'id': video_id,
-            'url': video_url,
             'title': title,
             'age_limit': 18,
-        }
+        }, entries[0])

View File: youtube_dl/extractor/kaltura.py

@@ -23,7 +23,7 @@ class KalturaIE(InfoExtractor):
                 (?:
                     kaltura:(?P<partner_id>\d+):(?P<id>[0-9a-z_]+)|
                     https?://
-                        (:?(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
+                        (?:(?:www|cdnapi(?:sec)?)\.)?kaltura\.com(?::\d+)?/
                         (?:
                             (?:
                                 # flash player

View File: youtube_dl/extractor/mgoon.py

@@ -13,7 +13,7 @@ from ..utils import (
 class MgoonIE(InfoExtractor):
     _VALID_URL = r'''(?x)https?://(?:www\.)?
-    (?:(:?m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
+    (?:(?:m\.)?mgoon\.com/(?:ch/(?:.+)/v|play/view)|
     video\.mgoon\.com)/(?P<id>[0-9]+)'''
     _API_URL = 'http://mpos.mgoon.com/player/video?id={0:}'
     _TESTS = [

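The typo fixed here and in the Kaltura pattern above is "(:?", which opens a capturing group starting with an optional literal ":" instead of the intended non-capturing group "(?:". A small illustration (test strings are made up):

    import re

    re.match(r'(:?m\.)?mgoon\.com', ':m.mgoon.com') is not None   # True  - the typo tolerates a stray ':' and adds a capture group
    re.match(r'(?:m\.)?mgoon\.com', ':m.mgoon.com') is not None   # False - the corrected pattern rejects it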
View File: youtube_dl/extractor/orf.py

@@ -112,7 +112,7 @@ class ORFRadioIE(ORFRadioBase):
     _VALID_URL = (
         r'https?://sound\.orf\.at/radio/(?P<station>{0})/sendung/(?P<id>\d+)(?:/(?P<show>\w+))?'.format(_STATION_RE),
-        r'https?://(?P<station>{0})\.orf\.at/player/(?P<date>\d{{8}})/(?P<id>\d+)'.format(_STATION_RE),
+        r'https?://(?P<station>{0})\.orf\.at/(?:player|programm)/(?P<date>\d{{8}})/(?P<id>\d+)'.format(_STATION_RE),
     )

     _TESTS = [{
@@ -150,6 +150,10 @@ class ORFRadioIE(ORFRadioBase):
             'duration': 1500,
         },
         'skip': 'Shows from ORF Sound are only available for 30 days.'
+    }, {
+        # yt-dlp/yt-dlp#11014
+        'url': 'https://oe1.orf.at/programm/20240916/769302/Playgrounds',
+        'only_matching': True,
     }]

     def _real_extract(self, url):

View File: youtube_dl/extractor/xfileshare.py

@@ -12,7 +12,9 @@ from ..utils import (
     clean_html,
     decode_packed_codes,
     determine_ext,
+    extract_attributes,
     ExtractorError,
+    get_element_by_class,
     get_element_by_id,
     int_or_none,
     merge_dicts,
@@ -56,39 +58,40 @@ def aa_decode(aa_code):

 class XFileShareIE(InfoExtractor):
     _SITES = (
-        # status check 2024-02: site availability, G site: search
-        (r'aparat\.cam', 'Aparat'),  # Cloudflare says host error 522, apparently changed to wolfstreeam.tv
-        (r'filemoon\.sx/.', 'FileMoon'),
-        (r'gounlimited\.to', 'GoUnlimited'),  # no media pages listed
+        # status check 2024-10: site availability, G site: search
+        (r'aparat\.cam', 'Aparat'),  # Cloudflare says host error 522, apparently changed to wolfstream.tv
+        (r'filemoon\.(?:sx|to|in)', 'FileMoon'),
+        # (r'gounlimited\.to', 'GoUnlimited'),  # domain not found
         (r'govid\.me', 'GoVid'),  # no media pages listed
-        (r'highstream\.tv', 'HighStream'),  # clipwatching.com redirects here
-        (r'holavid\.com', 'HolaVid'),  # Cloudflare says host error 522
-        # (r'streamty\.com', 'Streamty'),  # no media pages listed, connection timeout
-        # (r'thevideobee\.to', 'TheVideoBee'),  # no pages listed, refuses connection
-        (r'uqload\.to', 'Uqload'),  # .com, .co redirect here
-        (r'(?:vedbam\.xyz|vadbam.net)', 'V?dB?m'),  # vidbom.com redirects here, but no valid media pages listed
+        (r'highstream\.tv', 'HighStream'),  # Cloudflare says host error 522, clipwatching.com now dead
+        (r'holavid\.com', 'HolaVid'),  # hoster default home page
+        # (r'streamty\.com', 'Streamty'),  # spam parking domain
+        # (r'thevideobee\.to', 'TheVideoBee'),  # domain for sale
+        (r'uqload\.ws', 'Uqload'),  # .com, .co, .to redirect here
+        # (r'(vadbam.net', 'VadBam'),  # domain not found
+        (r'(?:vedbam\.xyz|vadbam\.net|vbn\d\.vdbtm\.shop)', 'V?dB?m'),  # vidbom.com redirects here, but no valid media pages listed
         (r'vidlo\.us', 'vidlo'),  # no valid media pages listed
         (r'vidlocker\.xyz', 'VidLocker'),  # no media pages listed
-        (r'(?:w\d\.)?viidshar\.com', 'VidShare'),  # vidshare.tv redirects here
+        (r'(?:w\d\.)?viidshar\.com', 'VidShare'),  # vidshare.tv parked
         # (r'vup\.to', 'VUp'),  # domain not found
-        (r'wolfstream\.tv', 'WolfStream'),
-        (r'xvideosharing\.com', 'XVideoSharing'),  # just started showing 'maintenance mode'
+        # (r'wolfstream\.tv', 'WolfStream'),  # domain not found
+        (r'xvideosharing\.com', 'XVideoSharing'),
     )

     IE_DESC = 'XFileShare-based sites: %s' % ', '.join(list(zip(*_SITES))[1])
-    _VALID_URL = (r'https?://(?:www\.)?(?P<host>%s)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
+    _VALID_URL = (r'https?://(?:www\.)?(?P<host>%s)/(?P<sub>[a-z]/|)(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
                   % '|'.join(site for site in list(zip(*_SITES))[0]))
     _EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:%s)/embed-[0-9a-zA-Z]+.*?)\1' % '|'.join(site for site in list(zip(*_SITES))[0])]

     _FILE_NOT_FOUND_REGEXES = (
-        r'>(?:404 - )?File Not Found<',
-        r'>The file was removed by administrator<',
+        r'>\s*(?:404 - )?File Not Found\s*<',
+        r'>\s*The file was removed by administrator\s*<',
     )
     _TITLE_REGEXES = (
         r'style="z-index: [0-9]+;">([^<]+)</span>',
         r'<td nowrap>([^<]+)</td>',
         r'h4-fine[^>]*>([^<]+)<',
-        r'>Watch (.+)[ <]',
+        r'>Watch (.+?)(?: mp4)?(?: The Ultimate Free Video Hosting Solution for Webmasters and Bloggers)?<',
         r'<h2 class="video-page-head">([^<]+)</h2>',
         r'<h2 style="[^"]*color:#403f3d[^"]*"[^>]*>([^<]+)<',  # streamin.to (dead)
         r'title\s*:\s*"([^"]+)"',  # govid.me
@@ -106,38 +109,41 @@ class XFileShareIE(InfoExtractor):
     _TESTS = [{
         'note': 'link in `sources`',
-        'url': 'https://uqload.to/dcsu06gdb45o',
-        'md5': '7f8db187b254379440bf4fcad094ae86',
+        'url': 'https://uqload.ws/4sah252totrk.html',
+        'md5': '1f11151b5044862fbc3c112732f9f7d8',
         'info_dict': {
-            'id': 'dcsu06gdb45o',
+            'id': '4sah252totrk',
             'ext': 'mp4',
-            'title': 'f2e31015957e74c8c8427982e161c3fc mp4',
+            'title': 'JEONGHAN WONWOO Interview With Allure Korea Arabic Sub',
             'thumbnail': r're:https://.*\.jpg'
         },
         'params': {
             'nocheckcertificate': True,
         },
-        'expected_warnings': ['Unable to extract JWPlayer data'],
+        # 'expected_warnings': ['Unable to extract JWPlayer data'],
     }, {
-        'note': 'link in decoded `sources`',
-        'url': 'https://xvideosharing.com/1tlg6agrrdgc',
-        'md5': '2608ce41932c1657ae56258a64e647d9',
+        'note': 'link in Playerjs',  # need test with 'link in decoded `sources`'
+        'url': 'https://xvideosharing.com/8cnupzc1z8xq.html',
+        'md5': '9725ca7229e8f3046f2417da3bd5eddc',
         'info_dict': {
-            'id': '1tlg6agrrdgc',
+            'id': '8cnupzc1z8xq',
             'ext': 'mp4',
-            'title': '0121',
+            'title': 'HEVC X265 Big Buck Bunny 1080 10s 20MB',
             'thumbnail': r're:https?://.*\.jpg',
         },
-        'skip': 'This server is in maintenance mode.',
     }, {
-        'note': 'JWPlayer link in un-p,a,c,k,e,d JS',
-        'url': 'https://filemoon.sx/e/dw40rxrzruqz',
-        'md5': '5a713742f57ac4aef29b74733e8dda01',
+        'note': 'JWPlayer link in un-p,a,c,k,e,d JS, in player frame',
+        'url': 'https://filemoon.sx/d/fbsxidybremo',
+        'md5': '82007a71661630f60e866f0d6ed31b2a',
         'info_dict': {
-            'id': 'dw40rxrzruqz',
-            'title': 'dw40rxrzruqz',
+            'id': 'fbsxidybremo',
+            'title': 'Uchouten',
             'ext': 'mp4'
         },
+        'params': {
+            'skip_download': 'ffmpeg',
+        },
+        'expected_warnings': ['hlsnative has detected features it does not support'],
     }, {
         'note': 'JWPlayer link in un-p,a,c,k,e,d JS',
         'url': 'https://vadbam.net/6lnbkci96wly.html',
@@ -150,7 +156,7 @@ class XFileShareIE(InfoExtractor):
     }, {
         'note': 'JWPlayer link in clear',
         'url': 'https://w1.viidshar.com/nnibe0xf0h79.html',
-        'md5': 'f0a580ce9df06cc61b4a5c979d672367',
+        'md5': 'b95b97978093bc287c322307c689bd94',
         'info_dict': {
             'id': 'nnibe0xf0h79',
             'title': 'JaGa 68ar',
@@ -160,27 +166,21 @@ class XFileShareIE(InfoExtractor):
             'skip_download': 'ffmpeg',
         },
         'expected_warnings': ['hlsnative has detected features it does not support'],
-    }, {
-        'note': 'JWPlayer link in clear',
-        'url': 'https://wolfstream.tv/a3drtehyrg52.html',
-        'md5': '1901d86a79c5e0c6a51bdc9a4cfd3769',
-        'info_dict': {
-            'id': 'a3drtehyrg52',
-            'title': 'NFL 2023 W04 DET@GB',
-            'ext': 'mp4'
-        },
     }, {
         'url': 'https://aparat.cam/n4d6dh0wvlpr',
         'only_matching': True,
-    }, {
-        'url': 'https://uqload.to/ug5somm0ctnk.html',
-        'only_matching': True,
     }, {
         'url': 'https://highstream.tv/2owiyz3sjoux',
         'only_matching': True,
     }, {
         'url': 'https://vedbam.xyz/6lnbkci96wly.html',
         'only_matching': True,
+    }, {
+        'url': 'https://vbn2.vdbtm.shop/6lnbkci96wly.html',
+        'only_matching': True,
+    }, {
+        'url': 'https://filemoon.in/e/5abn1ze9jifb',
+        'only_matching': True,
     }]

     @classmethod
@@ -194,17 +194,26 @@ class XFileShareIE(InfoExtractor):
         return list(yield_urls())

     def _real_extract(self, url):
-        host, video_id = self._match_valid_url(url).group('host', 'id')
+        host, sub, video_id = self._match_valid_url(url).group('host', 'sub', 'id')

-        url = 'https://%s/%s' % (
-            host,
+        url = 'https://%s/%s%s' % (
+            host, sub,
             'embed-%s.html' % video_id if host in ('govid.me', 'vidlo.us') else video_id)
         webpage = self._download_webpage(url, video_id)
-        container_div = get_element_by_id('container', webpage) or webpage
+        main = self._search_regex(
+            r'(?s)<main>(.+)</main>', webpage, 'main', default=webpage)
+        container_div = (
+            get_element_by_id('container', main)
+            or get_element_by_class('container', main)
+            or webpage)
         if self._search_regex(
                 r'>This server is in maintenance mode\.', container_div,
                 'maint error', group=0, default=None):
             raise ExtractorError(clean_html(container_div), expected=True)
+        if self._search_regex(
+                'not available in your country', container_div,
+                'geo block', group=0, default=None):
+            self.raise_geo_restricted()
         if self._search_regex(
                 self._FILE_NOT_FOUND_REGEXES, container_div,
                 'missing video error', group=0, default=None):
@@ -228,38 +237,41 @@ class XFileShareIE(InfoExtractor):
         title = (
             self._search_regex(self._TITLE_REGEXES, webpage, 'title', default=None)
-            or self._og_search_title(webpage, default=None)
-            or video_id).strip()
+            or self._og_search_title(webpage, default='')).strip()

-        obf_code = True
-        while obf_code:
+        def deobfuscate(html):
             for regex, func in (
                     (r'(?s)(?<!-)\b(eval\(function\(p,a,c,k,e,d\)\{(?:(?!</script>).)+\)\))',
                      decode_packed_codes),
                     (r'(゚.+)', aa_decode)):
-                obf_code = self._search_regex(regex, webpage, 'obfuscated code', default=None)
+                obf_code = self._search_regex(regex, html, 'obfuscated code', default=None)
                 if obf_code:
-                    webpage = webpage.replace(obf_code, func(obf_code))
-                    break
+                    return html.replace(obf_code, func(obf_code))

+        def jw_extract(html):
             jwplayer_data = self._find_jwplayer_data(
-                webpage.replace(r'\'', '\''), video_id)
+                html.replace(r'\'', '\''), video_id)
             result = self._parse_jwplayer_data(
                 jwplayer_data, video_id, require_title=False,
                 m3u8_id='hls', mpd_id='dash')
-        if not traverse_obj(result, 'formats'):
-            if jwplayer_data:
+            result = traverse_obj(result, (
+                (None, ('entries', 0)), T(lambda r: r if r['formats'] else None)),
+                get_all=False) or {}
+            if not result and jwplayer_data:
                 self.report_warning(
                     'Failed to extract JWPlayer formats', video_id=video_id)
+            return result

+        def extract_from_links(html):
             urls = set()
             for regex in self._SOURCE_URL_REGEXES:
-                for mobj in re.finditer(regex, webpage):
+                for mobj in re.finditer(regex, html):
                     urls.add(mobj.group('url'))

-            sources = self._search_regex(
-                r'sources\s*:\s*(\[(?!{)[^\]]+\])', webpage, 'sources', default=None)
-            urls.update(traverse_obj(sources, (T(lambda s: self._parse_json(s, video_id)), Ellipsis)))
+            sources = self._search_json(
+                r'\bsources\s*:', webpage, 'sources', video_id,
+                contains_pattern=r'\[(?!{)[^\]]+\]', default=[])
+            urls.update(sources)

             formats = []
             for video_url in traverse_obj(urls, (Ellipsis, T(url_or_none))):
@@ -273,7 +285,33 @@ class XFileShareIE(InfoExtractor):
                     'url': video_url,
                     'format_id': 'sd',
                 })
-        result = {'formats': formats}
+            return {'formats': formats}
+
+        def extract_info(html):
+            html = deobfuscate(html) or html
+            result = jw_extract(html)
+            if not result.get('formats'):
+                result = extract_from_links(html)
+            return result
+
+        def pages_to_extract(html):
+            yield html
+            # page with separate protected download page also has player link
+            player_iframe = self._search_regex(
+                r'(<iframe\s[^>]+>)',
+                get_element_by_id('iframe-holder', html) or '',
+                'player iframe', default='')
+            player_url = extract_attributes(player_iframe).get('src')
+            if player_url:
+                html = self._download_webpage(player_url, video_id, note='Downloading player page', fatal=False)
+                if html:
+                    yield html
+
+        result = {}
+        for html in pages_to_extract(webpage):
+            result = extract_info(html)
+            if result.get('formats'):
+                break

         self._sort_formats(result['formats'])

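The new pages_to_extract() step above reads the follow-up player page URL out of the #iframe-holder markup with extract_attributes(). Schematically (the markup is invented):

    from youtube_dl.utils import extract_attributes

    iframe = '<iframe src="https://filemoon.sx/e/abc123xyz" width="640" height="360">'  # hypothetical markup
    extract_attributes(iframe).get('src')
    # -> 'https://filemoon.sx/e/abc123xyz'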
View File: youtube_dl/extractor/yandexmusic.py

@@ -30,17 +30,20 @@ class YandexMusicBaseIE(InfoExtractor):
     @staticmethod
     def _raise_captcha():
         raise ExtractorError(
-            'YandexMusic has considered youtube-dl requests automated and '
-            'asks you to solve a CAPTCHA. You can either wait for some '
-            'time until unblocked and optionally use --sleep-interval '
-            'in future or alternatively you can go to https://music.yandex.ru/ '
-            'solve CAPTCHA, then export cookies and pass cookie file to '
-            'youtube-dl with --cookies',
+            'YandexMusic has considered youtube-dl requests automated '
+            'and asks you to solve a CAPTCHA. You can wait for some time '
+            'until unblocked and optionally use --sleep-interval in future; '
+            'otherwise solve the CAPTCHA at https://music.yandex.ru/, '
+            'then export cookies and pass the cookie file to youtube-dl '
+            'with --cookies.',
             expected=True)

     def _download_webpage_handle(self, *args, **kwargs):
         webpage = super(YandexMusicBaseIE, self)._download_webpage_handle(*args, **kwargs)
-        if 'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;вашего IP-адреса, похожи на&nbsp;автоматические.' in webpage:
+        blocked_ip_msg = (
+            'Нам очень жаль, но&nbsp;запросы, поступившие с&nbsp;'
+            'вашего IP-адреса, похожи на&nbsp;автоматические.')
+        if blocked_ip_msg in (webpage or [''])[0]:
             self._raise_captcha()
         return webpage

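For context: _download_webpage_handle() returns a (content, url_handle) pair, so the old substring test above ran against the tuple and could not match; the fix indexes the page text. Schematically (values are invented):

    result = ('<html>... похожи на автоматические ...</html>', object())   # (webpage, url_handle) pair
    'похожи' in result                # False - membership on a tuple compares whole elements
    'похожи' in (result or [''])[0]   # True  - test the page text itself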
View File: youtube_dl/utils.py

@@ -1960,7 +1960,7 @@ def get_element_by_attribute(attribute, value, html, escape_value=True):
 def get_elements_by_class(class_name, html):
     """Return the content of all tags with the specified class in the passed HTML document as a list"""
     return get_elements_by_attribute(
-        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
+        'class', r'[^\'"]*(?<!-)\b%s\b(?!-)[^\'"]*' % re.escape(class_name),
         html, escape_value=False)
@@ -4498,7 +4498,7 @@ def strip_jsonp(code):
 def js_to_json(code, *args, **kwargs):

     # vars is a dict of (var, val) pairs to substitute
-    vars = args[0] if len(args) > 0 else kwargs.get('vars', {})
+    js_vars = args[0] if len(args) > 0 else kwargs.get('vars', {})
     strict = kwargs.get('strict', False)

     STRING_QUOTES = '\'"`'
@@ -4523,9 +4523,13 @@ def js_to_json(code, *args, **kwargs):
                 else escape)

     def template_substitute(match):
-        evaluated = js_to_json(match.group(1), vars, strict=strict)
+        evaluated = js_to_json(match.group(1), js_vars, strict=strict)
         if evaluated[0] == '"':
+            try:
                 return json.loads(evaluated)
+            except JSONDecodeError:
+                if strict:
+                    raise
         return evaluated

     def fix_kv(m):
@@ -4559,14 +4563,14 @@ def js_to_json(code, *args, **kwargs):
                     i = int(im.group(1), base)
                     return ('"%s":' if v.endswith(':') else '%s') % inv(i)

-        if v in vars:
+        if v in js_vars:
             try:
                 if not strict:
-                    json.loads(vars[v])
+                    json.loads(js_vars[v])
             except JSONDecodeError:
-                return inv(json.dumps(vars[v]))
+                return inv(json.dumps(js_vars[v]))
             else:
-                return inv(vars[v])
+                return inv(js_vars[v])

         if not strict:
             v = try_call(inv, args=(v,), default=v)
@@ -4577,7 +4581,7 @@ def js_to_json(code, *args, **kwargs):
         raise ValueError('Unknown value: ' + v)

     def create_map(mobj):
-        return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
+        return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=js_vars))))

     code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
     if not strict:
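For reference, template substitution is the ${...} handling inside backtick strings that the try/except above now guards: with strict=False a fragment that does not decode as JSON is passed through instead of raising. A minimal sketch (output shown approximately):

    from youtube_dl.utils import js_to_json

    js_to_json('{label: `clip ${num}`}', vars={'num': '7'})
    # roughly -> '{"label": "clip 7"}': the known variable is evaluated and spliced into the string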
@@ -6715,3 +6719,8 @@ class _UnsafeExtensionError(Exception):
             raise cls(extension)

         return extension
+
+
+def json_stringify(json_data, **kwargs):
+    kwargs.setdefault('separators', (',', ':'))
+    return json.dumps(json_data, **kwargs).decode('utf-8')