Mirror of https://github.com/ytdl-org/youtube-dl (synced 2025-06-02 11:02:40 +09:00)
Compare commits: c0f0f2bdf3...d80a0d0e5a (5 commits)
Commits in this compare:
- d80a0d0e5a
- 3eb8d22ddb
- 4e714f9df1
- c1ea7f5a24
- 5966b43092
@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
 
 
 class BokeCCIE(BokeCCBaseIE):
-    _IE_DESC = 'CC视频'
+    IE_DESC = 'CC视频'
     _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
 
     _TESTS = [{
@@ -9,7 +9,7 @@ from ..utils import (
 
 
 class CloudyIE(InfoExtractor):
-    _IE_DESC = 'cloudy.ec'
+    IE_DESC = 'cloudy.ec'
     _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'https://www.cloudy.ec/v/af511e2527aac',
@@ -422,6 +422,8 @@ class InfoExtractor(object):
     _GEO_COUNTRIES = None
     _GEO_IP_BLOCKS = None
     _WORKING = True
+    # supply this in public subclasses: used in supported sites list, etc
+    # IE_DESC = 'short description of IE'
 
     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""
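
The comment added here documents the convention that the other commits in this compare apply to individual extractors: the human-readable description belongs in a public IE_DESC class attribute, which the supported-sites listing reads from the class, whereas an underscore-prefixed _IE_DESC is just a private name that nothing consults. A minimal sketch of a conforming public subclass (the class name and URL pattern below are invented for illustration and are not part of this change):

from youtube_dl.extractor.common import InfoExtractor


class ExampleIE(InfoExtractor):  # hypothetical extractor, for illustration only
    IE_DESC = 'short description of IE'  # public: picked up for the supported sites list
    _VALID_URL = r'https?://example\.com/video/(?P<id>\d+)'  # leading underscore: internal detail

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._og_search_video_url(webpage),
        }
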
@@ -494,6 +494,11 @@ from .hotstar import (
 )
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
+from .hqporner import (
+    HQPornerIE,
+    HQPornerListIE,
+    HQPornerSearchIE,
+)
 from .hrfernsehen import HRFernsehenIE
 from .hrti import (
     HRTiIE,
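
Registering the three new classes in extractors.py is what makes them discoverable. A quick sanity check from a checkout with this branch applied might look like the following sketch (gen_extractors is youtube-dl's existing helper for enumerating extractor instances; the names are derived from the class names):

from youtube_dl.extractor import gen_extractors

names = [ie.IE_NAME for ie in gen_extractors()]
# expected to include the single-video, list and search extractors added below
print([n for n in names if n.lower().startswith('hqporner')])
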
youtube_dl/extractor/hqporner.py (new file, 233 lines)
@@ -0,0 +1,233 @@
# coding: utf-8
from __future__ import unicode_literals

import itertools
import re
from string import capwords

from .common import InfoExtractor
from ..utils import (
    clean_html,
    date_from_str,
    get_element_by_class,
    get_element_by_id,
    int_or_none,
    join_nonempty,
    parse_duration,
    strip_or_none,
    unified_strdate,
    urljoin,
)


class HQPornerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?hqporner\.com/hdporn/(?P<id>[\d]+)-'
    _TESTS = [{
        'url': 'https://hqporner.com/hdporn/110374-looking_for_a_change_of_pace.html',
        'md5': '7eb7b791a1ce8a619bde603b2dc334b5',
        'info_dict': {
            'id': '110374',
            'ext': 'mp4',
            'title': 'Looking For A Change Of Pace',
            'description': 'featuring Myra',
            'upload_date': '20230227',
            'age_limit': 18,
            'tags': list,
            'categories': list,
            'duration': 3271,
            'thumbnail': r're:https?://.*\.jpg',
        },
    }, {
        'url': 'https://hqporner.com/hdporn/86482-all_night_rager.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # some pages need a `referer` to avoid 404
        webpage = self._download_webpage(url, video_id, headers={'referer': 'https://hqporner.com/?q=porn'})

        # details below video are in a <header> element
        heading = self._search_regex(r'''(?s)(<header>.+?</section>)\s*</div>''', webpage, 'heading', default='')
        title = (
            capwords(clean_html(get_element_by_class('main-h1', heading) or ''))
            or self._html_search_regex(
                r'<title\b[^>]*>\s*([^<]+)(?:\s+-\s+HQporner\.com)?\s*</title>',
                webpage, 'title'))
        # video details are in a page loaded by this <iframe>
        player = get_element_by_id('playerWrapper', webpage)
        player = self._search_regex(
            r'''<iframe [^>]*\bsrc\s*=\s*('|")(?P<url>(?:(?!\1)\S)+)''',
            player, 'player iframe', group='url')
        player = self._download_webpage(urljoin(url, player), video_id, note='Downloading player iframe')
        # select the complete set of videos
        player = self._search_regex(
            r'''\belse\s*\{\s*\$\s*\(\s*('|")#jw\1\s*\)\s*\.\s*html\s*\(\s*(?P<vstr>("|')\s*<video [^>]+>[^)]+</video>\s*\3)''',
            player, 'video element', group='vstr')
        # it's a string containing HTML5 video
        info = self._parse_html5_media_entries(
            url, self._parse_json(player, video_id), video_id)[0]  # not used now: m3u8_id='hls', m3u8_entry_protocol='m3u8_native', mpd_id='dash')
        # site offers no timestamp, but may have "however-many date-units ago"
        upload_date = get_element_by_class('fa-calendar', heading) or ''
        if upload_date.endswith(' ago'):
            upload_date = date_from_str('now-' + upload_date[:-len(' ago')].replace(' ', '').replace('one', '1'))
            if upload_date:
                upload_date = upload_date.isoformat().replace('-', '')
        else:
            # probably ineffective
            upload_date = unified_strdate(upload_date)

        # utils candidate
        def tag_list(s, delim=','):
            return [t for t in map(strip_or_none, s.split(delim)) if t] or None

        desc = self._html_search_meta('description', webpage, default='')

        info.update({
            'id': video_id,
            'title': title,
            'age_limit': 18,
            'upload_date': upload_date,
            'description': clean_html(get_element_by_class('icon fa-star-o', heading)),
            'duration': parse_duration(
                get_element_by_class('fa-clock-o', heading)
                or self._search_regex(r'Video duration is\s+([^.]+)', desc, 'duration', default='')),
            'categories': tag_list(
                clean_html(self._search_regex(r'(?s)</h3>\s*<p>(.+?)</p>', heading, 'categories', default='').replace('</a>', ','))
                or self._html_search_meta('keywords', webpage, default='')),
            'tags': tag_list(self._search_regex(r'Tags [\w\s-]+:\s+([^.]+)', desc, 'tags', default='')),
        })
        return info


class HQPornerListBaseIE(InfoExtractor):

    # yt-dlp shim
    @classmethod
    def _match_valid_url(cls, url):
        return re.match(cls._VALID_URL, url)

    def _real_extract(self, url):
        pl_id, pg = self._match_valid_url(url).group('id', 'pg')
        pg = int_or_none(pg)

        def entries():
            """
            Generate the playlist
            If `pg` is a page number, get the list for that page
            Otherwise continue from `url` until exhausted
            """
            next = url
            for pnum in itertools.count(start=pg or 1):
                p_url = urljoin(url, next)
                first = (pg or 1) == pnum
                if not p_url and not first:
                    break
                page = self._download_webpage(
                    p_url, pl_id,
                    note='Downloading page' + ('' if first else (' %d' % pnum)),
                    fatal=first)
                if not page:
                    break
                for m in re.finditer(
                        # each entry in a playlist page has a hyperlinked <img> followed by the caption:
                        # <h3> containing a hyperlinked title, followed by a <span> containing duration
                        r'''(?sx)
                            # <img> with thumbnail (extracted), then closing containing <a><div>
                            (?:<img\b[^>]+\bsrc\s*=\s*("|')(?P<thm>(?:(?!\1).)+)\1[^>]*>.*?)?
                            # caption: extract href and title
                            <h3\b[^>]+\bclass\s*=\s*("|')(?:(?:(?!\3).)+\s)*?meta-data-title(?:\s(?:(?!\3).)+)*\3[^>]*>\s*
                            <a\b[^>]+\bhref\b=("|')(?P<url>(?:(?!\4).)+)\4[^>]*>(?P<ttl>[^<]*)</a>\s*
                            </h3>\s*
                            # extract duration
                            (?:<span\b[^>]+\bclass\s*=\s*("|')(?:(?:(?!\7).)+\s)*?fa-clock-o(?:\s(?:(?!\7).)+)*\7[^>]*>(?P<dur>[^<]+)</span>)?
                        ''', page):
                    a_url = self._proto_relative_url(urljoin(url, m.group('url')))
                    if a_url:
                        res = self.url_result(a_url, video_title=capwords(clean_html(m.group('ttl')) or '') or None)
                        res.update({
                            'duration': parse_duration(m.group('dur')),
                            'thumbnail': self._proto_relative_url(urljoin(a_url, m.group('thm'))),
                        })
                        yield res
                if pg is not None:
                    break
                # next is the last link in the pagination block, unless that's the current URL
                maybe_next = urljoin(url, self._search_regex(
                    r'''(?s)<a\b[^>]+\bhref\s*=\s*("|')(?P<url>(?:(?!\1).)+)\1[^>]*>[^>]*</a>\s*</li>\s*$''',
                    get_element_by_class('pagination', page) or '', 'next page',
                    group='url', default=None))
                next = maybe_next if maybe_next != next else None

        return self.playlist_result(entries(), playlist_id=join_nonempty(pl_id, pg, delim=':'))

    @staticmethod
    def _set_title(info):
        pl_id, _, pg = info['id'].rpartition(':')
        if not pl_id:
            pl_id = pg
            pg = None
        info['title'] = pl_id.replace('/', ': ')
        if pg:
            info['title'] += ' [%s]' % (pg, )


class HQPornerListIE(HQPornerListBaseIE):

    _VALID_URL = r'https?://(?:www\.)?hqporner\.com/(?P<id>(?:top|(?:category|actress)/[^/]+))(?:/(?P<pg>\d+))?'
    _TESTS = [{
        'url': 'https://hqporner.com/category/beach-porn',
        'info_dict': {
            'id': 'category/beach-porn',
            'title': 'Category: beach-porn',
        },
        'playlist_mincount': 250,
    }, {
        'url': 'https://hqporner.com/category/beach-porn/2',
        'info_dict': {
            'id': 'category/beach-porn:2',
            'title': 'Category: beach-porn [2]',
        },
        'playlist_count': 50,
    }, {
        'url': 'https://hqporner.com/actress/mary/1',
        'info_dict': {
            'id': 'actress/mary:1',
            'title': 'Actress: mary [1]',
        },
        'playlist_count': 50,
    },
    ]

    def _real_extract(self, url):
        res = super(HQPornerListIE, self)._real_extract(url)
        self._set_title(res)
        res['title'] = res['title'].capitalize()
        return res


class HQPornerSearchIE(HQPornerListBaseIE):

    _VALID_URL = r'https?://(?:www\.)?hqporner\.com/\?q=(?P<id>[^&]+)(?:&p=(?P<pg>\d+))?'
    _TESTS = [{
        'url': 'https://hqporner.com/?q=french',
        'info_dict': {
            'id': 'french',
            'title': 'Searching: french',
        },
        'playlist_mincount': 250,
    }, {
        'url': 'https://hqporner.com/?q=french&p=2',
        'info_dict': {
            'id': 'french:2',
            'title': 'Searching: french [2]',
        },
        'playlist_count': 50,
    },
    ]

    def _real_extract(self, url):
        res = super(HQPornerSearchIE, self)._real_extract(url)
        self._set_title(res)
        res['title'] = 'Searching: ' + res['title']
        return res
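
A minimal way to exercise the new extractor without fetching any media, using the first test URL from _TESTS above (YoutubeDL and extract_info are the standard embedding API; the printed fields depend on the site responding as the tests expect):

import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True})
info = ydl.extract_info(
    'https://hqporner.com/hdporn/110374-looking_for_a_change_of_pace.html',
    download=False)
print(info['id'], info['title'], info.get('duration'))
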
@@ -35,15 +35,6 @@ from ..utils import (
 
 class ITVBaseIE(InfoExtractor):
 
-    def _search_nextjs_data(self, webpage, video_id, **kw):
-        transform_source = kw.pop('transform_source', None)
-        fatal = kw.pop('fatal', True)
-        return self._parse_json(
-            self._search_regex(
-                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
-                webpage, 'next.js data', group='js', fatal=fatal, **kw),
-            video_id, transform_source=transform_source, fatal=fatal)
-
     def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
         if errnote is False:
             return False
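
Dropping this override implies that ITVBaseIE now relies on a shared _search_nextjs_data helper provided elsewhere (presumably on InfoExtractor in common.py, which is outside this compare), including a default= value that is returned as-is when nothing matches; that is what the later change from default='{}' to default={} depends on. A rough sketch of the behaviour being delegated, reconstructed from the removed lines rather than taken from the base class:

import json
import re


def search_nextjs_data(webpage, video_id, default=None):
    # locate the JSON that next.js embeds in <script id="__NEXT_DATA__">...</script>
    m = re.search(
        r'<script\b[^>]+\bid=("|\')__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>',
        webpage)
    if not m:
        return default  # handed back unparsed, so it should already be a dict, not the string '{}'
    return json.loads(m.group('js'))
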
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
 
 class ITVIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
-    _IE_DESC = 'ITVX'
+    IE_DESC = 'ITVX'
+    _WORKING = False
+
     _TESTS = [{
         'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
                 'ext': determine_ext(href, 'vtt'),
             })
 
-        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
         video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
         title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
         info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
 
 class ITVBTCCIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
-    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    IE_DESC = 'ITV articles: News, British Touring Car Championship'
     _TESTS = [{
         'note': 'British Touring Car Championship',
         'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',
@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
         ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
         ['arch', '', 'http://ussenate-f.akamaihd.net/']
     ]
-    _IE_NAME = 'senate.gov'
+    IE_NAME = 'senate.gov'
     _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
     _TESTS = [{
         'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
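
Same pattern as the IE_DESC renames: the override only takes effect under the public name. With the old _IE_NAME spelling the attribute was never consulted, so the extractor kept the default name derived from the class name. A tiny illustration (the stand-in class and the derivation shown are approximate, for illustration only):

class SenateISVPIE(object):    # stripped-down stand-in, not the real class
    _IE_NAME = 'senate.gov'    # private spelling: ignored by listings


# roughly how the default is formed when IE_NAME is not set:
print(SenateISVPIE.__name__[:-2])  # 'SenateISVP', not the intended 'senate.gov'
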
@@ -686,6 +686,8 @@ class JSInterpreter(object):
             raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
 
     def _dump(self, obj, namespace):
+        if obj is JS_Undefined:
+            return 'undefined'
         try:
             return json.dumps(obj)
         except TypeError:
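
The new guard matters because JS_Undefined is a plain sentinel object that json.dumps cannot serialise, so _dump previously fell through to the TypeError fallback for it. A self-contained illustration with a stand-in sentinel (JS_Undefined itself lives in jsinterp.py):

import json


class _Sentinel(object):  # stand-in for jsinterp.JS_Undefined
    pass


try:
    json.dumps(_Sentinel())
except TypeError:
    print('not JSON-serialisable; the added check returns the literal "undefined" instead')
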