youtube-dl/youtube_dl/extractor/porn91.py

# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
)
from ..utils import (
    clean_html,
    extract_attributes,
    get_elements_by_class,
    parse_duration,
    ExtractorError,
    str_to_int,
    strip_or_none,
    unified_strdate,
)


class Porn91IE(InfoExtractor):
    IE_NAME = '91porn'
    _VALID_URL = r'(?:https?://)(?:www\.|)91porn\.com/.+?\?viewkey=(?P<id>[\w\d]+)'

    _TEST = {
        'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
        'md5': 'd869db281402e0ef4ddef3c38b866f86',
        'info_dict': {
            'id': '7e42283b4f5ab36da134',
            'title': '18岁大一漂亮学妹水嫩性感再爽一次',
            'ext': 'mp4',
            'duration': 431,
            'age_limit': 18,
            'upload_date': '20150520',
            'uploader': '千岁九王爷',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        # set language for page to be extracted
        self._set_cookie('91porn.com', 'language', 'cn_CN')

        webpage = self._download_webpage(
            'http://91porn.com/view_video.php?viewkey=%s' % video_id, video_id)

        if '作为游客你每天只可观看10个视频' in webpage:
            raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)

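        # the page <title> carries an English "Chinese homemade video" suffix;
        # the optional group below keeps it out of the extracted title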
        title = self._html_search_regex(
            r'(?s)<title\b[^>]*>\s*(.+?)\s*(?:Chinese\s+homemade\s+video\s*)?</title', webpage, 'title')

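        # the media URL is embedded percent-encoded inside a strencode2() call;
        # grab the encoded string and URL-decode it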
        video_link_url = self._search_regex(
            r'''document\s*\.\s*write\s*\(\s*strencode2\s*\((?P<q>"|')(?P<enc_str>[%\da-fA-F]+)(?P=q)\s*\)''',
            webpage, 'video link', group='enc_str')
        video_link_url = compat_urllib_parse_unquote(video_link_url)

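        # the decoded fragment is an HTML media source; wrap it in a <video>
        # element so the generic HTML5 media parser can extract the formats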
        info_dict = self._parse_html5_media_entries(url, '<video>%s</video>' % (video_link_url, ), video_id)[0]

        # extract various fields in <tag class=info>Name: value</tag>
        FIELD_MAP = {
            # cn_CN name: (yt-dl key, value parser, en name)
            '时长': ('duration', parse_duration, 'Runtime', ),
            '查看': ('view_count', str_to_int, 'Views', ),
            '留言': ('comment_count', str_to_int, 'Comments', ),
            '收藏': ('like_count', str_to_int, 'Favorites', ),
            '添加时间': ('upload_date', unified_strdate, 'Added', ),
            # same as title for en, not description for cn_CN
            '__ignore__': ('description', strip_or_none, 'Description', ),
            '作者': ('uploader', strip_or_none, 'From', ),
        }
        # yt-dl's original implementation of get_elements_by_class() uses regex
        # yt-dlp uses an actual HTML parser, and can be confused by bad HTML fragments
        for elt in get_elements_by_class(
                'info',
                # concatenate <span>s ...
                re.sub(r'(?i)</span>\s*<span\b[^>]*?>', '',
                       # ... and strip out possibly unbalanced <font> for yt-dlp
                       re.sub(r'(?i)(?:<font\b[^>]*?>|</font\s*>)', '', webpage))) or []:
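            # each matched element reads "Name: value"; split on the first colon
            # and map the localized name to a yt-dl field via FIELD_MAP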
            elt = re.split(r':\s*', clean_html(elt), 1)
            if len(elt) != 2 or elt[1] == '':
                continue
            parm = FIELD_MAP.get(elt[0].strip())
            if parm and elt[1] is not None:
                info_dict[parm[0]] = parm[1](elt[1]) if parm[1] else elt[1]

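        # the preview image is the poster attribute of the <video id="player_one"> tag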
        thumbnail = extract_attributes(
            self._search_regex(
                r'''(<video\b[^>]*\bid\s*=\s*(?P<q>"|')player_one(?P=q)[^>]*>)''',
                webpage, 'poster', default='')).get('poster')

        info_dict.update({
            'id': video_id,
            'title': title,
            'age_limit': self._rta_search(webpage),
            'thumbnail': thumbnail,
        })

        return info_dict
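

# A minimal sketch of driving this extractor through the YoutubeDL API
# (assuming a youtube_dl checkout is importable; the URL reuses the _TEST entry):
#
#     from youtube_dl import YoutubeDL
#
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
#             download=False)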