commit 03cf660824
dirkf, 2024-04-18 10:26:32 +08:00 (committed by GitHub)
1 changed file with 52 additions and 15 deletions


@@ -1,11 +1,21 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
+import re
+
 from .common import InfoExtractor
+from ..compat import (
+    compat_urllib_parse_unquote,
+)
 from ..utils import (
+    clean_html,
+    extract_attributes,
+    get_elements_by_class,
     parse_duration,
-    int_or_none,
     ExtractorError,
+    str_to_int,
+    strip_or_none,
+    unified_strdate,
 )
 
 
@@ -15,18 +25,21 @@ class Porn91IE(InfoExtractor):
 
     _TEST = {
         'url': 'http://91porn.com/view_video.php?viewkey=7e42283b4f5ab36da134',
-        'md5': '7fcdb5349354f40d41689bd0fa8db05a',
+        'md5': 'd869db281402e0ef4ddef3c38b866f86',
         'info_dict': {
             'id': '7e42283b4f5ab36da134',
             'title': '18岁大一漂亮学妹水嫩性感再爽一次',
             'ext': 'mp4',
             'duration': 431,
             'age_limit': 18,
+            'upload_date': '20150520',
+            'uploader': '千岁九王爷',
         }
     }
 
     def _real_extract(self, url):
         video_id = self._match_id(url)
+        # set language for page to be extracted
         self._set_cookie('91porn.com', 'language', 'cn_CN')
 
         webpage = self._download_webpage(
@@ -35,29 +48,53 @@ class Porn91IE(InfoExtractor):
         if '作为游客你每天只可观看10个视频' in webpage:
             raise ExtractorError('91 Porn says: Daily limit 10 videos exceeded', expected=True)
 
-        title = self._search_regex(
-            r'<div id="viewvideo-title">([^<]+)</div>', webpage, 'title')
-        title = title.replace('\n', '')
+        title = self._html_search_regex(
+            r'(?s)<title\b[^>]*>\s*(.+?)\s*(?:Chinese\s+homemade\s+video\s*)?</title', webpage, 'title')
 
         video_link_url = self._search_regex(
-            r'<textarea[^>]+id=["\']fm-video_link[^>]+>([^<]+)</textarea>',
-            webpage, 'video link')
-        videopage = self._download_webpage(video_link_url, video_id)
+            r'''document\s*\.\s*write\s*\(\s*strencode2\s*\((?P<q>"|')(?P<enc_str>[%\da-fA-F]+)(?P=q)\s*\)''',
+            webpage, 'video link', group='enc_str')
+        video_link_url = compat_urllib_parse_unquote(video_link_url)
 
-        info_dict = self._parse_html5_media_entries(url, videopage, video_id)[0]
+        info_dict = self._parse_html5_media_entries(url, '<video>%s</video>' % (video_link_url, ), video_id)[0]
 
-        duration = parse_duration(self._search_regex(
-            r'时长:\s*</span>\s*(\d+:\d+)', webpage, 'duration', fatal=False))
+        # extract various fields in <tag class=info>Name: value</tag>
+        FIELD_MAP = {
+            # cn_CN name: (yt-dl key, value parser, en name)
+            '时长': ('duration', parse_duration, 'Runtime', ),
+            '查看': ('view_count', str_to_int, 'Views', ),
+            '留言': ('comment_count', str_to_int, 'Comments', ),
+            '收藏': ('like_count', str_to_int, 'Favorites', ),
+            '添加时间': ('upload_date', unified_strdate, 'Added', ),
+            # same as title for en, not description for cn_CN
+            '__ignore__': ('description', strip_or_none, 'Description', ),
+            '作者': ('uploader', strip_or_none, 'From', ),
+        }
+        # yt-dl's original implementation of get_elements_by_class() uses regex
+        # yt-dlp uses an actual HTML parser, and can be confused by bad HTML fragments
+        for elt in get_elements_by_class(
+                'info',
+                # concatenate <span>s ...
+                re.sub(r'(?i)</span>\s*<span\b[^>]*?>', '',
+                       # ... and strip out possibly unbalanced <font> for yt-dlp
+                       re.sub(r'(?i)(?:<font\b[^>]*?>|</font\s*>)', '', webpage))) or []:
+            elt = re.split(r':\s*', clean_html(elt), 1)
+            if len(elt) != 2 or elt[1] == '':
+                continue
+            parm = FIELD_MAP.get(elt[0].strip())
+            if parm and elt[1] is not None:
+                info_dict[parm[0]] = parm[1](elt[1]) if parm[1] else elt[1]
 
-        comment_count = int_or_none(self._search_regex(
-            r'留言:\s*</span>\s*(\d+)', webpage, 'comment count', fatal=False))
+        thumbnail = extract_attributes(
+            self._search_regex(
+                r'''(<video\b[^>]*\bid\s*=\s*(?P<q>"|')player_one(?P=q)[^>]*>)''',
+                webpage, 'poster', default='')).get('poster')
 
         info_dict.update({
             'id': video_id,
             'title': title,
-            'duration': duration,
-            'comment_count': comment_count,
             'age_limit': self._rta_search(webpage),
+            'thumbnail': thumbnail,
         })
 
         return info_dict
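
The core change above replaces the old fm-video_link textarea (which held a URL to a separate video page) with the percent-encoded HTML fragment the page now emits via document.write(strencode2(...)). The snippet below, which is not part of the patch, is a minimal sketch of that decode-and-wrap step; it uses the standard library's unquote in place of youtube-dl's compat_urllib_parse_unquote, and the encoded sample string is hypothetical.

# Minimal illustration of the decode-and-wrap step used by the new extractor code.
try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote  # Python 2, as handled by yt-dl's compat layer

# hypothetical percent-encoded fragment, standing in for the strencode2(...) argument
enc_str = '%3Csource%20src%3D%22https%3A%2F%2Fexample.com%2Fv.mp4%22%3E'

video_link_url = unquote(enc_str)
# wrap in <video> so an HTML5 media parser (_parse_html5_media_entries in the patch)
# can treat the decoded fragment as a media entry
video_html = '<video>%s</video>' % (video_link_url, )
print(video_html)
# <video><source src="https://example.com/v.mp4"></video>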
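
The new FIELD_MAP loop replaces the one-off duration and comment-count regexes with a single table mapping the page's class=info labels to info_dict keys and value parsers. A rough, self-contained analogue of that idea is sketched below; the helper parse_info_labels and the sample label strings are invented for illustration, and plain int()/str.strip stand in for youtube-dl's str_to_int/strip_or_none.

import re

# label -> (info_dict key, value parser); a trimmed-down analogue of the patch's FIELD_MAP
FIELD_MAP = {
    '时长': ('duration', None),       # runtime, left unparsed here
    '查看': ('view_count', int),      # views
    '留言': ('comment_count', int),   # comments
    '收藏': ('like_count', int),      # favourites
    '作者': ('uploader', str.strip),  # uploader name
}

def parse_info_labels(texts):
    # texts stands in for the cleaned "label: value" strings produced by
    # clean_html() over get_elements_by_class('info', ...) in the patch
    info = {}
    for text in texts:
        parts = re.split(r':\s*', text, maxsplit=1)  # split "label: value" once
        if len(parts) != 2 or parts[1] == '':
            continue
        mapped = FIELD_MAP.get(parts[0].strip())
        if mapped:
            key, parser = mapped
            info[key] = parser(parts[1]) if parser else parts[1]
    return info

print(parse_info_labels(['查看: 1234', '作者: someone', '天气: sunny']))
# {'view_count': 1234, 'uploader': 'someone'}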