[motherless] Fix review issues and improve extraction (closes #26495, closes #27450)

Sergey M․ 2021-01-05 07:40:06 +07:00
parent f318882955
commit ecae54a98d
1 changed file with 34 additions and 18 deletions


@@ -61,6 +61,23 @@ class MotherlessIE(InfoExtractor):
         # no keywords
         'url': 'http://motherless.com/8B4BBC1',
         'only_matching': True,
+    }, {
+        # see https://motherless.com/videos/recent for recent videos with
+        # uploaded date in "ago" format
+        'url': 'https://motherless.com/3C3E2CF',
+        'info_dict': {
+            'id': '3C3E2CF',
+            'ext': 'mp4',
+            'title': 'a/ Hot Teens',
+            'categories': list,
+            'upload_date': '20210104',
+            'uploader_id': 'yonbiw',
+            'thumbnail': r're:https?://.*\.jpg',
+            'age_limit': 18,
+        },
+        'params': {
+            'skip_download': True,
+        },
     }]
 
     def _real_extract(self, url):
@@ -85,29 +102,28 @@ class MotherlessIE(InfoExtractor):
             or 'http://cdn4.videos.motherlessmedia.com/videos/%s.mp4?fs=opencloud' % video_id)
         age_limit = self._rta_search(webpage)
         view_count = str_to_int(self._html_search_regex(
-            (r'>([\d,.]+)\s+Views<',  # 1,234,567 Views
-             r'<strong>Views</strong>\s+([^<]+)<'),
+            (r'>([\d,.]+)\s+Views<', r'<strong>Views</strong>\s+([^<]+)<'),
             webpage, 'view count', fatal=False))
         like_count = str_to_int(self._html_search_regex(
-            (r'>([\d,.]+)\s+Favorites<',  # 1,234 Favorites
+            (r'>([\d,.]+)\s+Favorites<',
              r'<strong>Favorited</strong>\s+([^<]+)<'),
             webpage, 'like count', fatal=False))
 
-        upload_date = self._html_search_regex(
-            (r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<',
-             r'class=["\']count[^>]+>(\d+[hd])\s+[aA]go<',  # 20h/1d ago
-             r'<strong>Uploaded</strong>\s+([^<]+)<'), webpage, 'upload date')
-        relative = re.match(r'(\d+)([hd])$', upload_date)
-        if relative:
-            delta = int(relative.group(1))
-            unit = relative.group(2)
-            if unit == 'h':
-                delta_t = datetime.timedelta(hours=delta)
-            else:  # unit == 'd'
-                delta_t = datetime.timedelta(days=delta)
-            upload_date = (datetime.datetime.now() - delta_t).strftime('%Y%m%d')
-        else:
-            upload_date = unified_strdate(upload_date)
+        upload_date = unified_strdate(self._search_regex(
+            r'class=["\']count[^>]+>(\d+\s+[a-zA-Z]{3}\s+\d{4})<', webpage,
+            'upload date', default=None))
+        if not upload_date:
+            uploaded_ago = self._search_regex(
+                r'>\s*(\d+[hd])\s+[aA]go\b', webpage, 'uploaded ago',
+                default=None)
+            if uploaded_ago:
+                delta = int(uploaded_ago[:-1])
+                _AGO_UNITS = {
+                    'h': 'hours',
+                    'd': 'days',
+                }
+                kwargs = {_AGO_UNITS.get(uploaded_ago[-1]): delta}
+                upload_date = (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')
 
         comment_count = webpage.count('class="media-comment-contents"')
         uploader_id = self._html_search_regex(
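
A minimal standalone sketch (not part of the commit) of how the new "Nh"/"Nd ago" handling computes upload_date: only the _AGO_UNITS/timedelta arithmetic mirrors the diff above; the upload_date_from_ago wrapper and the example call are hypothetical.

import datetime
import re

_AGO_UNITS = {
    'h': 'hours',
    'd': 'days',
}

def upload_date_from_ago(uploaded_ago):
    # uploaded_ago is a token like '20h' or '1d', as captured by the
    # extractor's r'>\s*(\d+[hd])\s+[aA]go\b' pattern
    m = re.match(r'(\d+)([hd])$', uploaded_ago)
    if not m:
        return None
    # map the unit letter to a timedelta keyword and subtract from now (UTC)
    kwargs = {_AGO_UNITS[m.group(2)]: int(m.group(1))}
    return (datetime.datetime.utcnow() - datetime.timedelta(**kwargs)).strftime('%Y%m%d')

print(upload_date_from_ago('20h'))  # e.g. '20210104' if run early on 2021-01-05 UTC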