Compare commits

...

12 Commits

Author SHA1 Message Date
edutel
5053da4902
Merge ea4948068d323a081324f69a805df14ddab13f6f into 4e714f9df1ed2cccd51df60d45ff5504abe827b7 2025-03-26 23:22:00 +08:00
dirkf
4e714f9df1 [Misc] Correct [_]IE_DESC/NAME in a few IEs
* thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
* also add documenting comment in `InfoExtractor`
2025-03-26 12:47:19 +00:00
dirkf
c1ea7f5a24 [ITV] Mark ITVX not working
* update old shim
* correct [_]IE_DESC
2025-03-26 12:17:49 +00:00
dirkf
ea4948068d
Linted 2022-10-29 16:10:56 +00:00
dirkf
eff6cd4c24
Improve course/category extraction 2022-10-29 15:57:14 +00:00
dirkf
0a99e9f59d
Linted 2022-10-29 07:49:38 +01:00
dirkf
04a7c7a849
Fix test 2022-10-29 07:32:52 +01:00
dirkf
dc80f50f7e
Outdent for linter 2022-10-29 01:44:44 +00:00
dirkf
1ce8590329
Further improve extraction
* detect when login required
* extract further metadata
2022-10-28 22:43:54 +00:00
dirkf
0235e627b9
Improve extraction
* use User-Agent Mozilla/5.0
* use Referer for manifests and downloads
* finalise review comments
2022-10-28 21:55:44 +00:00
EduTel
3ee378c099 refactor and fix 2022-01-23 14:36:52 -06:00
EduTel
6abc344f22 fix platzi 2022-01-21 18:01:06 -06:00
6 changed files with 188 additions and 62 deletions

View File

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
class BokeCCIE(BokeCCBaseIE):
_IE_DESC = 'CC视频'
IE_DESC = 'CC视频'
_VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
_TESTS = [{

View File

@@ -9,7 +9,7 @@ from ..utils import (
class CloudyIE(InfoExtractor):
_IE_DESC = 'cloudy.ec'
IE_DESC = 'cloudy.ec'
_VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'https://www.cloudy.ec/v/af511e2527aac',

View File

@@ -422,6 +422,8 @@ class InfoExtractor(object):
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
# supply this in public subclasses: used in supported sites list, etc
# IE_DESC = 'short description of IE'
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""

View File

@@ -35,15 +35,6 @@ from ..utils import (
class ITVBaseIE(InfoExtractor):
def _search_nextjs_data(self, webpage, video_id, **kw):
transform_source = kw.pop('transform_source', None)
fatal = kw.pop('fatal', True)
return self._parse_json(
self._search_regex(
r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
webpage, 'next.js data', group='js', fatal=fatal, **kw),
video_id, transform_source=transform_source, fatal=fatal)
def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
if errnote is False:
return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
class ITVIE(ITVBaseIE):
_VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
_IE_DESC = 'ITVX'
IE_DESC = 'ITVX'
_WORKING = False
_TESTS = [{
'note': 'Hub URLs redirect to ITVX',
'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
'ext': determine_ext(href, 'vtt'),
})
next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
class ITVBTCCIE(ITVBaseIE):
_VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
_IE_DESC = 'ITV articles: News, British Touring Car Championship'
IE_DESC = 'ITV articles: News, British Touring Car Championship'
_TESTS = [{
'note': 'British Touring Car Championship',
'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',

View File

@@ -1,16 +1,23 @@
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_b64decode,
compat_kwargs,
compat_str,
compat_urllib_parse_urlparse,
)
from ..utils import (
clean_html,
dict_get,
ExtractorError,
get_element_by_class,
int_or_none,
parse_iso8601,
str_or_none,
strip_or_none,
try_get,
url_or_none,
urlencode_postdata,
@@ -22,6 +29,42 @@ class PlatziBaseIE(InfoExtractor):
_LOGIN_URL = 'https://platzi.com/login/'
_NETRC_MACHINE = 'platzi'
def _raise_extractor_error(self, video_id, reason, expected=True):
raise ExtractorError('[%s] %s: %s' % (self.IE_NAME, video_id, reason), expected=expected)
def _download_webpage(self, url_or_request, video_id, *args, **kwargs):
# CF likes Connection: keep-alive and so disfavours Py2
# retry on 403 may get in
kwargs['expected_status'] = 403
# header parameters required for Py3 to breach site's CF fence w/o 403
headers = kwargs.get('headers') or {}
new_hdrs = {}
if 'User-Agent' not in headers:
headers['User-Agent'] = 'Mozilla/5.0' # (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.0.0 Safari/537.36'
kwargs['headers'] = new_hdrs = headers
if new_hdrs:
kwargs = compat_kwargs(kwargs)
for _ in range(2):
x = super(PlatziBaseIE, self)._download_webpage_handle(url_or_request, video_id, *args, **kwargs)
if x is False:
return x
if x[1].getcode() != 403:
break
kwargs.pop('expected_status', None)
note = kwargs.pop('note', '')
kwargs['note'] = (note or 'Downloading webpage') + ' - retrying'
kwargs = compat_kwargs(kwargs)
path = compat_urllib_parse_urlparse(x[1].geturl())
if path == '/':
self._raise_extractor_error(video_id, 'Redirected to home page: content expired?')
elif path == '/login':
self.raise_login_required()
else:
errs = clean_html(get_element_by_class('Errorpage-text', x[0]))
if errs:
self._raise_extractor_error(video_id, errs)
return x[0]
def _real_initialize(self):
self._login()
@@ -75,6 +118,26 @@ class PlatziIE(PlatziBaseIE):
'''
_TESTS = [{
'url': 'https://platzi.com/clases/1927-intro-selenium/29383-bienvenida-al-curso',
'md5': '0af120f1ffd18a2246f19099d52b83e2',
'info_dict': {
'id': '29383',
'ext': 'mp4',
'title': 'Por qué aprender Selenium y qué verás',
'description': 'md5:bbe91d2760052ca4054a3149a6580436',
'timestamp': 1627400390,
'upload_date': '20210727',
'creator': 'Héctor Vega',
'series': 'Curso de Introducción a Selenium con Python',
'duration': 11700,
'categories': list,
},
'params': {
'format': 'bestvideo',
# 'skip_download': True,
},
'expected_warnings': ['HTTP Error 401']
}, {
'url': 'https://platzi.com/clases/1311-next-js/12074-creando-nuestra-primera-pagina/',
'md5': '8f56448241005b561c10f11a595b37e3',
'info_dict': {
@@ -84,7 +147,7 @@ class PlatziIE(PlatziBaseIE):
'description': 'md5:4c866e45034fc76412fbf6e60ae008bc',
'duration': 420,
},
'skip': 'Requires platzi account credentials',
'skip': 'Content expired',
}, {
'url': 'https://courses.platzi.com/classes/1367-communication-codestream/13430-background/',
'info_dict': {
@@ -94,10 +157,7 @@ class PlatziIE(PlatziBaseIE):
'description': 'md5:49c83c09404b15e6e71defaf87f6b305',
'duration': 360,
},
'skip': 'Requires platzi account credentials',
'params': {
'skip_download': True,
},
'skip': 'Content expired',
}]
def _real_extract(self, url):
@@ -105,50 +165,60 @@ class PlatziIE(PlatziBaseIE):
webpage = self._download_webpage(url, lecture_id)
data = self._parse_json(
data_preloaded_state = self._parse_json(
self._search_regex(
# client_data may contain "};" so that we have to try more
# strict regex first
(r'client_data\s*=\s*({.+?})\s*;\s*\n',
r'client_data\s*=\s*({.+?})\s*;'),
webpage, 'client data'),
(r'window\s*.\s*__PRELOADED_STATE__\s*=\s*({.*?});?\s*</script'), webpage, 'client data'),
lecture_id)
material = data['initialState']['material']
desc = material['description']
title = desc['title']
video_player = try_get(data_preloaded_state, lambda x: x['videoPlayer'], dict) or {}
title = strip_or_none(video_player.get('name')) or self._og_search_title(webpage)
servers = try_get(video_player, lambda x: x['video']['servers'], dict) or {}
if not servers and try_get(video_player, lambda x: x['blockedInfo']['blocked']):
why = video_player['blockedInfo'].get('type') or 'unspecified'
if why == 'unlogged':
self.raise_login_required()
self._raise_extractor_error(lecture_id, 'All video formats blocked because ' + why)
formats = []
for server_id, server in material['videos'].items():
if not isinstance(server, dict):
headers = {'Referer': url}
extractions = {
'hls': lambda x: formats.extend(self._extract_m3u8_formats(
server_json[x], lecture_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id='hls',
note='Downloading %s m3u8 information' % (server_json.get('id', x), ),
headers=headers, fatal=False)),
'dash': lambda x: formats.extend(self._extract_mpd_formats(
server_json[x], lecture_id, mpd_id='dash',
note='Downloading %s MPD manifest' % (server_json.get('id', x), ),
headers=headers, fatal=False)),
}
for server, server_json in servers.items():
if not isinstance(server_json, dict):
continue
for format_id in ('hls', 'dash'):
format_url = url_or_none(server.get(format_id))
if not format_url:
continue
if format_id == 'hls':
formats.extend(self._extract_m3u8_formats(
format_url, lecture_id, 'mp4',
entry_protocol='m3u8_native', m3u8_id=format_id,
note='Downloading %s m3u8 information' % server_id,
fatal=False))
elif format_id == 'dash':
formats.extend(self._extract_mpd_formats(
format_url, lecture_id, mpd_id=format_id,
note='Downloading %s MPD manifest' % server_id,
fatal=False))
for fmt in server_json.keys():
extraction = extractions.get(fmt)
if callable(extraction):
extraction(fmt)
self._sort_formats(formats)
for f in formats:
f.setdefault('http_headers', {})['Referer'] = headers['Referer']
content = str_or_none(desc.get('content'))
description = (clean_html(compat_b64decode(content).decode('utf-8'))
if content else None)
duration = int_or_none(material.get('duration'), invscale=60)
def categories():
cat = strip_or_none(video_player.get('courseCategory'))
if cat:
return [cat]
return {
'id': lecture_id,
'title': title,
'description': description,
'duration': duration,
'description': clean_html(video_player.get('courseDescription')) or self._og_search_description(webpage),
'duration': int_or_none(video_player.get('duration'), invscale=60),
'thumbnail': url_or_none(video_player.get('thumbnail')) or self._og_search_thumbnail(webpage),
'timestamp': parse_iso8601(dict_get(video_player, ('dateModified', 'datePublished'))),
'creator': strip_or_none(video_player.get('teacherName')) or clean_html(get_element_by_class('TeacherDetails-name', webpage)),
'comment_count': int_or_none(video_player.get('commentsNumber')),
'categories': categories(),
'series': strip_or_none(video_player.get('courseTitle')) or None,
'formats': formats,
}
@@ -157,17 +227,35 @@ class PlatziCourseIE(PlatziBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
platzi\.com/clases| # es version
courses\.platzi\.com/classes # en version
(?P<clas>
platzi\.com/clases| # es version
courses\.platzi\.com/classes # en version
)|
platzi\.com(?:/(?P<curs>cursos))?
)/(?P<id>[^/?\#&]+)
'''
_TESTS = [{
'url': 'https://platzi.com/web-angular/',
'info_dict': {
'id': 'web-angular',
'title': 'Frontend con Angular',
},
'playlist_count': 9,
}, {
'url': 'https://platzi.com/cursos/angular/',
'info_dict': {
'id': '2478',
'title': 'Curso de Fundamentos de Angular',
},
'playlist_count': 21,
}, {
'url': 'https://platzi.com/clases/next-js/',
'info_dict': {
'id': '1311',
'title': 'Curso de Next.js',
},
'playlist_count': 22,
'skip': 'Oops (updating page)',
}, {
'url': 'https://courses.platzi.com/classes/communication-codestream/',
'info_dict': {
@@ -175,23 +263,62 @@ class PlatziCourseIE(PlatziBaseIE):
'title': 'Codestream Course',
},
'playlist_count': 14,
'skip': 'Content expired',
}]
@classmethod
def _match_valid_url(cls, url):
return re.match(cls._VALID_URL, url)
@classmethod
def suitable(cls, url):
return False if PlatziIE.suitable(url) else super(PlatziCourseIE, cls).suitable(url)
def __extract_things(self, webpage, thing_id, thing_pattern):
return self.playlist_from_matches(
re.finditer(thing_pattern, webpage),
playlist_id=thing_id,
playlist_title=self._og_search_title(webpage, default=None),
getter=lambda m: urljoin('https://platzi.com', m.group('path')))
def _extract_classes(self, webpage, course_id):
display_id = course_id
course_id = self._search_regex(
r'''(["'])courseId\1\s*:\s*(?P<id>\d+)''',
webpage, 'course id', group='id', fatal=False) or course_id
return self.__extract_things(
webpage, course_id,
r'''<a\b[^>]+\bhref\s*=\s*['"]?(?P<path>/clases/\d+-%s/[^/]+)'''
% (display_id, ))
def _extract_categories(self, webpage, cat_id):
return self.__extract_things(
webpage, cat_id,
r'''<a\b[^>]+\bhref\s*=\s*['"]?(?P<path>/cursos/[^/]+)''')
def _real_extract(self, url):
course_name = self._match_id(url)
webpage = self._download_webpage(url, course_name)
m = self._match_valid_url(url)
classes, courses, this_id = m.group('clas', 'curs', 'id')
props = self._parse_json(
self._search_regex(r'data\s*=\s*({.+?})\s*;', webpage, 'data'),
course_name)['initialProps']
webpage = self._download_webpage(url, this_id)
if courses:
return self._extract_classes(webpage, this_id)
if not classes:
return self._extract_categories(webpage, this_id)
# this branch now seems always to give "Oops" pages
course_name = this_id
initialData = self._search_regex(
(r'window.initialData\s*=\s*({.+?})\s*;\s*\n', r'window.initialData\s*=\s*({.+?})\s*;'),
webpage, 'initialData')
props = self._parse_json(initialData, course_name, default={})
props = try_get(props, lambda x: x['initialProps'], dict) or {}
entries = []
for chapter_num, chapter in enumerate(props['concepts'], 1):
for chapter_num, chapter in enumerate(props.get('concepts') or [], 1):
if not isinstance(chapter, dict):
continue
materials = chapter.get('materials')
@@ -221,4 +348,8 @@ class PlatziCourseIE(PlatziBaseIE):
course_id = compat_str(try_get(props, lambda x: x['course']['id']))
course_title = try_get(props, lambda x: x['course']['name'], compat_str)
return self.playlist_result(entries, course_id, course_title)
result = self.playlist_result(entries, course_id, course_title)
desc = clean_html(get_element_by_class('RouteDescription-content', webpage))
if desc:
result['description'] = desc
return result

View File

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
IE_NAME = 'senate.gov'
_VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',