Compare commits

...

5 Commits

Author | SHA1 | Message | Date

Yu-Kai "Steven" Wang | ce955813e4 | Merge 20a1e3544805d0947070324dd54452ad51c2ac81 into 3eb8d22ddb8982ca4fb56bb7a8d6517538bf14c6 | 2025-04-01 11:02:18 +02:00

dirkf | 3eb8d22ddb | [JSInterp] Temporary fix for #33102 | 2025-03-31 04:21:09 +01:00

dirkf | 4e714f9df1 | [Misc] Correct [_]IE_DESC/NAME in a few IEs | 2025-03-26 12:47:19 +00:00
    * thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
    * also add documenting comment in `InfoExtractor`

dirkf | c1ea7f5a24 | [ITV] Mark ITVX not working | 2025-03-26 12:17:49 +00:00
    * update old shim
    * correct [_]IE_DESC

Isaac-the-Man | 20a1e35448 | [ettoday] Add new extractor | 2021-01-09 05:19:46 -05:00
8 changed files with 178 additions and 15 deletions

View File: youtube_dl/extractor/bokecc.py

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
 class BokeCCIE(BokeCCBaseIE):
-    _IE_DESC = 'CC视频'
+    IE_DESC = 'CC视频'
     _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
     _TESTS = [{

View File: youtube_dl/extractor/cloudy.py

@@ -9,7 +9,7 @@ from ..utils import (
 class CloudyIE(InfoExtractor):
-    _IE_DESC = 'cloudy.ec'
+    IE_DESC = 'cloudy.ec'
     _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'https://www.cloudy.ec/v/af511e2527aac',

View File: youtube_dl/extractor/common.py

@@ -422,6 +422,8 @@ class InfoExtractor(object):
     _GEO_COUNTRIES = None
     _GEO_IP_BLOCKS = None
     _WORKING = True
+    # supply this in public subclasses: used in supported sites list, etc
+    # IE_DESC = 'short description of IE'

     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""

View File: youtube_dl/extractor/ettoday.py (new file)

@@ -0,0 +1,165 @@
# coding: utf-8
from __future__ import unicode_literals

import re
import unicodedata

from .common import InfoExtractor
from ..utils import (
    get_element_by_class,
    extract_attributes,
    int_or_none,
    strip_or_none,
    parse_iso8601,
    unescapeHTML,
    ExtractorError,
)


class EttodayIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        https?://(?:boba|www)\.ettoday\.net/
        (?P<type>
            videonews|
            tools/player|
            video
        )/(?:
            (?P<x>[0-9]+)/(?P<y>[0-9]+)/|
            (?:[0-9]+-)
        )?(?P<id>[0-9]+)'''
    _TESTS = [{
        'url': 'https://boba.ettoday.net/videonews/250060',
        'md5': 'd875be90d233878829d779d336e550cc',
        'info_dict': {
            'id': '250060',
            'ext': 'mp4',
            'title': '梁靜茹《勇氣》《暖暖》演唱會必聽! 「愛真的需要勇氣..」全場合唱超感人',
            'description': '梁靜茹《勇氣》《暖暖》演唱會必聽! 「愛真的需要勇氣..」全場合唱超感人',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 188,
            'timestamp': 1609088513,
            'upload_date': '20201227'
        }
    }, {
        'url': 'https://boba.ettoday.net/videonews/250575',
        'md5': 'a8a883f23809e6fd14d6ffcdc4950a2d',
        'info_dict': {
            'id': '250575',
            'ext': 'mp4',
            'title': '【料理之王】EP10精華亞州廚神Jason Wang指定「沙公ft.柑橘」 黃晶晶V.S洪士元',
            'description': '訂閱《料理之王》頻道:https://bit.ly/32n7bIS全新廚藝節目《料理之王》10月23日起每周五晚上九點於料理之王、播吧Youtube頻道首播。主持人Lulu 黃路梓茵首席導師廚佛瑞德Fred、「亞洲廚神」Jason Wang王凱傑、福原愛飛行導師吳健豪、王輔立',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1570,
            'timestamp': 1609325661,
            'upload_date': '20201230'
        }
    }, {
        'url': 'https://www.ettoday.net/tools/player/709749-250599?title=%E5%B7%A5%E4%BA%BA%E6%89%9B%E3%80%8C9%E5%B1%A4%E6%B3%A1%E6%A3%89%E7%A3%9A%E3%80%8D%E9%9A%A8%E6%A9%9F%E5%81%87%E8%B7%8C%EF%BC%81%E8%B7%AF%E4%BA%BA%E5%85%A8%E8%B7%B3%E9%96%8B%E3%80%81%E9%98%BF%E5%AC%A4%E5%9A%87%E5%A3%9E%E9%95%B7%E9%9F%B3%E5%B0%96%E5%8F%AB&bid=boba_preroll_web&show_ad=1&uccu=3&auto',
        'md5': '14f92cdb0d535363243343542aebe121',
        'info_dict': {
            'id': '250599',
            'ext': 'mp4',
            'title': '工人扛「9層泡棉磚」隨機假跌路人全跳開、阿嬤嚇壞長音尖叫',
            'thumbnail': r're:^https?://.*\.jpg$'
        }
    }, {
        'url': 'https://boba.ettoday.net/video/33/174/247571',
        'md5': '675178c997f644f622723749fc2f987c',
        'info_dict': {
            'id': '247571',
            'ext': 'mp4',
            'title': '慣老闆們小心!律師來教社畜面對職場不合理待遇 Ft.律師男友Joey回覆IG問答【社畜時代podcast】EP.03',
            'description': '社畜podcast來了 律師來幫你解答囉',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2419,
            'timestamp': 1607653880,
            'upload_date': '20201211'
        }
    }]

    def _get_preference(self, url_info):
        # rank the 'AUTO' variant below the explicit numeric qualities
        if url_info.get('quality') == 'AUTO':
            return -1
        return int(url_info.get('quality'))

    def _sanitize_control_char(self, s):
        # drop Unicode control characters that would break JSON-LD parsing
        return ''.join(ch for ch in s if unicodedata.category(ch)[0] != 'C')

    def _extract_videonews_info(self, url, video_id):
        webpage = self._download_webpage(url, video_id)
        json_data = self._search_json_ld(
            self._sanitize_control_char(webpage), video_id,
            expected_type='VideoObject', fatal=False, default={})
        title = strip_or_none(
            unescapeHTML(json_data.get('title') or self._og_search_title(webpage)))
        desc = strip_or_none(
            unescapeHTML(json_data.get('description') or self._og_search_description(webpage)))
        tb = json_data.get('thumbnail') or self._og_search_thumbnail(webpage)
        upload = int_or_none(
            json_data.get('timestamp')
            or parse_iso8601(self._html_search_meta('pubdate', webpage)))
        attrs = extract_attributes(get_element_by_class('video', webpage))
        return attrs.get('src'), {
            'title': title,
            'description': desc,
            'thumbnail': tb,
            'duration': json_data.get('duration'),
            'timestamp': upload
        }

    def _extract_toolplayer_info(self, webpage, video_id):
        title = self._html_search_regex(
            r'<title>(?P<title>.+?)</title>',
            webpage, 'title', group='title', default=None)
        tb = self._html_search_regex(
            r"setAttribute\('poster',[^\S]'(?P<thumbnail>.+?)'\)",
            webpage, 'thumbnail', group='thumbnail', default=None)
        return {
            'title': title,
            'thumbnail': tb
        }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        page_type = self._search_regex(self._VALID_URL, url, 'page type', group='type')

        info_dict = {}
        if page_type in ('videonews', 'video'):
            src_url, info_dict = self._extract_videonews_info(url, video_id)
            content = self._download_webpage(src_url, video_id)
        elif page_type == 'tools/player':
            content = self._download_webpage(url, video_id)
            info_dict = self._extract_toolplayer_info(content, video_id)
        else:
            raise ExtractorError('Unsupported URL type')

        # each available quality is declared in an inline player script; collect the m3u8 URLs
        r = re.compile(
            r"quality !== \'(?:[0-9,A-Z]+)\'\) {[^\S]+url = \'(?P<url>[^\']+)\';[^\S]+quality = \'(?P<quality>[0-9|AUTO]{3,4})P?\';")
        urls_info = [m.groupdict() for m in r.finditer(content)]

        formats = []
        for url_info in urls_info:
            formats.extend(self._extract_m3u8_formats(
                url_info.get('url'), video_id, 'mp4',
                entry_protocol='m3u8_native', preference=self._get_preference(url_info)))
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': info_dict.get('title'),
            'formats': formats,
            'description': info_dict.get('description'),
            'thumbnail': info_dict.get('thumbnail'),
            'duration': info_dict.get('duration'),
            'timestamp': info_dict.get('timestamp')
        }

View File: youtube_dl/extractor/extractors.py

@@ -368,6 +368,7 @@ from .espn import (
     FiveThirtyEightIE,
 )
 from .esri import EsriVideoIE
+from .ettoday import EttodayIE
 from .europa import EuropaIE
 from .expotv import ExpoTVIE
 from .expressen import ExpressenIE

View File: youtube_dl/extractor/itv.py

@@ -35,15 +35,6 @@ from ..utils import (
 class ITVBaseIE(InfoExtractor):

-    def _search_nextjs_data(self, webpage, video_id, **kw):
-        transform_source = kw.pop('transform_source', None)
-        fatal = kw.pop('fatal', True)
-        return self._parse_json(
-            self._search_regex(
-                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
-                webpage, 'next.js data', group='js', fatal=fatal, **kw),
-            video_id, transform_source=transform_source, fatal=fatal)
-
     def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
         if errnote is False:
             return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
 class ITVIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
-    _IE_DESC = 'ITVX'
+    IE_DESC = 'ITVX'
+    _WORKING = False
     _TESTS = [{
         'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
                     'ext': determine_ext(href, 'vtt'),
                 })

-        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
         video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
         title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
         info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
 class ITVBTCCIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
-    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    IE_DESC = 'ITV articles: News, British Touring Car Championship'
     _TESTS = [{
         'note': 'British Touring Car Championship',
         'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',

View File: youtube_dl/extractor/senateisvp.py

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
         ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
         ['arch', '', 'http://ussenate-f.akamaihd.net/']
     ]
-    _IE_NAME = 'senate.gov'
+    IE_NAME = 'senate.gov'
     _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
     _TESTS = [{
         'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',

View File: youtube_dl/jsinterp.py

@@ -686,6 +686,8 @@ class JSInterpreter(object):
             raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)

     def _dump(self, obj, namespace):
+        if obj is JS_Undefined:
+            return 'undefined'
         try:
             return json.dumps(obj)
         except TypeError:
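For context on the added guard: `json.dumps()` raises `TypeError` for the interpreter's `JS_Undefined` sentinel, so without the early return the dump falls through to the generic handling for non-serializable objects; with it, undefined values are rendered as the literal `undefined`, matching JavaScript. A minimal standalone sketch of the same pattern (the sentinel and the fallback below are simplified stand-ins, not the real jsinterp internals):

    import json


    class JS_Undefined(object):
        # simplified stand-in for the interpreter's "undefined" sentinel
        pass


    def dump(obj):
        if obj is JS_Undefined:
            return 'undefined'
        try:
            return json.dumps(obj)
        except TypeError:
            # simplified fallback; the real _dump() handles non-serializable objects differently
            return repr(obj)


    print(dump(JS_Undefined))  # undefined
    print(dump({'a': 1}))      # {"a": 1}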