Compare commits

...

8 Commits

Author SHA1 Message Date

Yukan Zhang  a884c993ca
    Merge 69a40d3eb06316ac944aea2a0d28d2ce15d447cd into 3eb8d22ddb8982ca4fb56bb7a8d6517538bf14c6
    2025-04-01 10:21:34 +02:00

dirkf  3eb8d22ddb
    [JSInterp] Temporary fix for #33102
    2025-03-31 04:21:09 +01:00

dirkf  4e714f9df1
    [Misc] Correct [_]IE_DESC/NAME in a few IEs
    * thx seproDev, yt-dlp/yt-dlp/pull/12694/commits/ae69e3c
    * also add documenting comment in `InfoExtractor`
    2025-03-26 12:47:19 +00:00

dirkf  c1ea7f5a24
    [ITV] Mark ITVX not working
    * update old shim
    * correct [_]IE_DESC
    2025-03-26 12:17:49 +00:00

Yukan Zhang  69a40d3eb0
    [sohu] fix extractor conflict
    certain URL would cause conflict between SohuPlaylistIE and SohuIE
    2021-06-10 00:58:34 -07:00

Yukan Zhang  22c0a90a7a
    [sohu] Add extractor for playlist
    2021-06-10 00:09:08 -07:00

Yukan Zhang  cbe89f0c28
    Added a multipart video testcase for sohu.py
    2021-05-26 23:16:37 -07:00

Yukan Zhang  d3cb4f6743
    [sohu] Fix extraction (closes #18542, closes #28944)
    2021-05-20 01:22:44 -07:00
8 changed files with 140 additions and 19 deletions
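
The extractor conflict fixed in 69a40d3eb0 comes down to the two _VALID_URL patterns in the sohu.py diff below. As a quick sanity check (an illustration only, not part of the patch), the patterns and test URLs copied from that diff keep the two extractors from claiming each other's pages:

    import re

    # _VALID_URL patterns as they appear in the sohu.py diff below
    SOHU_VIDEO = r'https?://(?:my\.)?tv\.sohu\.com/.+?/.+(?:\.html|\.shtml).*?'
    SOHU_PLAYLIST = r'https?://(?:my\.)?tv\.sohu\.com/pl/(?P<pl_id>\d+)$'

    video_url = 'https://tv.sohu.com/v/dXMvMjMyNzk5ODg5Lzc4NjkzNDY0LnNodG1s.html'
    playlist_url = 'https://my.tv.sohu.com/pl/9637148'

    # the video pattern requires an .html/.shtml suffix; the playlist pattern
    # is anchored to /pl/<digits> at the end of the URL
    assert re.match(SOHU_VIDEO, video_url) and not re.match(SOHU_PLAYLIST, video_url)
    assert re.match(SOHU_PLAYLIST, playlist_url) and not re.match(SOHU_VIDEO, playlist_url)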

View File: youtube_dl/extractor/bokecc.py

@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
class BokeCCIE(BokeCCBaseIE):
_IE_DESC = 'CC视频'
IE_DESC = 'CC视频'
_VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
_TESTS = [{

View File: youtube_dl/extractor/cloudy.py

@@ -9,7 +9,7 @@ from ..utils import (
class CloudyIE(InfoExtractor):
_IE_DESC = 'cloudy.ec'
IE_DESC = 'cloudy.ec'
_VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'https://www.cloudy.ec/v/af511e2527aac',

View File: youtube_dl/extractor/common.py

@@ -422,6 +422,8 @@ class InfoExtractor(object):
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
# supply this in public subclasses: used in supported sites list, etc
# IE_DESC = 'short description of IE'
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""

View File: youtube_dl/extractor/extractors.py

@@ -1154,7 +1154,10 @@ from .slideshare import SlideshareIE
from .slideslive import SlidesLiveIE
from .slutload import SlutloadIE
from .snotr import SnotrIE
from .sohu import SohuIE
from .sohu import (
SohuIE,
SohuPlaylistIE,
)
from .sonyliv import SonyLIVIE
from .soundcloud import (
SoundcloudEmbedIE,
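
Registering SohuPlaylistIE in extractors.py is what makes the new class visible to youtube-dl's extractor discovery. A quick check, assuming a checkout with this branch applied and using the existing gen_extractor_classes() helper from youtube_dl/extractor/__init__.py:

    from youtube_dl.extractor import gen_extractor_classes

    # ie_key() is the class name minus the trailing "IE", so the new extractor
    # should be listed as 'SohuPlaylist' alongside the existing 'Sohu'.
    keys = set(ie.ie_key() for ie in gen_extractor_classes())
    assert 'Sohu' in keys and 'SohuPlaylist' in keys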

View File: youtube_dl/extractor/itv.py

@@ -35,15 +35,6 @@ from ..utils import (
class ITVBaseIE(InfoExtractor):
def _search_nextjs_data(self, webpage, video_id, **kw):
transform_source = kw.pop('transform_source', None)
fatal = kw.pop('fatal', True)
return self._parse_json(
self._search_regex(
r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
webpage, 'next.js data', group='js', fatal=fatal, **kw),
video_id, transform_source=transform_source, fatal=fatal)
def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
if errnote is False:
return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
class ITVIE(ITVBaseIE):
_VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
_IE_DESC = 'ITVX'
IE_DESC = 'ITVX'
_WORKING = False
_TESTS = [{
'note': 'Hub URLs redirect to ITVX',
'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
'ext': determine_ext(href, 'vtt'),
})
next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
class ITVBTCCIE(ITVBaseIE):
_VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
_IE_DESC = 'ITV articles: News, British Touring Car Championship'
IE_DESC = 'ITV articles: News, British Touring Car Championship'
_TESTS = [{
'note': 'British Touring Car Championship',
'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',
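
Besides the _IE_DESC fixes and _WORKING = False, the itv.py hunk drops the local _search_nextjs_data shim in favour of the base-class helper, and the call-site default changes from the JSON string '{}' to the dict {}. The removed shim pushed whatever _search_regex returned through _parse_json, so a string fallback came out as a dict; a helper that hands the fallback back untouched (as the base-class version is assumed to do here) needs it to already be a dict, or the traverse_obj call would be given a str. A self-contained toy sketch of that distinction, not youtube-dl's actual API:

    import json

    def old_style(found, default='{}'):
        # shim behaviour: the fallback is parsed like a real match
        return json.loads(found if found is not None else default)

    def new_style(found, default=None):
        # assumed base-class behaviour: the fallback is returned as-is
        return json.loads(found) if found is not None else default

    assert old_style(None) == {}                 # '{}' is parsed into a dict
    assert new_style(None, default={}) == {}     # dict fallback used directly
    assert isinstance(new_style(None, default='{}'), str)  # a string default stays a string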

View File: youtube_dl/extractor/senateisvp.py

@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
IE_NAME = 'senate.gov'
_VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
_TESTS = [{
'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',

View File: youtube_dl/extractor/sohu.py

@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals
import math
import re
from .common import InfoExtractor
@@ -11,12 +12,13 @@ from ..compat import (
from ..utils import (
ExtractorError,
int_or_none,
try_get,
try_get, NO_DEFAULT, HTMLAttributeParser,
)
class SohuIE(InfoExtractor):
_VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
_VALID_URL = r'https?://(?:my\.)?tv\.sohu\.com/.+?/.+(?:\.html|\.shtml).*?'
_VALID_URL_OG_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'
# Sohu videos give different MD5 sums on Travis CI and my machine
_TESTS = [{
@@ -82,6 +84,58 @@ class SohuIE(InfoExtractor):
'params': {
'skip_download': True
}
}, {
'url': 'https://tv.sohu.com/v/dXMvMjMyNzk5ODg5Lzc4NjkzNDY0LnNodG1s.html',
'info_dict': {
'id': '78693464',
'ext': 'mp4',
'title': '【爱范品】第31期MWC见不到的奇葩手机',
}
}, {
'note': 'Video in issue #18542: https://github.com/ytdl-org/youtube-dl/issues/18542',
'url': 'https://tv.sohu.com/v/MjAxNzA3MTMvbjYwMDA1MzM4MS5zaHRtbA==.html',
'info_dict': {
'id': '600053381',
'ext': 'mp4',
'title': '试听:侯旭《孤鸟》',
},
}, {
'note': 'Video in issue #28944: https://github.com/ytdl-org/youtube-dl/issues/28944',
'url': 'https://tv.sohu.com/v/dXMvNTAyMjA5MTMvNjg1NjIyNTYuc2h0bWw=.html?src=pl',
'info_dict': {
'id': '68562256',
'ext': 'mp4',
'title': "Cryin' [HD 1080p] Chris.Botti(feat. Steven Tyler",
},
}, {
'note': 'Multipart video with new address format',
'url': 'https://tv.sohu.com/v/dXMvMjQyNTYyMTYzLzc4OTEwMzM5LnNodG1s.html?src=pl',
'info_dict': {
'id': '78910339',
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
},
'playlist': [{
'info_dict': {
'id': '78910339_part1',
'ext': 'mp4',
'duration': 294,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part2',
'ext': 'mp4',
'duration': 300,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}, {
'info_dict': {
'id': '78910339_part3',
'ext': 'mp4',
'duration': 150,
'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
}
}]
}]
def _real_extract(self, url):
@@ -97,7 +151,12 @@ class SohuIE(InfoExtractor):
'Downloading JSON data for %s' % vid_id,
headers=self.geo_verification_headers())
mobj = re.match(self._VALID_URL, url)
mobj = re.match(self._VALID_URL_OG_URL, url)
if mobj is None:
webpage = self._download_webpage(url, '')
url = self._og_search_url(webpage)
mobj = re.match(self._VALID_URL_OG_URL, url)
video_id = mobj.group('id')
mytv = mobj.group('mytv') is not None
@@ -200,3 +259,65 @@ class SohuIE(InfoExtractor):
}
return info
class SohuPlaylistIE(InfoExtractor):
_VALID_URL = r'https?://(?:my\.)?tv\.sohu\.com/pl/(?P<pl_id>\d+)$'
_URL_IN_PLAYLIST = re.compile(r'<strong>.*?</strong>')
parser = HTMLAttributeParser()
_TESTS = [{
'url': 'https://my.tv.sohu.com/pl/9637148',
'info_dict': {
'title': '【语文大师】初中必背、常考经典古诗词',
'id': '9637148',
},
'playlist_count': 70,
}, {
'url': 'https://my.tv.sohu.com/pl/9700995',
'info_dict': {
'title': '牛人游戏',
'id': '9700995',
},
'playlist_count': 198,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('pl_id')
webpage = self._download_webpage(url, playlist_id)
title = self._get_playlist_title(webpage)
all_pages = self._get_all_pages_in_playlist(webpage, url)
video_list = self._get_video_list(all_pages, playlist_id)
playlist = self.playlist_result(self._entries(video_list), playlist_id, title)
return playlist
def _get_playlist_title(self, webpage):
title = self._search_regex(r'<title>(.*?)</title>', webpage, 'title')
return re.sub(r'(?:^栏目:| -搜狐视频$)', '', title)
def _entries(self, video_list):
entries = []
for mobj in re.finditer(self._URL_IN_PLAYLIST, video_list):
self.parser.feed(mobj.group(0))
url = self.parser.attrs['href']
title = self.parser.attrs['title']
entry = self.url_result(url, SohuIE.ie_key(), '', title)
entries.append(entry)
return entries
def _get_all_pages_in_playlist(self, first_page, url):
pgcount = int(self._search_regex(r'var pgcount = \'(\d+)\'', first_page, 'pgcount'))
pgsize = int(self._search_regex(r'var pgsize = \'(\d+)\'', first_page, 'pgsize'))
return [url + '/index%d.shtml' % (i + 1) for i in range(0, math.ceil(pgcount / pgsize))]
def _get_video_list(self, all_pages, playlist_id):
video_list = ''
for i, url in enumerate(all_pages):
webpage = self._download_webpage(url, "playlist " + playlist_id + " page: %d" % (1 + i))
video_list += self._search_regex(
r'<ul class="uList cfix">(.*?)</ul>',
webpage, 'video list', NO_DEFAULT, True, re.DOTALL)
return video_list
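
Two details of the sohu.py patch are easier to see with concrete values. The new tv.sohu.com/v/... URLs carry no plain numeric id: the path token is a base64-encoded copy of the old-style path, which is why _real_extract now falls back to downloading the page and re-matching _VALID_URL_OG_URL against the og:url (the patch reads og:url rather than decoding the token, but decoding shows what the token holds). The playlist extractor, in turn, pages through index1.shtml, index2.shtml, ... based on the pgcount/pgsize variables scraped from the first page. Both snippets below are illustrations only, and the pgsize value is invented for the example:

    import base64
    import math

    # Path token from the first added test case:
    token = 'dXMvMjMyNzk5ODg5Lzc4NjkzNDY0LnNodG1s'
    print(base64.b64decode(token).decode('utf-8'))
    # -> us/232799889/78693464.shtml (the trailing digits match the test's expected id)

    # Paging as in SohuPlaylistIE._get_all_pages_in_playlist: with pgcount=70
    # items and an assumed pgsize of 30 per page, three pages are fetched.
    pgcount, pgsize = 70, 30
    print(['index%d.shtml' % (i + 1) for i in range(math.ceil(pgcount / pgsize))])
    # -> ['index1.shtml', 'index2.shtml', 'index3.shtml']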

View File: youtube_dl/jsinterp.py

@@ -686,6 +686,8 @@ class JSInterpreter(object):
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
def _dump(self, obj, namespace):
if obj is JS_Undefined:
return 'undefined'
try:
return json.dumps(obj)
except TypeError:
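
The jsinterp.py hunk (the temporary fix for #33102 from 3eb8d22ddb) short-circuits _dump for the interpreter's JS_Undefined sentinel: json.dumps() has no representation for it, so without the guard every undefined value would fall through to the TypeError branch instead of reading as 'undefined' in the interpreter's output. A stripped-down illustration of the pattern, with the sentinel and helper defined locally rather than taken from youtube-dl:

    import json

    class JS_Undefined(object):
        """Stand-in sentinel for a JavaScript `undefined` value."""

    def dump(obj):
        if obj is JS_Undefined:
            return 'undefined'
        try:
            return json.dumps(obj)
        except TypeError:
            return repr(obj)  # placeholder for whatever the real fallback does

    print(dump(JS_Undefined))  # -> undefined
    print(dump({'a': 1}))      # -> {"a": 1}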