Mirror of https://github.com/ytdl-org/youtube-dl
Synced 2025-07-17 00:44:15 +09:00
Compare commits: 59ee921051 ... e09620a9a3 (9 commits)

- e09620a9a3
- 3eb8d22ddb
- 4e714f9df1
- c1ea7f5a24
- 94c633d58a
- 6ddf44e2f3
- 89ef8b9b99
- 4d83c072b1
- 7ce02b5bf9
```diff
@@ -32,7 +32,7 @@ class BokeCCBaseIE(InfoExtractor):
 
 
 class BokeCCIE(BokeCCBaseIE):
-    _IE_DESC = 'CC视频'
+    IE_DESC = 'CC视频'
     _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
 
     _TESTS = [{
```
```diff
@@ -9,7 +9,7 @@ from ..utils import (
 
 
 class CloudyIE(InfoExtractor):
-    _IE_DESC = 'cloudy.ec'
+    IE_DESC = 'cloudy.ec'
     _VALID_URL = r'https?://(?:www\.)?cloudy\.ec/(?:v/|embed\.php\?.*?\bid=)(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'https://www.cloudy.ec/v/af511e2527aac',
```
```diff
@@ -422,6 +422,8 @@ class InfoExtractor(object):
     _GEO_COUNTRIES = None
     _GEO_IP_BLOCKS = None
     _WORKING = True
+    # supply this in public subclasses: used in supported sites list, etc
+    # IE_DESC = 'short description of IE'
 
     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""
```
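For context, a minimal sketch of what the comment added above asks of public extractors. `ExampleIE` is hypothetical and not part of this changeset; it only illustrates setting a plain `IE_DESC` attribute, which youtube-dl surfaces in the supported-sites list, whereas the underscore-prefixed `_IE_DESC` spelling being removed elsewhere in this compare has no meaning to the framework.

```python
# Hypothetical extractor (not from this changeset) living in youtube_dl/extractor/:
# a public InfoExtractor subclass supplying IE_DESC as the new comment suggests.
from .common import InfoExtractor


class ExampleIE(InfoExtractor):
    IE_DESC = 'short description shown in the supported-sites list'
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': self._og_search_video_url(webpage),
        }
```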
```diff
@@ -961,6 +961,7 @@ from .pluralsight import (
     PluralsightIE,
     PluralsightCourseIE,
 )
+from .podchaser import PodchaserIE
 from .podomatic import PodomaticIE
 from .pokemon import PokemonIE
 from .polskieradio import (
```
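As a quick check (a sketch, not part of the diff) that this single added import is what registers the new extractor with youtube-dl's extractor list:

```python
# After installing a build containing these commits, the new extractor should
# show up in the registry; IE_NAME defaults to the class name minus the IE suffix.
from youtube_dl.extractor import gen_extractors

print(any(ie.IE_NAME == 'Podchaser' for ie in gen_extractors()))  # expected: True
```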
```diff
@@ -35,15 +35,6 @@ from ..utils import (
 
 class ITVBaseIE(InfoExtractor):
 
-    def _search_nextjs_data(self, webpage, video_id, **kw):
-        transform_source = kw.pop('transform_source', None)
-        fatal = kw.pop('fatal', True)
-        return self._parse_json(
-            self._search_regex(
-                r'''<script\b[^>]+\bid=('|")__NEXT_DATA__\1[^>]*>(?P<js>[^<]+)</script>''',
-                webpage, 'next.js data', group='js', fatal=fatal, **kw),
-            video_id, transform_source=transform_source, fatal=fatal)
-
     def __handle_request_webpage_error(self, err, video_id=None, errnote=None, fatal=True):
         if errnote is False:
             return False
@@ -109,7 +100,9 @@ class ITVBaseIE(InfoExtractor):
 
 class ITVIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?:(?P<w>watch)|hub)/[^/]+/(?(w)[\w-]+/)(?P<id>\w+)'
-    _IE_DESC = 'ITVX'
+    IE_DESC = 'ITVX'
+    _WORKING = False
+
     _TESTS = [{
         'note': 'Hub URLs redirect to ITVX',
         'url': 'https://www.itv.com/hub/liar/2a4547a0012',
@@ -270,7 +263,7 @@ class ITVIE(ITVBaseIE):
                 'ext': determine_ext(href, 'vtt'),
             })
 
-        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default='{}')
+        next_data = self._search_nextjs_data(webpage, video_id, fatal=False, default={})
         video_data.update(traverse_obj(next_data, ('props', 'pageProps', ('title', 'episode')), expected_type=dict)[0] or {})
         title = traverse_obj(video_data, 'headerTitle', 'episodeTitle')
         info = self._og_extract(webpage, require_title=not title)
@@ -323,7 +316,7 @@ class ITVIE(ITVBaseIE):
 
 class ITVBTCCIE(ITVBaseIE):
     _VALID_URL = r'https?://(?:www\.)?itv\.com/(?!(?:watch|hub)/)(?:[^/]+/)+(?P<id>[^/?#&]+)'
-    _IE_DESC = 'ITV articles: News, British Touring Car Championship'
+    IE_DESC = 'ITV articles: News, British Touring Car Championship'
     _TESTS = [{
         'note': 'British Touring Car Championship',
         'url': 'https://www.itv.com/btcc/articles/btcc-2018-all-the-action-from-brands-hatch',
```
youtube_dl/extractor/podchaser.py (new file, 114 lines)

```python
# coding: utf-8
from __future__ import unicode_literals
import json
import re
from ..utils import float_or_none, try_get, str_to_int, unified_timestamp, merge_dicts
from ..compat import compat_str
from .common import InfoExtractor


class PodchaserIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?podchaser\.com/
        (?:
        (?:podcasts/[\w-]+-(?P<podcast_id>[\d]+)))
        (?:/episodes/[\w\-]+-
        (?P<id>[\d]+))?'''

    _TESTS = [{
        'url': 'https://www.podchaser.com/podcasts/cum-town-36924/episodes/ep-285-freeze-me-off-104365585',
        'info_dict': {
            'id': '104365585',
            'title': "Ep. 285 – freeze me off",
            'description': 'cam ahn',
            'thumbnail': r're:^https?://.*\.jpg$',
            'ext': 'mp3',
            'categories': ['Comedy'],
            'tags': ['comedy', 'dark humor'],
            'series': 'Cum Town',
            'duration': 3708,
            'timestamp': 1636531259,
            'upload_date': '20211110'
        }
    }, {
        'url': 'https://www.podchaser.com/podcasts/the-bone-zone-28853',
        'info_dict': {
            'id': '28853',
            'title': 'The Bone Zone',
            'description': 'Podcast by The Bone Zone',
        },
        'playlist_count': 275
    }, {
        'url': 'https://www.podchaser.com/podcasts/sean-carrolls-mindscape-scienc-699349/episodes',
        'info_dict': {
            'id': '699349',
            'title': "Sean Carroll's Mindscape: Science, Society, Philosophy, Culture, Arts, and Ideas",
            'description': 'md5:2cbd8f4749891a84dc8235342e0b5ff1'
        },
        'playlist_mincount': 199
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        audio_id, podcast_id = mobj.group('id'), mobj.group('podcast_id')

        # If one episode
        if audio_id:
            episodes = [self._download_json("https://api.podchaser.com/episodes/%s" % audio_id, audio_id)]

        # Else get every episode available
        else:
            total_episode_count = self._download_json(
                "https://api.podchaser.com/list/episode", podcast_id,
                headers={'Content-Type': 'application/json;charset=utf-8'},
                data=json.dumps({
                    "filters": {"podcast_id": podcast_id}
                }).encode()).get('total')
            episodes = []
            print(total_episode_count)
            for i in range(total_episode_count // 100 + 1):
                curr_episodes_data = self._download_json(
                    "https://api.podchaser.com/list/episode", podcast_id,
                    headers={'Content-Type': 'application/json;charset=utf-8'},
                    data=json.dumps({
                        "start": i * 100,
                        "count": (i + 1) * 100,
                        "sort_order": "SORT_ORDER_RECENT",
                        "filters": {
                            "podcast_id": podcast_id
                        }, "options": {}
                    }).encode())
                curr_episodes = curr_episodes_data.get('entities') or []
                if len(curr_episodes) + len(episodes) <= total_episode_count:
                    episodes.extend(curr_episodes)

        podcast_data = merge_dicts(
            self._download_json("https://api.podchaser.com/podcasts/%s" % podcast_id, audio_id or podcast_id) or {},
            episodes[0].get('podcast') or {} if episodes else {})

        entries = [{
            'id': compat_str(episode.get('id')),
            'title': episode.get('title'),
            'description': episode.get('description'),
            'url': episode.get('audio_url'),
            'thumbnail': episode.get('image_url'),
            'duration': str_to_int(episode.get('length')),
            'timestamp': unified_timestamp(episode.get('air_date')),
            'rating': float_or_none(episode.get('rating')),
            'categories': [
                x.get('text') for x in
                podcast_data.get('categories')
                or try_get(podcast_data, lambda x: x['summary']['categories'], list) or []],
            'tags': [tag.get('text') for tag in podcast_data.get('tags') or []],
            'series': podcast_data.get('title'),
        } for episode in episodes]

        if len(entries) > 1:
            # Return playlist
            return self.playlist_result(
                entries, playlist_id=compat_str(podcast_data.get('id')),
                playlist_title=podcast_data.get('title'),
                playlist_description=podcast_data.get('description'))

        # Return episode
        return entries[0]
```
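A short usage sketch, assuming a build containing these commits is installed. The URL is taken from the extractor's own `_TESTS`; the options shown are standard `YoutubeDL` usage, not specific to this changeset.

```python
import youtube_dl

# Probe metadata only; a bare podcast URL (no /episodes/...-<id>) should come
# back as a playlist per the extractor above.
with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
    info = ydl.extract_info(
        'https://www.podchaser.com/podcasts/the-bone-zone-28853',
        download=False)
    print(info.get('title'), len(info.get('entries') or []))
```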
```diff
@@ -47,7 +47,7 @@ class SenateISVPIE(InfoExtractor):
         ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
         ['arch', '', 'http://ussenate-f.akamaihd.net/']
     ]
-    _IE_NAME = 'senate.gov'
+    IE_NAME = 'senate.gov'
     _VALID_URL = r'https?://(?:www\.)?senate\.gov/isvp/?\?(?P<qs>.+)'
     _TESTS = [{
         'url': 'http://www.senate.gov/isvp/?comm=judiciary&type=live&stt=&filename=judiciary031715&auto_play=false&wmode=transparent&poster=http%3A%2F%2Fwww.judiciary.senate.gov%2Fthemes%2Fjudiciary%2Fimages%2Fvideo-poster-flash-fit.png',
```
```diff
@@ -686,6 +686,8 @@ class JSInterpreter(object):
             raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
 
     def _dump(self, obj, namespace):
+        if obj is JS_Undefined:
+            return 'undefined'
         try:
             return json.dumps(obj)
         except TypeError:
```
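A standalone sketch of the behaviour the two added lines give `_dump`: the JS `undefined` sentinel is rendered as the literal text `undefined` instead of falling into the `TypeError` branch. `JS_Undefined` here is a simplified stand-in for the interpreter's sentinel, and the `repr()` fallback is an assumption for illustration, since the real `except` body lies outside the hunk above.

```python
import json


class JS_Undefined(object):  # simplified stand-in for youtube_dl.jsinterp.JS_Undefined
    pass


def dump(obj):
    if obj is JS_Undefined:
        return 'undefined'
    try:
        return json.dumps(obj)
    except TypeError:
        return repr(obj)  # assumed fallback; not taken from the diff


print(dump(JS_Undefined))  # undefined
print(dump({'a': 1}))      # {"a": 1}
print(dump(set()))         # set() via the assumed repr fallback
```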