Compare commits


No commits in common. "75972e200d033429bf9d34793ad3ffc813042347" and "40d66e07dfef72fbef761ac720b82eb77deb7398" have entirely different histories.

3 changed files with 96 additions and 101 deletions

acast.py

@@ -2,47 +2,21 @@
 from __future__ import unicode_literals
 
 import re
+import functools
 
 from .common import InfoExtractor
+from ..compat import compat_str
 from ..utils import (
     clean_html,
+    float_or_none,
     int_or_none,
-    parse_iso8601,
+    try_get,
+    unified_timestamp,
+    OnDemandPagedList,
 )
 
 
-class ACastBaseIE(InfoExtractor):
-    def _extract_episode(self, episode, show_info):
-        title = episode['title']
-        info = {
-            'id': episode['id'],
-            'display_id': episode.get('episodeUrl'),
-            'url': episode['url'],
-            'title': title,
-            'description': clean_html(episode.get('description') or episode.get('summary')),
-            'thumbnail': episode.get('image'),
-            'timestamp': parse_iso8601(episode.get('publishDate')),
-            'duration': int_or_none(episode.get('duration')),
-            'filesize': int_or_none(episode.get('contentLength')),
-            'season_number': int_or_none(episode.get('season')),
-            'episode': title,
-            'episode_number': int_or_none(episode.get('episode')),
-        }
-        info.update(show_info)
-        return info
-
-    def _extract_show_info(self, show):
-        return {
-            'creator': show.get('author'),
-            'series': show.get('title'),
-        }
-
-    def _call_api(self, path, video_id, query=None):
-        return self._download_json(
-            'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query)
-
-
-class ACastIE(ACastBaseIE):
+class ACastIE(InfoExtractor):
     IE_NAME = 'acast'
     _VALID_URL = r'''(?x)
                     https?://
@@ -54,15 +28,15 @@ class ACastIE(ACastBaseIE):
                     '''
     _TESTS = [{
         'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
-        'md5': 'f5598f3ad1e4776fed12ec1407153e4b',
+        'md5': '16d936099ec5ca2d5869e3a813ee8dc4',
         'info_dict': {
             'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
             'ext': 'mp3',
             'title': '2. Raggarmordet - Röster ur det förflutna',
-            'description': 'md5:a992ae67f4d98f1c0141598f7bebbf67',
+            'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
             'timestamp': 1477346700,
             'upload_date': '20161024',
-            'duration': 2766,
+            'duration': 2766.602563,
             'creator': 'Anton Berg & Martin Johnson',
             'series': 'Spår',
             'episode': '2. Raggarmordet - Röster ur det förflutna',
@@ -71,7 +45,7 @@ class ACastIE(ACastBaseIE):
         'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
         'only_matching': True,
     }, {
-        'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2',
+        'url': 'https://play.acast.com/s/rattegangspodden/s04e09-styckmordet-i-helenelund-del-22',
         'only_matching': True,
     }, {
         'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9',
@@ -80,14 +54,40 @@ class ACastIE(ACastBaseIE):
     def _real_extract(self, url):
         channel, display_id = re.match(self._VALID_URL, url).groups()
-        episode = self._call_api(
-            '%s/episodes/%s' % (channel, display_id),
-            display_id, {'showInfo': 'true'})
-        return self._extract_episode(
-            episode, self._extract_show_info(episode.get('show') or {}))
+        s = self._download_json(
+            'https://feeder.acast.com/api/v1/shows/%s/episodes/%s' % (channel, display_id),
+            display_id)
+        media_url = s['url']
+        if re.search(r'[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}', display_id):
+            episode_url = s.get('episodeUrl')
+            if episode_url:
+                display_id = episode_url
+            else:
+                channel, display_id = re.match(self._VALID_URL, s['link']).groups()
+        cast_data = self._download_json(
+            'https://play-api.acast.com/splash/%s/%s' % (channel, display_id),
+            display_id)['result']
+        e = cast_data['episode']
+        title = e.get('name') or s['title']
+        return {
+            'id': compat_str(e['id']),
+            'display_id': display_id,
+            'url': media_url,
+            'title': title,
+            'description': e.get('summary') or clean_html(e.get('description') or s.get('description')),
+            'thumbnail': e.get('image'),
+            'timestamp': unified_timestamp(e.get('publishingDate') or s.get('publishDate')),
+            'duration': float_or_none(e.get('duration') or s.get('duration')),
+            'filesize': int_or_none(e.get('contentLength')),
+            'creator': try_get(cast_data, lambda x: x['show']['author'], compat_str),
+            'series': try_get(cast_data, lambda x: x['show']['name'], compat_str),
+            'season_number': int_or_none(e.get('seasonNumber')),
+            'episode': title,
+            'episode_number': int_or_none(e.get('episodeNumber')),
+        }
 
 
-class ACastChannelIE(ACastBaseIE):
+class ACastChannelIE(InfoExtractor):
     IE_NAME = 'acast:channel'
     _VALID_URL = r'''(?x)
                     https?://
@@ -102,24 +102,34 @@ class ACastChannelIE(ACastBaseIE):
         'info_dict': {
             'id': '4efc5294-5385-4847-98bd-519799ce5786',
             'title': 'Today in Focus',
-            'description': 'md5:c09ce28c91002ce4ffce71d6504abaae',
+            'description': 'md5:9ba5564de5ce897faeb12963f4537a64',
         },
-        'playlist_mincount': 200,
+        'playlist_mincount': 35,
     }, {
        'url': 'http://play.acast.com/s/ft-banking-weekly',
        'only_matching': True,
    }]
+    _API_BASE_URL = 'https://play.acast.com/api/'
+    _PAGE_SIZE = 10
 
     @classmethod
     def suitable(cls, url):
         return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
 
+    def _fetch_page(self, channel_slug, page):
+        casts = self._download_json(
+            self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
+            channel_slug, note='Download page %d of channel data' % page)
+        for cast in casts:
+            yield self.url_result(
+                'https://play.acast.com/s/%s/%s' % (channel_slug, cast['url']),
+                'ACast', cast['id'])
+
     def _real_extract(self, url):
-        show_slug = self._match_id(url)
-        show = self._call_api(show_slug, show_slug)
-        show_info = self._extract_show_info(show)
-        entries = []
-        for episode in (show.get('episodes') or []):
-            entries.append(self._extract_episode(episode, show_info))
-        return self.playlist_result(
-            entries, show.get('id'), show.get('title'), show.get('description'))
+        channel_slug = self._match_id(url)
+        channel_data = self._download_json(
+            self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
+        entries = OnDemandPagedList(functools.partial(
+            self._fetch_page, channel_slug), self._PAGE_SIZE)
+        return self.playlist_result(entries, compat_str(
+            channel_data['id']), channel_data['name'], channel_data.get('description'))
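Note on the paginated channel extraction introduced above: ACastChannelIE binds the channel slug into a per-page fetch with functools.partial and hands it to youtube-dl's OnDemandPagedList, so episode pages are only requested when the playlist is actually consumed. Below is a minimal standalone sketch of that pattern under stated assumptions: fetch_page and its in-memory fake_api are invented stand-ins for _fetch_page and the real https://play.acast.com/api/ endpoint.

import functools

PAGE_SIZE = 10

def fetch_page(channel_slug, page):
    # Stand-in for ACastChannelIE._fetch_page: pretend the API returns up to
    # PAGE_SIZE episode stubs per page (the real data would come from
    # https://play.acast.com/api/channels/<slug>/acasts?page=<n>).
    fake_api = [{'id': i, 'url': 'episode-%d' % i} for i in range(25)]
    start = page * PAGE_SIZE
    for cast in fake_api[start:start + PAGE_SIZE]:
        yield 'https://play.acast.com/s/%s/%s' % (channel_slug, cast['url'])

# functools.partial binds the channel slug so only the page index is left
# free, which is the shape OnDemandPagedList expects from its page callback.
page_func = functools.partial(fetch_page, 'sparpodcast')

# Pages are produced lazily, one page of results at a time.
for page in range(3):
    for entry_url in page_func(page):
        print(entry_url)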

stitcher.py

@@ -4,28 +4,25 @@ import re
 from .common import InfoExtractor
 from ..utils import (
-    clean_html,
-    ExtractorError,
+    determine_ext,
     int_or_none,
-    str_or_none,
-    try_get,
+    js_to_json,
+    unescapeHTML,
 )
 
 
 class StitcherIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?stitcher\.com/(?:podcast|show)/(?:[^/]+/)+e(?:pisode)?/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
+    _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
     _TESTS = [{
         'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
-        'md5': 'e9635098e0da10b21a0e2b85585530f6',
+        'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
         'info_dict': {
             'id': '40789481',
             'ext': 'mp3',
             'title': 'Machine Learning Mastery and Cancer Clusters',
-            'description': 'md5:547adb4081864be114ae3831b4c2b42f',
+            'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
             'duration': 1604,
             'thumbnail': r're:^https?://.*\.jpg',
-            'upload_date': '20180126',
-            'timestamp': 1516989316,
         },
     }, {
         'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
@@ -41,7 +38,6 @@ class StitcherIE(InfoExtractor):
         'params': {
             'skip_download': True,
         },
-        'skip': 'Page Not Found',
     }, {
         # escaped title
         'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
@@ -49,39 +45,37 @@ class StitcherIE(InfoExtractor):
     }, {
         'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
         'only_matching': True,
-    }, {
-        'url': 'https://www.stitcher.com/show/threedom/episode/circles-on-a-stick-200212584',
-        'only_matching': True,
     }]
 
     def _real_extract(self, url):
-        display_id, audio_id = re.match(self._VALID_URL, url).groups()
-
-        resp = self._download_json(
-            'https://api.prod.stitcher.com/episode/' + audio_id,
-            display_id or audio_id)
-        episode = try_get(resp, lambda x: x['data']['episodes'][0], dict)
-        if not episode:
-            raise ExtractorError(resp['errors'][0]['message'], expected=True)
-
-        title = episode['title'].strip()
-        audio_url = episode['audio_url']
-
-        thumbnail = None
-        show_id = episode.get('show_id')
-        if show_id and episode.get('classic_id') != -1:
-            thumbnail = 'https://stitcher-classic.imgix.net/feedimages/%s.jpg' % show_id
+        mobj = re.match(self._VALID_URL, url)
+        audio_id = mobj.group('id')
+        display_id = mobj.group('display_id') or audio_id
+
+        webpage = self._download_webpage(url, display_id)
+
+        episode = self._parse_json(
+            js_to_json(self._search_regex(
+                r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage, 'episode config')),
+            display_id)['config']['episode']
+
+        title = unescapeHTML(episode['title'])
+        formats = [{
+            'url': episode[episode_key],
+            'ext': determine_ext(episode[episode_key]) or 'mp3',
+            'vcodec': 'none',
+        } for episode_key in ('episodeURL',) if episode.get(episode_key)]
+        description = self._search_regex(
+            r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
+        duration = int_or_none(episode.get('duration'))
+        thumbnail = episode.get('episodeImage')
 
         return {
             'id': audio_id,
             'display_id': display_id,
             'title': title,
-            'description': clean_html(episode.get('html_description') or episode.get('description')),
-            'duration': int_or_none(episode.get('duration')),
+            'description': description,
+            'duration': duration,
             'thumbnail': thumbnail,
-            'url': audio_url,
-            'vcodec': 'none',
-            'timestamp': int_or_none(episode.get('date_created')),
-            'season_number': int_or_none(episode.get('season')),
-            'season_id': str_or_none(episode.get('season_id')),
+            'formats': formats,
         }
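For reference, the rewritten StitcherIE above scrapes episode metadata out of an inlined "var stitcherConfig = {...};" script block instead of calling a JSON API. A rough standalone sketch of that scraping step follows; the HTML fragment is made up for illustration, and plain json.loads stands in for the extractor's js_to_json helper (which additionally tolerates non-strict JavaScript object literals).

import json
import re

# Invented page fragment; a real Stitcher episode page inlines a much larger
# stitcherConfig object.
webpage = '''
<script>
var stitcherConfig = {"config": {"episode": {"title": "Sample episode",
  "episodeURL": "https://example.com/audio.mp3", "duration": 1604}}};
</script>
'''

# Same regex as in the extractor above; json.loads replaces js_to_json() here
# because the sample blob is already valid JSON.
config_json = re.search(
    r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage).group(1)
episode = json.loads(config_json)['config']['episode']
print(episode['title'], episode['episodeURL'], episode['duration'])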

vvvvid.py

@@ -25,6 +25,7 @@ class VVVVIDIE(InfoExtractor):
             'duration': 239,
             'series': '"Perché dovrei guardarlo?" di Dario Moccia',
             'season_id': '437',
+            'season_number': 1,
             'episode': 'Ping Pong',
             'episode_number': 1,
             'episode_id': '3334',
@@ -74,6 +75,7 @@ class VVVVIDIE(InfoExtractor):
     def _extract_common_video_info(self, video_data):
         return {
             'thumbnail': video_data.get('thumbnail'),
+            'episode_number': int_or_none(video_data.get('number')),
             'episode_id': str_or_none(video_data.get('id')),
         }
@@ -143,17 +145,6 @@
             return d
 
-        info = {}
-
-        def metadata_from_url(r_url):
-            if not info and r_url:
-                mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url)
-                if mobj:
-                    info['episode_number'] = int(mobj.group(2))
-                    season_number = mobj.group(1)
-                    if season_number:
-                        info['season_number'] = int(season_number)
-
         for quality in ('_sd', ''):
             embed_code = video_data.get('embed_info' + quality)
             if not embed_code:
@@ -175,12 +166,9 @@
             else:
                 formats.extend(self._extract_wowza_formats(
                     'http://sb.top-ix.org/videomg/_definst_/mp4:%s/playlist.m3u8' % embed_code, video_id))
-            metadata_from_url(embed_code)
 
         self._sort_formats(formats)
 
-        metadata_from_url(video_data.get('thumbnail'))
-        info.update(self._extract_common_video_info(video_data))
+        info = self._extract_common_video_info(video_data)
         info.update({
             'id': video_id,
             'title': title,
@@ -188,6 +176,7 @@
             'duration': int_or_none(video_data.get('length')),
             'series': video_data.get('show_title'),
             'season_id': season_id,
+            'season_number': video_data.get('season_number'),
             'episode': title,
             'view_count': int_or_none(video_data.get('views')),
             'like_count': int_or_none(video_data.get('video_likes')),
@@ -222,6 +211,7 @@ class VVVVIDShowIE(VVVVIDIE):
         entries = []
         for season in (seasons or []):
+            season_number = int_or_none(season.get('number'))
             episodes = season.get('episodes') or []
             for episode in episodes:
                 if episode.get('playable') is False:
@@ -237,6 +227,7 @@
                     'url': '/'.join([base_url, season_id, video_id]),
                     'title': episode.get('title'),
                     'description': episode.get('description'),
+                    'season_number': season_number,
                     'season_id': season_id,
                 })
                 entries.append(info)
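Side note on the vvvvid change above: season_number and episode_number now come from API fields (season.get('number'), video_data.get('number')) instead of being parsed out of the embed URL by the removed metadata_from_url helper. For comparison, here is that removed regex as a small self-contained function; the sample embed-code string is made up, while real values would come from video_data['embed_info'].

import re

def metadata_from_url(r_url):
    # Regex dropped by the diff above: an optional 'S<season>' marker and a
    # mandatory 'Ep<episode>' marker embedded in the media/embed URL.
    info = {}
    mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url or '')
    if mobj:
        info['episode_number'] = int(mobj.group(2))
        if mobj.group(1):
            info['season_number'] = int(mobj.group(1))
    return info

print(metadata_from_url('PercheDovreiGuardarlo_S1Ep01'))
# prints: {'episode_number': 1, 'season_number': 1}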