mirror of https://github.com/ytdl-org/youtube-dl (synced 2025-10-18 22:28:37 +09:00)
Merge branch 'master' into loom
youtube_dl/extractor/ccma.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 from __future__ import unicode_literals

+import datetime
 import re

 from .common import InfoExtractor
@@ -8,8 +9,8 @@ from ..utils import (
     clean_html,
     int_or_none,
     parse_duration,
-    parse_iso8601,
     parse_resolution,
+    try_get,
     url_or_none,
 )

@@ -24,8 +25,9 @@ class CCMAIE(InfoExtractor):
             'ext': 'mp4',
             'title': 'L\'espot de La Marató de TV3',
             'description': 'md5:f12987f320e2f6e988e9908e4fe97765',
-            'timestamp': 1470918540,
-            'upload_date': '20160811',
+            'timestamp': 1478608140,
+            'upload_date': '20161108',
+            'age_limit': 0,
         }
     }, {
         'url': 'http://www.ccma.cat/catradio/alacarta/programa/el-consell-de-savis-analitza-el-derbi/audio/943685/',
@@ -35,8 +37,24 @@ class CCMAIE(InfoExtractor):
             'ext': 'mp3',
             'title': 'El Consell de Savis analitza el derbi',
             'description': 'md5:e2a3648145f3241cb9c6b4b624033e53',
-            'upload_date': '20171205',
-            'timestamp': 1512507300,
+            'upload_date': '20170512',
+            'timestamp': 1494622500,
+            'vcodec': 'none',
+            'categories': ['Esports'],
         }
+    }, {
+        'url': 'http://www.ccma.cat/tv3/alacarta/crims/crims-josep-tallada-lespereu-me-capitol-1/video/6031387/',
+        'md5': 'b43c3d3486f430f3032b5b160d80cbc3',
+        'info_dict': {
+            'id': '6031387',
+            'ext': 'mp4',
+            'title': 'Crims - Josep Talleda, l\'"Espereu-me" (capítol 1)',
+            'description': 'md5:7cbdafb640da9d0d2c0f62bad1e74e60',
+            'timestamp': 1582577700,
+            'upload_date': '20200224',
+            'subtitles': 'mincount:4',
+            'age_limit': 16,
+            'series': 'Crims',
+        }
     }]

@@ -72,17 +90,27 @@ class CCMAIE(InfoExtractor):

         informacio = media['informacio']
         title = informacio['titol']
-        durada = informacio.get('durada', {})
+        durada = informacio.get('durada') or {}
         duration = int_or_none(durada.get('milisegons'), 1000) or parse_duration(durada.get('text'))
-        timestamp = parse_iso8601(informacio.get('data_emissio', {}).get('utc'))
+        tematica = try_get(informacio, lambda x: x['tematica']['text'])

+        timestamp = None
+        data_utc = try_get(informacio, lambda x: x['data_emissio']['utc'])
+        try:
+            timestamp = datetime.datetime.strptime(
+                data_utc, '%Y-%d-%mT%H:%M:%S%z').timestamp()
+        except TypeError:
+            pass
+
         subtitles = {}
-        subtitols = media.get('subtitols', {})
-        if subtitols:
-            sub_url = subtitols.get('url')
+        subtitols = media.get('subtitols') or []
+        if isinstance(subtitols, dict):
+            subtitols = [subtitols]
+        for st in subtitols:
+            sub_url = st.get('url')
             if sub_url:
                 subtitles.setdefault(
-                    subtitols.get('iso') or subtitols.get('text') or 'ca', []).append({
+                    st.get('iso') or st.get('text') or 'ca', []).append({
                         'url': sub_url,
                     })

@@ -97,6 +125,16 @@ class CCMAIE(InfoExtractor):
             'height': int_or_none(imatges.get('alcada')),
         }]

+        age_limit = None
+        codi_etic = try_get(informacio, lambda x: x['codi_etic']['id'])
+        if codi_etic:
+            codi_etic_s = codi_etic.split('_')
+            if len(codi_etic_s) == 2:
+                if codi_etic_s[1] == 'TP':
+                    age_limit = 0
+                else:
+                    age_limit = int_or_none(codi_etic_s[1])
+
         return {
             'id': media_id,
             'title': title,
@@ -106,4 +144,9 @@ class CCMAIE(InfoExtractor):
             'thumbnails': thumbnails,
             'subtitles': subtitles,
             'formats': formats,
+            'age_limit': age_limit,
+            'alt_title': informacio.get('titol_complet'),
+            'episode_number': int_or_none(informacio.get('capitol')),
+            'categories': [tematica] if tematica else None,
+            'series': informacio.get('programa'),
         }
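Note on the ccma.py change above: the new code parses informacio['data_emissio']['utc'] with datetime.strptime and the format '%Y-%d-%mT%H:%M:%S%z', i.e. it reads the second field as the day, which is why the expected upload_date values in the tests move from '20160811' to '20161108' and from '20171205' to '20170512'. A minimal Python 3 sketch of that behaviour; the sample string is hypothetical, not captured from the CCMA API:

import datetime

# Hypothetical value shaped like informacio['data_emissio']['utc'];
# not a real API response.
data_utc = '2016-08-11T14:29:00+0200'

timestamp = None
try:
    # Same format string as the extractor: year, then day, then month.
    timestamp = datetime.datetime.strptime(
        data_utc, '%Y-%d-%mT%H:%M:%S%z').timestamp()
except TypeError:
    # data_utc is None when the field is missing.
    pass

print(timestamp)  # 1478608140.0, i.e. 2016-11-08 12:29 UTC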
youtube_dl/extractor/common.py
@@ -2064,7 +2064,7 @@ class InfoExtractor(object):
             })
         return entries

-    def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}, data=None, headers={}, query={}):
+    def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
         res = self._download_xml_handle(
             mpd_url, video_id,
             note=note or 'Downloading MPD manifest',
@@ -2078,10 +2078,9 @@ class InfoExtractor(object):
         mpd_base_url = base_url(urlh.geturl())

         return self._parse_mpd_formats(
-            mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
-            formats_dict=formats_dict, mpd_url=mpd_url)
+            mpd_doc, mpd_id, mpd_base_url, mpd_url)

-    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
+    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
         """
         Parse formats from MPD manifest.
         References:
@@ -2359,15 +2358,7 @@ class InfoExtractor(object):
                     else:
                         # Assuming direct URL to unfragmented media.
                         f['url'] = base_url
-
-                    # According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
-                    # is not necessarily unique within a Period thus formats with
-                    # the same `format_id` are quite possible. There are numerous examples
-                    # of such manifests (see https://github.com/ytdl-org/youtube-dl/issues/15111,
-                    # https://github.com/ytdl-org/youtube-dl/issues/13919)
-                    full_info = formats_dict.get(representation_id, {}).copy()
-                    full_info.update(f)
-                    formats.append(full_info)
+                    formats.append(f)
                 else:
                     self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
         return formats
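Note on the common.py hunks above: the formats_dict parameter is dropped from _extract_mpd_formats and _parse_mpd_formats, so caller-supplied per-representation metadata is no longer merged into each parsed DASH format; the format dict is appended as-is. A rough sketch of the merge step that goes away (names and values are illustrative, not real manifest data):

# Illustrative only: a parsed format and a caller-supplied formats_dict.
representation_id = 'video=2400000'
f = {'format_id': 'dash-video=2400000', 'url': 'https://example.com/seg.mp4'}
formats_dict = {'video=2400000': {'language': 'en'}}

# Old behaviour: merge extra metadata keyed by the Representation @id.
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
assert full_info['language'] == 'en'

# New behaviour: formats.append(f) keeps the parsed format unchanged, so any
# per-representation metadata must be attached by the caller after extraction.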
youtube_dl/extractor/egghead.py
@@ -12,7 +12,14 @@ from ..utils import (
 )


-class EggheadCourseIE(InfoExtractor):
+class EggheadBaseIE(InfoExtractor):
+    def _call_api(self, path, video_id, resource, fatal=True):
+        return self._download_json(
+            'https://app.egghead.io/api/v1/' + path,
+            video_id, 'Downloading %s JSON' % resource, fatal=fatal)
+
+
+class EggheadCourseIE(EggheadBaseIE):
     IE_DESC = 'egghead.io course'
     IE_NAME = 'egghead:course'
     _VALID_URL = r'https://egghead\.io/courses/(?P<id>[^/?#&]+)'
@@ -28,10 +35,9 @@ class EggheadCourseIE(InfoExtractor):

     def _real_extract(self, url):
         playlist_id = self._match_id(url)
-
-        lessons = self._download_json(
-            'https://egghead.io/api/v1/series/%s/lessons' % playlist_id,
-            playlist_id, 'Downloading course lessons JSON')
+        series_path = 'series/' + playlist_id
+        lessons = self._call_api(
+            series_path + '/lessons', playlist_id, 'course lessons')

         entries = []
         for lesson in lessons:
@@ -44,9 +50,8 @@ class EggheadCourseIE(InfoExtractor):
             entries.append(self.url_result(
                 lesson_url, ie=EggheadLessonIE.ie_key(), video_id=lesson_id))

-        course = self._download_json(
-            'https://egghead.io/api/v1/series/%s' % playlist_id,
-            playlist_id, 'Downloading course JSON', fatal=False) or {}
+        course = self._call_api(
+            series_path, playlist_id, 'course', False) or {}

         playlist_id = course.get('id')
         if playlist_id:
@@ -57,7 +62,7 @@ class EggheadCourseIE(InfoExtractor):
             course.get('description'))


-class EggheadLessonIE(InfoExtractor):
+class EggheadLessonIE(EggheadBaseIE):
     IE_DESC = 'egghead.io lesson'
     IE_NAME = 'egghead:lesson'
     _VALID_URL = r'https://egghead\.io/(?:api/v1/)?lessons/(?P<id>[^/?#&]+)'
@@ -74,7 +79,7 @@ class EggheadLessonIE(InfoExtractor):
             'upload_date': '20161209',
             'duration': 304,
             'view_count': 0,
-            'tags': ['javascript', 'free'],
+            'tags': 'count:2',
         },
         'params': {
             'skip_download': True,
@@ -88,8 +93,8 @@ class EggheadLessonIE(InfoExtractor):
     def _real_extract(self, url):
         display_id = self._match_id(url)

-        lesson = self._download_json(
-            'https://egghead.io/api/v1/lessons/%s' % display_id, display_id)
+        lesson = self._call_api(
+            'lessons/' + display_id, display_id, 'lesson')

         lesson_id = compat_str(lesson['id'])
         title = lesson['title']
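Note on the egghead.py refactor above: both extractors now route requests through EggheadBaseIE._call_api, which prefixes a relative path with https://app.egghead.io/api/v1/ (previously https://egghead.io/api/v1/) and downloads JSON. A small sketch of the paths the two extractors build; the slugs are illustrative, not taken from the diff:

playlist_id = 'some-course-slug'      # illustrative course slug
display_id = 'some-lesson-slug'       # illustrative lesson slug

series_path = 'series/' + playlist_id
paths = [
    series_path + '/lessons',   # EggheadCourseIE: lesson list
    series_path,                # EggheadCourseIE: course metadata (non-fatal)
    'lessons/' + display_id,    # EggheadLessonIE: single lesson
]
for path in paths:
    print('https://app.egghead.io/api/v1/' + path)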
youtube_dl/extractor/extractors.py
@@ -1400,7 +1400,6 @@ from .vidme import (
     VidmeUserIE,
     VidmeUserLikesIE,
 )
-from .vidzi import VidziIE
 from .vier import VierIE, VierVideosIE
 from .viewlift import (
     ViewLiftIE,
youtube_dl/extractor/vidio.py
@@ -4,7 +4,13 @@ from __future__ import unicode_literals
 import re

 from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import (
+    int_or_none,
+    parse_iso8601,
+    str_or_none,
+    strip_or_none,
+    try_get,
+)


 class VidioIE(InfoExtractor):
@@ -21,57 +27,63 @@ class VidioIE(InfoExtractor):
             'thumbnail': r're:^https?://.*\.jpg$',
             'duration': 149,
             'like_count': int,
+            'uploader': 'TWELVE Pic',
+            'timestamp': 1444902800,
+            'upload_date': '20151015',
+            'uploader_id': 'twelvepictures',
+            'channel': 'Cover Music Video',
+            'channel_id': '280236',
+            'view_count': int,
+            'dislike_count': int,
+            'comment_count': int,
+            'tags': 'count:4',
         },
     }, {
         'url': 'https://www.vidio.com/watch/77949-south-korea-test-fires-missile-that-can-strike-all-of-the-north',
         'only_matching': True,
     }]

+    def _real_initialize(self):
+        self._api_key = self._download_json(
+            'https://www.vidio.com/auth', None, data=b'')['api_key']
+
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id, display_id = mobj.group('id', 'display_id')
+        video_id, display_id = re.match(self._VALID_URL, url).groups()
+        data = self._download_json(
+            'https://api.vidio.com/videos/' + video_id, display_id, headers={
+                'Content-Type': 'application/vnd.api+json',
+                'X-API-KEY': self._api_key,
+            })
+        video = data['videos'][0]
+        title = video['title'].strip()

-        webpage = self._download_webpage(url, display_id)
-
-        title = self._og_search_title(webpage)
-
-        m3u8_url, duration, thumbnail = [None] * 3
-
-        clips = self._parse_json(
-            self._html_search_regex(
-                r'data-json-clips\s*=\s*(["\'])(?P<data>\[.+?\])\1',
-                webpage, 'video data', default='[]', group='data'),
-            display_id, fatal=False)
-        if clips:
-            clip = clips[0]
-            m3u8_url = clip.get('sources', [{}])[0].get('file')
-            duration = clip.get('clip_duration')
-            thumbnail = clip.get('image')
-
-        m3u8_url = m3u8_url or self._search_regex(
-            r'data(?:-vjs)?-clip-hls-url=(["\'])(?P<url>(?:(?!\1).)+)\1',
-            webpage, 'hls url', group='url')
         formats = self._extract_m3u8_formats(
-            m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native')
+            data['clips'][0]['hls_url'], display_id, 'mp4', 'm3u8_native')
         self._sort_formats(formats)

-        duration = int_or_none(duration or self._search_regex(
-            r'data-video-duration=(["\'])(?P<duration>\d+)\1', webpage,
-            'duration', fatal=False, group='duration'))
-        thumbnail = thumbnail or self._og_search_thumbnail(webpage)
-
-        like_count = int_or_none(self._search_regex(
-            (r'<span[^>]+data-comment-vote-count=["\'](\d+)',
-             r'<span[^>]+class=["\'].*?\blike(?:__|-)count\b.*?["\'][^>]*>\s*(\d+)'),
-            webpage, 'like count', fatal=False))
+        get_first = lambda x: try_get(data, lambda y: y[x + 's'][0], dict) or {}
+        channel = get_first('channel')
+        user = get_first('user')
+        username = user.get('username')
+        get_count = lambda x: int_or_none(video.get('total_' + x))

         return {
             'id': video_id,
             'display_id': display_id,
             'title': title,
-            'description': self._og_search_description(webpage),
-            'thumbnail': thumbnail,
-            'duration': duration,
-            'like_count': like_count,
+            'description': strip_or_none(video.get('description')),
+            'thumbnail': video.get('image_url_medium'),
+            'duration': int_or_none(video.get('duration')),
+            'like_count': get_count('likes'),
             'formats': formats,
+            'uploader': user.get('name'),
+            'timestamp': parse_iso8601(video.get('created_at')),
+            'uploader_id': username,
+            'uploader_url': 'https://www.vidio.com/@' + username if username else None,
+            'channel': channel.get('name'),
+            'channel_id': str_or_none(channel.get('id')),
+            'view_count': get_count('view_count'),
+            'dislike_count': get_count('dislikes'),
+            'comment_count': get_count('comments'),
+            'tags': video.get('tag_list'),
         }
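Note on the vidio.py rewrite above: webpage scraping is replaced with Vidio's JSON API; an empty POST to /auth returns an api_key, which is then sent as the X-API-KEY header when fetching the video resource, and the HLS manifest URL comes from data['clips'][0]['hls_url']. A standalone sketch of that flow outside youtube-dl, assuming the endpoints accept a bare request as the extractor does (error handling omitted):

import json
from urllib.request import Request, urlopen

video_id = '12345'  # illustrative numeric id, as captured by the URL pattern

# Step 1: empty POST to /auth, mirroring _real_initialize().
api_key = json.load(urlopen(Request(
    'https://www.vidio.com/auth', data=b'')))['api_key']

# Step 2: fetch the video resource with the key, mirroring _real_extract().
data = json.load(urlopen(Request(
    'https://api.vidio.com/videos/' + video_id, headers={
        'Content-Type': 'application/vnd.api+json',
        'X-API-KEY': api_key,
    })))

video = data['videos'][0]
hls_url = data['clips'][0]['hls_url']  # fed to _extract_m3u8_formats in the diff
print(video['title'], hls_url)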
youtube_dl/extractor/vidzi.py (deleted)
@@ -1,68 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    decode_packed_codes,
-    js_to_json,
-    NO_DEFAULT,
-    PACKED_CODES_RE,
-)
-
-
-class VidziIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?vidzi\.(?:tv|cc|si|nu)/(?:embed-)?(?P<id>[0-9a-zA-Z]+)'
-    _TESTS = [{
-        'url': 'http://vidzi.tv/cghql9yq6emu.html',
-        'md5': '4f16c71ca0c8c8635ab6932b5f3f1660',
-        'info_dict': {
-            'id': 'cghql9yq6emu',
-            'ext': 'mp4',
-            'title': 'youtube-dl test video 1\\\\2\'3/4<5\\\\6ä7↭',
-        },
-        'params': {
-            # m3u8 download
-            'skip_download': True,
-        },
-    }, {
-        'url': 'http://vidzi.tv/embed-4z2yb0rzphe9-600x338.html',
-        'only_matching': True,
-    }, {
-        'url': 'http://vidzi.cc/cghql9yq6emu.html',
-        'only_matching': True,
-    }, {
-        'url': 'https://vidzi.si/rph9gztxj1et.html',
-        'only_matching': True,
-    }, {
-        'url': 'http://vidzi.nu/cghql9yq6emu.html',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        video_id = self._match_id(url)
-
-        webpage = self._download_webpage(
-            'http://vidzi.tv/%s' % video_id, video_id)
-        title = self._html_search_regex(
-            r'(?s)<h2 class="video-title">(.*?)</h2>', webpage, 'title')
-
-        codes = [webpage]
-        codes.extend([
-            decode_packed_codes(mobj.group(0)).replace('\\\'', '\'')
-            for mobj in re.finditer(PACKED_CODES_RE, webpage)])
-        for num, code in enumerate(codes, 1):
-            jwplayer_data = self._parse_json(
-                self._search_regex(
-                    r'setup\(([^)]+)\)', code, 'jwplayer data',
-                    default=NO_DEFAULT if num == len(codes) else '{}'),
-                video_id, transform_source=lambda s: js_to_json(
-                    re.sub(r'\s*\+\s*window\[.+?\]', '', s)))
-            if jwplayer_data:
-                break
-
-        info_dict = self._parse_jwplayer_data(jwplayer_data, video_id, require_title=False)
-        info_dict['title'] = title
-
-        return info_dict
File diff suppressed because it is too large.