Mirror of https://github.com/ytdl-org/youtube-dl
Synced 2025-01-10 21:40:11 +09:00

Compare commits: bdedb17398...965107b6f2 (8 commits)

Commits:
  965107b6f2
  c5098961b0
  dbc08fba83
  71223bff39
  a53b4004cf
  ac5b267afe
  2a0d9305f4
  74bb98431e
test/test_jsinterp.py
@@ -425,6 +425,34 @@ class TestJSInterpreter(unittest.TestCase):
         self._test(jsi, [''], args=['', '-'])
         self._test(jsi, [], args=['', ''])

+    def test_slice(self):
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', [])
+        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7])
+        self._test('function f(){return "012345678".slice()}', '012345678')
+        self._test('function f(){return "012345678".slice(0)}', '012345678')
+        self._test('function f(){return "012345678".slice(5)}', '5678')
+        self._test('function f(){return "012345678".slice(99)}', '')
+        self._test('function f(){return "012345678".slice(-2)}', '78')
+        self._test('function f(){return "012345678".slice(-99)}', '012345678')
+        self._test('function f(){return "012345678".slice(0, 0)}', '')
+        self._test('function f(){return "012345678".slice(1, 0)}', '')
+        self._test('function f(){return "012345678".slice(0, 1)}', '0')
+        self._test('function f(){return "012345678".slice(3, 6)}', '345')
+        self._test('function f(){return "012345678".slice(1, -1)}', '1234567')
+        self._test('function f(){return "012345678".slice(-1, 1)}', '')
+        self._test('function f(){return "012345678".slice(-3, -1)}', '67')
+

 if __name__ == '__main__':
     unittest.main()
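
For reference, a minimal standalone sketch (not part of the diff) of the mapping these tests rely on: JavaScript's Array/String .slice(m, n) behaves like Python slicing via slice(m, n), including negative and out-of-range indices.

def js_slice(obj, start=None, end=None):
    # JS .slice() clamps out-of-range indices and accepts negatives,
    # exactly like Python's built-in slice()
    return obj[slice(start, end)]


assert js_slice([0, 1, 2, 3], -2) == [2, 3]
assert js_slice('012345678', 1, -1) == '1234567'
assert js_slice('012345678', -1, 1) == ''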

test/test_youtube_signature.py
@@ -174,6 +174,14 @@ _NSIG_TESTS = [
         'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js',
         '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA',
     ),
+    (
+        'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js',
+        '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw',
+    ),
+    (
+        'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js',
+        'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw',
+    ),
 ]
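
Each _NSIG_TESTS entry is (player URL, sample n value, expected transformed n). The transformed value is what ends up replacing the n query parameter of the media URL so playback is not throttled. A rough, self-contained sketch of that substitution (the helper and the example URL below are hypothetical, not youtube-dl code):

from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit


def replace_n_param(video_url, transformed_n):
    # swap the 'n' query parameter for its transformed value
    parts = urlsplit(video_url)
    query = parse_qs(parts.query)
    query['n'] = [transformed_n]
    return urlunsplit(parts._replace(query=urlencode(query, doseq=True)))


print(replace_n_param(
    'https://example.googlevideo.com/videoplayback?n=7X-he4jjvMx7BCX&itag=18',
    'sViSydX8IHtdWA'))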

youtube_dl/extractor/extractors.py
@@ -1238,10 +1238,7 @@ from .svt import (
 from .swrmediathek import SWRMediathekIE
 from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
-from .tagesschau import (
-    TagesschauPlayerIE,
-    TagesschauIE,
-)
+from .tagesschau import TagesschauIE
 from .tass import TassIE
 from .tbs import TBSIE
 from .tdslifeway import TDSLifewayIE

youtube_dl/extractor/tagesschau.py
@@ -5,127 +5,54 @@ import re

 from .common import InfoExtractor
 from ..utils import (
+    bool_or_none,
+    compat_str,
     determine_ext,
-    js_to_json,
-    parse_iso8601,
+    ExtractorError,
+    parse_duration,
     parse_filesize,
+    remove_quotes,
+    strip_or_none,
+    try_get,
+    unescapeHTML,
+    unified_timestamp,
+    url_or_none,
 )


-class TagesschauPlayerIE(InfoExtractor):
-    IE_NAME = 'tagesschau:player'
-    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'
-
-    _TESTS = [{
-        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
-        'md5': '8d09548d5c15debad38bee3a4d15ca21',
-        'info_dict': {
-            'id': '179517',
-            'ext': 'mp4',
-            'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD',
-            'thumbnail': r're:^https?:.*\.jpg$',
-            'formats': 'mincount:6',
-        },
-    }, {
-        'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html',
-        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
-        'info_dict': {
-            'id': '29417',
-            'ext': 'mp3',
-            'title': 'Trabi - Bye, bye Rennpappe',
-            'thumbnail': r're:^https?:.*\.jpg$',
-            'formats': 'mincount:2',
-        },
-    }, {
-        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html',
-        'only_matching': True,
-    }]
-
-    _FORMATS = {
-        'xs': {'quality': 0},
-        's': {'width': 320, 'height': 180, 'quality': 1},
-        'm': {'width': 512, 'height': 288, 'quality': 2},
-        'l': {'width': 960, 'height': 540, 'quality': 3},
-        'xl': {'width': 1280, 'height': 720, 'quality': 4},
-        'xxl': {'quality': 5},
-    }
-
-    def _extract_via_api(self, kind, video_id):
-        info = self._download_json(
-            'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id),
-            video_id)
-        title = info['headline']
-        formats = []
-        for media in info['mediadata']:
-            for format_id, format_url in media.items():
-                if determine_ext(format_url) == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        format_url, video_id, 'mp4',
-                        entry_protocol='m3u8_native', m3u8_id='hls'))
-                else:
-                    formats.append({
-                        'url': format_url,
-                        'format_id': format_id,
-                        'vcodec': 'none' if kind == 'audio' else None,
-                    })
-        self._sort_formats(formats)
-        timestamp = parse_iso8601(info.get('date'))
-        return {
-            'id': video_id,
-            'title': title,
-            'timestamp': timestamp,
-            'formats': formats,
-        }
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
-        # kind = mobj.group('kind').lower()
-        # if kind == 'video':
-        #     return self._extract_via_api(kind, video_id)
-
-        # JSON api does not provide some audio formats (e.g. ogg) thus
-        # extracting audio via webpage
-
-        webpage = self._download_webpage(url, video_id)
-
-        title = self._og_search_title(webpage).strip()
-        formats = []
-
-        for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage):
-            media = self._parse_json(js_to_json(media_json), video_id, fatal=False)
-            if not media:
-                continue
-            src = media.get('src')
-            if not src:
-                return
-            quality = media.get('quality')
-            kind = media.get('type', '').split('/')[0]
-            ext = determine_ext(src)
-            f = {
-                'url': src,
-                'format_id': '%s_%s' % (quality, ext) if quality else ext,
-                'ext': ext,
-                'vcodec': 'none' if kind == 'audio' else None,
-            }
-            f.update(self._FORMATS.get(quality, {}))
-            formats.append(f)
-
-        self._sort_formats(formats)
-
-        thumbnail = self._og_search_thumbnail(webpage)
-
-        return {
-            'id': video_id,
-            'title': title,
-            'thumbnail': thumbnail,
-            'formats': formats,
-        }
+# Note that there are tagesschau.de/api and tagesschau.de/api2 endpoints, which
+# may be useful, but not all pages and not all formats can be easily accessed
+# by API.
+
+_FORMATS = {
+    'xs': {'quality': 0},
+    's': {'width': 320, 'height': 180, 'quality': 1},
+    'sm': {'width': 480, 'height': 270, 'quality': 1},
+    'm': {'width': 512, 'height': 288, 'quality': 2},
+    'ml': {'width': 640, 'height': 360, 'quality': 2},
+    'l': {'width': 960, 'height': 540, 'quality': 3},
+    'xl': {'width': 1280, 'height': 720, 'quality': 4},
+    'xxl': {'quality': 5},
+    'mp3': {'abr': 64, 'vcodec': 'none', 'quality': 0},
+    'hi.mp3': {'abr': 192, 'vcodec': 'none', 'quality': 1},
+}
+
+_FIELD_PREFERENCE = ('height', 'width', 'vbr', 'abr')
+
+
+def _normalize_format_id(format_id, ext):
+    if format_id:
+        m = re.match(r"web([^.]+)\.[^.]+$", format_id)
+        if m:
+            format_id = m.group(1)
+        if format_id == 'hi' and ext:
+            # high-quality audio files
+            format_id = '%s.%s' % (format_id, ext)
+    return format_id


 class TagesschauIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'
+    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de(?:/?|/(?P<path>[^?#]+?(?:/(?P<id>[^/#?]+?(?:-?[0-9]+))(?:~_?[^/#?]+?)?(?:\.html)?)?))(?:[#?].*)?$'

     _TESTS = [{
         'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
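
A quick standalone check of the new module-level _normalize_format_id (the raw IDs below are made-up examples): trailing "web<key>.<codec>" IDs collapse to plain _FORMATS keys, and high-bitrate audio keeps its extension so it maps to the 'hi.mp3' entry.

import re


def normalize_format_id(format_id, ext):
    # mirrors _normalize_format_id() from the hunk above
    if format_id:
        m = re.match(r'web([^.]+)\.[^.]+$', format_id)
        if m:
            format_id = m.group(1)
        if format_id == 'hi' and ext:
            format_id = '%s.%s' % (format_id, ext)
    return format_id


print(normalize_format_id('webxl.h264', 'mp4'))  # -> xl
print(normalize_format_id('webhi.mp3', 'mp3'))   # -> hi.mp3
print(normalize_format_id('mp3', 'mp3'))         # -> mp3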

@@ -134,48 +61,111 @@ class TagesschauIE(InfoExtractor):
             'id': 'video-102143',
             'ext': 'mp4',
             'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
-            'description': '18.07.2015 20:10 Uhr',
+            'description': '18.07.2015 20:10',
             'thumbnail': r're:^https?:.*\.jpg$',
+            'upload_date': '20150718',
+            'duration': 138,
+            'timestamp': 1437250200,
+            'uploader': 'ARD',
+        },
+    }, {
+        # with player
+        'url': 'http://www.tagesschau.de/multimedia/video/video-102143~player.html',
+        'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6',
+        'info_dict': {
+            'id': 'video-102143',
+            'ext': 'mp4',
+            'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
+            'description': '18.07.2015 20:10',
+            'thumbnail': r're:^https?:.*\.jpg$',
+            'upload_date': '20150718',
+            'timestamp': 1437250200,
+            'uploader': 'ARD',
         },
     }, {
         'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
         'md5': '3c54c1f6243d279b706bde660ceec633',
         'info_dict': {
-            'id': 'ts-5727',
+            'id': 'video-45741',
             'ext': 'mp4',
-            'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
-            'description': 'md5:695c01bfd98b7e313c501386327aea59',
+            'title': 'tagesschau 20 Uhr - 04.12.14 20:00',
+            'description': '04.12.2014 20:00',
             'thumbnail': r're:^https?:.*\.jpg$',
+            'uploader': 'tagesschau',
+            'timestamp': 1417723200,
+            'upload_date': '20141204',
+            'subtitles': dict,
         },
     }, {
         # exclusive audio
-        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html',
-        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
+        'url': 'https://www.tagesschau.de/multimedia/audio/audio-103205.html',
+        'md5': 'c8e7b72aeca664031db0ba198519b09a',
         'info_dict': {
-            'id': 'audio-29417',
+            'id': 'audio-103205',
             'ext': 'mp3',
-            'title': 'Trabi - Bye, bye Rennpappe',
-            'description': 'md5:8687dda862cbbe2cfb2df09b56341317',
+            'title': 'Die USA: ein Impfwunder?',
+            'description': '06.03.2021 06:07',
+            'timestamp': 1615010820,
+            'upload_date': '20210306',
             'thumbnail': r're:^https?:.*\.jpg$',
+            'uploader': 'Jule Käppel, ARD Washington',
+            'creator': 'ARD',
+            'channel': 'tagesschau.de',
+            'is_live': False,
         },
     }, {
         # audio in article
-        'url': 'http://www.tagesschau.de/inland/bnd-303.html',
-        'md5': 'e0916c623e85fc1d2b26b78f299d3958',
+        'url': 'https://www.tagesschau.de/ausland/amerika/biden-versoehnung-101.html',
+        'md5': '4c46b0283719d97aa976037e1ecb7b73',
         'info_dict': {
-            'id': 'bnd-303',
+            'id': 'audio-103429',
+            'title': 'Bidens Versöhnungswerk kommt nicht voran',
             'ext': 'mp3',
-            'title': 'Viele Baustellen für neuen BND-Chef',
-            'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4',
-            'thumbnail': r're:^https?:.*\.jpg$',
+            'timestamp': 1615444860,
+            'uploader': 'Sebastian Hesse, ARD Washington',
+            'description': '11.03.2021 06:41',
+            'upload_date': '20210311',
+            'creator': 'ARD',
+            'channel': 'tagesschau.de',
         },
     }, {
-        'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html',
+        # playlist in article
+        'url': 'https://www.tagesschau.de/ausland/impfungen-coronavirus-usa-101.html',
         'info_dict': {
-            'id': 'afd-parteitag-135',
-            'title': 'Möchtegern-Underdog mit Machtanspruch',
+            'id': 'impfungen-coronavirus-usa-101',
+            'title': 'Kampf gegen das Coronavirus: Impfwunder USA?',
         },
         'playlist_count': 2,
+    }, {
+        # article without videos
+        'url': 'https://www.tagesschau.de/wirtschaft/ukraine-russland-kredit-101.html',
+        'info_dict': {
+            'id': 'ukraine-russland-kredit-101',
+            'title': 'Ukraine stoppt Rückzahlung russischer Kredite',
+        },
+        'playlist_count': 0,
+    }, {
+        # legacy website
+        'url': 'https://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
+        'md5': 'ab6d190c8147560d6429a467566affe6',
+        'info_dict': {
+            'id': 'video-102303',
+            'ext': 'mp4',
+            'title': 'Bericht aus Berlin: Sommerinterview mit Angela Merkel',
+            'description': '19.07.2015 19:05 Uhr',
+        }
+    }, {
+        # handling of generic title
+        'url': 'https://www.tagesschau.de/multimedia/video/video-835681.html',
+        'info_dict': {
+            'id': 'video-835681',
+            'ext': 'mp4',
+            'title': 'Tagesschau in 100 Sekunden - 13.03.21 17:35',
+            'upload_date': '20210313',
+            'uploader': 'Tagesschau24',
+            'description': '13.03.2021 17:35',
+            'timestamp': 1615656900,
+        }
     }, {
         'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
         'only_matching': True,

@@ -204,13 +194,176 @@ class TagesschauIE(InfoExtractor):
         # playlist article with collapsing sections
         'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html',
         'only_matching': True,
+    }, {
+        'url': 'https://www.tagesschau.de/',
+        'only_matching': True,
     }]

-    @classmethod
-    def suitable(cls, url):
-        return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url)
+    def _video_id_from_url(self, url):
+        if url:
+            mobj = re.match(self._VALID_URL, url)
+            if mobj:
+                return mobj.group('id')
+
+    def _handle_generic_titles(self, title, pixelConf):
+        if strip_or_none(title, '').lower() not in ('ganze sendung', '100 sekunden',
+                                                    'tagesschau in 100 sekunden'):
+            return title
+        # otherwise find more meaningful title than the generic Ganze Sendung/100 Sekunden
+        for item in pixelConf:
+            if item.get('tracker') == 'AGFdebug':
+                s = try_get(item, lambda x: x['clipData']['program'], compat_str)
+                if s:
+                    # extract date and time
+                    parts = (try_get(item, lambda x: x['clipData']['title'], compat_str)
+                             or '').split('_')[-2:]
+                    if len(parts) == 2:
+                        title = "%s - %s" % (s, ' '.join(parts))
+                    else:
+                        title = s
+                break
+        return title
+
+    def _extract_from_player(self, player_div, video_id_fallback, title_fallback):
+        player_data = unescapeHTML(self._search_regex(
+            r'data-config=(?P<quote>["\'])(?P<data>[^"\']*)(?P=quote)',
+            player_div, 'data-config', group='data'))
+
+        meta = self._parse_json(player_data, video_id_fallback, fatal=False)
+        mc = try_get(meta, lambda x: x['mc'], dict)
+        if not mc:
+            # fallback if parsing json fails, as tagesschau API sometimes sends
+            # invalid json
+            stream_hls = remove_quotes(self._search_regex(
+                r'"http[^"]+?\.m3u8"', player_data, '.m3u8-url', group=0))
+            formats = self._extract_m3u8_formats(stream_hls, video_id_fallback,
+                                                 ext='mp4', m3u8_id='hls',
+                                                 entry_protocol='m3u8_native')
+            self._sort_formats(formats, field_preference=_FIELD_PREFERENCE)
+            return {
+                'id': video_id_fallback,
+                'title': title_fallback,
+                'formats': formats,
+            }
+
+        # this url is more permanent than the original link
+        webpage_url = url_or_none(try_get(mc, lambda x: x['_sharing']['link']))
+
+        video_id = self._video_id_from_url(webpage_url)
+        duration = None
+        pixelConf = try_get(meta, lambda x: x['pc']['_pixelConfig'], list) or []
+        for item in pixelConf:
+            video_id = (video_id or try_get(item,
+                                            [lambda x: x['playerID'],
+                                             lambda x: x['clipData']['playerId']], compat_str))
+            duration = (duration or parse_duration(try_get(item,
+                                                           [lambda x: x['clipData']['length'],
+                                                            lambda x: x['clipData']['duration']])))
+        if not video_id:
+            video_id = video_id_fallback
+
+        formats = []
+        for elem in mc.get('_mediaArray', []):
+            for d in elem.get('_mediaStreamArray', []):
+                link_url = url_or_none(d.get('_stream'))
+                if not link_url:
+                    continue
+                ext = determine_ext(link_url)
+                if ext == "m3u8":
+                    formats.extend(self._extract_m3u8_formats(
+                        link_url, video_id_fallback, ext='mp4',
+                        entry_protocol='m3u8_native',
+                        m3u8_id='hls', fatal=False))
+                elif ext == "f4m":
+                    formats.extend(self._extract_f4m_formats(
+                        link_url, video_id_fallback, f4m_id='hds', fatal=False))
+                else:
+                    format_id = _normalize_format_id(self._search_regex(
+                        r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
+                        default=ext, fatal=False),
+                        ext)
+                    fmt = {
+                        'format_id': format_id,
+                        'url': link_url,
+                        'format_name': ext,
+                    }
+                    fmt.update(_FORMATS.get(format_id, {}))
+                    formats.append(fmt)
+        self._sort_formats(formats, field_preference=_FIELD_PREFERENCE)
+        if not formats:
+            raise ExtractorError("could not extract formats from json")
+
+        # note that mc['_title'] can be very different from actual title,
+        # such as an image description in case of audio files
+        title = (try_get(mc, [lambda x: x['_info']['clipTitle'],
+                              lambda x: x['_download']['title']], compat_str)
+                 or title_fallback)
+        title = self._handle_generic_titles(title, pixelConf)
+
+        sub_url = url_or_none(mc.get('_subtitleUrl'))
+        subs = {'de': [{'ext': 'ttml', 'url': sub_url}]} if sub_url else None
+
+        images = try_get(mc, lambda x: x['_previewImage'], dict) or {}
+        thumbnails = [{
+            'url': url_or_none('https://www.tagesschau.de/%s'
+                               % (images[format_id],)),
+            'preference': _FORMATS.get(format_id, {}).get('quality'),
+        } for format_id in images] or None
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'webpage_url': webpage_url,
+            'subtitles': subs,
+            'thumbnails': thumbnails,
+            'duration': duration,
+            'timestamp': unified_timestamp(try_get(mc, [lambda x: x['_download']['date'],
+                                                        lambda x: x['_info']['clipDate']])),
+            'is_live': bool_or_none(mc.get('_isLive')),
+            'channel': try_get(mc, lambda x: x['_download']['channel'], compat_str),
+            'uploader': try_get(mc, lambda x: x['_info']['channelTitle'], compat_str),
+            'creator': try_get(mc, lambda x: x['_info']['clipContentSrc'], compat_str),
+            'description': try_get(mc, lambda x: x['_info']['clipDate'], compat_str),
+        }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id') or mobj.group('path')
+        display_id = video_id.lstrip('-') if video_id else 'tagesschau.de'
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = (self._og_search_title(webpage, default=None)
+                 or self._html_search_regex(
+                     [r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
+                      r'<title[^>]*>(.+?)</title>'],
+                     webpage, 'title'))
+
+        webpage_type = self._og_search_property('type', webpage, default=None)
+
+        player_pattern = r'<div[^>]+data-ts_component=(?P<quote>["\'])ts-mediaplayer(?P=quote)[^>]*>'
+        players = [m.group(0) for m in re.finditer(player_pattern, webpage)]
+        if not players:
+            # assume old website format
+            return self._legacy_extract(webpage, display_id, title, webpage_type)
+        elif (len(players) > 1
+                and not self._downloader.params.get('noplaylist')
+                and (webpage_type == 'website' or not mobj.group('id'))):
+            # article or playlist
+            entries = []
+            seen = set()
+            for player in players:
+                entry = self._extract_from_player(player, video_id, title)
+                if entry['id'] not in seen:
+                    entries.append(entry)
+                    seen.add(entry['id'])
+            return self.playlist_result(entries, display_id, title)
+        else:
+            # single video/audio
+            return self._extract_from_player(players[0], video_id, title)

-    def _extract_formats(self, download_text, media_kind):
+    def _legacy_extract_formats(self, download_text, media_kind):
         links = re.finditer(
             r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
             download_text)
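
A self-contained illustration (the HTML snippet is invented) of the player discovery that _real_extract and _extract_from_player above build on: each div with data-ts_component="ts-mediaplayer" carries its player configuration as HTML-escaped JSON in the data-config attribute.

import json
import re

webpage = ('<div class="copytext__video" data-ts_component="ts-mediaplayer" '
           'data-config="{&quot;mc&quot;: {&quot;_title&quot;: &quot;Beispiel&quot;}}"></div>')

player_pattern = r'<div[^>]+data-ts_component=(?P<q>["\'])ts-mediaplayer(?P=q)[^>]*>'
for player_div in (m.group(0) for m in re.finditer(player_pattern, webpage)):
    raw = re.search(
        r'data-config=(?P<q>["\'])(?P<data>[^"\']*)(?P=q)', player_div).group('data')
    # the attribute value is HTML-escaped, so unescape before parsing
    meta = json.loads(raw.replace('&quot;', '"').replace('&amp;', '&'))
    print(meta['mc']['_title'])  # -> Beispiel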

@@ -219,9 +372,10 @@ class TagesschauIE(InfoExtractor):
             link_url = l.group('url')
             if not link_url:
                 continue
-            format_id = self._search_regex(
+            ext = determine_ext(link_url)
+            format_id = _normalize_format_id(self._search_regex(
                 r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
-                default=determine_ext(link_url))
+                default=ext), ext)
             format = {
                 'format_id': format_id,
                 'url': l.group('url'),

@@ -262,39 +416,30 @@ class TagesschauIE(InfoExtractor):
         self._sort_formats(formats)
         return formats

-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id') or mobj.group('path')
-        display_id = video_id.lstrip('-')
-
-        webpage = self._download_webpage(url, display_id)
-
-        title = self._html_search_regex(
-            r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
-            webpage, 'title', default=None) or self._og_search_title(webpage)
-
-        DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'
-
-        webpage_type = self._og_search_property('type', webpage, default=None)
+    # Some old pages still use the old format, so we keep the previous
+    # extractor for now.
+    def _legacy_extract(self, webpage, display_id, title, webpage_type):
+        DOWNLOAD_REGEX = r'<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'
+
         if webpage_type == 'website':  # Article
             entries = []
             for num, (entry_title, media_kind, download_text) in enumerate(re.findall(
-                    r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
-                    webpage), 1):
+                    r'<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
+                    webpage, flags=re.S), 1):
                 entries.append({
                     'id': '%s-%d' % (display_id, num),
                     'title': '%s' % entry_title,
-                    'formats': self._extract_formats(download_text, media_kind),
+                    'formats': self._legacy_extract_formats(download_text, media_kind),
                 })
-            if len(entries) > 1:
+            if len(entries) != 1:
                 return self.playlist_result(entries, display_id, title)
             formats = entries[0]['formats']
         else:  # Assume single video
             download_text = self._search_regex(
-                DOWNLOAD_REGEX, webpage, 'download links', group='links')
+                DOWNLOAD_REGEX, webpage, 'download links', flags=re.S, group='links')
             media_kind = self._search_regex(
-                DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind')
-            formats = self._extract_formats(download_text, media_kind)
+                DOWNLOAD_REGEX, webpage, 'media kind', default='Video', flags=re.S, group='kind')
+            formats = self._legacy_extract_formats(download_text, media_kind)
         thumbnail = self._og_search_thumbnail(webpage)
         description = self._html_search_regex(
             r'(?s)<p class="teasertext">(.*?)</p>',
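
A standalone demo (HTML and URLs below are made up) of the legacy download-button parsing kept in _legacy_extract/_legacy_extract_formats; passing flags=re.S plays the role of the old inline (?s) so the lazy groups can span the newlines between the intro paragraph and the buttons.

import re

webpage = '''<p>Wir bieten dieses Video in folgenden Formaten zum Download an:</p>
<div class="controls">
<div class="button" title="Video niedrige Qualität"><a href="https://example.invalid/clip.webs.h264.mp4">S</a></div>
<div class="button" title="Video hohe Qualität"><a href="https://example.invalid/clip.webl.h264.mp4">L</a></div>
</div>
<p></p>'''

DOWNLOAD_REGEX = (r'<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>'
                  r'\s*<div class="controls">(?P<links>.*?)</div>\s*<p>')
links = re.search(DOWNLOAD_REGEX, webpage, flags=re.S).group('links')
for title, url, name in re.findall(
        r'<div class="button" title="([^"]*)"><a href="([^"]+)">(.+?)</a></div>', links):
    print(name, url)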

youtube_dl/extractor/youtube.py
@@ -1659,17 +1659,46 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
     def _extract_n_function_name(self, jscode):
         func_name, idx = self._search_regex(
             # new: (b=String.fromCharCode(110),c=a.get(b))&&c=nfunc[idx](c)
-            # or: (b="nn"[+a.D],c=a.get(b))&&(c=nfunc[idx](c)s
-            # old: .get("n"))&&(b=nfunc[idx](b)
-            # older: .get("n"))&&(b=nfunc(b)
+            # or: (b="nn"[+a.D],c=a.get(b))&&(c=nfunc[idx](c)
+            # or: (PL(a),b=a.j.n||null)&&(b=nfunc[idx](b)
+            # or: (b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
+            # old: (b=a.get("n"))&&(b=nfunc[idx](b)
+            # older: (b=a.get("n"))&&(b=nfunc(b)
             r'''(?x)
-                (?:\(\s*(?P<b>[a-z])\s*=\s*(?:
-                    String\s*\.\s*fromCharCode\s*\(\s*110\s*\)|
-                    "n+"\[\s*\+?s*[\w$.]+\s*]
-                )\s*,(?P<c>[a-z])\s*=\s*[a-z]\s*)?
-                \.\s*get\s*\(\s*(?(b)(?P=b)|"n{1,2}")(?:\s*\)){2}\s*&&\s*\(\s*(?(c)(?P=c)|b)\s*=\s*
-                (?P<nfunc>[a-zA-Z_$][\w$]*)(?:\s*\[(?P<idx>\d+)\])?\s*\(\s*[\w$]+\s*\)
-            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'))
+                \((?:[\w$()\s]+,)*?\s*      # (
+                (?P<b>[a-z])\s*=\s*         # b=
+                (?:
+                    (?:                     # expect ,c=a.get(b) (etc)
+                        String\s*\.\s*fromCharCode\s*\(\s*110\s*\)|
+                        "n+"\[\s*\+?s*[\w$.]+\s*]
+                    )\s*(?:,[\w$()\s]+(?=,))*|
+                    (?P<old>[\w$]+)         # a (old[er])
+                )\s*
+                (?(old)
+                    # b.get("n")
+                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
+                    (?:\.\s*n|\[\s*"n"\s*]|\.\s*get\s*\(\s*"n"\s*\))
+                    |                       # ,c=a.get(b)
+                    ,\s*(?P<c>[a-z])\s*=\s*[a-z]\s*
+                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
+                    (?:\[\s*(?P=b)\s*]|\.\s*get\s*\(\s*(?P=b)\s*\))
+                )
+                # interstitial junk
+                \s*(?:\|\|\s*null\s*)?(?:\)\s*)?&&\s*(?:\(\s*)?
+                (?(c)(?P=c)|(?P=b))\s*=\s*  # [c|b]=
+                # nfunc|nfunc[idx]
+                (?P<nfunc>[a-zA-Z_$][\w$]*)(?:\s*\[(?P<idx>\d+)\])?\s*\(\s*[\w$]+\s*\)
+            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'),
+            default=(None, None))
+        # thx bashonly: yt-dlp/yt-dlp/pull/10611
+        if not func_name:
+            self.report_warning('Falling back to generic n function search')
+            return self._search_regex(
+                r'''(?xs)
+                    (?:(?<=[^\w$])|^)       # instead of \b, which ignores $
+                    (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
+                    \s*\{(?:(?!};).)+?["']enhanced_except_
+                ''', jscode, 'Initial JS player n function name', group='name')
         if not idx:
             return func_name
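
The regex above only yields the name of a wrapper array (nfunc) and an index (idx); a simplified, standalone sketch (the JS snippet is invented) of how such a pair can be resolved against the player's "var <name>=[...]" declaration to get the real n-transform function name:

import re

jscode = 'var Aq=[Zk];(b=a.get("n"))&&(b=Aq[0](b),a.set("n",b));'
func_name, idx = 'Aq', '0'  # suppose the search above produced these

arr = re.search(
    r'var\s+%s\s*=\s*\[(.+?)\]\s*[,;]' % re.escape(func_name), jscode).group(1)
print([x.strip() for x in arr.split(',')][int(idx or 0)])  # -> Zk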

youtube_dl/jsinterp.py
@@ -925,9 +925,16 @@ class JSInterpreter(object):
                 obj.reverse()
                 return obj
             elif member == 'slice':
-                assertion(isinstance(obj, list), 'must be applied on a list')
-                assertion(len(argvals) == 1, 'takes exactly one argument')
-                return obj[argvals[0]:]
+                assertion(isinstance(obj, (list, compat_str)), 'must be applied on a list or string')
+                # From [1]:
+                #     .slice()     - like [:]
+                #     .slice(n)    - like [n:] (not [slice(n)]
+                #     .slice(m, n) - like [m:n] or [slice(m, n)]
+                # [1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/slice
+                assertion(len(argvals) <= 2, 'takes between 0 and 2 arguments')
+                if len(argvals) < 2:
+                    argvals += (None,)
+                return obj[slice(*argvals)]
             elif member == 'splice':
                 assertion(isinstance(obj, list), 'must be applied on a list')
                 assertion(argvals, 'takes one or more arguments')