Compare commits

...

17 Commits

Author SHA1 Message Date
ckaotik
2268982d39
Merge 47f1d70149 into c5098961b0 2024-08-21 22:33:22 -04:00
dirkf
c5098961b0 [Youtube] Rework n function extraction pattern
Now also succeeds with player b12cc44b
2024-08-06 20:59:09 +01:00
dirkf
dbc08fba83 [jsinterp] Improve slice implementation for player b12cc44b
Partly taken from yt-dlp/yt-dlp#10664, thx seproDev
        Fixes #32896
2024-08-06 20:51:38 +01:00
Aiur Adept
71223bff39
[Youtube] Fix nsig extraction for player 20dfca59 (#32891)
* dirkf's patch for nsig extraction
* add generic search per  yt-dlp/yt-dlp/pull/10611 - thx bashonly

---------

Co-authored-by: dirkf <fieldhouse@gmx.net>
2024-08-01 19:18:34 +01:00
ckaotik
47f1d70149 Merge branch 'master' into mediathekviewweb 2021-05-09 13:50:20 +02:00
ckaotik
bbfd415058 Specify subtitle file extension 2021-03-13 11:27:11 +01:00
ckaotik
338e4b8527 Import __future__.unicode_literals 2021-02-21 14:31:04 +01:00
ckaotik
e585d7bcf5 Specify utf-8 encoding 2021-02-21 14:26:31 +01:00
ckaotik
c8d69b4d33 [mediathekviewweb] Stray whitespace 2021-02-21 13:08:32 +01:00
ckaotik
230fb7caa1 [mediathekviewweb] Added tests 2021-02-21 13:03:35 +01:00
ckaotik
57f070e5ac [mediathekviewweb] Added pretty playlist name if topic is common for all results 2021-02-21 13:03:35 +01:00
ckaotik
3e3b11c80b [mediathekviewweb] Fixed future and everywhere detection 2021-02-21 13:03:35 +01:00
ckaotik
14384d4a6f [mediathekviewweb] Tweaked detection & naming for sign language/audio description 2021-02-21 13:03:35 +01:00
ckaotik
8a6fd68b92 [mediathekviewweb] Support future/everywhere filters. 2021-02-21 13:03:35 +01:00
ckaotik
a482e8fba0 [mediathekviewweb] flake8 2021-02-21 13:03:35 +01:00
ckaotik
4c91c4f146 [mediathekviewweb] Register extractor 2021-02-21 13:03:35 +01:00
ckaotik
e17d20829b [mediathekviewweb] Add new extractor 2021-02-21 13:03:35 +01:00
6 changed files with 369 additions and 13 deletions

test/test_jsinterp.py (View File)

@@ -425,6 +425,34 @@ class TestJSInterpreter(unittest.TestCase):
        self._test(jsi, [''], args=['', '-'])
        self._test(jsi, [], args=['', ''])

    def test_slice(self):
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice()}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(5)}', [5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(99)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-2)}', [7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-99)}', [0, 1, 2, 3, 4, 5, 6, 7, 8])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, 0)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(0, 1)}', [0])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(3, 6)}', [3, 4, 5])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(1, -1)}', [1, 2, 3, 4, 5, 6, 7])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-1, 1)}', [])
        self._test('function f(){return [0, 1, 2, 3, 4, 5, 6, 7, 8].slice(-3, -1)}', [6, 7])
        self._test('function f(){return "012345678".slice()}', '012345678')
        self._test('function f(){return "012345678".slice(0)}', '012345678')
        self._test('function f(){return "012345678".slice(5)}', '5678')
        self._test('function f(){return "012345678".slice(99)}', '')
        self._test('function f(){return "012345678".slice(-2)}', '78')
        self._test('function f(){return "012345678".slice(-99)}', '012345678')
        self._test('function f(){return "012345678".slice(0, 0)}', '')
        self._test('function f(){return "012345678".slice(1, 0)}', '')
        self._test('function f(){return "012345678".slice(0, 1)}', '0')
        self._test('function f(){return "012345678".slice(3, 6)}', '345')
        self._test('function f(){return "012345678".slice(1, -1)}', '1234567')
        self._test('function f(){return "012345678".slice(-1, 1)}', '')
        self._test('function f(){return "012345678".slice(-3, -1)}', '67')


if __name__ == '__main__':
    unittest.main()

test/test_youtube_signature.py (View File)

@@ -174,6 +174,14 @@ _NSIG_TESTS = [
        'https://www.youtube.com/s/player/5604538d/player_ias.vflset/en_US/base.js',
        '7X-he4jjvMx7BCX', 'sViSydX8IHtdWA',
    ),
    (
        'https://www.youtube.com/s/player/20dfca59/player_ias.vflset/en_US/base.js',
        '-fLCxedkAk4LUTK2', 'O8kfRq1y1eyHGw',
    ),
    (
        'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js',
        'keLa5R2U00sR9SQK', 'N1OGyujjEwMnLw',
    ),
]
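
Aside (not part of the patch): each _NSIG_TESTS tuple is (player URL, sample n parameter, expected transformed value). A minimal sketch for exercising the two new entries, assuming a checkout of this branch and network access, since the test downloads the player files into test/testdata:

    # Sketch: run the YouTube signature/nsig test suite, which includes the tuples above.
    import subprocess
    subprocess.run(['python', 'test/test_youtube_signature.py'], check=True)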

youtube_dl/extractor/extractors.py (View File)

@@ -678,6 +678,10 @@ from .mediasite import (
    MediasiteCatalogIE,
    MediasiteNamedCatalogIE,
)
from .mediathekviewweb import (
    MediathekViewWebSearchIE,
    MediathekViewWebIE,
)
from .medici import MediciIE
from .megaphone import MegaphoneIE
from .meipai import MeipaiIE

youtube_dl/extractor/mediathekviewweb.py (View File)

@@ -0,0 +1,280 @@
# coding=utf-8
from __future__ import unicode_literals

import datetime
import itertools
import json

from .common import InfoExtractor, SearchInfoExtractor
from ..compat import compat_parse_qs, compat_urlparse
from ..utils import ExtractorError, int_or_none


class MediathekViewWebSearchIE(SearchInfoExtractor):
    IE_NAME = 'mediathekviewweb:search'
    IE_DESC = 'MediathekViewWeb search'
    _SEARCH_KEY = 'mvwsearch'
    _MAX_RESULTS = float('inf')
    _MAX_RESULTS_PER_PAGE = 50

    _TESTS = [
        {
            'url': 'mvwsearchall:sandmännchen !kika',
            'info_dict': {
                'title': 'Unser Sandmännchen',
            },
            'playlist': [],
            'playlist_count': 7,
        },
        {
            # Audio description & common topic.
            'url': 'mvwsearch:#Sendung,Maus Audiodeskription',
            'info_dict': {
                'title': 'Die Sendung mit der Maus',
            },
            'playlist': [],
            'playlist_count': 1,
            'params': {
                'format': 'medium-audio_description',
                'skip_download': True,
            }
        },
        {
            # Sign language.
            'url': 'mvwsearchall:!ard #Tagesschau Gebärdensprache',
            'info_dict': {
                'title': '!ard #Tagesschau Gebärdensprache',
            },
            'playlist': [],
            'playlist_mincount': 365,
            'params': {
                'format': 'medium-sign_language',
                'skip_download': True,
            },
        },
    ]

    # Map of title affixes indicating video variants.
    _variants = {
        'audio_description': 'Audiodeskription',
        'sign_language': 'mit Gebärdensprache',
    }
    _future = True
    _everywhere = False

    def _build_conditions(self, search):
        # @note So far, there is no API endpoint to convert a query string into
        #       a complete query object, as required by the /api/query endpoint.
        filters = {}
        extra = {}
        for component in search.lower().split():
            if len(component) == 0:
                continue

            operator = component[0:1]
            value = component[1:]
            if len(value) == 0:
                # Treat single character query as such.
                # @note This differs from MVW's implementation.
                operator = ''
                value = component

            # Extra, non-field settings.
            if operator == '>':
                value = int(value.split(',')[0]) * 60
                extra['duration_min'] = max(extra.get('duration_min', 0), value)
                continue
            elif operator == '<':
                value = int(value.split(',')[0]) * 60
                extra['duration_max'] = min(extra.get('duration_max', float('inf')), value)
                continue

            # Field query operators.
            if operator == '!':
                field = 'channel'
            elif operator == '#':
                field = 'topic'
            elif operator == '+':
                field = 'title'
            elif operator == '*':
                field = 'description'
            else:
                # No known operator specified.
                field = 'generic'
                value = component

            # @note In theory, comma-joined values are for AND queries. However
            #       so far, each condition is AND joined, even without comma.
            filters.setdefault(field, []).append(' '.join(value.split(',')))

        # Generic filters can apply to different fields, based on the query.
        if 'generic' in filters:
            if self._everywhere:
                filters['channel,topic,title,description'] = filters['generic']
            elif 'topic' in filters:
                filters['title'] = filters['generic']
            else:
                filters['topic,title'] = filters['generic']
            filters.pop('generic')

        conditions = []
        for field, keys in filters.items():
            for query in keys:
                conditions.append({
                    'fields': field.split(','),
                    'query': query,
                })

        return conditions, extra

    def _extract_playlist_entries(self, results):
        entries = []
        for item in results:
            variant = None
            for key, value in self._variants.items():
                if item.setdefault('title', '').find(value) != -1:
                    variant = key

            formats = []
            formats.append({
                'url': item['url_video'],
                'format': ('medium (' + self._variants[variant] + ')') if variant else None,
                'format_id': ('medium-' + variant) if variant else 'medium',
                'language_preference': -10 if variant else 10,
                'quality': -2,
                'filesize': item.get('size'),
            })
            if len(item.get('url_video_low', '')) > 0:
                formats.append({
                    'url': item['url_video_low'],
                    'format': ('low (' + self._variants[variant] + ')') if variant else None,
                    'format_id': ('low-' + variant) if variant else 'low',
                    'language_preference': -10 if variant else 10,
                    'quality': -3,
                })
            if len(item.get('url_video_hd', '')) > 0:
                formats.append({
                    'url': item['url_video_hd'],
                    'format': ('high (' + self._variants[variant] + ')') if variant else None,
                    'format_id': ('high-' + variant) if variant else 'high',
                    'language_preference': -10 if variant else 10,
                    'quality': -1,
                })
            self._sort_formats(formats)

            video = {
                '_type': 'video',
                'formats': formats,
                'id': item.get('id'),
                'title': item.get('title'),
                'description': item.get('description'),
                'series': item.get('topic'),
                'channel': item.get('channel'),
                'uploader': item.get('channel'),
                'duration': int_or_none(item.get('duration')),
                'webpage_url': item.get('url_website'),
            }
            if item.get('timestamp'):
                upload_date = datetime.datetime.utcfromtimestamp(item['timestamp'])
                video['upload_date'] = upload_date.strftime('%Y%m%d')
            if item.get('url_subtitle'):
                video.setdefault('subtitles', {}).setdefault('de', []).append({
                    'url': item.get('url_subtitle'),
                    'ext': 'ttml',
                })
            entries.append(video)

        return entries

    def _get_n_results(self, query, n):
        queries, extra = self._build_conditions(query)
        queryObject = {
            'queries': queries,
            'sortBy': 'timestamp',
            'sortOrder': 'desc',
            'future': self._future,
            'duration_min': extra.get('duration_min'),
            'duration_max': extra.get('duration_max'),
            'offset': 0,
            'size': min(n, self._MAX_RESULTS_PER_PAGE),
        }

        entries = []
        for page_num in itertools.count(1):
            queryObject.update({'offset': (page_num - 1) * queryObject['size']})
            results = self._download_json(
                'https://mediathekviewweb.de/api/query', query,
                note='Fetching page %d' % page_num,
                data=json.dumps(queryObject).encode('utf-8'),
                headers={'Content-Type': 'text/plain'})
            if results['err'] is not None:
                raise ExtractorError('API returned an error: %s' % results['err'][0])

            entries.extend(self._extract_playlist_entries(results['result']['results']))
            meta = results['result']['queryInfo']
            if len(entries) >= n:
                entries = entries[:n]
                break
            elif meta['resultCount'] == 0:
                break

        common_topic = None
        if entries:
            common_topic = entries[0]['series']
            for entry in entries:
                common_topic = common_topic if entry['series'] == common_topic else None

        return self.playlist_result(entries, playlist_title=common_topic or query)


class MediathekViewWebIE(InfoExtractor):
    # @see https://github.com/mediathekview/mediathekviewweb
    IE_NAME = 'mediathekviewweb'
    _VALID_URL = r'https?://mediathekviewweb\.de/\#query=(?P<id>.+)'

    _TESTS = [
        {
            # Test for everywhere.
            'url': 'https://mediathekviewweb.de/#query=!ard%20%23Tagesschau%2020%2CUhr&everywhere=true',
            'info_dict': {
                'title': '!ard #Tagesschau 20,Uhr',
            },
            # Without everywhere, there are <100 results.
            'playlist_mincount': 365,
            'params': {
                'skip_download': True,
            },
        },
        {
            # Test for non-future videos.
            'url': 'https://mediathekviewweb.de/#query=%23sport%2Cim%2Costen%20biathlon&future=false',
            'info_dict': {
                'title': 'Sport im Osten',
            },
            # Future yields 4 results instead.
            'playlist_maxcount': 2,
            'params': {
                'skip_download': True,
            },
        },
    ]

    def _real_extract(self, url):
        query_hash = self._match_id(url)
        url_stub = '?query=' + query_hash
        query = compat_parse_qs(compat_urlparse.urlparse(url_stub).query)

        search = query['query'][0]
        query.pop('query')

        if len(query) > 0:
            # Detect global flags, MVW is very strict about accepted values.
            extractor = MediathekViewWebSearchIE(self._downloader)
            if query.get('everywhere', []) == ['true']:
                extractor._everywhere = True
            if query.get('future', []) == ['false']:
                extractor._future = False
            return extractor._real_extract('mvwsearchall:' + search)

        return self.url_result('mvwsearchall:' + search, ie=MediathekViewWebSearchIE.ie_key())
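
Aside (not part of the patch): with the _SEARCH_KEY defined above, the new extractors are reachable both via youtube-dl's search-prefix mechanism and via mediathekviewweb.de URLs. A rough usage sketch, assuming this branch is importable as youtube_dl; the option values here are illustrative:

    # Sketch: drive the new extractors through the public YoutubeDL API.
    import youtube_dl

    opts = {'skip_download': True, 'quiet': True}
    with youtube_dl.YoutubeDL(opts) as ydl:
        # Search-key form (MediathekViewWebSearchIE); the 'all' suffix requests every result.
        playlist = ydl.extract_info('mvwsearchall:sandmännchen !kika', download=False)
        print(playlist.get('title'), len(playlist.get('entries') or []))

        # URL form (MediathekViewWebIE); everywhere/future map onto the class flags above.
        ydl.extract_info(
            'https://mediathekviewweb.de/#query=%23sport%2Cim%2Costen%20biathlon&future=false',
            download=False)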

youtube_dl/extractor/youtube.py (View File)

@@ -1659,17 +1659,46 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
    def _extract_n_function_name(self, jscode):
        func_name, idx = self._search_regex(
            # new: (b=String.fromCharCode(110),c=a.get(b))&&c=nfunc[idx](c)
            # or: (b="nn"[+a.D],c=a.get(b))&&(c=nfunc[idx](c)
            # or: (PL(a),b=a.j.n||null)&&(b=nfunc[idx](b)
            # or: (b="nn"[+a.D],vL(a),c=a.j[b]||null)&&(c=narray[idx](c),a.set(b,c),narray.length||nfunc("")
            # old: (b=a.get("n"))&&(b=nfunc[idx](b)(?P<c>[a-z])\s*=\s*[a-z]\s*
            # older: (b=a.get("n"))&&(b=nfunc(b)
            r'''(?x)
                \((?:[\w$()\s]+,)*?\s*      # (
                (?P<b>[a-z])\s*=\s*         # b=
                (?:
                    (?:                     # expect ,c=a.get(b) (etc)
                        String\s*\.\s*fromCharCode\s*\(\s*110\s*\)|
                        "n+"\[\s*\+?s*[\w$.]+\s*]
                    )\s*(?:,[\w$()\s]+(?=,))*|
                    (?P<old>[\w$]+)         # a (old[er])
                )\s*
                (?(old)
                    # b.get("n")
                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
                    (?:\.\s*n|\[\s*"n"\s*]|\.\s*get\s*\(\s*"n"\s*\))
                |                           # ,c=a.get(b)
                    ,\s*(?P<c>[a-z])\s*=\s*[a-z]\s*
                    (?:\.\s*[\w$]+\s*|\[\s*[\w$]+\s*]\s*)*?
                    (?:\[\s*(?P=b)\s*]|\.\s*get\s*\(\s*(?P=b)\s*\))
                )
                # interstitial junk
                \s*(?:\|\|\s*null\s*)?(?:\)\s*)?&&\s*(?:\(\s*)?
                (?(c)(?P=c)|(?P=b))\s*=\s*  # [c|b]=
                # nfunc|nfunc[idx]
                (?P<nfunc>[a-zA-Z_$][\w$]*)(?:\s*\[(?P<idx>\d+)\])?\s*\(\s*[\w$]+\s*\)
            ''', jscode, 'Initial JS player n function name', group=('nfunc', 'idx'),
            default=(None, None))
        # thx bashonly: yt-dlp/yt-dlp/pull/10611
        if not func_name:
            self.report_warning('Falling back to generic n function search')
            return self._search_regex(
                r'''(?xs)
                    (?:(?<=[^\w$])|^)       # instead of \b, which ignores $
                    (?P<name>(?!\d)[a-zA-Z\d_$]+)\s*=\s*function\((?!\d)[a-zA-Z\d_$]+\)
                    \s*\{(?:(?!};).)+?["']enhanced_except_
                ''', jscode, 'Initial JS player n function name', group='name')

        if not idx:
            return func_name
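
Aside (not part of the patch): a quick way to sanity-check the reworked pattern against a live player is to call the method directly. A sketch, assuming a checkout of this branch; urllib.request makes it Python 3 only:

    # Sketch: extract the n-function name from player b12cc44b using the regex above.
    import urllib.request

    from youtube_dl import YoutubeDL
    from youtube_dl.extractor.youtube import YoutubeIE

    player_url = 'https://www.youtube.com/s/player/b12cc44b/player_ias.vflset/en_US/base.js'
    jscode = urllib.request.urlopen(player_url).read().decode('utf-8')

    ie = YoutubeIE(YoutubeDL())
    # Warns and falls back to the generic search if the primary regex misses.
    print(ie._extract_n_function_name(jscode))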

youtube_dl/jsinterp.py (View File)

@@ -925,9 +925,16 @@ class JSInterpreter(object):
                obj.reverse()
                return obj
            elif member == 'slice':
                assertion(isinstance(obj, (list, compat_str)), 'must be applied on a list or string')
                # From [1]:
                #  .slice()     - like [:]
                #  .slice(n)    - like [n:] (not [slice(n)]
                #  .slice(m, n) - like [m:n] or [slice(m, n)]
                # [1] https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/slice
                assertion(len(argvals) <= 2, 'takes between 0 and 2 arguments')
                if len(argvals) < 2:
                    argvals += (None,)
                return obj[slice(*argvals)]
            elif member == 'splice':
                assertion(isinstance(obj, list), 'must be applied on a list')
                assertion(argvals, 'takes one or more arguments')
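
Aside (not part of the patch): the padded slice(*argvals) call above is what makes the JavaScript semantics line up with Python slicing. A standalone sketch of the same idea; js_slice is an illustrative name, not from the patch:

    # Sketch: emulate JavaScript Array/String .slice() with Python's slice object.
    def js_slice(obj, *argvals):
        assert len(argvals) <= 2, 'takes between 0 and 2 arguments'
        if len(argvals) < 2:
            argvals += (None,) * (2 - len(argvals))
        return obj[slice(*argvals)]

    assert js_slice([0, 1, 2, 3, 4], -2) == [3, 4]        # like .slice(-2)
    assert js_slice('012345678', 1, -1) == '1234567'      # like .slice(1, -1)
    assert js_slice('012345678', -1, 1) == ''             # like .slice(-1, 1)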