Compare commits

...

39 Commits

Author SHA1 Message Date
bibiak
67b0af6a42
Merge 8e6e47d6c1b19838de621aae4f3d31bda0c10cdc into da7223d4aa42ff9fc680b0951d043dd03cec2d30 2025-03-22 07:22:22 +08:00
dirkf
da7223d4aa [YouTube] Improve support for tce-style player JS
* improve extraction of global "useful data" Array from player JS
* also handle tv-player and add tests: thx seproDev (yt-dlp/yt-dlp#12684)

Co-Authored-By: sepro <sepro@sepr0.com>
2025-03-21 16:26:25 +00:00
dirkf
37c2440d6a [YouTube] Update player client data
thx seproDev (yt-dlp/yt-dlp#12603)

Co-authored-by: sepro <sepro@sepr0.com>
2025-03-21 16:13:24 +00:00
dirkf
420d53387c [JSInterp] Improve tests
* from yt-dlp/yt-dlp#12313
* also fix d7c2708
2025-03-11 02:00:24 +00:00
dirkf
32f89de92b [YouTube] Update TVHTML5 client parameters
* resolves #33078
2025-03-11 02:00:24 +00:00
dirkf
283dca56fe [YouTube] Initially support tce-style player JS
* resolves #33079
2025-03-11 02:00:24 +00:00
dirkf
422b1b31cf [YouTube] Temporarily redirect from tce-style player JS 2025-03-11 02:00:24 +00:00
dirkf
1dc27e1c3b [JSInterp] Make indexing error handling more conformant
* by default TypeError -> undefined, else raise
* set allow_undefined=True/False to override
2025-03-11 02:00:24 +00:00
dirkf
af049e309b [JSInterp] Handle undefined, etc, passed to JS_RegExp and Exception 2025-03-11 02:00:24 +00:00
dirkf
94849bc997 [JSInterp] Improve Date processing
* add JS_Date class implementing JS Date
* support constructor args other than date string
* support static methods of Date
* Date objects are still automatically coerced to timestamp before using in JS.
2025-03-11 02:00:24 +00:00
dirkf
974c7d7f34 [compat] Fix inheriting from compat_collections_chain_map
* see ytdl-org/youtube-dl#33079#issuecomment-2704038049
2025-03-11 02:00:24 +00:00
dirkf
8738407d77 [compat] Support zstd Content-Encoding
* see RFC 8878 7.2
2025-03-11 02:00:24 +00:00
dirkf
cecaa18b80 [compat] Clean-up
* make workaround_optparse_bug9161 private
* add comments
* avoid leaving test objects behind
2025-03-11 02:00:24 +00:00
bibiak
8e6e47d6c1
Merge branch 'ytdl-org:master' into master 2024-08-17 08:41:10 +02:00
bibiak
f5dd875a02 moved txt_or_none outside if statement 2023-10-02 14:13:50 +00:00
bibiak
efee229d66
Merge branch 'ytdl-org:master' into master 2023-10-02 15:51:03 +02:00
bibiak
8aac6a6702
Merge branch 'ytdl-org:master' into master 2023-08-31 20:40:06 +02:00
bibiak
0c53d4245d
Merge branch 'ytdl-org:master' into master 2023-08-31 15:14:56 +02:00
bibiak
a9223364b3
Merge branch 'ytdl-org:master' into master 2023-06-29 17:43:26 +02:00
bibiak
9f3bbddc5c
Merge branch 'ytdl-org:master' into master 2023-06-19 21:33:30 +02:00
bibiak
7caa31f0f0
Merge branch 'ytdl-org:master' into master 2023-06-13 07:10:17 +02:00
Marcin Biczan
8a2249ecf1 too many except 2023-05-22 21:25:05 +02:00
bibiak
e7c42394a6
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:22:27 +02:00
bibiak
79b0cde4dc
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:22:18 +02:00
bibiak
da19699ff8
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:22:01 +02:00
bibiak
578c53381b
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:21:43 +02:00
bibiak
283b6b31f5
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:21:27 +02:00
bibiak
56c07235ee
Update youtube_dl/extractor/tvp.py
Co-authored-by: dirkf <fieldhouse@gmx.net>
2023-05-22 21:21:08 +02:00
bibiak
1d148eb75f
Merge branch 'ytdl-org:master' into master 2023-05-22 20:53:23 +02:00
dirkf
237c59f7f5
Update youtube_dl/extractor/tvp.py 2023-05-09 16:54:40 +01:00
dirkf
4891480197
Update youtube_dl/extractor/tvp.py
Add `txt_or_none()` shim
2023-05-09 16:50:35 +01:00
dirkf
6e827118cb
Update tvp.py from yt-dlp
* pull changes from https://github.com/yt-dlp/yt-dlp/pull/6989, thanks selfisekai
* use `traverse_obj()` for safer extraction
* fix tests that are not blocked from UK

Co-authored-by: selfisekai
2023-05-09 16:44:13 +01:00
dirkf
efedc80daf
Update extractors.py
[skip ci]
2023-05-09 16:34:41 +01:00
bibiak
93e0d820ce
Merge branch 'ytdl-org:master' into master 2023-04-22 14:50:01 +02:00
bibiak
37ff4c9399
Merge branch 'ytdl-org:master' into master 2023-04-11 14:56:29 +02:00
bibiak
16cb050ae6
Merge branch 'ytdl-org:master' into master 2023-04-09 13:54:54 +02:00
Marcin Biczan
4b6bef45b5 added support for full offer 2023-04-06 21:25:00 +02:00
Marcin Biczan
b08dc56f46 json unneeded 2023-04-05 22:03:19 +02:00
Marcin Biczan
953fce852f TVPapp extractor added :: init 2023-04-05 21:54:58 +02:00
8 changed files with 1019 additions and 196 deletions

View File

@ -11,6 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import math
import re
import time
from youtube_dl.compat import compat_str as str
from youtube_dl.jsinterp import JS_Undefined, JSInterpreter
@ -208,6 +209,34 @@ class TestJSInterpreter(unittest.TestCase):
self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT'])
# epoch 0
self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
# undefined
self._test(jsi, NaN, args=[JS_Undefined])
# y,m,d, ... - may fail with older dates lacking DST data
jsi = JSInterpreter(
'function f() { return new Date(%s); }'
% ('2024, 5, 29, 2, 52, 12, 42',))
self._test(jsi, (
1719625932042 # UK value
+ (
+ 3600 # back to GMT
+ (time.altzone if time.daylight # host's DST
else time.timezone)
) * 1000))
# no arg
self.assertAlmostEqual(JSInterpreter(
'function f() { return new Date() - 0; }').call_function('f'),
time.time() * 1000, delta=100)
# Date.now()
self.assertAlmostEqual(JSInterpreter(
'function f() { return Date.now(); }').call_function('f'),
time.time() * 1000, delta=100)
# Date.parse()
jsi = JSInterpreter('function f(dt) { return Date.parse(dt); }')
self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
# Date.UTC()
jsi = JSInterpreter('function f() { return Date.UTC(%s); }'
% ('1970, 0, 1, 0, 0, 0, 0',))
self._test(jsi, 0)
def test_call(self):
jsi = JSInterpreter('''
@ -463,6 +492,14 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f(){return NaN << 42}', 0)
self._test('function f(){return "21.9" << 1}', 42)
self._test('function f(){return 21 << 4294967297}', 42)
self._test('function f(){return true << "5";}', 32)
self._test('function f(){return true << true;}', 2)
self._test('function f(){return "19" & "21.9";}', 17)
self._test('function f(){return "19" & false;}', 0)
self._test('function f(){return "11.0" >> "2.1";}', 2)
self._test('function f(){return 5 ^ 9;}', 12)
self._test('function f(){return 0.0 << NaN}', 0)
self._test('function f(){return null << undefined}', 0)
def test_negative(self):
self._test('function f(){return 2 * -2.0 ;}', -4)

View File

@ -223,6 +223,42 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/9c6dfc4a/player_ias.vflset/en_US/base.js',
'jbu7ylIosQHyJyJV', 'uwI0ESiynAmhNg',
),
(
'https://www.youtube.com/s/player/f6e09c70/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
(
'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
),
(
'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
),
(
'https://www.youtube.com/s/player/d50f54ef/player_ias_tce.vflset/en_US/base.js',
'Ha7507LzRmH3Utygtj', 'XFTb2HoeOE5MHg',
),
(
'https://www.youtube.com/s/player/074a8365/player_ias_tce.vflset/en_US/base.js',
'Ha7507LzRmH3Utygtj', 'ufTsrE0IVYrkl8v',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'N5uAlLqm0eg1GyHO', 'dCBQOejdq5s-ww',
),
(
'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js',
'-qIP447rVlTTwaZjY', 'KNcGOksBAvwqQg',
),
(
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
),
]
@ -284,7 +320,7 @@ def t_factory(name, sig_func, url_pattern):
def signature(jscode, sig_input):
func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
func = YoutubeIE(FakeYDL({'cachedir': False}))._parse_sig_js(jscode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
@ -292,9 +328,10 @@ def signature(jscode, sig_input):
def n_sig(jscode, sig_input):
funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
return JSInterpreter(jscode).call_function(
funcname, sig_input, _ytdl_do_not_return=sig_input)
ie = YoutubeIE(FakeYDL({'cachedir': False}))
jsi = JSInterpreter(jscode)
jsi, _, func_code = ie._extract_n_function_code_jsi(sig_input, jsi)
return ie._extract_n_function_from_code(jsi, func_code)(sig_input)
make_sig_test = t_factory(

View File

@ -18,7 +18,7 @@ from .compat import (
compat_getpass,
compat_register_utf8,
compat_shlex_split,
workaround_optparse_bug9161,
_workaround_optparse_bug9161,
)
from .utils import (
_UnsafeExtensionError,
@ -50,7 +50,7 @@ def _real_main(argv=None):
# Compatibility fix for Windows
compat_register_utf8()
workaround_optparse_bug9161()
_workaround_optparse_bug9161()
setproctitle('youtube-dl')

View File

@ -16,7 +16,6 @@ import os
import platform
import re
import shlex
import shutil
import socket
import struct
import subprocess
@ -24,11 +23,15 @@ import sys
import types
import xml.etree.ElementTree
_IDENTITY = lambda x: x
# naming convention
# 'compat_' + Python3_name.replace('.', '_')
# other aliases exist for convenience and/or legacy
# wrap disposable test values in type() to reclaim storage
# deal with critical unicode/str things first
# deal with critical unicode/str things first:
# compat_str, compat_basestring, compat_chr
try:
# Python 2
compat_str, compat_basestring, compat_chr = (
@ -39,18 +42,23 @@ except NameError:
str, (str, bytes), chr
)
# casefold
# compat_casefold
try:
compat_str.casefold
compat_casefold = lambda s: s.casefold()
except AttributeError:
from .casefold import _casefold as compat_casefold
# compat_collections_abc
try:
import collections.abc as compat_collections_abc
except ImportError:
import collections as compat_collections_abc
# compat_urllib_request
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
@ -79,11 +87,15 @@ except TypeError:
_add_init_method_arg(compat_urllib_request.Request)
del _add_init_method_arg
# compat_urllib_error
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
# compat_urllib_parse
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
@ -98,17 +110,23 @@ except ImportError: # Python 2
compat_urlparse = compat_urllib_parse
compat_urllib_parse_urlparse = compat_urllib_parse.urlparse
# compat_urllib_response
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
# compat_urllib_response.addinfourl
try:
compat_urllib_response.addinfourl.status
except AttributeError:
# .getcode() is deprecated in Py 3.
compat_urllib_response.addinfourl.status = property(lambda self: self.getcode())
# compat_http_cookiejar
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
@ -127,12 +145,16 @@ else:
compat_cookiejar_Cookie = compat_cookiejar.Cookie
compat_http_cookiejar_Cookie = compat_cookiejar_Cookie
# compat_http_cookies
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
compat_http_cookies = compat_cookies
# compat_http_cookies_SimpleCookie
if sys.version_info[0] == 2 or sys.version_info < (3, 3):
class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
def load(self, rawdata):
@ -155,11 +177,15 @@ else:
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie
# compat_html_entities, probably useless now
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
# compat_html_entities_html5
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
@ -2408,18 +2434,24 @@ except AttributeError:
# Py < 3.1
compat_http_client.HTTPResponse.getcode = lambda self: self.status
# compat_urllib_HTTPError
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
compat_urllib_HTTPError = compat_HTTPError
# compat_urllib_request_urlretrieve
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
compat_urllib_request_urlretrieve = compat_urlretrieve
# compat_html_parser_HTMLParser, compat_html_parser_HTMLParseError
try:
from HTMLParser import (
HTMLParser as compat_HTMLParser,
@ -2432,22 +2464,33 @@ except ImportError: # Python 3
# HTMLParseError was deprecated in Python 3.3 and removed in
# Python 3.5. Introducing dummy exception for Python >3.5 for compatible
# and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
pass
compat_html_parser_HTMLParser = compat_HTMLParser
compat_html_parser_HTMLParseError = compat_HTMLParseError
# compat_subprocess_get_DEVNULL
try:
_DEVNULL = subprocess.DEVNULL
compat_subprocess_get_DEVNULL = lambda: _DEVNULL
except AttributeError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
# compat_http_server
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
# compat_urllib_parse_unquote_to_bytes,
# compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus,
# compat_urllib_parse_urlencode,
# compat_urllib_parse_parse_qs
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
@ -2598,6 +2641,8 @@ except ImportError: # Python 2
compat_urllib_parse_parse_qs = compat_parse_qs
# compat_urllib_request_DataHandler
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
@ -2632,16 +2677,20 @@ except ImportError: # Python < 3.4
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
# compat_xml_etree_ElementTree_ParseError
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
compat_xml_etree_ElementTree_ParseError = compat_xml_parse_error
etree = xml.etree.ElementTree
# compat_xml_etree_ElementTree_Element
_etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
class _TreeBuilder(_etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
@ -2650,7 +2699,7 @@ try:
# xml.etree.ElementTree.Element is a method in Python <=2.6 and
# the following will crash with:
# TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
isinstance(None, etree.Element)
isinstance(None, _etree.Element)
from xml.etree.ElementTree import Element as compat_etree_Element
except TypeError: # Python <=2.6
from xml.etree.ElementTree import _ElementInterface as compat_etree_Element
@ -2658,12 +2707,12 @@ compat_xml_etree_ElementTree_Element = compat_etree_Element
if sys.version_info[0] >= 3:
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
return _etree.XML(text, parser=_etree.XMLParser(target=_TreeBuilder()))
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
try:
_etree_iter = etree.Element.iter
_etree_iter = _etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
@ -2675,27 +2724,29 @@ else:
# 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=_TreeBuilder())
parser = _etree.XMLParser(target=_TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
el = _etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
doc = _XML(text, parser=_etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if hasattr(etree, 'register_namespace'):
compat_etree_register_namespace = etree.register_namespace
else:
# compat_xml_etree_register_namespace
try:
compat_etree_register_namespace = _etree.register_namespace
except AttributeError:
def compat_etree_register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
@ -2704,14 +2755,16 @@ else:
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(etree._namespace_map.items()):
if re.match(r'ns\d+$', prefix):
raise ValueError('Prefix format reserved for internal use')
for k, v in list(_etree._namespace_map.items()):
if k == uri or v == prefix:
del etree._namespace_map[k]
etree._namespace_map[uri] = prefix
del _etree._namespace_map[k]
_etree._namespace_map[uri] = prefix
compat_xml_etree_register_namespace = compat_etree_register_namespace
# compat_xpath, compat_etree_iterfind
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
@ -2898,7 +2951,6 @@ if sys.version_info < (2, 7):
def __init__(self, root):
self.root = root
##
# Generate all matching objects.
def compat_etree_iterfind(elem, path, namespaces=None):
@ -2933,13 +2985,15 @@ if sys.version_info < (2, 7):
else:
compat_xpath = lambda xpath: xpath
compat_etree_iterfind = lambda element, match: element.iterfind(match)
compat_xpath = _IDENTITY
# compat_os_name
compat_os_name = os._name if os.name == 'java' else os.name
# compat_shlex_quote
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
@ -2954,6 +3008,7 @@ else:
return "'" + s.replace("'", "'\"'\"'") + "'"
# compat_shlex.split
try:
args = shlex.split('中文')
assert (isinstance(args, list)
@ -2969,6 +3024,7 @@ except (AssertionError, UnicodeEncodeError):
return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix)))
# compat_ord
def compat_ord(c):
if isinstance(c, int):
return c
@ -2976,6 +3032,7 @@ def compat_ord(c):
return ord(c)
# compat_getenv, compat_os_path_expanduser, compat_setenv
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
@ -3063,6 +3120,7 @@ else:
compat_os_path_expanduser = compat_expanduser
# compat_os_path_realpath
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
@ -3076,6 +3134,7 @@ else:
compat_os_path_realpath = compat_realpath
# compat_print
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
@ -3086,6 +3145,7 @@ else:
print(s)
# compat_getpass_getpass
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
@ -3098,22 +3158,22 @@ else:
compat_getpass_getpass = compat_getpass
# compat_input
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# compat_kwargs
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
(lambda x: x)(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
compat_kwargs = _IDENTITY
# compat_numeric_types
@ -3132,6 +3192,8 @@ except NameError: # Python 3
# compat_int
compat_int = compat_integer_types[-1]
# compat_socket_create_connection
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
@ -3158,6 +3220,7 @@ else:
compat_socket_create_connection = socket.create_connection
# compat_contextlib_suppress
try:
from contextlib import suppress as compat_contextlib_suppress
except ImportError:
@ -3200,12 +3263,12 @@ except AttributeError:
# repeated .close() is OK, but just in case
with compat_contextlib_suppress(EnvironmentError):
f.close()
popen.wait()
popen.wait()
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
def _workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
@ -3224,9 +3287,10 @@ def workaround_optparse_bug9161():
optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
# compat_shutil_get_terminal_size
try:
from shutil import get_terminal_size as compat_get_terminal_size # Python >= 3.3
except ImportError:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
@ -3256,27 +3320,33 @@ else:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
compat_shutil_get_terminal_size = compat_get_terminal_size
# compat_itertools_count
try:
itertools.count(start=0, step=1)
type(itertools.count(start=0, step=1))
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
except TypeError: # Python 2.6 lacks step
def compat_itertools_count(start=0, step=1):
while True:
yield start
start += step
# compat_tokenize_tokenize
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
# compat_struct_pack, compat_struct_unpack, compat_Struct
try:
struct.pack('!I', 0)
type(struct.pack('!I', 0))
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
@ -3308,8 +3378,10 @@ else:
compat_Struct = struct.Struct
# compat_map/filter() returning an iterator, supposedly the
# same versioning as for zip below
# builtins returning an iterator
# compat_map, compat_filter
# supposedly the same versioning as for zip below
try:
from future_builtins import map as compat_map
except ImportError:
@ -3326,6 +3398,7 @@ except ImportError:
except ImportError:
compat_filter = filter
# compat_zip
try:
from future_builtins import zip as compat_zip
except ImportError: # not 2.6+ or is 3.x
@ -3335,6 +3408,7 @@ except ImportError: # not 2.6+ or is 3.x
compat_zip = zip
# compat_itertools_zip_longest
# method renamed between Py2/3
try:
from itertools import zip_longest as compat_itertools_zip_longest
@ -3342,7 +3416,8 @@ except ImportError:
from itertools import izip_longest as compat_itertools_zip_longest
# new class in collections
# compat_collections_chain_map
# collections.ChainMap: new class
try:
from collections import ChainMap as compat_collections_chain_map
# Py3.3's ChainMap is deficient
@ -3398,19 +3473,22 @@ except ImportError:
def new_child(self, m=None, **kwargs):
m = m or {}
m.update(kwargs)
return compat_collections_chain_map(m, *self.maps)
# support inheritance !
return type(self)(m, *self.maps)
@property
def parents(self):
return compat_collections_chain_map(*(self.maps[1:]))
return type(self)(*(self.maps[1:]))
# compat_re_Pattern, compat_re_Match
# Pythons disagree on the type of a pattern (RegexObject, _sre.SRE_Pattern, Pattern, ...?)
compat_re_Pattern = type(re.compile(''))
# and on the type of a match
compat_re_Match = type(re.match('a', 'a'))
# compat_base64_b64decode
if sys.version_info < (3, 3):
def compat_b64decode(s, *args, **kwargs):
if isinstance(s, compat_str):
@ -3422,6 +3500,7 @@ else:
compat_base64_b64decode = compat_b64decode
# compat_ctypes_WINFUNCTYPE
if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
# PyPy2 prior to version 5.4.0 expects byte strings as Windows function
# names, see the original PyPy issue [1] and the youtube-dl one [2].
@ -3440,6 +3519,7 @@ else:
return ctypes.WINFUNCTYPE(*args, **kwargs)
# compat_open
if sys.version_info < (3, 0):
# open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None
def compat_open(file_, *args, **kwargs):
@ -3467,18 +3547,28 @@ except AttributeError:
def compat_datetime_timedelta_total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
# optional decompression packages
# compat_brotli
# PyPi brotli package implements 'br' Content-Encoding
try:
import brotli as compat_brotli
except ImportError:
compat_brotli = None
# compat_ncompress
# PyPi ncompress package implements 'compress' Content-Encoding
try:
import ncompress as compat_ncompress
except ImportError:
compat_ncompress = None
# compat_zstandard
# PyPi zstandard package implements 'zstd' Content-Encoding (RFC 8878 7.2)
try:
import zstandard as compat_zstandard
except ImportError:
compat_zstandard = None
legacy = [
'compat_HTMLParseError',
@ -3495,6 +3585,7 @@ legacy = [
'compat_getpass',
'compat_parse_qs',
'compat_realpath',
'compat_shlex_split',
'compat_urllib_parse_parse_qs',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
@ -3508,8 +3599,6 @@ legacy = [
__all__ = [
'compat_html_parser_HTMLParseError',
'compat_html_parser_HTMLParser',
'compat_Struct',
'compat_base64_b64decode',
'compat_basestring',
@ -3518,13 +3607,9 @@ __all__ = [
'compat_chr',
'compat_collections_abc',
'compat_collections_chain_map',
'compat_datetime_timedelta_total_seconds',
'compat_http_cookiejar',
'compat_http_cookiejar_Cookie',
'compat_http_cookies',
'compat_http_cookies_SimpleCookie',
'compat_contextlib_suppress',
'compat_ctypes_WINFUNCTYPE',
'compat_datetime_timedelta_total_seconds',
'compat_etree_fromstring',
'compat_etree_iterfind',
'compat_filter',
@ -3533,6 +3618,12 @@ __all__ = [
'compat_getpass_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_html_parser_HTMLParseError',
'compat_html_parser_HTMLParser',
'compat_http_cookiejar',
'compat_http_cookiejar_Cookie',
'compat_http_cookies',
'compat_http_cookies_SimpleCookie',
'compat_http_client',
'compat_http_server',
'compat_input',
@ -3555,7 +3646,7 @@ __all__ = [
'compat_register_utf8',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_shutil_get_terminal_size',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
@ -3575,5 +3666,5 @@ __all__ = [
'compat_xml_etree_register_namespace',
'compat_xpath',
'compat_zip',
'workaround_optparse_bug9161',
'compat_zstandard',
]

View File

@ -1384,7 +1384,9 @@ from .tvnow import (
from .tvp import (
TVPEmbedIE,
TVPIE,
TVPWebsiteIE,
TVPStreamIE,
TVPVODSeriesIE,
TVPVODVideoIE,
)
from .tvplay import (
TVPlayIE,

View File

@ -2,52 +2,274 @@
from __future__ import unicode_literals
import itertools
import random
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
clean_html,
determine_ext,
ExtractorError,
get_element_by_attribute,
orderedSet,
int_or_none,
js_to_json,
traverse_obj,
url_or_none,
)
def txt_or_none(v, default=None):
    """Return the stripped text form of `v`, or `default` when `v` is None
    or its stripped text is empty."""
    if v is None:
        return default
    text = compat_str(v).strip()
    return text or default
# Back-port shim: older youtube-dl InfoExtractor lacks _match_valid_url and
# _search_json, which the extractors below rely on.  When running against such
# a version, wrap the imported InfoExtractor in a subclass that supplies them.
if not hasattr(InfoExtractor, '_match_valid_url'):

    import sys

    from ..compat import (
        compat_os_name,
        compat_re_Pattern as compiled_regex_type,
    )
    from ..utils import (
        bug_reports_message,
        error_to_compat_str,
        NO_DEFAULT,
        RegexNotFoundError,
    )

    # Keep a reference to the real base class so the shim can extend it.
    BaseIE = InfoExtractor

    class InfoExtractor(BaseIE):

        def _match_valid_url(self, url):
            # Minimal stand-in for the newer helper: match url against
            # this extractor's _VALID_URL and return the match object.
            return re.match(self._VALID_URL, url)

        def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
            """
            Perform a regex search on the given string, using a single or a list of
            patterns returning the first matching group.
            In case of failure return a default value or raise a WARNING or a
            RegexNotFoundError, depending on fatal, specifying the field name.
            """
            # A single pattern may be a str or an already-compiled pattern;
            # otherwise iterate the list and stop at the first match.
            if isinstance(pattern, (str, compat_str, compiled_regex_type)):
                mobj = re.search(pattern, string, flags)
            else:
                for p in pattern:
                    mobj = re.search(p, string, flags)
                    if mobj:
                        break

            # Colourise the field name for error output on capable terminals.
            if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
                _name = '\033[0;34m%s\033[0m' % name
            else:
                _name = name

            if mobj:
                if group is None:
                    # return the first matching group
                    return next(g for g in mobj.groups() if g is not None)
                elif isinstance(group, (list, tuple)):
                    return tuple(mobj.group(g) for g in group)
                else:
                    return mobj.group(group)
            elif default is not NO_DEFAULT:
                return default
            elif fatal:
                raise RegexNotFoundError('Unable to extract %s' % _name)
            else:
                self.report_warning('unable to extract %s' % _name + bug_reports_message())
                return None

        def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
            """
            Like _search_regex, but strips HTML tags and unescapes entities.
            """
            res = self._search_regex(pattern, string, name, default, fatal, flags, group)
            if isinstance(res, tuple):
                return tuple(map(clean_html, res))
            return clean_html(res or None)

        def _search_json(self, start_pattern, string, name, video_id, **kwargs):
            """Searches string for the JSON object specified by start_pattern"""
            # Keyword-only args emulated via kwargs for Py2 compatibility:
            # self, start_pattern, string, name, video_id, *, end_pattern='',
            # contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT
            end_pattern = kwargs.pop('end_pattern', '')
            contains_pattern = kwargs.pop('contains_pattern', r'{(?:[\s\S]+)}')
            fatal = kwargs.get('fatal', True)
            default = kwargs.get('default', NO_DEFAULT)

            # NB: end_pattern is only used to reduce the size of the initial match
            # With no explicit default, fall back to {} but remember that no
            # caller-supplied default exists (has_default drives warning logic).
            if default is NO_DEFAULT:
                default, has_default = {}, False
            else:
                fatal, has_default = False, True

            json_string = self._search_regex(
                r'(?:{0})\s*(?P<json>{1})\s*(?:{2})'.format(
                    start_pattern, contains_pattern, end_pattern),
                string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
            if not json_string:
                return default

            try:
                # return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
                return self._parse_json(json_string, video_id, **kwargs)
            except ExtractorError as e:
                # Mirror _search_regex's colourised field name for the message.
                if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
                    _name = '\033[0;34m%s\033[0m' % name
                else:
                    _name = name
                msg = 'Unable to extract {0} - Failed to parse JSON'.format(_name)
                if fatal:
                    raise ExtractorError(msg, cause=e.cause, video_id=video_id)
                elif not has_default:
                    self.report_warning(
                        '{0}: {1}'.format(msg, error_to_compat_str(e)), video_id=video_id)
            return default
class TVPIE(InfoExtractor):
IE_NAME = 'tvp'
IE_DESC = 'Telewizja Polska'
_VALID_URL = r'https?://[^/]+\.tvp\.(?:pl|info)/(?:video/(?:[^,\s]*,)*|(?:(?!\d+/)[^/]+/)*)(?P<id>\d+)'
_VALID_URL = r'https?://(?:[^/]+\.)?(?:tvp(?:parlament)?\.(?:pl|info)|polandin\.com|tvpworld\.com|swipeto\.pl)/(?:(?:(?!\d+/)[^/]+/)*|(?:video|website)/[^/]+,)(?P<id>\d+)'
_TESTS = [{
# TVPlayer 2 in js wrapper
'url': 'https://swipeto.pl/64095316/uliczny-foxtrot-wypozyczalnia-kaset-kto-pamieta-dvdvideo',
'info_dict': {
'id': '64095316',
'ext': 'mp4',
'title': 'Uliczny Foxtrot — Wypożyczalnia kaset. Kto pamięta DVD-Video?',
'age_limit': 0,
'duration': 374,
'thumbnail': r're:https://.+',
},
'expected_warnings': [
'Failed to download ISM manifest: HTTP Error 404: Not Found',
'Failed to download m3u8 information: HTTP Error 404: Not Found',
],
'skip': 'Video gone: 404 Nie znaleziono obiektu',
}, {
# TVPlayer 2 in js wrapper (redirect to VodVideo)
'url': 'https://vod.tvp.pl/video/czas-honoru,i-seria-odc-13,194536',
'md5': 'a21eb0aa862f25414430f15fdfb9e76c',
'info_dict': {
'id': '194536',
'ext': 'mp4',
'title': 'Czas honoru, odc. 13 Władek',
'description': 'md5:437f48b93558370b031740546b696e24',
'description': 'md5:76649d2014f65c99477be17f23a4dead',
'age_limit': 12,
},
'add_ie': ['Generic', 'TVPEmbed'],
}, {
'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
'md5': 'b0005b542e5b4de643a9690326ab1257',
# film (old format)
'url': 'https://vod.tvp.pl/website/krzysztof-krawczyk-cale-moje-zycie,51374466',
'info_dict': {
'id': '17916176',
'id': '51374509',
'ext': 'mp4',
'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
'description': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
'title': 'Krzysztof Krawczyk całe moje życie, Krzysztof Krawczyk całe moje życie',
'description': 'md5:2e80823f00f5fc263555482f76f8fa42',
'age_limit': 12,
},
'params': {
'skip_download': True,
},
'add_ie': ['TVPEmbed'],
'skip': 'This video is not available from your location due to geo restriction',
}, {
# TVPlayer legacy
'url': 'https://www.tvp.pl/polska-press-video-uploader/wideo/62042351',
'info_dict': {
'id': '62042351',
'ext': 'mp4',
'title': 'Wideo',
'description': 'Wideo Kamera',
'duration': 24,
'age_limit': 0,
'thumbnail': r're:https://.+',
},
'add_ie': ['TVPEmbed'],
}, {
# TVPlayer 2 in iframe
# page id is not the same as video id(#7799)
'url': 'https://wiadomosci.tvp.pl/33908820/28092017-1930',
'md5': '84cd3c8aec4840046e5ab712416b73d0',
'url': 'https://wiadomosci.tvp.pl/50725617/dzieci-na-sprzedaz-dla-homoseksualistow',
'md5': 'd35fb45103802488fcb7470e411b9ed4',
'info_dict': {
'id': '33908820',
'id': '50725617',
'ext': 'mp4',
'title': 'Wiadomości, 28.09.2017, 19:30',
'description': 'Wydanie główne codziennego serwisu informacyjnego.'
'title': 'Dzieci na sprzedaż dla homoseksualistów',
'description': 'md5:7d318eef04e55ddd9f87a8488ac7d590',
'age_limit': 12,
'duration': 259,
'thumbnail': r're:https://.+',
},
'skip': 'HTTP Error 404: Not Found',
'add_ie': ['TVPEmbed'],
}, {
# TVPlayer 2 in client-side rendered website (regional; window.__newsData)
'url': 'https://warszawa.tvp.pl/25804446/studio-yayo',
'info_dict': {
'id': '25804446',
'ext': 'mp4',
'title': 'Studio Yayo',
'upload_date': '20160616',
'timestamp': 1466075700,
'age_limit': 0,
'duration': 20,
'thumbnail': r're:https://.+',
},
'add_ie': ['TVPEmbed'],
'skip': 'Video is geo restricted',
}, {
# TVPlayer 2 in client-side rendered website (tvp.info; window.__videoData)
'url': 'https://www.tvp.info/52880236/09042021-0800',
'info_dict': {
'id': '52880236',
'ext': 'mp4',
'title': '09.04.2021, 08:00',
'age_limit': 0,
'thumbnail': r're:https://.+',
},
'add_ie': ['TVPEmbed'],
'skip': 'Video is geo restricted',
}, {
# client-side rendered (regional) program (playlist) page
'url': 'https://opole.tvp.pl/9660819/rozmowa-dnia',
'info_dict': {
'id': '9660819',
'description': 'Od poniedziałku do piątku o 18:55',
'title': 'Rozmowa dnia',
},
'playlist_mincount': 1800,
'params': {
'skip_download': True,
}
}, {
# ABC-specific video embeding
# moved to https://bajkowakraina.tvp.pl/wideo/50981130,teleranek,51027049,zubr,51116450
'url': 'https://abc.tvp.pl/48636269/zubry-odc-124',
'info_dict': {
'id': '48320456',
'ext': 'mp4',
'title': 'Teleranek, Żubr',
},
'skip': 'Video gone: Nie znaleziono obiektu',
}, {
# yet another vue page
'url': 'https://jp2.tvp.pl/46925618/filmy',
'info_dict': {
'id': '46925618',
'title': 'Filmy',
},
'playlist_mincount': 19,
}, {
# redirect
'url': 'https://vod.tvp.pl/48463890/wadowickie-spotkania-z-janem-pawlem-ii',
'info_dict': {
'id': '295157',
'title': 'Wadowickie spotkania z Janem Pawłem II',
},
'playlist_mincount': 12,
'add_ie': ['TVPEmbed', 'TVPVODSeries'],
}, {
'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272',
'only_matching': True,
@ -66,31 +288,212 @@ class TVPIE(InfoExtractor):
}, {
'url': 'http://www.tvp.info/25511919/trwa-rewolucja-wladza-zdecydowala-sie-na-pogwalcenie-konstytucji',
'only_matching': True,
}, {
'url': 'https://tvp.info/49193823/teczowe-flagi-na-pomnikach-prokuratura-wszczela-postepowanie-wieszwiecej',
'only_matching': True,
}, {
'url': 'https://www.tvpparlament.pl/retransmisje-vod/inne/wizyta-premiera-mateusza-morawieckiego-w-firmie-berotu-sp-z-oo/48857277',
'only_matching': True,
}, {
'url': 'https://tvpworld.com/48583640/tescos-polish-business-bought-by-danish-chain-netto',
'only_matching': True,
}, {
'url': 'https://polandin.com/47942651/pln-10-billion-in-subsidies-transferred-to-companies-pm',
'only_matching': True,
}]
def _parse_vue_website_data(self, webpage, page_id):
website_data = self._search_regex([
# website - regiony, tvp.info
# directory - jp2.tvp.pl
r'window\s*\.\s*__(?:website|directory)Data\s*=\s*({[\s\S]+?});',
], webpage, 'website data')
if not website_data:
return None
return self._parse_json(website_data, page_id, transform_source=js_to_json)
def _extract_vue_video(self, video_data, page_id=None):
if isinstance(video_data, compat_str):
video_data = self._parse_json(video_data, page_id, transform_source=js_to_json)
video_id = txt_or_none(video_data.get('_id')) or page_id
if not video_id:
return
is_website = video_data.get('type') == 'website'
if is_website:
url = video_data['url']
fucked_up_url_parts = re.match(r'https?://vod\.tvp\.pl/(\d+)/([^/?#]+)', url)
if fucked_up_url_parts:
url = 'https://vod.tvp.pl/website/' + ','.join(fucked_up_url_parts.group(2, 1))
else:
url = 'tvp:' + video_id
return {
'_type': 'url_transparent',
'id': video_id,
'url': url,
'ie_key': (TVPIE if is_website else TVPEmbedIE).ie_key(),
'title': txt_or_none(video_data.get('title')),
'description': txt_or_none(video_data.get('lead')),
'timestamp': int_or_none(video_data.get('release_date_long')),
'duration': int_or_none(video_data.get('duration')),
'thumbnails': traverse_obj(video_data, ('image', (None, Ellipsis), 'url'), expected_type=url_or_none) or None,
}
def _handle_vuejs_page(self, url, webpage, page_id):
# vue client-side rendered sites (all regional pages + tvp.info)
video_data = self._search_regex([
r'window\.__(?:news|video)Data\s*=\s*({(?:.|\s)+?})\s*;',
], webpage, 'video data', default=None)
if video_data:
video_data = self._extract_vue_video(video_data, page_id=page_id)
if video_data:
return self._extract_vue_video(video_data, page_id=page_id)
else:
# paged playlists
website_data = self._parse_vue_website_data(webpage, page_id)
if website_data:
entries = self._vuejs_entries(url, website_data, page_id)
return {
'_type': 'playlist',
'id': page_id,
'title': txt_or_none(website_data.get('title')),
'description': txt_or_none(website_data.get('lead')),
'entries': entries,
}
raise ExtractorError('Could not extract video/website data')
def _vuejs_entries(self, url, website_data, page_id):
def extract_videos(wd):
for video in traverse_obj(wd, (None, ('latestVideo', (('videos', 'items'), Ellipsis)))):
video = self._extract_vue_video(video)
if video:
yield video
for from_ in extract_videos(website_data):
yield from_
if website_data.get('items_total_count') > website_data.get('items_per_page'):
for page in itertools.count(2):
page_website_data = self._parse_vue_website_data(
self._download_webpage(url, page_id, note='Downloading page #%d' % page,
query={'page': page}),
page_id)
if not page_website_data.get('videos') and not page_website_data.get('items'):
break
for from_ in extract_videos(page_website_data):
yield from_
def _real_extract(self, url):
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
video_id = self._search_regex([
webpage, urlh = self._download_webpage_handle(url, page_id, expected_status=404)
# The URL may redirect to a VOD
# example: https://vod.tvp.pl/48463890/wadowickie-spotkania-z-janem-pawlem-ii
for ie_cls in (TVPVODSeriesIE, TVPVODVideoIE):
if ie_cls.suitable(urlh.url):
return self.url_result(urlh.url, ie=ie_cls.ie_key(), video_id=page_id)
if urlh.getcode() == 404:
raise compat_HTTPError(url, 404, 'HTTP Error 404: Not Found', urlh.headers, urlh)
if re.search(
r'window\s*\.\s*__(?:video|news|website|directory)Data\s*=',
webpage):
return self._handle_vuejs_page(url, webpage, page_id)
# classic server-side rendered sites
video_id = self._search_regex((
r'<iframe[^>]+src="[^"]*?embed\.php\?(?:[^&]+&)*ID=(\d+)',
r'<iframe[^>]+src="[^"]*?object_id=(\d+)',
r"object_id\s*:\s*'(\d+)'",
r'data-video-id="(\d+)"'], webpage, 'video id', default=page_id)
r'data-video-id="(\d+)"',
# abc.tvp.pl - somehow there are more than one video IDs that seem to be the same video?
# the first one is referenced to as "copyid", and seems to be unused by the website
r'<script>\s*tvpabc\.video\.init\(\s*\d+,\s*(\d+)\s*\)\s*</script>',
), webpage, 'video id', default=page_id)
return {
'_type': 'url_transparent',
'url': 'tvp:' + video_id,
'description': self._og_search_description(
webpage, default=None) or self._html_search_meta(
'description', webpage, default=None),
webpage, default=None) or (self._html_search_meta(
'description', webpage, default=None)
if '//s.tvp.pl/files/portal/v' in webpage else None),
'thumbnail': self._og_search_thumbnail(webpage, default=None),
'ie_key': 'TVPEmbed',
}
class TVPStreamIE(InfoExtractor):
IE_NAME = 'tvp:stream'
_VALID_URL = r'(?:tvpstream:|https?://(?:tvpstream\.vod|stream)\.tvp\.pl/(?:\?(?:[^&]+[&;])*channel_id=)?)(?P<id>\d*)'
_TESTS = [{
'url': 'https://stream.tvp.pl/?channel_id=56969941',
'only_matching': True,
}, {
'url': 'https://tvpstream.vod.tvp.pl/?channel_id=1455',
'info_dict': {
'id': r're:\d+',
'title': r're:\S.*',
'ext': 'mp4',
},
'params': {
'skip_download': 'm3u8',
},
'add_ie': ['TVPEmbed'],
}, {
'url': 'tvpstream:39821455',
'only_matching': True,
}, {
# the default stream when you provide no channel_id, most probably TVP Info
'url': 'tvpstream:',
'only_matching': True,
}, {
'url': 'https://tvpstream.vod.tvp.pl/',
'only_matching': True,
}]
def _real_extract(self, url):
channel_id = self._match_id(url)
channel_url = self._proto_relative_url('//stream.tvp.pl/?channel_id=%s' % channel_id or 'default')
webpage = self._download_webpage(channel_url, channel_id or 'default', 'Downloading channel webpage')
channels = self._search_json(
r'window\s*\.\s*__channels\s*=', webpage, 'channel list', channel_id,
contains_pattern=r'\[\s*\{[\s\S]+}\s*]')
channel = traverse_obj(channels, (lambda _, v: channel_id == compat_str(v['id'])), get_all=False) if channel_id else channels[0]
audition = traverse_obj(channel, ('items', lambda _, v: v['is_live'] is True), get_all=False)
return {
'_type': 'url_transparent',
'id': channel_id or channel['id'],
'url': 'tvp:%s' % (audition['video_id'], ),
'title': audition.get('title'),
'alt_title': channel.get('title'),
'is_live': True,
'ie_key': 'TVPEmbed',
}
class TVPEmbedIE(InfoExtractor):
IE_NAME = 'tvp:embed'
IE_DESC = 'Telewizja Polska'
_VALID_URL = r'(?:tvp:|https?://[^/]+\.tvp\.(?:pl|info)/sess/tvplayer\.php\?.*?object_id=)(?P<id>\d+)'
# XFF is not effective
_GEO_BYPASS = False
_VALID_URL_PAT = (
r'''
(?:
tvp:
|https?://
(?:[^/]+\.)?
(?:tvp(?:parlament)?\.pl|tvp\.info|tvpworld\.com|swipeto\.pl)/
(?:sess/
(?:tvplayer\.php\?.*?object_id
|TVPlayer2/(?:embed|api)\.php\?.*[Ii][Dd])
|shared/details\.php\?.*?object_id)
=)
(?P<id>\d+)
''')
_VALID_URL = '(?x)' + _VALID_URL_PAT
_EMBED_REGEX = [r'(?x)<iframe[^>]+?src=(["\'])(?P<url>{0})'.format(_VALID_URL_PAT)]
_TESTS = [{
'url': 'tvp:194536',
'md5': 'a21eb0aa862f25414430f15fdfb9e76c',
@ -98,9 +501,16 @@ class TVPEmbedIE(InfoExtractor):
'id': '194536',
'ext': 'mp4',
'title': 'Czas honoru, odc. 13 Władek',
'description': 'md5:76649d2014f65c99477be17f23a4dead',
'age_limit': 12,
'duration': 2652,
'series': 'Czas honoru',
'episode': 'Episode 13',
'episode_number': 13,
'season': 'sezon 1',
'thumbnail': r're:https://.+',
},
}, {
# not available
'url': 'http://www.tvp.pl/sess/tvplayer.php?object_id=22670268',
'md5': '8c9cd59d16edabf39331f93bf8a766c7',
'info_dict': {
@ -108,7 +518,28 @@ class TVPEmbedIE(InfoExtractor):
'ext': 'mp4',
'title': 'Panorama, 07.12.2015, 15:40',
},
'skip': 'Transmisja została zakończona lub materiał niedostępny',
'skip': 'Nie znaleziono obiektu',
}, {
'url': 'https://www.tvp.pl/sess/tvplayer.php?object_id=51247504&amp;autoplay=false',
'info_dict': {
'id': '51247504',
'ext': 'mp4',
'title': 'Razmova 091220',
'duration': 876,
'age_limit': 0,
'thumbnail': r're:https://.+',
},
}, {
# TVPlayer2 embed URL
'url': 'https://tvp.info/sess/TVPlayer2/embed.php?ID=50595757',
'only_matching': True,
}, {
'url': 'https://wiadomosci.tvp.pl/sess/TVPlayer2/api.php?id=51233452',
'only_matching': True,
}, {
# pulsembed on dziennik.pl
'url': 'https://www.tvp.pl/shared/details.php?copy_id=52205981&object_id=52204505&autoplay=false&is_muted=false&allowfullscreen=true&template=external-embed/video/iframe-video.html',
'only_matching': True,
}, {
'url': 'tvp:22670268',
'only_matching': True,
@ -117,136 +548,272 @@ class TVPEmbedIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
# could be anything that is a valid JS function name
callback = random.choice((
'jebac_pis',
'jebacpis',
'ziobro',
'sasin70',
'sasin_przejebal_70_milionow_PLN',
'tvp_is_a_state_propaganda_service',
))
webpage = self._download_webpage(
'http://www.tvp.pl/sess/tvplayer.php?object_id=%s' % video_id, video_id)
('https://www.tvp.pl/sess/TVPlayer2/api.php?id=%s'
+ '&@method=getTvpConfig&@callback=%s') % (video_id, callback), video_id)
error = self._html_search_regex(
r'(?s)<p[^>]+\bclass=["\']notAvailable__text["\'][^>]*>(.+?)</p>',
webpage, 'error', default=None) or clean_html(
get_element_by_attribute('class', 'msg error', webpage))
if error:
raise ExtractorError('%s said: %s' % (
self.IE_NAME, clean_html(error)), expected=True)
# stripping JSONP padding
null, datastr = self._search_regex(
r'\s%s\s*\(\s*(?P<null>null\s*,\s*)?(?P<json>(?(null)\[\s*)?\{(?:[\s\S]+)}(?(null)]\s*))\)\s*;' % (re.escape(callback), ),
webpage, 'JSON API result', group=('null', 'json'))
data = self._parse_json(datastr, video_id, fatal=False)
if null:
error_desc = traverse_obj(data, (0, 'desc'), expected_type=compat_str)
if error_desc == 'Obiekt wymaga płatności':
error_desc = 'Video requires payment and log-in, but log-in is not implemented'
raise ExtractorError(error_desc or 'unexpected JSON error', expected=error_desc)
title = self._search_regex(
r'name\s*:\s*([\'"])Title\1\s*,\s*value\s*:\s*\1(?P<title>.+?)\1',
webpage, 'title', group='title')
series_title = self._search_regex(
r'name\s*:\s*([\'"])SeriesTitle\1\s*,\s*value\s*:\s*\1(?P<series>.+?)\1',
webpage, 'series', group='series', default=None)
if series_title:
title = '%s, %s' % (series_title, title)
content = data['content']
info = traverse_obj(content, 'info', expected_type=dict)
thumbnail = self._search_regex(
r"poster\s*:\s*'([^']+)'", webpage, 'thumbnail', default=None)
if traverse_obj(info, 'isGeoBlocked', expected_type=bool):
# actual country list is not provided, we just assume it's always available in PL
self.raise_geo_restricted(countries=['PL'])
video_url = self._search_regex(
r'0:{src:([\'"])(?P<url>.*?)\1', webpage,
'formats', group='url', default=None)
if not video_url or 'material_niedostepny.mp4' in video_url:
video_url = self._download_json(
'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id,
video_id)['video_url']
is_live = traverse_obj(info, 'isLive', expected_type=bool)
formats = []
video_url_base = self._search_regex(
r'(https?://.+?/video)(?:\.(?:ism|f4m|m3u8)|-\d+\.mp4)',
video_url, 'video base url', default=None)
if video_url_base:
# TODO: <Group> found instead of <AdaptationSet> in MPD manifest.
# It's not mentioned in MPEG-DASH standard. Figure that out.
# formats.extend(self._extract_mpd_formats(
# video_url_base + '.ism/video.mpd',
# video_id, mpd_id='dash', fatal=False))
formats.extend(self._extract_ism_formats(
video_url_base + '.ism/Manifest',
video_id, 'mss', fatal=False))
formats.extend(self._extract_f4m_formats(
video_url_base + '.ism/video.f4m',
video_id, f4m_id='hds', fatal=False))
m3u8_formats = self._extract_m3u8_formats(
video_url_base + '.ism/video.m3u8', video_id,
'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
self._sort_formats(m3u8_formats)
m3u8_formats = list(filter(
lambda f: f.get('vcodec') != 'none', m3u8_formats))
formats.extend(m3u8_formats)
for i, m3u8_format in enumerate(m3u8_formats, 2):
http_url = '%s-%d.mp4' % (video_url_base, i)
if self._is_valid_url(http_url, video_id):
f = m3u8_format.copy()
f.update({
'url': http_url,
'format_id': f['format_id'].replace('hls', 'http'),
'protocol': 'http',
})
formats.append(f)
else:
formats = [{
'format_id': 'direct',
'url': video_url,
'ext': determine_ext(video_url, 'mp4'),
}]
for file in traverse_obj(content, ('files', Ellipsis), expected_type=dict):
video_url = url_or_none(file.get('url'))
if not video_url:
continue
ext = determine_ext(video_url, None)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
video_url, video_id, ext='mp4', m3u8_id='hls',
fatal=False, live=is_live))
elif ext == 'mpd':
if is_live:
# doesn't work with either ffmpeg or native downloader
continue
formats.extend(self._extract_mpd_formats(video_url, video_id, mpd_id='dash', fatal=False))
elif ext == 'f4m':
formats.extend(self._extract_f4m_formats(video_url, video_id, f4m_id='hds', fatal=False))
elif video_url.endswith('.ism/manifest'):
formats.extend(self._extract_ism_formats(video_url, video_id, ism_id='mss', fatal=False))
elif ext == 'ism':
if '.ism/manifest' in video_url:
formats.extend(self._extract_ism_formats(video_url, video_id, ism_id='mss', fatal=False))
else:
# mp4, wmv or something
quality = traverse_obj(file, 'quality', expected_type=dict) or {}
formats.append({
'format_id': 'direct',
'url': video_url,
'ext': ext or file.get('type'),
'fps': int_or_none(quality.get('fps')),
'tbr': int_or_none(quality.get('bitrate'), scale=1000),
'width': int_or_none(quality.get('width')),
'height': int_or_none(quality.get('height')),
})
self._sort_formats(formats)
return {
title = traverse_obj(info, 'subtitle', 'title', 'seoTitle', expected_type=txt_or_none)
# `seoDescription` may be Falsen
description = traverse_obj(info, 'description', 'seoDescription',
expected_type=lambda x: txt_or_none(x or None))
thumbnails = []
for thumb in traverse_obj(content, ('posters', Ellipsis), expected_type=dict):
thumb_url = thumb.get('src')
if not thumb_url or '{width}' in thumb_url or '{height}' in thumb_url:
continue
thumbnails.append({
'url': thumb.get('src'),
'width': thumb.get('width'),
'height': thumb.get('height'),
})
age_limit = traverse_obj(info, ('ageGroup', 'minAge'), expected_type=int)
if age_limit == 1:
age_limit = 0
duration = traverse_obj(info, 'duration', expected_type=int) if not is_live else None
subtitles = {}
for sub in traverse_obj(content, ('subtitles', Ellipsis), expected_type=dict):
if not (sub.get('url') and sub.get('lang')):
continue
subtitles.setdefault(sub['lang'], []).append({
'url': sub['url'],
'ext': sub.get('type'),
})
info_dict = {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'description': description,
'thumbnails': thumbnails,
'age_limit': age_limit,
'is_live': is_live,
'duration': duration,
'formats': formats,
'subtitles': subtitles,
}
# vod.tvp.pl
if traverse_obj(info, 'vortalName') == 'vod':
info_dict.update({
'title': '%s, %s' % (info.get('title'), info.get('subtitle')),
'series': info.get('title'),
'season': info.get('season'),
'episode_number': info.get('episode') or None,
})
return info_dict
class TVPVODBaseIE(InfoExtractor):
_API_BASE_URL = 'https://vod.tvp.pl/api/products/'
def _call_api(self, resource, video_id, **kwargs):
return self._download_json(
self._API_BASE_URL + resource, video_id,
query={'lang': 'pl', 'platform': 'BROWSER'}, **kwargs)
def _parse_video(self, video):
video_id = traverse_obj(video, 'externalUid', expected_type=txt_or_none)
if not video_id:
return None
return {
'_type': 'url',
'url': 'tvp:' + video_id,
'ie_key': TVPEmbedIE.ie_key(),
'title': video.get('title'),
'description': traverse_obj(video, ('lead', 'description'), expected_type=txt_or_none),
'age_limit': int_or_none(video.get('rating')),
'duration': int_or_none(video.get('duration')),
}
class TVPWebsiteIE(InfoExtractor):
IE_NAME = 'tvp:series'
_VALID_URL = r'https?://vod\.tvp\.pl/website/(?P<display_id>[^,]+),(?P<id>\d+)'
class TVPVODVideoIE(TVPVODBaseIE):
IE_NAME = 'tvp:vod'
_VALID_URL = r'https?://vod\.tvp\.pl/[a-z\d-]+,\d+/[a-z\d-]+(?<!-odcinki)(?:-odcinki,\d+/odcinek-\d+,S\d+E\d+)?,(?P<id>\d+)(?:\?[^#]+)?(?:#.+)?$'
_TESTS = [{
# series
'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312/video',
'url': 'https://vod.tvp.pl/dla-dzieci,24/laboratorium-alchemika-odcinki,309338/odcinek-24,S01E24,311357',
'info_dict': {
'id': '38678312',
},
'playlist_count': 115,
}, {
# film
'url': 'https://vod.tvp.pl/website/gloria,35139666',
'info_dict': {
'id': '36637049',
'id': '60468609',
'ext': 'mp4',
'title': 'Gloria, Gloria',
},
'params': {
'skip_download': True,
'title': 'Laboratorium alchemika, Tusze termiczne. Jak zobaczyć niewidoczne. Odcinek 24',
'description': 'md5:1d4098d3e537092ccbac1abf49b7cd4c',
'duration': 300,
'episode_number': 24,
'episode': 'Episode 24',
'age_limit': 0,
'series': 'Laboratorium alchemika',
'thumbnail': 're:https://.+',
},
'add_ie': ['TVPEmbed'],
}, {
'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312',
'url': 'https://vod.tvp.pl/filmy-dokumentalne,163/ukrainski-sluga-narodu,339667',
'info_dict': {
'id': '51640077',
'ext': 'mp4',
'title': 'Ukraiński sługa narodu, Ukraiński sługa narodu',
'series': 'Ukraiński sługa narodu',
'description': 'md5:b7940c0a8e439b0c81653a986f544ef3',
'age_limit': 12,
'duration': 3051,
'thumbnail': 're:https://.+',
},
'add_ie': ['TVPEmbed'],
}, {
# new URL format
'url': 'https://vod.tvp.pl/seriale,18/czas-honoru-odcinki,292065/odcinek-13,S01E13,313867',
'md5': 'a21eb0aa862f25414430f15fdfb9e76c',
'info_dict': {
'id': '194536',
'ext': 'mp4',
'title': 'Czas honoru, odc. 13 Władek',
'description': 'md5:76649d2014f65c99477be17f23a4dead',
'age_limit': 12,
},
'add_ie': ['TVPEmbed'],
}, {
'url': 'https://vod.tvp.pl/filmy-fabularne,136/rozlam,390638',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._parse_video(
self._call_api('vods/' + video_id, video_id))
if not video:
raise ExtractorError('No video data for ' + video_id)
return video
class TVPVODSeriesIE(TVPVODBaseIE):
IE_NAME = 'tvp:vod:series'
_VALID_URL = r'''(?x)
https?://vod\.tvp\.pl/
seriale,(?P<cat>\d+)/
(?P<display_id>[^,]+?)(?(cat)-odcinki),(?P<id>\d+)
(?(cat)|(?P<video>/video)?)(?:[#?]|$)
'''
_VALID_URL = r'https?://vod\.tvp\.pl/(?P<display_id>[a-z\d-]+,\d+)/[a-z\d-]+-odcinki,(?P<id>\d+)(?:\?[^#]+)?(?:#.+)?$'
_TESTS = [{
# series
'url': 'https://vod.tvp.pl/seriale,18/ranczo-odcinki,316445',
# series (old) - redirects to home page
# 'url': 'https://vod.tvp.pl/website/wspaniale-stulecie,17069012/video',
'info_dict': {
'id': '316445',
'title': 'Ranczo',
# 'description': 'md5:a7ccbe1296e6f32425cef17639f1b24b',
'age_limit': 12,
'categories': ['seriale'],
},
'playlist_mincount': 129,
}, {
'url': 'https://vod.tvp.pl/programy,88/rolnik-szuka-zony-odcinki,284514',
'only_matching': True,
}, {
'url': 'https://vod.tvp.pl/dla-dzieci,24/laboratorium-alchemika-odcinki,309338',
'only_matching': True,
}]
def _entries(self, display_id, playlist_id):
url = 'https://vod.tvp.pl/website/%s,%s/video' % (display_id, playlist_id)
for page_num in itertools.count(1):
page = self._download_webpage(
url, display_id, 'Downloading page %d' % page_num,
query={'page': page_num})
season_path = 'vods/serials/%s/seasons' % (playlist_id, )
seasons = self._call_api(
season_path, playlist_id,
note='Downloading season list') or []
video_ids = orderedSet(re.findall(
r'<a[^>]+\bhref=["\']/video/%s,[^,]+,(\d+)' % display_id,
page))
if not video_ids:
break
for video_id in video_ids:
yield self.url_result(
'tvp:%s' % video_id, ie=TVPEmbedIE.ie_key(),
video_id=video_id)
for ii, season in enumerate(seasons, 1):
season_id = traverse_obj(season, 'id', expected_type=txt_or_none)
if not season_id:
continue
episodes = self._call_api(
'%s/%s/episodes' % (season_path, season_id), playlist_id,
note='Downloading episode list (season %d)' % ii)
for episode in episodes or []:
video_id = traverse_obj(episode, 'externalUid', expected_type=txt_or_none)
if video_id:
yield self._parse_video(episode)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id, playlist_id = mobj.group('display_id', 'id')
return self.playlist_result(
self._entries(display_id, playlist_id), playlist_id)
display_id, playlist_id = self._match_valid_url(url).group('display_id', 'id')
metadata = self._call_api(
'vods/serials/' + playlist_id, playlist_id,
note='Downloading serial metadata') or {}
pl = self.playlist_result(
self._entries(display_id, playlist_id), playlist_id, txt_or_none(metadata.get('title')))
pl.update({
'description': traverse_obj(metadata, ('description', 'lead'), expected_type=clean_html),
'categories': traverse_obj(metadata, ('mainCategory', (None, Ellipsis), 'name'), expected_type=txt_or_none),
'age_limit': traverse_obj(metadata, 'rating', expected_type=int),
})
return pl

View File

@ -91,12 +91,12 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '19.45.4',
'clientVersion': '20.10.4',
'deviceMake': 'Apple',
'deviceModel': 'iPhone16,2',
'userAgent': 'com.google.ios.youtube/19.45.4 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)',
'osName': 'iPhone',
'osVersion': '18.1.0.22B83',
'osVersion': '18.3.2.22D82',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
@ -109,7 +109,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20241202.07.00',
'clientVersion': '2.20250311.03.00',
# mweb previously did not require PO Token with this UA
'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
},
@ -122,7 +122,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '7.20241201.18.00',
'clientVersion': '7.20250312.16.00',
'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
@ -132,7 +133,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20241126.01.00',
'clientVersion': '2.20250312.04.00',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
@ -691,7 +692,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})//(?:tv-)?player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
@ -1851,12 +1852,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if func_code:
return jsi, player_id, func_code
return self._extract_n_function_code_jsi(video_id, jsi, player_id)
func_name = self._extract_n_function_name(jscode)
def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
var_ay = self._search_regex(
r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
jsi.code, 'useful values', default='')
func_name = self._extract_n_function_name(jsi.code)
func_code = jsi.extract_function_code(func_name)
if var_ay:
func_code = (func_code[0], ';\n'.join((var_ay, func_code[1])))
self.cache.store('youtube-nsig', player_id, func_code)
if player_id:
self.cache.store('youtube-nsig', player_id, func_code)
return jsi, player_id, func_code
def _extract_n_function_from_code(self, jsi, func_code):

View File

@ -1,10 +1,12 @@
# coding: utf-8
from __future__ import unicode_literals
import calendar
import itertools
import json
import operator
import re
import time
from functools import update_wrapper, wraps
@ -12,8 +14,10 @@ from .utils import (
error_to_compat_str,
ExtractorError,
float_or_none,
int_or_none,
js_to_json,
remove_quotes,
str_or_none,
unified_timestamp,
variadic,
write_string,
@ -150,6 +154,7 @@ def _js_to_primitive(v):
)
# more exact: yt-dlp/yt-dlp#12110
def _js_toString(v):
return (
'undefined' if v is JS_Undefined
@ -158,7 +163,7 @@ def _js_toString(v):
else 'null' if v is None
# bool <= int: do this first
else ('false', 'true')[v] if isinstance(v, bool)
else '{0:.7f}'.format(v).rstrip('.0') if isinstance(v, compat_numeric_types)
else re.sub(r'(?<=\d)\.?0*$', '', '{0:.7f}'.format(v)) if isinstance(v, compat_numeric_types)
else _js_to_primitive(v))
@ -404,6 +409,7 @@ class JSInterpreter(object):
class Exception(ExtractorError):
def __init__(self, msg, *args, **kwargs):
expr = kwargs.pop('expr', None)
msg = str_or_none(msg, default='"None"')
if expr is not None:
msg = '{0} in: {1!r:.100}'.format(msg.rstrip(), expr)
super(JSInterpreter.Exception, self).__init__(msg, *args, **kwargs)
@ -431,6 +437,7 @@ class JSInterpreter(object):
flags, _ = self.regex_flags(flags)
# First, avoid https://github.com/python/cpython/issues/74534
self.__self = None
pattern_txt = str_or_none(pattern_txt) or '(?:)'
self.__pattern_txt = pattern_txt.replace('[[', r'[\[')
self.__flags = flags
@ -475,6 +482,73 @@ class JSInterpreter(object):
flags |= cls.RE_FLAGS[ch]
return flags, expr[idx + 1:]
class JS_Date(object):
_t = None
@staticmethod
def __ymd_etc(*args, **kw_is_utc):
# args: year, monthIndex, day, hours, minutes, seconds, milliseconds
is_utc = kw_is_utc.get('is_utc', False)
args = list(args[:7])
args += [0] * (9 - len(args))
args[1] += 1 # month 0..11 -> 1..12
ms = args[6]
for i in range(6, 9):
args[i] = -1 # don't know
if is_utc:
args[-1] = 1
# TODO: [MDN] When a segment overflows or underflows its expected
# range, it usually "carries over to" or "borrows from" the higher segment.
try:
mktime = calendar.timegm if is_utc else time.mktime
return mktime(time.struct_time(args)) * 1000 + ms
except (OverflowError, ValueError):
return None
@classmethod
def UTC(cls, *args):
t = cls.__ymd_etc(*args, is_utc=True)
return _NaN if t is None else t
@staticmethod
def parse(date_str, **kw_is_raw):
is_raw = kw_is_raw.get('is_raw', False)
t = unified_timestamp(str_or_none(date_str), False)
return int(t * 1000) if t is not None else t if is_raw else _NaN
@staticmethod
def now(**kw_is_raw):
is_raw = kw_is_raw.get('is_raw', False)
t = time.time()
return int(t * 1000) if t is not None else t if is_raw else _NaN
def __init__(self, *args):
if not args:
args = [self.now(is_raw=True)]
if len(args) == 1:
if isinstance(args[0], JSInterpreter.JS_Date):
self._t = int_or_none(args[0].valueOf(), default=None)
else:
arg_type = _js_typeof(args[0])
if arg_type == 'string':
self._t = self.parse(args[0], is_raw=True)
elif arg_type == 'number':
self._t = int(args[0])
else:
self._t = self.__ymd_etc(*args)
def toString(self):
try:
return time.strftime('%a %b %0d %Y %H:%M:%S %Z%z', self._t).rstrip()
except TypeError:
return "Invalid Date"
def valueOf(self):
return _NaN if self._t is None else self._t
@classmethod
def __op_chars(cls):
op_chars = set(';,[')
@ -599,14 +673,15 @@ class JSInterpreter(object):
except Exception as e:
raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e)
def _index(self, obj, idx, allow_undefined=True):
def _index(self, obj, idx, allow_undefined=None):
if idx == 'length' and isinstance(obj, list):
return len(obj)
try:
return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)]
except (TypeError, KeyError, IndexError) as e:
if allow_undefined:
# when is not allowed?
# allow_undefined is None gives correct behaviour
if allow_undefined or (
allow_undefined is None and not isinstance(e, TypeError)):
return JS_Undefined
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
@ -715,7 +790,7 @@ class JSInterpreter(object):
new_kw, _, obj = expr.partition('new ')
if not new_kw:
for klass, konstr in (('Date', lambda x: int(unified_timestamp(x, False) * 1000)),
for klass, konstr in (('Date', lambda *x: self.JS_Date(*x).valueOf()),
('RegExp', self.JS_RegExp),
('Error', self.Exception)):
if not obj.startswith(klass + '('):
@ -1034,6 +1109,7 @@ class JSInterpreter(object):
'String': compat_str,
'Math': float,
'Array': list,
'Date': self.JS_Date,
}
obj = local_vars.get(variable)
if obj in (JS_Undefined, None):
@ -1086,6 +1162,8 @@ class JSInterpreter(object):
assertion(len(argvals) == 2, 'takes two arguments')
return argvals[0] ** argvals[1]
raise self.Exception('Unsupported Math method ' + member, expr=expr)
elif obj is self.JS_Date:
return getattr(obj, member)(*argvals)
if member == 'split':
assertion(len(argvals) <= 2, 'takes at most two arguments')