Compare commits

...

24 Commits

Author SHA1 Message Date
Andrei Lebedev
813105e9e8
Merge b83889a20eb01094a3361cf08f27895fea164049 into da7223d4aa42ff9fc680b0951d043dd03cec2d30 2025-03-22 07:20:25 +08:00
dirkf
da7223d4aa [YouTube] Improve support for tce-style player JS
* improve extraction of global "useful data" Array from player JS
* also handle tv-player and add tests: thx seproDev (yt-dlp/yt-dlp#12684)

Co-Authored-By: sepro <sepro@sepr0.com>
2025-03-21 16:26:25 +00:00
dirkf
37c2440d6a [YouTube] Update player client data
thx seproDev (yt-dlp/yt-dlp#12603)

Co-authored-by: sepro <sepro@sepr0.com>
2025-03-21 16:13:24 +00:00
dirkf
420d53387c [JSInterp] Improve tests
* from yt-dlp/yt-dlp#12313
* also fix d7c2708
2025-03-11 02:00:24 +00:00
dirkf
32f89de92b [YouTube] Update TVHTML5 client parameters
* resolves #33078
2025-03-11 02:00:24 +00:00
dirkf
283dca56fe [YouTube] Initially support tce-style player JS
* resolves #33079
2025-03-11 02:00:24 +00:00
dirkf
422b1b31cf [YouTube] Temporarily redirect from tce-style player JS 2025-03-11 02:00:24 +00:00
dirkf
1dc27e1c3b [JSInterp] Make indexing error handling more conformant
* by default TypeError -> undefined, else raise
* set allow_undefined=True/False to override
2025-03-11 02:00:24 +00:00
dirkf
af049e309b [JSInterp] Handle undefined, etc, passed to JS_RegExp and Exception 2025-03-11 02:00:24 +00:00
dirkf
94849bc997 [JSInterp] Improve Date processing
* add JS_Date class implementing JS Date
* support constructor args other than date string
* support static methods of Date
* Date objects are still automatically coerced to timestamp before using in JS.
2025-03-11 02:00:24 +00:00
dirkf
974c7d7f34 [compat] Fix inheriting from compat_collections_chain_map
* see ytdl-org/youtube-dl#33079#issuecomment-2704038049
2025-03-11 02:00:24 +00:00
dirkf
8738407d77 [compat] Support zstd Content-Encoding
* see RFC 8878 7.2
2025-03-11 02:00:24 +00:00
dirkf
cecaa18b80 [compat] Clean-up
* make workaround_optparse_bug9161 private
* add comments
* avoid leaving test objects behind
2025-03-11 02:00:24 +00:00
dirkf
673277e510
[YouTube] Fix 91b1569 2025-02-28 01:02:20 +00:00
dirkf
91b1569f68
[YouTube] Fix channel playlist extraction (#33074)
* [YouTube] Extract playlist items from LOCKUP_VIEW_MODEL_...
* resolves #33073
* thx seproDev (yt-dlp/yt-dlp#11615)

Co-authored-by: sepro <sepro@sepr0.com>
2025-02-28 00:02:10 +00:00
dirkf
b83889a20e
Update mark_watched doc 2022-11-15 19:08:40 +00:00
dirkf
6b65df9cad
[common] Make mark_watched() more lenient 2022-11-15 19:04:07 +00:00
dirkf
23fe05b4f7
Make empty result false 2022-11-15 18:34:50 +00:00
dirkf
8479922ba1
QA 2022-11-14 20:55:15 +00:00
dirkf
a61abdaa68
.chain(*iterables) 2022-11-14 16:37:05 +00:00
dirkf
2a47a5a3f9
_EMBED_REGEX 2022-11-14 16:28:24 +00:00
dirkf
8f0e4816e3
Possibly working version pt.2 2022-11-14 16:24:27 +00:00
dirkf
6468249594
Possibly working version without storyboards (permanently?) and sttls (for now) 2022-11-14 16:00:04 +00:00
Andrei Lebedev
a98ff43ac2 [Panopto] Backport from yt-dlp 2022-07-20 01:09:05 +02:00
11 changed files with 1056 additions and 72 deletions

test/test_jsinterp.py   View File

@ -11,6 +11,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import math
import re
import time
from youtube_dl.compat import compat_str as str
from youtube_dl.jsinterp import JS_Undefined, JSInterpreter
@ -208,6 +209,34 @@ class TestJSInterpreter(unittest.TestCase):
self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT'])
# epoch 0
self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
# undefined
self._test(jsi, NaN, args=[JS_Undefined])
# y,m,d, ... - may fail with older dates lacking DST data
jsi = JSInterpreter(
'function f() { return new Date(%s); }'
% ('2024, 5, 29, 2, 52, 12, 42',))
self._test(jsi, (
1719625932042 # UK value
+ (
+ 3600 # back to GMT
+ (time.altzone if time.daylight # host's DST
else time.timezone)
) * 1000))
# no arg
self.assertAlmostEqual(JSInterpreter(
'function f() { return new Date() - 0; }').call_function('f'),
time.time() * 1000, delta=100)
# Date.now()
self.assertAlmostEqual(JSInterpreter(
'function f() { return Date.now(); }').call_function('f'),
time.time() * 1000, delta=100)
# Date.parse()
jsi = JSInterpreter('function f(dt) { return Date.parse(dt); }')
self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])
# Date.UTC()
jsi = JSInterpreter('function f() { return Date.UTC(%s); }'
% ('1970, 0, 1, 0, 0, 0, 0',))
self._test(jsi, 0)
def test_call(self):
jsi = JSInterpreter('''
@ -463,6 +492,14 @@ class TestJSInterpreter(unittest.TestCase):
self._test('function f(){return NaN << 42}', 0)
self._test('function f(){return "21.9" << 1}', 42)
self._test('function f(){return 21 << 4294967297}', 42)
self._test('function f(){return true << "5";}', 32)
self._test('function f(){return true << true;}', 2)
self._test('function f(){return "19" & "21.9";}', 17)
self._test('function f(){return "19" & false;}', 0)
self._test('function f(){return "11.0" >> "2.1";}', 2)
self._test('function f(){return 5 ^ 9;}', 12)
self._test('function f(){return 0.0 << NaN}', 0)
self._test('function f(){return null << undefined}', 0)
def test_negative(self):
self._test('function f(){return 2 * -2.0 ;}', -4)

test/test_youtube_signature.py   View File

@ -223,6 +223,42 @@ _NSIG_TESTS = [
'https://www.youtube.com/s/player/9c6dfc4a/player_ias.vflset/en_US/base.js',
'jbu7ylIosQHyJyJV', 'uwI0ESiynAmhNg',
),
(
'https://www.youtube.com/s/player/f6e09c70/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
(
'https://www.youtube.com/s/player/f6e09c70/player_ias_tce.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'jHbbkcaxm54',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'W9HJZKktxuYoDTqW', 'larxUlagTRAcSw',
),
(
'https://www.youtube.com/s/player/e7567ecf/player_ias_tce.vflset/en_US/base.js',
'Sy4aDGc0VpYRR9ew_', '5UPOT1VhoZxNLQ',
),
(
'https://www.youtube.com/s/player/d50f54ef/player_ias_tce.vflset/en_US/base.js',
'Ha7507LzRmH3Utygtj', 'XFTb2HoeOE5MHg',
),
(
'https://www.youtube.com/s/player/074a8365/player_ias_tce.vflset/en_US/base.js',
'Ha7507LzRmH3Utygtj', 'ufTsrE0IVYrkl8v',
),
(
'https://www.youtube.com/s/player/643afba4/player_ias.vflset/en_US/base.js',
'N5uAlLqm0eg1GyHO', 'dCBQOejdq5s-ww',
),
(
'https://www.youtube.com/s/player/69f581a5/tv-player-ias.vflset/tv-player-ias.js',
'-qIP447rVlTTwaZjY', 'KNcGOksBAvwqQg',
),
(
'https://www.youtube.com/s/player/643afba4/tv-player-ias.vflset/tv-player-ias.js',
'ir9-V6cdbCiyKxhr', '2PL7ZDYAALMfmA',
),
]
@ -284,7 +320,7 @@ def t_factory(name, sig_func, url_pattern):
def signature(jscode, sig_input):
func = YoutubeIE(FakeYDL())._parse_sig_js(jscode)
func = YoutubeIE(FakeYDL({'cachedir': False}))._parse_sig_js(jscode)
src_sig = (
compat_str(string.printable[:sig_input])
if isinstance(sig_input, int) else sig_input)
@ -292,9 +328,10 @@ def signature(jscode, sig_input):
def n_sig(jscode, sig_input):
funcname = YoutubeIE(FakeYDL())._extract_n_function_name(jscode)
return JSInterpreter(jscode).call_function(
funcname, sig_input, _ytdl_do_not_return=sig_input)
ie = YoutubeIE(FakeYDL({'cachedir': False}))
jsi = JSInterpreter(jscode)
jsi, _, func_code = ie._extract_n_function_code_jsi(sig_input, jsi)
return ie._extract_n_function_from_code(jsi, func_code)(sig_input)
make_sig_test = t_factory(

youtube_dl/__init__.py   View File

@ -18,7 +18,7 @@ from .compat import (
compat_getpass,
compat_register_utf8,
compat_shlex_split,
workaround_optparse_bug9161,
_workaround_optparse_bug9161,
)
from .utils import (
_UnsafeExtensionError,
@ -50,7 +50,7 @@ def _real_main(argv=None):
# Compatibility fix for Windows
compat_register_utf8()
workaround_optparse_bug9161()
_workaround_optparse_bug9161()
setproctitle('youtube-dl')

youtube_dl/compat.py   View File

@ -16,7 +16,6 @@ import os
import platform
import re
import shlex
import shutil
import socket
import struct
import subprocess
@ -24,11 +23,15 @@ import sys
import types
import xml.etree.ElementTree
_IDENTITY = lambda x: x
# naming convention
# 'compat_' + Python3_name.replace('.', '_')
# other aliases exist for convenience and/or legacy
# wrap disposable test values in type() to reclaim storage
# deal with critical unicode/str things first
# deal with critical unicode/str things first:
# compat_str, compat_basestring, compat_chr
try:
# Python 2
compat_str, compat_basestring, compat_chr = (
@ -39,18 +42,23 @@ except NameError:
str, (str, bytes), chr
)
# casefold
# compat_casefold
try:
compat_str.casefold
compat_casefold = lambda s: s.casefold()
except AttributeError:
from .casefold import _casefold as compat_casefold
# compat_collections_abc
try:
import collections.abc as compat_collections_abc
except ImportError:
import collections as compat_collections_abc
# compat_urllib_request
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
@ -79,11 +87,15 @@ except TypeError:
_add_init_method_arg(compat_urllib_request.Request)
del _add_init_method_arg
# compat_urllib_error
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
# compat_urllib_parse
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
@ -98,17 +110,23 @@ except ImportError: # Python 2
compat_urlparse = compat_urllib_parse
compat_urllib_parse_urlparse = compat_urllib_parse.urlparse
# compat_urllib_response
try:
import urllib.response as compat_urllib_response
except ImportError: # Python 2
import urllib as compat_urllib_response
# compat_urllib_response.addinfourl
try:
compat_urllib_response.addinfourl.status
except AttributeError:
# .getcode() is deprecated in Py 3.
compat_urllib_response.addinfourl.status = property(lambda self: self.getcode())
# compat_http_cookiejar
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
@ -127,12 +145,16 @@ else:
compat_cookiejar_Cookie = compat_cookiejar.Cookie
compat_http_cookiejar_Cookie = compat_cookiejar_Cookie
# compat_http_cookies
try:
import http.cookies as compat_cookies
except ImportError: # Python 2
import Cookie as compat_cookies
compat_http_cookies = compat_cookies
# compat_http_cookies_SimpleCookie
if sys.version_info[0] == 2 or sys.version_info < (3, 3):
class compat_cookies_SimpleCookie(compat_cookies.SimpleCookie):
def load(self, rawdata):
@ -155,11 +177,15 @@ else:
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_http_cookies_SimpleCookie = compat_cookies_SimpleCookie
# compat_html_entities, probably useless now
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
# compat_html_entities_html5
try: # Python >= 3.3
compat_html_entities_html5 = compat_html_entities.html5
except AttributeError:
@ -2408,18 +2434,24 @@ except AttributeError:
# Py < 3.1
compat_http_client.HTTPResponse.getcode = lambda self: self.status
# compat_urllib_HTTPError
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
compat_urllib_HTTPError = compat_HTTPError
# compat_urllib_request_urlretrieve
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
compat_urllib_request_urlretrieve = compat_urlretrieve
# compat_html_parser_HTMLParser, compat_html_parser_HTMLParseError
try:
from HTMLParser import (
HTMLParser as compat_HTMLParser,
@ -2432,22 +2464,33 @@ except ImportError: # Python 3
# HTMLParseError was deprecated in Python 3.3 and removed in
# Python 3.5. Introducing dummy exception for Python >3.5 for compatible
# and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
pass
compat_html_parser_HTMLParser = compat_HTMLParser
compat_html_parser_HTMLParseError = compat_HTMLParseError
# compat_subprocess_get_DEVNULL
try:
_DEVNULL = subprocess.DEVNULL
compat_subprocess_get_DEVNULL = lambda: _DEVNULL
except AttributeError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
# compat_http_server
try:
import http.server as compat_http_server
except ImportError:
import BaseHTTPServer as compat_http_server
# compat_urllib_parse_unquote_to_bytes,
# compat_urllib_parse_unquote, compat_urllib_parse_unquote_plus,
# compat_urllib_parse_urlencode,
# compat_urllib_parse_parse_qs
try:
from urllib.parse import unquote_to_bytes as compat_urllib_parse_unquote_to_bytes
from urllib.parse import unquote as compat_urllib_parse_unquote
@ -2598,6 +2641,8 @@ except ImportError: # Python 2
compat_urllib_parse_parse_qs = compat_parse_qs
# compat_urllib_request_DataHandler
try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
@ -2632,16 +2677,20 @@ except ImportError: # Python < 3.4
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
# compat_xml_etree_ElementTree_ParseError
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
compat_xml_etree_ElementTree_ParseError = compat_xml_parse_error
etree = xml.etree.ElementTree
# compat_xml_etree_ElementTree_Element
_etree = xml.etree.ElementTree
class _TreeBuilder(etree.TreeBuilder):
class _TreeBuilder(_etree.TreeBuilder):
def doctype(self, name, pubid, system):
pass
@ -2650,7 +2699,7 @@ try:
# xml.etree.ElementTree.Element is a method in Python <=2.6 and
# the following will crash with:
# TypeError: isinstance() arg 2 must be a class, type, or tuple of classes and types
isinstance(None, etree.Element)
isinstance(None, _etree.Element)
from xml.etree.ElementTree import Element as compat_etree_Element
except TypeError: # Python <=2.6
from xml.etree.ElementTree import _ElementInterface as compat_etree_Element
@ -2658,12 +2707,12 @@ compat_xml_etree_ElementTree_Element = compat_etree_Element
if sys.version_info[0] >= 3:
def compat_etree_fromstring(text):
return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder()))
return _etree.XML(text, parser=_etree.XMLParser(target=_TreeBuilder()))
else:
# python 2.x tries to encode unicode strings with ascii (see the
# XMLParser._fixtext method)
try:
_etree_iter = etree.Element.iter
_etree_iter = _etree.Element.iter
except AttributeError: # Python <=2.6
def _etree_iter(root):
for el in root.findall('*'):
@ -2675,27 +2724,29 @@ else:
# 2.7 source
def _XML(text, parser=None):
if not parser:
parser = etree.XMLParser(target=_TreeBuilder())
parser = _etree.XMLParser(target=_TreeBuilder())
parser.feed(text)
return parser.close()
def _element_factory(*args, **kwargs):
el = etree.Element(*args, **kwargs)
el = _etree.Element(*args, **kwargs)
for k, v in el.items():
if isinstance(v, bytes):
el.set(k, v.decode('utf-8'))
return el
def compat_etree_fromstring(text):
doc = _XML(text, parser=etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
doc = _XML(text, parser=_etree.XMLParser(target=_TreeBuilder(element_factory=_element_factory)))
for el in _etree_iter(doc):
if el.text is not None and isinstance(el.text, bytes):
el.text = el.text.decode('utf-8')
return doc
if hasattr(etree, 'register_namespace'):
compat_etree_register_namespace = etree.register_namespace
else:
# compat_xml_etree_register_namespace
try:
compat_etree_register_namespace = _etree.register_namespace
except AttributeError:
def compat_etree_register_namespace(prefix, uri):
"""Register a namespace prefix.
The registry is global, and any existing mapping for either the
@ -2704,14 +2755,16 @@ else:
attributes in this namespace will be serialized with prefix if possible.
ValueError is raised if prefix is reserved or is invalid.
"""
if re.match(r"ns\d+$", prefix):
raise ValueError("Prefix format reserved for internal use")
for k, v in list(etree._namespace_map.items()):
if re.match(r'ns\d+$', prefix):
raise ValueError('Prefix format reserved for internal use')
for k, v in list(_etree._namespace_map.items()):
if k == uri or v == prefix:
del etree._namespace_map[k]
etree._namespace_map[uri] = prefix
del _etree._namespace_map[k]
_etree._namespace_map[uri] = prefix
compat_xml_etree_register_namespace = compat_etree_register_namespace
# compat_xpath, compat_etree_iterfind
if sys.version_info < (2, 7):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
@ -2898,7 +2951,6 @@ if sys.version_info < (2, 7):
def __init__(self, root):
self.root = root
##
# Generate all matching objects.
def compat_etree_iterfind(elem, path, namespaces=None):
@ -2933,13 +2985,15 @@ if sys.version_info < (2, 7):
else:
compat_xpath = lambda xpath: xpath
compat_etree_iterfind = lambda element, match: element.iterfind(match)
compat_xpath = _IDENTITY
# compat_os_name
compat_os_name = os._name if os.name == 'java' else os.name
# compat_shlex_quote
if compat_os_name == 'nt':
def compat_shlex_quote(s):
return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
@ -2954,6 +3008,7 @@ else:
return "'" + s.replace("'", "'\"'\"'") + "'"
# compat_shlex.split
try:
args = shlex.split('中文')
assert (isinstance(args, list)
@ -2969,6 +3024,7 @@ except (AssertionError, UnicodeEncodeError):
return list(map(lambda s: s.decode('utf-8'), shlex.split(s, comments, posix)))
# compat_ord
def compat_ord(c):
if isinstance(c, int):
return c
@ -2976,6 +3032,7 @@ def compat_ord(c):
return ord(c)
# compat_getenv, compat_os_path_expanduser, compat_setenv
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
@ -3063,6 +3120,7 @@ else:
compat_os_path_expanduser = compat_expanduser
# compat_os_path_realpath
if compat_os_name == 'nt' and sys.version_info < (3, 8):
# os.path.realpath on Windows does not follow symbolic links
# prior to Python 3.8 (see https://bugs.python.org/issue9949)
@ -3076,6 +3134,7 @@ else:
compat_os_path_realpath = compat_realpath
# compat_print
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
@ -3086,6 +3145,7 @@ else:
print(s)
# compat_getpass_getpass
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
@ -3098,22 +3158,22 @@ else:
compat_getpass_getpass = compat_getpass
# compat_input
try:
compat_input = raw_input
except NameError: # Python 3
compat_input = input
# compat_kwargs
# Python < 2.6.5 require kwargs to be bytes
try:
def _testfunc(x):
pass
_testfunc(**{'x': 0})
(lambda x: x)(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
compat_kwargs = _IDENTITY
# compat_numeric_types
@ -3132,6 +3192,8 @@ except NameError: # Python 3
# compat_int
compat_int = compat_integer_types[-1]
# compat_socket_create_connection
if sys.version_info < (2, 7):
def compat_socket_create_connection(address, timeout, source_address=None):
host, port = address
@ -3158,6 +3220,7 @@ else:
compat_socket_create_connection = socket.create_connection
# compat_contextlib_suppress
try:
from contextlib import suppress as compat_contextlib_suppress
except ImportError:
@ -3200,12 +3263,12 @@ except AttributeError:
# repeated .close() is OK, but just in case
with compat_contextlib_suppress(EnvironmentError):
f.close()
popen.wait()
popen.wait()
# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
def _workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
@ -3224,9 +3287,10 @@ def workaround_optparse_bug9161():
optparse.OptionGroup.add_option = _compat_add_option
if hasattr(shutil, 'get_terminal_size'): # Python >= 3.3
compat_get_terminal_size = shutil.get_terminal_size
else:
# compat_shutil_get_terminal_size
try:
from shutil import get_terminal_size as compat_get_terminal_size # Python >= 3.3
except ImportError:
_terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
def compat_get_terminal_size(fallback=(80, 24)):
@ -3256,27 +3320,33 @@ else:
columns = _columns
if lines is None or lines <= 0:
lines = _lines
return _terminal_size(columns, lines)
compat_shutil_get_terminal_size = compat_get_terminal_size
# compat_itertools_count
try:
itertools.count(start=0, step=1)
type(itertools.count(start=0, step=1))
compat_itertools_count = itertools.count
except TypeError: # Python 2.6
except TypeError: # Python 2.6 lacks step
def compat_itertools_count(start=0, step=1):
while True:
yield start
start += step
# compat_tokenize_tokenize
if sys.version_info >= (3, 0):
from tokenize import tokenize as compat_tokenize_tokenize
else:
from tokenize import generate_tokens as compat_tokenize_tokenize
# compat_struct_pack, compat_struct_unpack, compat_Struct
try:
struct.pack('!I', 0)
type(struct.pack('!I', 0))
except TypeError:
# In Python 2.6 and 2.7.x < 2.7.7, struct requires a bytes argument
# See https://bugs.python.org/issue19099
@ -3308,8 +3378,10 @@ else:
compat_Struct = struct.Struct
# compat_map/filter() returning an iterator, supposedly the
# same versioning as for zip below
# builtins returning an iterator
# compat_map, compat_filter
# supposedly the same versioning as for zip below
try:
from future_builtins import map as compat_map
except ImportError:
@ -3326,6 +3398,7 @@ except ImportError:
except ImportError:
compat_filter = filter
# compat_zip
try:
from future_builtins import zip as compat_zip
except ImportError: # not 2.6+ or is 3.x
@ -3335,6 +3408,7 @@ except ImportError: # not 2.6+ or is 3.x
compat_zip = zip
# compat_itertools_zip_longest
# method renamed between Py2/3
try:
from itertools import zip_longest as compat_itertools_zip_longest
@ -3342,7 +3416,8 @@ except ImportError:
from itertools import izip_longest as compat_itertools_zip_longest
# new class in collections
# compat_collections_chain_map
# collections.ChainMap: new class
try:
from collections import ChainMap as compat_collections_chain_map
# Py3.3's ChainMap is deficient
@ -3398,19 +3473,22 @@ except ImportError:
def new_child(self, m=None, **kwargs):
m = m or {}
m.update(kwargs)
return compat_collections_chain_map(m, *self.maps)
# support inheritance !
return type(self)(m, *self.maps)
@property
def parents(self):
return compat_collections_chain_map(*(self.maps[1:]))
return type(self)(*(self.maps[1:]))
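# Illustrative effect of using type(self) in this fallback class: a
# hypothetical subclass now survives new_child()/parents instead of
# degrading to the base class:
#
#   class ScopedMap(compat_collections_chain_map):
#       pass
#   child = ScopedMap({'a': 1}).new_child({'b': 2})
#   type(child) is ScopedMap  # True (was compat_collections_chain_map)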
# compat_re_Pattern, compat_re_Match
# Pythons disagree on the type of a pattern (RegexObject, _sre.SRE_Pattern, Pattern, ...?)
compat_re_Pattern = type(re.compile(''))
# and on the type of a match
compat_re_Match = type(re.match('a', 'a'))
# compat_base64_b64decode
if sys.version_info < (3, 3):
def compat_b64decode(s, *args, **kwargs):
if isinstance(s, compat_str):
@ -3422,6 +3500,7 @@ else:
compat_base64_b64decode = compat_b64decode
# compat_ctypes_WINFUNCTYPE
if platform.python_implementation() == 'PyPy' and sys.pypy_version_info < (5, 4, 0):
# PyPy2 prior to version 5.4.0 expects byte strings as Windows function
# names, see the original PyPy issue [1] and the youtube-dl one [2].
@ -3440,6 +3519,7 @@ else:
return ctypes.WINFUNCTYPE(*args, **kwargs)
# compat_open
if sys.version_info < (3, 0):
# open(file, mode='r', buffering=- 1, encoding=None, errors=None, newline=None, closefd=True) not: opener=None
def compat_open(file_, *args, **kwargs):
@ -3467,18 +3547,28 @@ except AttributeError:
def compat_datetime_timedelta_total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
# optional decompression packages
# compat_brotli
# PyPi brotli package implements 'br' Content-Encoding
try:
import brotli as compat_brotli
except ImportError:
compat_brotli = None
# compat_ncompress
# PyPi ncompress package implements 'compress' Content-Encoding
try:
import ncompress as compat_ncompress
except ImportError:
compat_ncompress = None
# compat_zstandard
# PyPi zstandard package implements 'zstd' Content-Encoding (RFC 8878 7.2)
try:
import zstandard as compat_zstandard
except ImportError:
compat_zstandard = None
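# Illustrative sketch (not part of this compare) of decoding a zstd
# Content-Encoding body with the package imported above; assumes a
# complete zstd frame and a hypothetical helper name:
#
#   import io
#
#   def _decode_zstd_body(data):
#       # stream_reader() copes with frames that omit the decompressed
#       # size, unlike one-shot ZstdDecompressor.decompress()
#       dctx = compat_zstandard.ZstdDecompressor()
#       with dctx.stream_reader(io.BytesIO(data)) as reader:
#           return reader.read()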
legacy = [
'compat_HTMLParseError',
@ -3495,6 +3585,7 @@ legacy = [
'compat_getpass',
'compat_parse_qs',
'compat_realpath',
'compat_shlex_split',
'compat_urllib_parse_parse_qs',
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
@ -3508,8 +3599,6 @@ legacy = [
__all__ = [
'compat_html_parser_HTMLParseError',
'compat_html_parser_HTMLParser',
'compat_Struct',
'compat_base64_b64decode',
'compat_basestring',
@ -3518,13 +3607,9 @@ __all__ = [
'compat_chr',
'compat_collections_abc',
'compat_collections_chain_map',
'compat_datetime_timedelta_total_seconds',
'compat_http_cookiejar',
'compat_http_cookiejar_Cookie',
'compat_http_cookies',
'compat_http_cookies_SimpleCookie',
'compat_contextlib_suppress',
'compat_ctypes_WINFUNCTYPE',
'compat_datetime_timedelta_total_seconds',
'compat_etree_fromstring',
'compat_etree_iterfind',
'compat_filter',
@ -3533,6 +3618,12 @@ __all__ = [
'compat_getpass_getpass',
'compat_html_entities',
'compat_html_entities_html5',
'compat_html_parser_HTMLParseError',
'compat_html_parser_HTMLParser',
'compat_http_cookiejar',
'compat_http_cookiejar_Cookie',
'compat_http_cookies',
'compat_http_cookies_SimpleCookie',
'compat_http_client',
'compat_http_server',
'compat_input',
@ -3555,7 +3646,7 @@ __all__ = [
'compat_register_utf8',
'compat_setenv',
'compat_shlex_quote',
'compat_shlex_split',
'compat_shutil_get_terminal_size',
'compat_socket_create_connection',
'compat_str',
'compat_struct_pack',
@ -3575,5 +3666,5 @@ __all__ = [
'compat_xml_etree_register_namespace',
'compat_xpath',
'compat_zip',
'workaround_optparse_bug9161',
'compat_zstandard',
]

youtube_dl/extractor/common.py   View File

@ -3323,7 +3323,11 @@ class InfoExtractor(object):
if (self._downloader.params.get('mark_watched', False)
and (self._get_login_info()[0] is not None
or self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
# extractors apart from YoutubeIE can mark: be more lenient
try:
self._mark_watched(*args, **kwargs)
except NotImplementedError:
self.report_warning('Marking as watched is not supported')
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
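# Illustrative effect of the change above: a hypothetical extractor that
# never overrides _mark_watched() no longer aborts under --mark-watched;
# the NotImplementedError is caught and downgraded to the warning
# 'Marking as watched is not supported':
#
#   class ExampleIE(InfoExtractor):
#       _VALID_URL = r'https?://example\.invalid/(?P<id>\d+)'
#       # no _mark_watched() here: marking degrades to a warning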

youtube_dl/extractor/extractors.py   View File

@ -917,6 +917,11 @@ from .palcomp3 import (
PalcoMP3VideoIE,
)
from .pandoratv import PandoraTVIE
from .panopto import (
PanoptoIE,
PanoptoListIE,
PanoptoPlaylistIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE

youtube_dl/extractor/generic.py   View File

@ -132,6 +132,7 @@ from .kinja import KinjaEmbedIE
from .arcpublishing import ArcPublishingIE
from .medialaan import MedialaanIE
from .simplecast import SimplecastIE
from .panopto import PanoptoIE
class GenericIE(InfoExtractor):
@ -2340,6 +2341,15 @@ class GenericIE(InfoExtractor):
},
'expected_warnings': ['uploader id'],
},
{
# Panopto embeds
'url': 'https://www.monash.edu/learning-teaching/teachhq/learning-technologies/panopto/how-to/insert-a-quiz-into-a-panopto-video',
'info_dict': {
'title': 'Insert a quiz into a Panopto video',
'id': 'insert-a-quiz-into-a-panopto-video'
},
'playlist_count': 1
},
]
def report_following_redirect(self, new_url):
@ -3518,6 +3528,9 @@ class GenericIE(InfoExtractor):
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
panopto_entries = PanoptoIE._extract_from_webpage(url, webpage)
if panopto_entries:
return self.playlist_result(panopto_entries, video_id, video_title)
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:

youtube_dl/extractor/panopto.py   View File

@ -0,0 +1,663 @@
# coding: utf-8
from __future__ import unicode_literals
import calendar
from datetime import datetime
import functools
import json
import itertools
from random import random
import re
from .common import InfoExtractor
from ..compat import (
compat_map as map,
compat_parse_qs as parse_qs,
compat_str,
compat_urllib_parse_urlparse,
)
from ..utils import (
bug_reports_message,
ExtractorError,
get_first,
int_or_none,
LazyList,
merge_dicts,
OnDemandPagedList,
orderedSet,
srt_subtitles_timecode,
traverse_obj,
try_get,
update_url_query,
)
import inspect
if len(try_get(InfoExtractor.report_warning,
(lambda x: inspect.getfullargspec(x).args,
lambda x: inspect.getargspec(x).args, ), list) or []) <= 2:
BaseInfoExtractor = InfoExtractor
class InfoExtractor(BaseInfoExtractor):
def report_warning(self, warning, only_once=True, _memo=set()):
from hashlib import md5
if only_once:
w_hash = md5(warning.encode('utf-8')).hexdigest()
if w_hash in _memo:
return
_memo.add(w_hash)
super(InfoExtractor, self).report_warning(warning)
@classmethod
def _match_valid_url(cls, url):
return re.match(cls._VALID_URL, url)
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs/data
will be dropped. """
list1_data = {(item.get('url'), item.get('data')) for item in subtitle_list1}
ret = list(subtitle_list1)
ret.extend(item for item in subtitle_list2 if (item.get('url'), item.get('data')) not in list1_data)
return ret
class PanoptoBaseIE(InfoExtractor):
BASE_URL_RE = r'(?P<base_url>https?://[\w.-]+\.panopto.(?:com|eu)/Panopto)'
# see panopto core.js
_SUB_LANG_MAPPING = {
0: 'en-US',
1: 'en-GB',
2: 'es-MX',
3: 'es-ES',
4: 'de-DE',
5: 'fr-FR',
6: 'nl-NL',
7: 'th-TH',
8: 'zh-CN',
9: 'zh-TW',
10: 'ko-KR',
11: 'ja-JP',
12: 'ru-RU',
13: 'pt-PT',
14: 'pl-PL',
15: 'en-AU',
16: 'da-DK',
17: 'fi-FI',
18: 'hu-HU',
19: 'nb-NO',
20: 'sv-SE',
21: 'it-IT'
}
def _call_api(self, base_url, path, video_id, data=None, fatal=True, **kwargs):
response = self._download_json(
base_url + path, video_id, data=json.dumps(data).encode('utf8') if data else None,
fatal=fatal, headers={'accept': 'application/json', 'content-type': 'application/json'}, **kwargs)
if not response:
return
error_code = traverse_obj(response, 'ErrorCode')
if error_code == 2:
self.raise_login_required(method='cookies')
elif error_code is not None:
msg = '%s said: %s' % (self.IE_NAME, response.get('ErrorMessage') or '[no message]')
if fatal:
raise ExtractorError(msg, video_id=video_id, expected=True)
else:
self.report_warning(msg, video_id=video_id)
return response
@staticmethod
def _parse_fragment(url):
return dict((k, json.loads(v[0])) for k, v in parse_qs(compat_urllib_parse_urlparse(url).fragment).items())
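# Worked example for _parse_fragment() (GUID taken from the PanoptoListIE
# tests below): each fragment value is percent-decoded by parse_qs and
# then JSON-decoded, so quoted GUIDs become str and bare numbers int.
#
#   url = ('https://demo.hosted.panopto.com/Panopto/Pages/Sessions/'
#          'List.aspx#folderID=%22e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a%22')
#   PanoptoBaseIE._parse_fragment(url)
#   # {'folderID': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a'}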
class PanoptoIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/(Viewer|Embed)\.aspx.*(?:\?|&)id=(?P<id>[a-f0-9-]+)'
_EMBED_REGEX = [
r'''<iframe\b[^>]+\bsrc\s*=\s*(["'])(?P<url>%s/Pages/(?:Viewer|Embed|Sessions/List)\.aspx(?:(?!\1)[\w\W])+)'''
% (PanoptoBaseIE.BASE_URL_RE, )]
_TESTS = [
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=26b3ae9e-4a48-4dcc-96ba-0befba08a0fb',
'info_dict': {
'id': '26b3ae9e-4a48-4dcc-96ba-0befba08a0fb',
'title': 'Panopto for Business - Use Cases',
'timestamp': 1459184200,
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
'upload_date': '20160328',
'ext': 'mp4',
'cast': [],
'chapters': [],
'duration': 88.17099999999999,
'average_rating': int,
'uploader_id': '2db6b718-47a0-4b0b-9e17-ab0b00f42b1e',
'channel_id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'channel': 'Showcase Videos'
},
},
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=ed01b077-c9e5-4c7b-b8ff-15fa306d7a59',
'info_dict': {
'id': 'ed01b077-c9e5-4c7b-b8ff-15fa306d7a59',
'title': 'Overcoming Top 4 Challenges of Enterprise Video',
'uploader': 'Panopto Support',
'timestamp': 1449409251,
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
'upload_date': '20151206',
'ext': 'mp4',
'chapters': 'count:12',
'cast': ['Panopto Support'],
'uploader_id': 'a96d1a31-b4de-489b-9eee-b4a5b414372c',
'average_rating': int,
'description': 'md5:4391837802b3fc856dadf630c4b375d1',
'duration': 1088.2659999999998,
'channel_id': '9f3c1921-43bb-4bda-8b3a-b8d2f05a8546',
'channel': 'Webcasts',
},
},
{
# Extra params in URL
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Viewer.aspx?randomparam=thisisnotreal&id=5fa74e93-3d87-4694-b60e-aaa4012214ed&advance=true',
'info_dict': {
'id': '5fa74e93-3d87-4694-b60e-aaa4012214ed',
'ext': 'mp4',
'duration': 129.513,
'cast': ['Kathryn Kelly'],
'uploader_id': '316a0a58-7fa2-4cd9-be1c-64270d284a56',
'timestamp': 1569845768,
'tags': ['Viewer', 'Enterprise'],
'chapters': [],
'upload_date': '20190930',
'thumbnail': r're:https://howtovideos\.hosted\.panopto\.com/.+',
'description': 'md5:2d844aaa1b1a14ad0e2601a0993b431f',
'title': 'Getting Started: View a Video',
'average_rating': int,
'uploader': 'Kathryn Kelly',
'channel_id': 'fb93bc3c-6750-4b80-a05b-a921013735d3',
'channel': 'Getting Started',
}
},
{
# Does not allow normal Viewer.aspx. AUDIO livestream has no url, so should be skipped and only give one stream.
'url': 'https://unisa.au.panopto.com/Panopto/Pages/Embed.aspx?id=9d9a0fa3-e99a-4ebd-a281-aac2017f4da4',
'info_dict': {
'id': '9d9a0fa3-e99a-4ebd-a281-aac2017f4da4',
'ext': 'mp4',
'cast': ['LTS CLI Script'],
'chapters': [],
'duration': 2178.45,
'description': 'md5:ee5cf653919f55b72bce2dbcf829c9fa',
'channel_id': 'b23e673f-c287-4cb1-8344-aae9005a69f8',
'average_rating': int,
'uploader_id': '38377323-6a23-41e2-9ff6-a8e8004bf6f7',
'uploader': 'LTS CLI Script',
'timestamp': 1572458134,
'title': 'WW2 Vets Interview 3 Ronald Stanley George',
'thumbnail': r're:https://unisa\.au\.panopto\.com/.+',
'channel': 'World War II Veteran Interviews',
'upload_date': '20191030',
},
},
{
# Slides/storyboard
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=a7f12f1d-3872-4310-84b0-f8d8ab15326b',
'info_dict': {
'id': 'a7f12f1d-3872-4310-84b0-f8d8ab15326b',
'ext': 'mhtml',
'timestamp': 1448798857,
'duration': 4712.681,
'title': 'Cache Memory - CompSci 15-213, Lecture 12',
'channel_id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'uploader_id': 'a96d1a31-b4de-489b-9eee-b4a5b414372c',
'upload_date': '20151129',
'average_rating': 0,
'uploader': 'Panopto Support',
'channel': 'Showcase Videos',
'description': 'md5:55e51d54233ddb0e6c2ed388ca73822c',
'cast': ['ISR Videographer', 'Panopto Support'],
'chapters': 'count:28',
'thumbnail': r're:https://demo\.hosted\.panopto\.com/.+',
},
'params': {'format': 'mhtml', 'skip_download': True},
'skip': 'Not yet implemented',
},
{
'url': 'https://na-training-1.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=8285224a-9a2b-4957-84f2-acb0000c4ea9',
'info_dict': {
'id': '8285224a-9a2b-4957-84f2-acb0000c4ea9',
'ext': 'mp4',
'chapters': [],
'title': 'Company Policy',
'average_rating': 0,
'timestamp': 1615058901,
'channel': 'Human Resources',
'tags': ['HumanResources'],
'duration': 1604.243,
'thumbnail': r're:https://na-training-1\.hosted\.panopto\.com/.+',
'uploader_id': '8e8ba0a3-424f-40df-a4f1-ab3a01375103',
'uploader': 'Cait M.',
'upload_date': '20210306',
'cast': ['Cait M.'],
# 'subtitles': {'en-US': [{'ext': 'srt', 'data': 'md5:a3f4d25963fdeace838f327097c13265'}],
# 'es-ES': [{'ext': 'srt', 'data': 'md5:57e9dad365fd0fbaf0468eac4949f189'}]},
},
'params': {'writesubtitles': True, 'skip_download': True}
}, {
# On Panopto there are two subs: "Default" and en-US. en-US is blank and should be skipped.
'url': 'https://na-training-1.hosted.panopto.com/Panopto/Pages/Viewer.aspx?id=940cbd41-f616-4a45-b13e-aaf1000c915b',
'info_dict': {
'id': '940cbd41-f616-4a45-b13e-aaf1000c915b',
'ext': 'mp4',
'subtitles': 'count:1',
'title': 'HR Benefits Review Meeting*',
'cast': ['Panopto Support'],
'chapters': [],
'timestamp': 1575024251,
'thumbnail': r're:https://na-training-1\.hosted\.panopto\.com/.+',
'channel': 'Zoom',
'description': 'md5:04f90a9c2c68b7828144abfb170f0106',
'uploader': 'Panopto Support',
'average_rating': 0,
'duration': 409.34499999999997,
'uploader_id': 'b6ac04ad-38b8-4724-a004-a851004ea3df',
'upload_date': '20191129',
},
'params': {'writesubtitles': True, 'skip_download': True}
},
{
'url': 'https://ucc.cloud.panopto.eu/Panopto/Pages/Viewer.aspx?id=0e8484a4-4ceb-4d98-a63f-ac0200b455cb',
'only_matching': True
},
{
'url': 'https://brown.hosted.panopto.com/Panopto/Pages/Embed.aspx?id=0b3ff73b-36a0-46c5-8455-aadf010a3638',
'only_matching': True
},
]
@classmethod
def suitable(cls, url):
return False if PanoptoPlaylistIE.suitable(url) else super(PanoptoIE, cls).suitable(url)
@classmethod
def _extract_from_webpage(cls, url, webpage):
return LazyList(map(
lambda u: cls.url_result(u, cls.ie_key()),
orderedSet(m.group('url') for m in itertools.chain(
*(re.finditer(embed_re, webpage) for embed_re in cls._EMBED_REGEX)))))
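# Illustrative check of _EMBED_REGEX (the iframe is hypothetical,
# reusing the demo GUID from the tests above):
#
#   webpage = ('<iframe src="https://demo.hosted.panopto.com/Panopto/Pages'
#              '/Embed.aspx?id=26b3ae9e-4a48-4dcc-96ba-0befba08a0fb">'
#              '</iframe>')
#   m = re.search(PanoptoIE._EMBED_REGEX[0], webpage)
#   m.group('url')  # the embed URL that _extract_from_webpage() wraps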
def _mark_watched(self, base_url, video_id, delivery_info):
duration = traverse_obj(delivery_info, ('Delivery', 'Duration'), expected_type=float)
invocation_id = delivery_info.get('InvocationId')
stream_id = traverse_obj(delivery_info, ('Delivery', 'Streams', Ellipsis, 'PublicID'), get_all=False, expected_type=compat_str)
if invocation_id and stream_id and duration:
timestamp_str = '/Date(%s000)/' % (calendar.timegm(datetime.utcnow().timetuple()), )
data = {
'streamRequests': [
{
'ClientTimeStamp': timestamp_str,
'ID': 0,
'InvocationID': invocation_id,
'PlaybackSpeed': 1,
'SecondsListened': duration - 1,
'SecondsRejected': 0,
'StartPosition': 0,
'StartReason': 2,
'StopReason': None,
'StreamID': stream_id,
'TimeStamp': timestamp_str,
'UpdatesRejected': 0
},
]}
self._download_webpage(
base_url + '/Services/Analytics.svc/AddStreamRequests', video_id,
fatal=False, data=json.dumps(data).encode('utf8'), headers={'content-type': 'application/json'},
note='Marking watched', errnote='Unable to mark watched')
@staticmethod
def _extract_chapters(timestamps):
chapters = []
for timestamp in timestamps or []:
caption = timestamp.get('Caption')
start, duration = int_or_none(timestamp.get('Time')), int_or_none(timestamp.get('Duration'))
if not caption or start is None or duration is None:
continue
chapters.append({
'start_time': start,
'end_time': start + duration,
'title': caption
})
return chapters
@staticmethod
def _extract_mhtml_formats(base_url, timestamps):
image_frags = {}
for timestamp in timestamps or []:
duration = timestamp.get('Duration')
obj_id, obj_sn = timestamp.get('ObjectIdentifier'), timestamp.get('ObjectSequenceNumber'),
if timestamp.get('EventTargetType') == 'PowerPoint' and obj_id is not None and obj_sn is not None:
image_frags.setdefault('slides', []).append({
'url': update_url_query(
base_url + '/Pages/Viewer/Image.aspx', {
'id': obj_id,
'number': obj_sn,
}),
'duration': duration
})
obj_pid, session_id, abs_time = timestamp.get('ObjectPublicIdentifier'), timestamp.get('SessionID'), timestamp.get('AbsoluteTime')
if None not in (obj_pid, session_id, abs_time):
image_frags.setdefault('chapter', []).append({
'url': update_url_query(
base_url + '/Pages/Viewer/Thumb.aspx?isPrimary=false', {
'eventTargetPID': obj_pid,
'sessionPID': session_id,
'number': obj_sn,
'absoluteTime': abs_time,
}),
'duration': duration,
})
for name, fragments in image_frags.items():
yield {
'format_id': name,
'ext': 'mhtml',
'protocol': 'mhtml',
'acodec': 'none',
'vcodec': 'none',
'url': 'about:invalid',
'fragments': fragments
}
@staticmethod
def _json2srt(data, delivery):
SRT_CAPTION_FMT = '{0}\n{1} --> {2}\n{3}'
def gen_lines(dat, deliv):
for i, line in enumerate(dat):
start_time = line['Time']
duration = line.get('Duration')
if duration:
end_time = start_time + duration
else:
end_time = traverse_obj(dat, (i + 1, 'Time')) or deliv['Duration']
yield SRT_CAPTION_FMT.format(
i + 1, srt_subtitles_timecode(start_time), srt_subtitles_timecode(end_time), line['Caption'])
return '\n\n'.join(gen_lines(data, delivery))
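# Worked example with hypothetical caption JSON of the shape returned by
# DeliveryInfo.aspx with getCaptions=True (an entry without Duration runs
# until the next entry, or to the delivery duration for the last one):
#
#   data = [{'Time': 0.0, 'Duration': 1.5, 'Caption': 'Hello'},
#           {'Time': 1.5, 'Caption': 'world'}]
#   PanoptoIE._json2srt(data, {'Duration': 4.0})
#   # 1
#   # 00:00:00,000 --> 00:00:01,500
#   # Hello
#   #
#   # 2
#   # 00:00:01,500 --> 00:00:04,000
#   # world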
def _get_subtitles(self, base_url, video_id, delivery):
subtitles = {}
for lang in delivery.get('AvailableLanguages') or []:
response = self._call_api(
base_url, '/Pages/Viewer/DeliveryInfo.aspx', video_id, fatal=False,
note='Downloading captions JSON metadata', query={
'deliveryId': video_id,
'getCaptions': True,
'language': compat_str(lang),
'responseType': 'json'
}
)
if not isinstance(response, list):
continue
subtitles.setdefault(self._SUB_LANG_MAPPING.get(lang) or 'default', []).append({
'ext': 'srt',
'data': self._json2srt(response, delivery),
})
return subtitles
def _extract_streams_formats_and_subtitles(self, video_id, streams, **fmt_kwargs):
formats = []
subtitles = {}
for stream in streams or []:
stream_formats = []
http_stream_url = stream.get('StreamHttpUrl')
stream_url = stream.get('StreamUrl')
if http_stream_url:
stream_formats.append({'url': http_stream_url})
if stream_url:
media_type = stream.get('ViewerMediaFileTypeName')
if media_type in ('hls', ):
# m3u8_formats, stream_subtitles = self._extract_m3u8_formats_and_subtitles(stream_url, video_id)
m3u8_formats = self._extract_m3u8_formats(stream_url, video_id)
stream_formats.extend(m3u8_formats)
# subtitles = self._merge_subtitles(subtitles, stream_subtitles)
else:
stream_formats.append({
'url': stream_url
})
for fmt in stream_formats:
fmt.update({'format_note': stream.get('Tag'), })
fmt.update(fmt_kwargs)
formats.extend(stream_formats)
return formats, subtitles
def _real_extract(self, url):
base_url, video_id = self._match_valid_url(url).group('base_url', 'id')
delivery_info = self._call_api(
base_url, '/Pages/Viewer/DeliveryInfo.aspx', video_id,
query={
'deliveryId': video_id,
'invocationId': '',
'isLiveNotes': 'false',
'refreshAuthCookie': 'true',
'isActiveBroadcast': 'false',
'isEditing': 'false',
'isKollectiveAgentInstalled': 'false',
'isEmbed': 'false',
'responseType': 'json',
}
)
delivery = delivery_info['Delivery']
session_start_time = int_or_none(delivery.get('SessionStartTime'))
timestamps = delivery.get('Timestamps')
# Podcast stream is usually the combined streams. We will prefer that by default.
podcast_formats, podcast_subtitles = self._extract_streams_formats_and_subtitles(
video_id, delivery.get('PodcastStreams'), format_note='PODCAST')
streams_formats, streams_subtitles = self._extract_streams_formats_and_subtitles(
video_id, delivery.get('Streams'), preference=-10)
formats = podcast_formats + streams_formats
formats.extend(self._extract_mhtml_formats(base_url, timestamps))
subtitles = self._merge_subtitles(podcast_subtitles, streams_subtitles)
subtitles = self._merge_subtitles(subtitles, self.extract_subtitles(base_url, video_id, delivery))
self._sort_formats(formats)
self.mark_watched(base_url, video_id, delivery_info)
return {
'id': video_id,
'title': delivery.get('SessionName'),
'cast': traverse_obj(delivery, ('Contributors', Ellipsis, 'DisplayName'), default=[], expected_type=lambda x: x or None),
'timestamp': session_start_time - 11640000000 if session_start_time else None,
'duration': delivery.get('Duration'),
'thumbnail': update_url_query(
base_url + '/Services/FrameGrabber.svc/FrameRedirect?mode=Delivery', {
'objectId': video_id,
'random': random(),
}),
'average_rating': delivery.get('AverageRating'),
'chapters': self._extract_chapters(timestamps),
'uploader': delivery.get('OwnerDisplayName') or None,
'uploader_id': delivery.get('OwnerId'),
'description': delivery.get('SessionAbstract'),
'tags': traverse_obj(delivery, ('Tags', Ellipsis, 'Content')),
'channel_id': delivery.get('SessionGroupPublicID'),
'channel': traverse_obj(delivery, 'SessionGroupLongName', 'SessionGroupShortName', get_all=False),
'formats': formats,
'subtitles': subtitles
}
class PanoptoPlaylistIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/(Viewer|Embed)\.aspx.*(?:\?|&)pid=(?P<id>[a-f0-9-]+)'
_TESTS = [
{
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=f3b39fcf-882f-4849-93d6-a9f401236d36&id=5fa74e93-3d87-4694-b60e-aaa4012214ed&advance=true',
'info_dict': {
'title': 'Featured Video Tutorials',
'id': 'f3b39fcf-882f-4849-93d6-a9f401236d36',
},
'playlist_mincount': 34, # was 36
},
{
'url': 'https://utsa.hosted.panopto.com/Panopto/Pages/Viewer.aspx?pid=e2900555-3ad4-4bdb-854d-ad2401686190',
'info_dict': {
'title': 'Library Website Introduction Playlist',
'id': 'e2900555-3ad4-4bdb-854d-ad2401686190',
'description': 'md5:f958bca50a1cbda15fdc1e20d32b3ecb',
},
'playlist_mincount': 4
},
]
def _entries(self, base_url, playlist_id, session_list_id):
session_list_info = self._call_api(
base_url,
'/Api/SessionLists/%s?collections[0].maxCount=500&collections[0].name=items' % (session_list_id, ),
playlist_id)
items = session_list_info['Items']
for item in items:
if item.get('TypeName') != 'Session':
self.report_warning('Got an item in the playlist that is not a Session' + bug_reports_message(), only_once=True)
continue
yield merge_dicts(
self.url_result(item.get('ViewerUri'), item.get('Id'), item.get('Name')), {
'description': item.get('Description'),
'duration': item.get('Duration'),
'channel': traverse_obj(item, ('Parent', 'Name')),
'channel_id': traverse_obj(item, ('Parent', 'Id'))
})
def _real_extract(self, url):
base_url, playlist_id = self._match_valid_url(url).group('base_url', 'id')
video_id = get_first(parse_qs(url), 'id')
if video_id:
if self.get_param('noplaylist'):
self.to_screen('Downloading just video %s because of --no-playlist' % video_id)
return self.url_result(update_url_query(base_url + '/Pages/Viewer.aspx', {'id': video_id}), ie_key=PanoptoIE.ie_key(), video_id=video_id)
else:
self.to_screen('Downloading playlist {playlist_id}; add --no-playlist to just download video {video_id}'.format(**locals()))
playlist_info = self._call_api(base_url, '/Api/Playlists/' + playlist_id, playlist_id)
return self.playlist_result(
self._entries(base_url, playlist_id, playlist_info['SessionListId']),
playlist_id=playlist_id, playlist_title=playlist_info.get('Name'),
playlist_description=playlist_info.get('Description'))
class PanoptoListIE(PanoptoBaseIE):
_VALID_URL = PanoptoBaseIE.BASE_URL_RE + r'/Pages/Sessions/List\.aspx'
_PAGE_SIZE = 250
_TESTS = [
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx#folderID=%22e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a%22',
'info_dict': {
'id': 'e4c6a2fc-1214-4ca0-8fb7-aef2e29ff63a',
'title': 'Showcase Videos'
},
'playlist_mincount': 140
},
{
'url': 'https://demo.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx#view=2&maxResults=250',
'info_dict': {
'id': 'panopto_list',
'title': 'panopto_list'
},
'playlist_mincount': 300
},
{
# Folder that contains 8 folders and a playlist
'url': 'https://howtovideos.hosted.panopto.com/Panopto/Pages/Sessions/List.aspx?noredirect=true#folderID=%224b9de7ae-0080-4158-8496-a9ba01692c2e%22',
'info_dict': {
'id': '4b9de7ae-0080-4158-8496-a9ba01692c2e',
'title': 'Video Tutorials'
},
'playlist_mincount': 9
}
]
def _fetch_page(self, base_url, query_params, display_id, page):
params = merge_dicts({
'page': page,
'maxResults': self._PAGE_SIZE,
}, query_params, {
'sortColumn': 1,
'getFolderData': True,
'includePlaylists': True,
})
response = self._call_api(
base_url, '/Services/Data.svc/GetSessions', '%s page %d' % (display_id, page + 1),
data={'queryParameters': params}, fatal=False)
for result in get_first(response, 'Results', default=[]):
# This could be a video, playlist (or maybe something else)
item_id = result.get('DeliveryID')
yield merge_dicts(
self.url_result(
traverse_obj(result, 'ViewerUrl', 'EmbedUrl', get_all=False)
or update_url_query(base_url + '/Pages/Viewer.aspx', {'id': item_id}),
item_id, result.get('SessionName')), {
'duration': result.get('Duration'),
'channel': result.get('FolderName'),
'channel_id': result.get('FolderID'),
})
for folder in get_first(response, 'Subfolders', default=[]):
folder_id = folder.get('ID')
yield self.url_result(
'%s/Pages/Sessions/List.aspx#folderID=%s' % (base_url, folder_id),
ie_key=PanoptoListIE.ie_key(), video_id=folder_id, title=folder.get('Name'))
def _extract_folder_metadata(self, base_url, folder_id):
response = self._call_api(
base_url, '/Services/Data.svc/GetFolderInfo', folder_id,
data={'folderID': folder_id}, fatal=False)
return {
'title': get_first(response, 'Name')
}
def _real_extract(self, url):
mobj = self._match_valid_url(url)
base_url = mobj.group('base_url')
query_params = self._parse_fragment(url)
folder_id, display_id = query_params.get('folderID'), 'panopto_list'
if query_params.get('isSubscriptionsPage'):
display_id = 'subscriptions'
if not query_params.get('subscribableTypes'):
query_params['subscribableTypes'] = [0, 1, 2]
elif query_params.get('isSharedWithMe'):
display_id = 'sharedwithme'
elif folder_id:
display_id = folder_id
query = query_params.get('query')
if query:
display_id += ': query "%s"' % (query, )
info = self.playlist_result(
OnDemandPagedList(
functools.partial(self._fetch_page, base_url, query_params, display_id), self._PAGE_SIZE),
playlist_id=display_id,
playlist_title=display_id)
if folder_id:
info.update(self._extract_folder_metadata(base_url, folder_id))
return info

youtube_dl/extractor/youtube.py   View File

@ -27,6 +27,7 @@ from ..compat import (
)
from ..jsinterp import JSInterpreter
from ..utils import (
bug_reports_message,
clean_html,
dict_get,
error_to_compat_str,
@ -65,6 +66,7 @@ from ..utils import (
url_or_none,
urlencode_postdata,
urljoin,
variadic,
)
@ -89,12 +91,12 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'IOS',
'clientVersion': '19.45.4',
'clientVersion': '20.10.4',
'deviceMake': 'Apple',
'deviceModel': 'iPhone16,2',
'userAgent': 'com.google.ios.youtube/19.45.4 (iPhone16,2; U; CPU iOS 18_1_0 like Mac OS X;)',
'userAgent': 'com.google.ios.youtube/20.10.4 (iPhone16,2; U; CPU iOS 18_3_2 like Mac OS X;)',
'osName': 'iPhone',
'osVersion': '18.1.0.22B83',
'osVersion': '18.3.2.22D82',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 5,
@ -107,7 +109,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'MWEB',
'clientVersion': '2.20241202.07.00',
'clientVersion': '2.20250311.03.00',
# mweb previously did not require PO Token with this UA
'userAgent': 'Mozilla/5.0 (iPad; CPU OS 16_7_10 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Mobile/15E148 Safari/604.1,gzip(gfe)',
},
@ -120,7 +122,8 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'TVHTML5',
'clientVersion': '7.20241201.18.00',
'clientVersion': '7.20250312.16.00',
'userAgent': 'Mozilla/5.0 (ChromiumStylePlatform) Cobalt/Version',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 7,
@ -130,7 +133,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'INNERTUBE_CONTEXT': {
'client': {
'clientName': 'WEB',
'clientVersion': '2.20241126.01.00',
'clientVersion': '2.20250312.04.00',
},
},
'INNERTUBE_CONTEXT_CLIENT_NAME': 1,
@ -460,6 +463,26 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'uploader': uploader,
}
@staticmethod
def _extract_thumbnails(data, *path_list, **kw_final_key):
"""
Extract thumbnails from thumbnails dict
@param path_list: path list to level that contains 'thumbnails' key
"""
final_key = kw_final_key.get('final_key', 'thumbnails')
return traverse_obj(data, ((
tuple(variadic(path) + (final_key, Ellipsis)
for path in path_list or [()])), {
'url': ('url', T(url_or_none),
# Sometimes youtube gives a wrong thumbnail URL. See:
# https://github.com/yt-dlp/yt-dlp/issues/233
# https://github.com/ytdl-org/youtube-dl/issues/28023
T(lambda u: update_url(u, query=None) if u and 'maxresdefault' in u else u)),
'height': ('height', T(int_or_none)),
'width': ('width', T(int_or_none)),
}, T(lambda t: t if t.get('url') else None)))
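# Illustrative input/output (structure abbreviated; the real path for
# lockup view models is shown in _extract_lockup_view_model below):
#
#   data = {'image': {'sources': [
#       {'url': 'https://i.ytimg.com/vi/xyz/hqdefault.jpg',
#        'width': 480, 'height': 270}]}}
#   YoutubeBaseInfoExtractor._extract_thumbnails(
#       data, 'image', final_key='sources')
#   # [{'url': 'https://i.ytimg.com/vi/xyz/hqdefault.jpg',
#   #   'height': 270, 'width': 480}]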
def _search_results(self, query, params):
data = {
'context': {
@ -669,7 +692,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'invidious': '|'.join(_INVIDIOUS_SITES),
}
_PLAYER_INFO_RE = (
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/player',
r'/s/player/(?P<id>[a-zA-Z0-9_-]{8,})/(?:tv-)?player',
r'/(?P<id>[a-zA-Z0-9_-]{8,})/player(?:_ias\.vflset(?:/[a-zA-Z]{2,3}_[a-zA-Z]{2,3})?|-plasma-ias-(?:phone|tablet)-[a-z]{2}_[A-Z]{2}\.vflset)/base\.js$',
r'\b(?P<id>vfl[a-zA-Z0-9_-]+)\b.*?\.js$',
)
@ -1829,12 +1852,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if func_code:
return jsi, player_id, func_code
return self._extract_n_function_code_jsi(video_id, jsi, player_id)
func_name = self._extract_n_function_name(jscode)
def _extract_n_function_code_jsi(self, video_id, jsi, player_id=None):
var_ay = self._search_regex(
r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"\s*\.\s*split\("\W+"\))(?=\s*[,;])',
jsi.code, 'useful values', default='')
func_name = self._extract_n_function_name(jsi.code)
func_code = jsi.extract_function_code(func_name)
if var_ay:
func_code = (func_code[0], ';\n'.join((var_ay, func_code[1])))
self.cache.store('youtube-nsig', player_id, func_code)
if player_id:
self.cache.store('youtube-nsig', player_id, func_code)
return jsi, player_id, func_code
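# Illustrative match for the 'useful values' regex above, using a
# minimal tce-style global string table:
#
#   code = 'var g="a|b|c".split("|");var f=function(){};'
#   mobj = re.search(
#       r'(?:[;\s]|^)\s*(var\s*[\w$]+\s*=\s*"(?:\\"|[^"])+"'
#       r'\s*\.\s*split\("\W+"\))(?=\s*[,;])', code)
#   mobj.group(1)  # 'var g="a|b|c".split("|")'
#
# the captured declaration is prepended to func_code so that references
# to the global string array resolve when the nsig function is interpreted.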
def _extract_n_function_from_code(self, jsi, func_code):
@ -3183,8 +3216,12 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
expected_type=txt_or_none)
def _grid_entries(self, grid_renderer):
for item in grid_renderer['items']:
if not isinstance(item, dict):
for item in traverse_obj(grid_renderer, ('items', Ellipsis, T(dict))):
lockup_view_model = traverse_obj(item, ('lockupViewModel', T(dict)))
if lockup_view_model:
entry = self._extract_lockup_view_model(lockup_view_model)
if entry:
yield entry
continue
renderer = self._extract_grid_item_renderer(item)
if not isinstance(renderer, dict):
@ -3268,6 +3305,25 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
continue
yield self._extract_video(renderer)
def _extract_lockup_view_model(self, view_model):
content_id = view_model.get('contentId')
if not content_id:
return
content_type = view_model.get('contentType')
if content_type not in ('LOCKUP_CONTENT_TYPE_PLAYLIST', 'LOCKUP_CONTENT_TYPE_PODCAST'):
self.report_warning(
'Unsupported lockup view model content type "{0}"{1}'.format(content_type, bug_reports_message()), only_once=True)
return
return merge_dicts(self.url_result(
update_url_query('https://www.youtube.com/playlist', {'list': content_id}),
ie=YoutubeTabIE.ie_key(), video_id=content_id), {
'title': traverse_obj(view_model, (
'metadata', 'lockupMetadataViewModel', 'title', 'content', T(compat_str))),
'thumbnails': self._extract_thumbnails(view_model, (
'contentImage', 'collectionThumbnailViewModel', 'primaryThumbnail',
'thumbnailViewModel', 'image'), final_key='sources'),
})
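# Illustrative lockup view model fragment (field names as referenced
# above; values hypothetical):
#
#   {'contentId': 'PLxxxx',
#    'contentType': 'LOCKUP_CONTENT_TYPE_PLAYLIST',
#    'metadata': {'lockupMetadataViewModel': {
#        'title': {'content': 'My list'}}},
#    'contentImage': {'collectionThumbnailViewModel': {'primaryThumbnail': {
#        'thumbnailViewModel': {'image': {'sources': [...]}}}}}}
#
# yields a url_result for https://www.youtube.com/playlist?list=PLxxxx
# with title and thumbnails merged in.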
def _video_entry(self, video_renderer):
video_id = video_renderer.get('videoId')
if video_id:

youtube_dl/jsinterp.py   View File

@ -1,10 +1,12 @@
# coding: utf-8
from __future__ import unicode_literals
import calendar
import itertools
import json
import operator
import re
import time
from functools import update_wrapper, wraps
@ -12,8 +14,10 @@ from .utils import (
error_to_compat_str,
ExtractorError,
float_or_none,
int_or_none,
js_to_json,
remove_quotes,
str_or_none,
unified_timestamp,
variadic,
write_string,
@ -150,6 +154,7 @@ def _js_to_primitive(v):
)
# more exact: yt-dlp/yt-dlp#12110
def _js_toString(v):
return (
'undefined' if v is JS_Undefined
@ -158,7 +163,7 @@ def _js_toString(v):
else 'null' if v is None
# bool <= int: do this first
else ('false', 'true')[v] if isinstance(v, bool)
else '{0:.7f}'.format(v).rstrip('.0') if isinstance(v, compat_numeric_types)
else re.sub(r'(?<=\d)\.?0*$', '', '{0:.7f}'.format(v)) if isinstance(v, compat_numeric_types)
else _js_to_primitive(v))
@ -404,6 +409,7 @@ class JSInterpreter(object):
class Exception(ExtractorError):
def __init__(self, msg, *args, **kwargs):
expr = kwargs.pop('expr', None)
msg = str_or_none(msg, default='"None"')
if expr is not None:
msg = '{0} in: {1!r:.100}'.format(msg.rstrip(), expr)
super(JSInterpreter.Exception, self).__init__(msg, *args, **kwargs)
@ -431,6 +437,7 @@ class JSInterpreter(object):
flags, _ = self.regex_flags(flags)
# First, avoid https://github.com/python/cpython/issues/74534
self.__self = None
pattern_txt = str_or_none(pattern_txt) or '(?:)'
self.__pattern_txt = pattern_txt.replace('[[', r'[\[')
self.__flags = flags
@ -475,6 +482,73 @@ class JSInterpreter(object):
flags |= cls.RE_FLAGS[ch]
return flags, expr[idx + 1:]
class JS_Date(object):
_t = None
@staticmethod
def __ymd_etc(*args, **kw_is_utc):
# args: year, monthIndex, day, hours, minutes, seconds, milliseconds
is_utc = kw_is_utc.get('is_utc', False)
args = list(args[:7])
args += [0] * (9 - len(args))
args[1] += 1 # month 0..11 -> 1..12
ms = args[6]
for i in range(6, 9):
args[i] = -1 # don't know
if is_utc:
args[-1] = 1
# TODO: [MDN] When a segment overflows or underflows its expected
# range, it usually "carries over to" or "borrows from" the higher segment.
try:
mktime = calendar.timegm if is_utc else time.mktime
return mktime(time.struct_time(args)) * 1000 + ms
except (OverflowError, ValueError):
return None
@classmethod
def UTC(cls, *args):
t = cls.__ymd_etc(*args, is_utc=True)
return _NaN if t is None else t
@staticmethod
def parse(date_str, **kw_is_raw):
is_raw = kw_is_raw.get('is_raw', False)
t = unified_timestamp(str_or_none(date_str), False)
return int(t * 1000) if t is not None else t if is_raw else _NaN
@staticmethod
def now(**kw_is_raw):
is_raw = kw_is_raw.get('is_raw', False)
t = time.time()
return int(t * 1000) if t is not None else t if is_raw else _NaN
def __init__(self, *args):
if not args:
args = [self.now(is_raw=True)]
if len(args) == 1:
if isinstance(args[0], JSInterpreter.JS_Date):
self._t = int_or_none(args[0].valueOf(), default=None)
else:
arg_type = _js_typeof(args[0])
if arg_type == 'string':
self._t = self.parse(args[0], is_raw=True)
elif arg_type == 'number':
self._t = int(args[0])
else:
self._t = self.__ymd_etc(*args)
def toString(self):
try:
return time.strftime('%a %b %0d %Y %H:%M:%S %Z%z', self._t).rstrip()
except TypeError:
return "Invalid Date"
def valueOf(self):
return _NaN if self._t is None else self._t
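# Illustrative JS_Date behaviour (values in ms, mirroring JS Date and
# the tests added in test_jsinterp.py):
#
#   JSInterpreter.JS_Date('1 January 1970 00:00:00 UTC').valueOf()  # 0
#   JSInterpreter.JS_Date.parse('1 January 1970 00:00:00 UTC')      # 0
#   JSInterpreter.JS_Date.UTC(1970, 0, 1, 0, 0, 0, 0)               # 0
#   JSInterpreter.JS_Date('not a date').valueOf()                   # NaN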
@classmethod
def __op_chars(cls):
op_chars = set(';,[')
@ -599,14 +673,15 @@ class JSInterpreter(object):
except Exception as e:
raise self.Exception('Failed to evaluate {left_val!r:.50} {op} {right_val!r:.50}'.format(**locals()), expr, cause=e)
def _index(self, obj, idx, allow_undefined=True):
def _index(self, obj, idx, allow_undefined=None):
if idx == 'length' and isinstance(obj, list):
return len(obj)
try:
return obj[int(idx)] if isinstance(obj, list) else obj[compat_str(idx)]
except (TypeError, KeyError, IndexError) as e:
if allow_undefined:
# when is not allowed?
# allow_undefined is None gives correct behaviour
if allow_undefined or (
allow_undefined is None and not isinstance(e, TypeError)):
return JS_Undefined
raise self.Exception('Cannot get index {idx!r:.100}'.format(**locals()), expr=repr(obj), cause=e)
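# Illustrative semantics with the None default (matching JS): a missing
# index yields undefined, while indexing into null/undefined throws:
#
#   JSInterpreter('function f(){return [10][5]}').call_function('f')
#   # JS_Undefined (IndexError path)
#   JSInterpreter('function f(){return null[5]}').call_function('f')
#   # raises JSInterpreter.Exception (TypeError path)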
@ -715,7 +790,7 @@ class JSInterpreter(object):
new_kw, _, obj = expr.partition('new ')
if not new_kw:
for klass, konstr in (('Date', lambda x: int(unified_timestamp(x, False) * 1000)),
for klass, konstr in (('Date', lambda *x: self.JS_Date(*x).valueOf()),
('RegExp', self.JS_RegExp),
('Error', self.Exception)):
if not obj.startswith(klass + '('):
@ -1034,6 +1109,7 @@ class JSInterpreter(object):
'String': compat_str,
'Math': float,
'Array': list,
'Date': self.JS_Date,
}
obj = local_vars.get(variable)
if obj in (JS_Undefined, None):
@ -1086,6 +1162,8 @@ class JSInterpreter(object):
assertion(len(argvals) == 2, 'takes two arguments')
return argvals[0] ** argvals[1]
raise self.Exception('Unsupported Math method ' + member, expr=expr)
elif obj is self.JS_Date:
return getattr(obj, member)(*argvals)
if member == 'split':
assertion(len(argvals) <= 2, 'takes at most two arguments')

youtube_dl/options.py   View File

@ -186,11 +186,11 @@ def parseOpts(overrideArguments=None):
general.add_option(
'--mark-watched',
action='store_true', dest='mark_watched', default=False,
help='Mark videos watched (YouTube only)')
help='Mark videos watched (if supported for site)')
general.add_option(
'--no-mark-watched',
action='store_false', dest='mark_watched', default=False,
help='Do not mark videos watched (YouTube only)')
help='Do not mark videos watched')
general.add_option(
'--no-color', '--no-colors',
action='store_true', dest='no_color',