# coding: utf-8
from __future__ import unicode_literals

import base64
import collections
import datetime
import functools
import hashlib
import json
import netrc
import os
import random
import re
import socket
import ssl
import sys
import time
import math

from ..compat import (
    compat_cookiejar_Cookie,
    compat_cookies_SimpleCookie,
    compat_etree_Element,
    compat_etree_fromstring,
    compat_getpass,
    compat_integer_types,
    compat_http_client,
    compat_kwargs,
    compat_map as map,
    compat_open as open,
    compat_os_name,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlencode,
    compat_urllib_request,
    compat_urlparse,
    compat_xml_parse_error,
    compat_zip as zip,
)
from ..downloader.f4m import (
    get_base_url,
    remove_encrypted_media,
)
from ..utils import (
    NO_DEFAULT,
    age_restricted,
    base_url,
    bug_reports_message,
    clean_html,
    compiled_regex_type,
    determine_ext,
    determine_protocol,
    dict_get,
    error_to_compat_str,
    ExtractorError,
    extract_attributes,
    fix_xml_ampersands,
    float_or_none,
    GeoRestrictedError,
    GeoUtils,
    int_or_none,
    join_nonempty,
    js_to_json,
    JSON_LD_RE,
    mimetype2ext,
    orderedSet,
    parse_bitrate,
    parse_codecs,
    parse_duration,
    parse_iso8601,
    parse_m3u8_attributes,
    parse_resolution,
    RegexNotFoundError,
    sanitized_Request,
    sanitize_filename,
    str_or_none,
    str_to_int,
    strip_or_none,
    T,
    traverse_obj,
    try_get,
    unescapeHTML,
    unified_strdate,
    unified_timestamp,
    update_Request,
    update_url_query,
    urljoin,
    url_basename,
    url_or_none,
    variadic,
    xpath_element,
    xpath_text,
    xpath_with_ns,
)


class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the YoutubeDL. The YoutubeDL processes this
    information, possibly downloading the video to the file system, among
    other possible outcomes.

    The type field determines the type of the result.
    By far the most common value (and the default if _type is missing) is
    "video", which indicates a single video.

    For a video, the dictionaries must include the following fields:

    id:             Video identifier.
    title:          Video title, unescaped.

    Additionally, it must contain either a formats entry or a url one:

    formats:        A list of dictionaries for each format available, ordered
                    from worst to best quality.

                    Potential fields:
                    * url        The mandatory URL representing the media:
                                   for plain file media - HTTP URL of this file,
                                   for RTMP - RTMP URL,
                                   for HLS - URL of the M3U8 media playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH
                                     - HTTP URL to plain file media (in case of
                                       unfragmented media)
                                     - URL of the MPD manifest or base URL
                                       representing the media if MPD manifest
                                       is parsed from a string (in case of
                                       fragmented media)
                                   for MSS - URL of the ISM manifest.
                    * manifest_url
                                 The URL of the manifest file in case of
                                 fragmented media:
                                   for HLS - URL of the M3U8 master playlist,
                                   for HDS - URL of the F4M manifest,
                                   for DASH - URL of the MPD manifest,
                                   for MSS - URL of the ISM manifest.
                    * ext        Will be calculated from URL if missing
                    * format     A human-readable description of the format
                                 ("mp4 container with h264/opus").
                                 Calculated from the format_id, width, height
                                 and format_note fields if missing.
                    * format_id  A short description of the format
                                 ("mp4_h264_opus" or "19").
                                 Technically optional, but strongly recommended.
                    * format_note Additional info about the format
                                 ("3D" or "DASH video")
                    * width      Width of the video, if known
                    * height     Height of the video, if known
                    * resolution Textual description of width and height
                    * tbr        Average bitrate of audio and video in KBit/s
                    * abr        Average audio bitrate in KBit/s
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
                    * filesize_approx  An estimate for the number of bytes
                    * player_url SWF Player URL (used for rtmpdump).
                    * protocol   The protocol that will be used for the actual
                                 download, lower-case.
                                 "http", "https", "rtsp", "rtmp", "rtmpe",
                                 "m3u8", "m3u8_native" or "http_dash_segments".
                    * fragment_base_url
                                 Base URL for fragments. Each fragment's path
                                 value (if present) will be relative to
                                 this URL.
                    * fragments  A list of fragments of a fragmented media.
                                 Each fragment entry must contain either an url
                                 or a path. If an url is present it should be
                                 considered by a client. Otherwise both path and
                                 fragment_base_url must be present. Here is
                                 the list of all potential fields:
                                 * "url" - fragment's URL
                                 * "path" - fragment's path relative to
                                            fragment_base_url
                                 * "duration" (optional, int or float)
                                 * "filesize" (optional, int)
                                 * "range" (optional, str of the form "start-end"
                                            to use in HTTP Range header)
                    * preference Order number of this format. If this field is
                                 present and not None, the formats get sorted
                                 by this field, regardless of all other values.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                                 < -1000 to hide the format (if there is
                                    another one which is strictly better)
                    * language   Language code, e.g. "de" or "en-US".
                    * language_preference  Is this in the language mentioned in
                                 the URL?
                                 10 if it's what the URL is about,
                                 -1 for default (don't know),
                                 -10 otherwise, other values reserved for now.
                    * quality    Order number of the video quality of this
                                 format, irrespective of the file format.
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                                 (quality takes higher priority)
                                 -1 for default (order by other properties),
                                 -2 or smaller for less than default.
                    * http_headers  A dictionary of additional HTTP headers
                                 to add to the request.
                    * stretched_ratio  If given and not 1, indicates that the
                                 video's pixels are not square.
                                 width : height ratio as float.
                    * no_resume  The server does not support resuming the
                                 (HTTP or RTMP) download. Boolean.
                    * downloader_options  A dictionary of downloader options as
                                 described in FileDownloader

    url:            Final video URL.
    ext:            Video filename extension.
    format:         The video format, defaults to ext (used for --get-format)
    player_url:     SWF Player URL (used for rtmpdump).

    The following fields are optional:

    alt_title:      A secondary title of the video.
    display_id:     An alternative identifier for the video, not necessarily
                    unique, but available before title. Typically, id is
                    something like "4234987", title "Dancing naked mole rats",
                    and display_id "dancing-naked-mole-rats"
    thumbnails:     A list of dictionaries, with the following entries:
                        * "id" (optional, string) - Thumbnail format ID
                        * "url"
                        * "preference" (optional, int) - quality of the image
                        * "width" (optional, int)
                        * "height" (optional, int)
                        * "resolution" (optional, string "{width}x{height}",
                                        deprecated)
                        * "filesize" (optional, int)
    thumbnail:      Full URL to a video thumbnail image.
    description:    Full video description.
    uploader:       Full name of the video uploader.
    license:        License name the video is licensed under.
    creator:        The creator of the video.
    release_timestamp: UNIX timestamp of the moment the video was released.
    release_date:   The date (YYYYMMDD) when the video was released.
    timestamp:      UNIX timestamp of the moment the video became available
                    (uploaded).
    upload_date:    Video upload date (YYYYMMDD).
                    If not explicitly set, calculated from timestamp.
    uploader_id:    Nickname or id of the video uploader.
    uploader_url:   Full URL to a personal webpage of the video uploader.
    channel:        Full name of the channel the video is uploaded on.
                    Note that channel fields may or may not repeat uploader
                    fields. This depends on a particular extractor.
    channel_id:     Id of the channel.
    channel_url:    Full URL to a channel webpage.
    location:       Physical location where the video was filmed.
    subtitles:      The available subtitles as a dictionary in the format
                    {tag: subformats}. "tag" is usually a language code, and
                    "subformats" is a list sorted from lower to higher
                    preference, each element is a dictionary with the "ext"
                    entry and one of:
                        * "data": The subtitles file contents
                        * "url": A URL pointing to the subtitles file
                    "ext" will be calculated from URL if missing
    automatic_captions: Like 'subtitles', used by the YoutubeIE for
                    automatically generated captions
    duration:       Length of the video in seconds, as an integer or float.
    view_count:     How many users have watched the video on the platform.
    like_count:     Number of positive ratings of the video
    dislike_count:  Number of negative ratings of the video
    repost_count:   Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
    comment_count:  Number of comments on the video
    comments:       A list of comments, each with one or more of the following
                    properties (all but one of text or html optional):
                        * "author" - human-readable name of the comment author
                        * "author_id" - user ID of the comment author
                        * "id" - Comment ID
                        * "html" - Comment as HTML
                        * "text" - Plain text of the comment
                        * "timestamp" - UNIX timestamp of comment
                        * "parent" - ID of the comment this one is replying to.
                                     Set to "root" to indicate that this is a
                                     comment to the original video.
    age_limit:      Age restriction for the video, as an integer (years)
    webpage_url:    The URL to the video webpage, if given to youtube-dl it
                    should allow getting the same result again. (It will be set
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.
    start_time:     Time in seconds where the reproduction should start, as
                    specified in the URL.
    end_time:       Time in seconds where the reproduction should end, as
                    specified in the URL.
    chapters:       A list of dictionaries, with the following entries:
                        * "start_time" - The start time of the chapter in seconds
                        * "end_time" - The end time of the chapter in seconds
                        * "title" (optional, string)

    The following fields should only be used when the video belongs to some logical
    chapter or section:

    chapter:        Name or title of the chapter the video belongs to.
    chapter_number: Number of the chapter the video belongs to, as an integer.
    chapter_id:     Id of the chapter the video belongs to, as a unicode string.

    The following fields should only be used when the video is an episode of some
    series, programme or podcast:

    series:         Title of the series or programme the video episode belongs to.
    season:         Title of the season the video episode belongs to.
    season_number:  Number of the season the video episode belongs to, as an integer.
    season_id:      Id of the season the video episode belongs to, as a unicode string.
    episode:        Title of the video episode. Unlike the mandatory video title field,
                    this field should denote the exact title of the video episode
                    without any kind of decoration.
    episode_number: Number of the video episode within a season, as an integer.
    episode_id:     Id of the video episode, as a unicode string.

    The following fields should only be used when the media is a track or a part of
    a music album:

    track:          Title of the track.
    track_number:   Number of the track within an album or a disc, as an integer.
    track_id:       Id of the track (useful in case of custom indexing, e.g. 6.iii),
                    as a unicode string.
    artist:         Artist(s) of the track.
    genre:          Genre(s) of the track.
    album:          Title of the album the track belongs to.
    album_type:     Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
    album_artist:   List of all artists who appeared on the album (e.g.
                    "Ash Borer / Fell Voices" or "Various Artists", useful for splits
                    and compilations).
    disc_number:    Number of the disc or other physical medium the track belongs to,
                    as an integer.
    release_year:   Year (YYYY) when the album was released.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.
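
    As a rough illustration, a minimal "video" result could look like the
    following sketch (all values are invented for the example):

        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'formats': [{
                'url': 'https://example.com/video.mp4',
                'ext': 'mp4',
                'format_id': 'mp4-720p',
                'width': 1280,
                'height': 720,
            }],
        }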

    _type "playlist" indicates multiple videos.
    There must be a key "entries", which is a list, an iterable, or a PagedList
    object, each element of which is a valid dictionary by this specification.

    Additionally, playlists can have "id", "title", "description", "uploader",
    "uploader_id", "uploader_url", "duration" attributes with the same semantics
    as videos (see above).

    _type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
    It must have an entries key like a playlist and contain all the keys
    required for a video at the same time.

    _type "url" indicates that the video must be extracted from another
    location, possibly by a different extractor. Its only required key is:
    "url" - the next URL to extract.
    The key "ie_key" can be set to the class name (minus the trailing "IE",
    e.g. "Youtube") if the extractor class is known in advance.
    Additionally, the dictionary may have any properties of the resolved entity
    known in advance, for example "title" if the title of the referred video is
    known ahead of time.
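
    For instance, a "url" result delegating to another extractor could look
    like this sketch (the URL is illustrative):

        {
            '_type': 'url',
            'url': 'https://www.youtube.com/watch?v=BaW_jenozKc',
            'ie_key': 'Youtube',
        }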

    _type "url_transparent" entities have the same specification as "url", but
    indicate that the given additional information is more precise than the one
    associated with the resolved URL.
    This is useful when a site employs a video service that hosts the video and
    its technical metadata, but that video service does not embed a useful
    title, description etc.

    A subclass of InfoExtractor must be defined to handle each specific site (or
    several sites). Such a concrete subclass should be added to the list of
    extractors. It should also:
    * define its _VALID_URL attribute as a regexp, or a Sequence of alternative
      regexps (but see below)
    * re-define the _real_extract() method
    * optionally re-define the _real_initialize() method.

    An extractor subclass may also override suitable() if necessary, but the
    function signature must be preserved and the function must import everything
    it needs (except other extractors), so that lazy_extractors works correctly.
    If the subclass's suitable() and _real_extract() functions avoid using
    _VALID_URL, the subclass need not set that class attribute.
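
    As a very rough sketch (the site, URL pattern and markup below are
    invented for illustration), a minimal concrete subclass could look like:

        class ExampleIE(InfoExtractor):
            _VALID_URL = r'https?://(?:www\.)?example\.com/watch/(?P<id>[0-9]+)'

            def _real_extract(self, url):
                video_id = self._match_id(url)
                webpage = self._download_webpage(url, video_id)
                return {
                    'id': video_id,
                    'title': self._search_regex(
                        r'<h1[^>]*>([^<]+)</h1>', webpage, 'title'),
                    'url': self._search_regex(
                        r'<source src="([^"]+)"', webpage, 'video URL'),
                }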

    An abstract subclass of InfoExtractor may be used to simplify implementation
    within an extractor module; it should not be added to the list of extractors.

    _GEO_BYPASS attribute may be set to False in order to disable
    geo restriction bypass mechanisms for a particular extractor.
    Though it won't disable explicit geo restriction bypass based on
    country code provided with geo_bypass_country.

    _GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
    countries for this extractor. One of these countries will be used by
    geo restriction bypass mechanism right away in order to bypass
    geo restriction, of course, if the mechanism is not disabled.

    _GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
    IP blocks in CIDR notation for this extractor. One of these IP blocks
    will be used by geo restriction bypass mechanism similarly
    to _GEO_COUNTRIES.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _x_forwarded_for_ip = None
    _GEO_BYPASS = True
    _GEO_COUNTRIES = None
    _GEO_IP_BLOCKS = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self._x_forwarded_for_ip = None
        self.set_downloader(downloader)

    @classmethod
    def __match_valid_url(cls, url):
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for cls, whereas getattr would also
        # match its superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            # _VALID_URL can now be a list/tuple of patterns
            cls._VALID_URL_RE = tuple(map(re.compile, variadic(cls._VALID_URL)))
        # 20% faster than next(filter(None, (p.match(url) for p in cls._VALID_URL_RE)), None) in 2.7
        for p in cls._VALID_URL_RE:
            p = p.match(url)
            if p:
                return p

    # The public alias can safely be overridden, as in some back-ports
    _match_valid_url = __match_valid_url

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        return cls.__match_valid_url(url) is not None

    @classmethod
    def _match_id(cls, url):
        m = cls.__match_valid_url(url)
        assert m
        return compat_str(m.group('id'))

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        self._initialize_geo_bypass({
            'countries': self._GEO_COUNTRIES,
            'ip_blocks': self._GEO_IP_BLOCKS,
        })
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def _initialize_geo_bypass(self, geo_bypass_context):
        """
        Initialize geo restriction bypass mechanism.

        This method is used to initialize geo bypass mechanism based on faking
        X-Forwarded-For HTTP header. A random country from the provided country
        list is selected and a random IP belonging to this country is generated.
        This IP will be passed as X-Forwarded-For HTTP header in all subsequent
        HTTP requests.

        This method will be used for initial geo bypass mechanism initialization
        during the instance initialization with _GEO_COUNTRIES and
        _GEO_IP_BLOCKS.

        You may also manually call it from an extractor's code if geo bypass
        information is not available beforehand (e.g. obtained during
        extraction) or due to some other reason. In this case you should pass
        this information in the geo bypass context passed as the first argument.
        It may contain the following fields (see the sketch below):

        countries: List of geo unrestricted countries (similar
            to _GEO_COUNTRIES)
        ip_blocks: List of geo unrestricted IP blocks in CIDR notation
            (similar to _GEO_IP_BLOCKS)
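
        For example, a late call from extraction code could look like this
        sketch (the country codes and IP block are illustrative):

            self._initialize_geo_bypass({
                'countries': ['DE', 'FR'],
                'ip_blocks': ['203.0.113.0/24'],
            })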

        """
        if not self._x_forwarded_for_ip:

            # Geo bypass mechanism is explicitly disabled by user
            if not self._downloader.params.get('geo_bypass', True):
                return

            if not geo_bypass_context:
                geo_bypass_context = {}

            # Backward compatibility: previously _initialize_geo_bypass
            # expected a list of countries, some 3rd party code may still use
            # it this way
            if isinstance(geo_bypass_context, (list, tuple)):
                geo_bypass_context = {
                    'countries': geo_bypass_context,
                }

            # The whole point of geo bypass mechanism is to fake IP
            # as X-Forwarded-For HTTP header based on some IP block or
            # country code.

            # Path 1: bypassing based on IP block in CIDR notation

            # Explicit IP block specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)

            # Otherwise use random IP block from geo bypass context but only
            # if extractor is known as geo bypassable
            if not ip_block:
                ip_blocks = geo_bypass_context.get('ip_blocks')
                if self._GEO_BYPASS and ip_blocks:
                    ip_block = random.choice(ip_blocks)

            if ip_block:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s as X-Forwarded-For.'
                        % self._x_forwarded_for_ip)
                return

            # Path 2: bypassing based on country code

            # Explicit country code specified by user, use it right away
            # regardless of whether extractor is geo bypassable or not
            country = self._downloader.params.get('geo_bypass_country', None)

            # Otherwise use random country code from geo bypass context but
            # only if extractor is known as geo bypassable
            if not country:
                countries = geo_bypass_context.get('countries')
                if self._GEO_BYPASS and countries:
                    country = random.choice(countries)

            if country:
                self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
                if self._downloader.params.get('verbose', False):
                    self._downloader.to_screen(
                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
                        % (self._x_forwarded_for_ip, country.upper()))

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    ie_result = self._real_extract(url)
                    if self._x_forwarded_for_ip:
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except ExtractorError:
            raise
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True)
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e)

    def __maybe_fake_ip_and_retry(self, countries):
        if (not self._downloader.params.get('geo_bypass_country', None)
                and self._GEO_BYPASS
                and self._downloader.params.get('geo_bypass', True)
                and not self._x_forwarded_for_ip
                and countries):
            country_code = random.choice(countries)
            self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
            if self._x_forwarded_for_ip:
                self.report_warning(
                    'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
                    % (self._x_forwarded_for_ip, country_code.upper()))
                return True
        return False

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    @property
    def cache(self):
        return self._downloader.cache

    @property
    def cookiejar(self):
        return self._downloader.cookiejar

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        return compat_str(cls.__name__[:-2])

    @property
    def IE_NAME(self):
        return compat_str(type(self).__name__[:-2])

    @staticmethod
    def __can_accept_status_code(err, expected_status):
        assert isinstance(err, compat_urllib_error.HTTPError)
        if expected_status is None:
            return False
        if isinstance(expected_status, compat_integer_types):
            return err.code == expected_status
        elif isinstance(expected_status, (list, tuple)):
            return err.code in expected_status
        elif callable(expected_status):
            return expected_status(err.code) is True
        else:
            assert False

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            if 'X-Forwarded-For' not in headers:
                headers['X-Forwarded-For'] = self._x_forwarded_for_ip

        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
        if hasattr(ssl, 'CertificateError'):
            exceptions.append(ssl.CertificateError)
        try:
            return self._downloader.urlopen(url_or_request)
        except tuple(exceptions) as err:
            if isinstance(err, compat_urllib_error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of <https://bugs.python.org/issue15002>
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self._downloader.report_warning(errmsg)
                return False

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        See _download_webpage docstring for arguments specification.
        """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)

    @staticmethod
    def _guess_encoding_from_content(content_type, webpage_bytes):
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
                          webpage_bytes[:1024])
            if m:
                encoding = m.group(1).decode('ascii')
            elif webpage_bytes.startswith(b'\xff\xfe'):
                encoding = 'utf-16'
            else:
                encoding = 'utf-8'

        return encoding

    def __check_blocked(self, content):
        first_block = content[:512]
        if ('<title>Access to this site is blocked</title>' in content
                and 'Websense' in first_block):
            msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
            blocked_iframe = self._html_search_regex(
                r'<iframe src="([^"]+)"', content,
                'Websense information URL', default=None)
            if blocked_iframe:
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)
        if '<title>The URL you requested has been blocked</title>' in first_block:
            msg = (
                'Access to this webpage has been blocked by Indian censorship. '
                'Use a VPN or proxy server (with --proxy) to route around it.')
            block_msg = self._html_search_regex(
                r'</h1><p>(.*?)</p>',
                content, 'block message', default=None)
            if block_msg:
                msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
            raise ExtractorError(msg, expected=True)
        if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content
                and 'blocklist.rkn.gov.ru' in content):
            raise ExtractorError(
                'Access to this webpage has been blocked by decision of the Russian government. '
                'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
                expected=True)

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        if prefix is not None:
            webpage_bytes = prefix + webpage_bytes
        if not encoding:
            encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
        if self._downloader.params.get('dump_intermediate_pages', False):
            self.to_screen('Dumping request to ' + urlh.geturl())
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        if self._downloader.params.get('write_pages', False):
            basen = '%s_%s' % (video_id, urlh.geturl())
            if len(basen) > 240:
                h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
                basen = basen[:240 - len(h)] + h
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if compat_os_name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

        try:
            content = webpage_bytes.decode(encoding, 'replace')
        except LookupError:
            content = webpage_bytes.decode('utf-8', 'replace')

        self.__check_blocked(content)

        return content

    def _download_webpage(
            self, url_or_request, video_id, note=None, errnote=None,
            fatal=True, tries=1, timeout=5, encoding=None, data=None,
            headers={}, query={}, expected_status=None):
        """
        Return the data of the page as a string.

        Arguments:
        url_or_request -- plain text URL as a string or
            a compat_urllib_request.Request object
        video_id -- Video/playlist/item identifier (string)

        Keyword arguments:
        note -- note printed before downloading (string)
        errnote -- note printed in case of an error (string)
        fatal -- flag denoting whether error should be considered fatal,
            i.e. whether it should cause ExtractorError to be raised,
            otherwise a warning will be reported and extraction continued
        tries -- number of tries
        timeout -- sleep interval between tries
        encoding -- encoding for a page content decoding, guessed automatically
            when not explicitly specified
        data -- POST data (bytes)
        headers -- HTTP headers (dict)
        query -- URL query (dict)
        expected_status -- allows accepting failed HTTP requests (non 2xx
            status code) by explicitly specifying a set of accepted status
            codes. Can be any of the following entities:
                - an integer type specifying an exact failed status code to
                  accept
                - a list or a tuple of integer types specifying a list of
                  failed status codes to accept
                - a callable accepting an actual failed status code and
                  returning True if it should be accepted
            Note that this argument does not affect success status codes (2xx)
            which are always accepted.
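
        For example, a request that also accepts a 404 response could look
        like this sketch (the URL is illustrative):

            webpage = self._download_webpage(
                'https://example.com/might-be-deleted', video_id,
                expected_status=404)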
|
|
|
|
|
"""
|
|
|
|
|
|
2014-12-04 22:11:27 +09:00
|
|
|
|
success = False
|
|
|
|
|
try_count = 0
|
|
|
|
|
while success is False:
|
|
|
|
|
try:
|
2018-06-18 06:01:48 +09:00
|
|
|
|
res = self._download_webpage_handle(
|
|
|
|
|
url_or_request, video_id, note, errnote, fatal,
|
|
|
|
|
encoding=encoding, data=data, headers=headers, query=query,
|
|
|
|
|
expected_status=expected_status)
|
2014-12-04 22:11:27 +09:00
|
|
|
|
success = True
|
|
|
|
|
except compat_http_client.IncompleteRead as e:
|
|
|
|
|
try_count += 1
|
|
|
|
|
if try_count >= tries:
|
|
|
|
|
raise e
|
|
|
|
|
self._sleep(timeout, video_id)
|
2013-12-09 09:49:01 +09:00
|
|
|
|
if res is False:
|
|
|
|
|
return res
|
|
|
|
|
else:
|
|
|
|
|
content, _ = res
|
|
|
|
|
return content
|
2013-06-24 02:57:38 +09:00
|
|
|
|
|
2018-03-18 04:17:34 +09:00
|
|
|
|
def _download_xml_handle(
|
|
|
|
|
self, url_or_request, video_id, note='Downloading XML',
|
|
|
|
|
errnote='Unable to download XML', transform_source=None,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
fatal=True, encoding=None, data=None, headers={}, query={},
|
|
|
|
|
expected_status=None):
|
|
|
|
|
"""
|
2019-03-06 03:21:57 +09:00
|
|
|
|
Return a tuple (xml as an compat_etree_Element, URL handle).
|
2018-06-18 06:01:48 +09:00
|
|
|
|
|
|
|
|
|
See _download_webpage docstring for arguments specification.
|
|
|
|
|
"""
|
2018-03-18 04:17:34 +09:00
|
|
|
|
res = self._download_webpage_handle(
|
|
|
|
|
url_or_request, video_id, note, errnote, fatal=fatal,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
encoding=encoding, data=data, headers=headers, query=query,
|
|
|
|
|
expected_status=expected_status)
|
2018-03-18 04:17:34 +09:00
|
|
|
|
if res is False:
|
|
|
|
|
return res
|
|
|
|
|
xml_string, urlh = res
|
|
|
|
|
return self._parse_xml(
|
|
|
|
|
xml_string, video_id, transform_source=transform_source,
|
|
|
|
|
fatal=fatal), urlh
|
|
|
|
|
|
2018-06-18 06:01:48 +09:00
|
|
|
|
def _download_xml(
|
|
|
|
|
self, url_or_request, video_id,
|
|
|
|
|
note='Downloading XML', errnote='Unable to download XML',
|
|
|
|
|
transform_source=None, fatal=True, encoding=None,
|
|
|
|
|
data=None, headers={}, query={}, expected_status=None):
|
|
|
|
|
"""
|
2019-03-06 03:21:57 +09:00
|
|
|
|
Return the xml as an compat_etree_Element.
|
2018-06-18 06:01:48 +09:00
|
|
|
|
|
|
|
|
|
See _download_webpage docstring for arguments specification.
|
|
|
|
|
"""
|
2018-03-18 04:17:34 +09:00
|
|
|
|
res = self._download_xml_handle(
|
|
|
|
|
url_or_request, video_id, note=note, errnote=errnote,
|
|
|
|
|
transform_source=transform_source, fatal=fatal, encoding=encoding,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
data=data, headers=headers, query=query,
|
|
|
|
|
expected_status=expected_status)
|
2018-03-18 04:17:34 +09:00
|
|
|
|
return res if res is False else res[0]
|
2017-08-23 02:32:41 +09:00
|
|
|
|
|
|
|
|
|
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
|
2013-12-10 20:45:22 +09:00
|
|
|
|
if transform_source:
|
|
|
|
|
xml_string = transform_source(xml_string)
|
2017-08-23 02:32:41 +09:00
|
|
|
|
try:
|
|
|
|
|
return compat_etree_fromstring(xml_string.encode('utf-8'))
|
|
|
|
|
except compat_xml_parse_error as ve:
|
|
|
|
|
errmsg = '%s: Failed to parse XML ' % video_id
|
|
|
|
|
if fatal:
|
|
|
|
|
raise ExtractorError(errmsg, cause=ve)
|
|
|
|
|
else:
|
|
|
|
|
self.report_warning(errmsg + str(ve))
|
2013-11-24 22:59:19 +09:00
|
|
|
|
|
2018-04-28 03:59:15 +09:00
|
|
|
|
def _download_json_handle(
|
|
|
|
|
self, url_or_request, video_id, note='Downloading JSON metadata',
|
|
|
|
|
errnote='Unable to download JSON metadata', transform_source=None,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
fatal=True, encoding=None, data=None, headers={}, query={},
|
|
|
|
|
expected_status=None):
|
|
|
|
|
"""
|
|
|
|
|
Return a tuple (JSON object, URL handle).
|
|
|
|
|
|
|
|
|
|
See _download_webpage docstring for arguments specification.
|
|
|
|
|
"""
|
2018-04-28 03:59:15 +09:00
|
|
|
|
res = self._download_webpage_handle(
|
2015-03-21 13:21:27 +09:00
|
|
|
|
url_or_request, video_id, note, errnote, fatal=fatal,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
encoding=encoding, data=data, headers=headers, query=query,
|
|
|
|
|
expected_status=expected_status)
|
2018-04-28 03:59:15 +09:00
|
|
|
|
if res is False:
|
|
|
|
|
return res
|
|
|
|
|
json_string, urlh = res
|
2014-12-05 20:07:06 +09:00
|
|
|
|
return self._parse_json(
|
2018-04-28 03:59:15 +09:00
|
|
|
|
json_string, video_id, transform_source=transform_source,
|
|
|
|
|
fatal=fatal), urlh
|
|
|
|
|
|
|
|
|
|
def _download_json(
|
|
|
|
|
self, url_or_request, video_id, note='Downloading JSON metadata',
|
|
|
|
|
errnote='Unable to download JSON metadata', transform_source=None,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
fatal=True, encoding=None, data=None, headers={}, query={},
|
|
|
|
|
expected_status=None):
|
|
|
|
|
"""
|
|
|
|
|
Return the JSON object as a dict.
|
|
|
|
|
|
|
|
|
|
See _download_webpage docstring for arguments specification.
|
|
|
|
|
"""
|
2018-04-28 03:59:15 +09:00
|
|
|
|
res = self._download_json_handle(
|
|
|
|
|
url_or_request, video_id, note=note, errnote=errnote,
|
|
|
|
|
transform_source=transform_source, fatal=fatal, encoding=encoding,
|
2018-06-18 06:01:48 +09:00
|
|
|
|
data=data, headers=headers, query=query,
|
|
|
|
|
expected_status=expected_status)
|
2018-04-28 03:59:15 +09:00
|
|
|
|
return res if res is False else res[0]
|
2014-12-05 20:07:06 +09:00
|
|
|
|
|
|
|
|
|
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
|
2014-02-10 01:56:10 +09:00
|
|
|
|
if transform_source:
|
|
|
|
|
json_string = transform_source(json_string)
|
2014-01-07 17:35:34 +09:00
|
|
|
|
try:
|
|
|
|
|
return json.loads(json_string)
|
|
|
|
|
except ValueError as ve:
|
2014-09-30 18:12:59 +09:00
|
|
|
|
errmsg = '%s: Failed to parse JSON ' % video_id
|
|
|
|
|
if fatal:
|
|
|
|
|
raise ExtractorError(errmsg, cause=ve)
|
|
|
|
|
else:
|
|
|
|
|
self.report_warning(errmsg + str(ve))
|
2014-01-07 17:35:34 +09:00
|
|
|
|
|
2024-01-16 03:32:06 +09:00
|
|
|
|
def __ie_msg(self, *msg):
|
|
|
|
|
return '[{0}] {1}'.format(self.IE_NAME, ''.join(msg))
|
|
|
|
|
|
|
|
|
|
# msg, video_id=None, *args, only_once=False, **kwargs
|
|
|
|
|
def report_warning(self, msg, *args, **kwargs):
|
|
|
|
|
if len(args) > 0:
|
|
|
|
|
video_id = args[0]
|
|
|
|
|
args = args[1:]
|
|
|
|
|
else:
|
|
|
|
|
video_id = kwargs.pop('video_id', None)
|
2014-08-28 08:04:43 +09:00
|
|
|
|
idstr = '' if video_id is None else '%s: ' % video_id
|
2013-12-23 23:57:43 +09:00
|
|
|
|
self._downloader.report_warning(
|
2024-01-16 03:32:06 +09:00
|
|
|
|
self.__ie_msg(idstr, msg), *args, **kwargs)
|
2013-12-23 23:57:43 +09:00
|
|
|
|
|
2013-06-24 02:57:38 +09:00
|
|
|
|
def to_screen(self, msg):
|
|
|
|
|
"""Print msg to screen, prefixing it with '[ie_name]'"""
|
2024-01-16 03:32:06 +09:00
|
|
|
|
self._downloader.to_screen(self.__ie_msg(msg))
|
|
|
|
|
|
|
|
|
|
def write_debug(self, msg, only_once=False, _cache=[]):
|
|
|
|
|
'''Log debug message or Print message to stderr'''
|
|
|
|
|
if not self.get_param('verbose', False):
|
|
|
|
|
return
|
|
|
|
|
message = '[debug] ' + self.__ie_msg(msg)
|
|
|
|
|
logger = self.get_param('logger')
|
|
|
|
|
if logger:
|
|
|
|
|
logger.debug(message)
|
|
|
|
|
else:
|
|
|
|
|
if only_once and hash(message) in _cache:
|
|
|
|
|
return
|
|
|
|
|
self._downloader.to_stderr(message)
|
|
|
|
|
_cache.append(hash(message))
|
|
|
|
|
|
|
|
|
|
# name, default=None, *args, **kwargs
|
|
|
|
|
def get_param(self, name, *args, **kwargs):
|
|
|
|
|
default, args = (args[0], args[1:]) if len(args) > 0 else (kwargs.pop('default', None), args)
|
|
|
|
|
if self._downloader:
|
|
|
|
|
return self._downloader.params.get(name, default, *args, **kwargs)
|
|
|
|
|
return default

    def report_drm(self, video_id):
        self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id)

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen('%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen('%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen('Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen('Logging in')

    @staticmethod
    def raise_login_required(msg='This video is only available for registered users'):
        raise ExtractorError(
            '%s. Use --username and --password or --netrc to provide account credentials.' % msg,
            expected=True)

    @staticmethod
    def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
        raise GeoRestrictedError(msg, countries=countries)

    def raise_no_formats(self, msg, expected=False, video_id=None):
        if expected and (
                self.get_param('ignore_no_formats_error') or self.get_param('wait_for_video')):
            self.report_warning(msg, video_id)
        elif isinstance(msg, ExtractorError):
            raise msg
        else:
            raise ExtractorError(msg, expected=expected, video_id=video_id)

    # Methods for following #608
    @staticmethod
    def url_result(url, ie=None, video_id=None, video_title=None):
        """Returns a URL that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        if video_id is not None:
            video_info['id'] = video_id
        if video_title is not None:
            video_info['title'] = video_title
        return video_info

    def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
        urls = orderedSet(
            self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
            for m in matches)
        return self.playlist_result(
            urls, playlist_id=playlist_id, playlist_title=playlist_title)

    @staticmethod
    def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        if playlist_description:
            video_info['description'] = playlist_description
        return video_info
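
    # Illustrative sketch of the result-dict convention these helpers build
    # (the URLs and IDs below are made up for the example):
    #
    #   self.url_result('https://example.com/watch/42', ie='Generic')
    #   # -> {'_type': 'url', 'url': 'https://example.com/watch/42',
    #   #     'ie_key': 'Generic'}
    #
    #   self.playlist_result([entry1, entry2], playlist_id='ch1',
    #                        playlist_title='Channel 1')
    #   # -> {'_type': 'playlist', 'entries': [entry1, entry2],
    #   #     'id': 'ch1', 'title': 'Channel 1'}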

    def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single pattern or
        a list of patterns, and return the first matching group.
        On failure, return a default value, report a warning, or raise a
        RegexNotFoundError naming the missing field, depending on default
        and fatal.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
            _name = '\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            elif isinstance(group, (list, tuple)):
                return tuple(mobj.group(g) for g in group)
            else:
                return mobj.group(group)
        elif default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract %s' % _name)
        else:
            self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
            return None
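
    # Illustrative sketch of typical _search_regex() calls (the patterns and
    # the `webpage` variable are assumed context from a hypothetical
    # extractor, not part of this class):
    #
    #   title = self._search_regex(
    #       r'<h1[^>]*>([^<]+)</h1>', webpage, 'title')
    #   # optional field: returns None instead of raising
    #   uploader = self._search_regex(
    #       r'"uploader"\s*:\s*"([^"]+)"', webpage, 'uploader', default=None)
    #   # multiple groups at once
    #   width, height = self._search_regex(
    #       r'(\d+)x(\d+)', '1280x720', 'resolution', group=(1, 2))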

    def _search_json(self, start_pattern, string, name, video_id, **kwargs):
        """Searches string for the JSON object specified by start_pattern"""

        # self, start_pattern, string, name, video_id, *, end_pattern='',
        # contains_pattern=r'{(?s:.+)}', fatal=True, default=NO_DEFAULT
        # NB: end_pattern is only used to reduce the size of the initial match
        end_pattern = kwargs.pop('end_pattern', '')
        # (?:[\s\S]) simulates (?s:.)
        contains_pattern = kwargs.pop('contains_pattern', r'{[\s\S]+}')
        fatal = kwargs.pop('fatal', True)
        default = kwargs.pop('default', NO_DEFAULT)

        if default is NO_DEFAULT:
            default, has_default = {}, False
        else:
            fatal, has_default = False, True

        json_string = self._search_regex(
            r'(?:{0})\s*(?P<json>{1})\s*(?:{2})'.format(
                start_pattern, contains_pattern, end_pattern),
            string, name, group='json', fatal=fatal, default=None if has_default else NO_DEFAULT)
        if not json_string:
            return default

        # yt-dlp has a special JSON parser that allows trailing text.
        # Until that arrives here, the diagnostic from the exception
        # raised by json.loads() is used to extract the wanted text.
        # Either way, it's a problem if a transform_source() can't
        # handle the trailing text.

        # force an exception
        kwargs['fatal'] = True

        # self._downloader._format_err(name, self._downloader.Styles.EMPHASIS)
        for _ in range(2):
            try:
                # return self._parse_json(json_string, video_id, ignore_extra=True, **kwargs)
                transform_source = kwargs.pop('transform_source', None)
                if transform_source:
                    json_string = transform_source(json_string)
                return self._parse_json(json_string, video_id, **compat_kwargs(kwargs))
            except ExtractorError as e:
                end = int_or_none(self._search_regex(r'\(char\s+(\d+)', error_to_compat_str(e), 'end', default=None))
                if end is not None:
                    json_string = json_string[:end]
                    continue
                msg = 'Unable to extract {0} - Failed to parse JSON'.format(name)
                if fatal:
                    raise ExtractorError(msg, cause=e.cause, video_id=video_id)
                elif not has_default:
                    self.report_warning(
                        '{0}: {1}'.format(msg, error_to_compat_str(e)), video_id=video_id)
                return default
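
    # Illustrative sketch of a _search_json() call (`webpage` and the
    # variable name in the pattern are assumed context from a hypothetical
    # extractor):
    #
    #   config = self._search_json(
    #       r'var\s+playerConfig\s*=', webpage, 'player config', video_id,
    #       end_pattern=r';', default={})
    #   # matches e.g.  var playerConfig = {"id": "x", "formats": [...]};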

    def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags, group)
        if isinstance(res, tuple):
            return tuple(map(clean_html, res))
        return clean_html(res)

    def _get_netrc_login_info(self, netrc_machine=None):
        username = None
        password = None

        if self._downloader.params.get('usenetrc', False):
            try:
                netrc_machine = netrc_machine or self._NETRC_MACHINE
                info = netrc.netrc().authenticators(netrc_machine)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError(
                        'No authenticators for %s' % netrc_machine)
            except (AttributeError, IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(
                    'parsing .netrc: %s' % error_to_compat_str(err))

        return username, password

    def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
        """
        Get the login info as (username, password).
        First look for manually specified credentials, using username_option
        and password_option as keys in the params dictionary. If no such
        credentials are available, look in the netrc file using the
        netrc_machine or _NETRC_MACHINE value.
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get(username_option) is not None:
            username = downloader_params[username_option]
            password = downloader_params[password_option]
        else:
            username, password = self._get_netrc_login_info(netrc_machine)

        return username, password
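
    # Illustrative sketch of how a login-capable extractor would use this
    # helper (the method and login call below are hypothetical):
    #
    #   def _real_initialize(self):
    #       username, password = self._get_login_info()
    #       if username is None:
    #           return  # anonymous access
    #       self._perform_site_login(username, password)  # hypothetical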

    def _get_tfa_info(self, note='two-factor verification code'):
        """
        Get the two-factor authentication info.
        TODO: asking the user will be required for SMS/phone verification;
        currently this just uses the command-line option.
        If there's no downloader available, return None; otherwise fall back
        to prompting the user.
        """
        if self._downloader is None:
            return None
        downloader_params = self._downloader.params

        if downloader_params.get('twofactor') is not None:
            return downloader_params['twofactor']

        return compat_getpass('Type %s and press [Return]: ' % note)

    # Helper functions for extracting OpenGraph info
    @staticmethod
    def _og_regexes(prop):
        content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?)(?=\s|/?>))'
        property_re = (r'(?:name|property)=(?:\'og[:-]%(prop)s\'|"og[:-]%(prop)s"|\s*og[:-]%(prop)s\b)'
                       % {'prop': re.escape(prop)})
        template = r'<meta[^>]+?%s[^>]+?%s'
        return [
            template % (property_re, content_re),
            template % (content_re, property_re),
        ]
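
    # Illustrative sketch of the markup _og_regexes('title') is built to
    # match, in either attribute order (example tags made up):
    #
    #   <meta property="og:title" content="Some Title">
    #   <meta content="Some Title" property="og:title">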

    @staticmethod
    def _meta_regex(prop):
        return r'''(?isx)<meta
                    (?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
                    [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)

    def _og_search_property(self, prop, html, name=None, **kargs):
        if not isinstance(prop, (list, tuple)):
            prop = [prop]
        if name is None:
            name = 'OpenGraph %s' % prop[0]
        og_regexes = []
        for p in prop:
            og_regexes.extend(self._og_regexes(p))
        escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
        if escaped is None:
            return None
        return unescapeHTML(escaped)

    def _og_search_thumbnail(self, html, **kargs):
        return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)

    def _og_search_description(self, html, **kargs):
        return self._og_search_property('description', html, fatal=False, **kargs)

    def _og_search_title(self, html, **kargs):
        return self._og_search_property('title', html, **kargs)

    def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
        regexes = self._og_regexes('video') + self._og_regexes('video:url')
        if secure:
            regexes = self._og_regexes('video:secure_url') + regexes
        return self._html_search_regex(regexes, html, name, **kargs)

    def _og_search_url(self, html, **kargs):
        return self._og_search_property('url', html, **kargs)

    def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
        if not isinstance(name, (list, tuple)):
            name = [name]
        if display_name is None:
            display_name = name[0]
        return self._html_search_regex(
            [self._meta_regex(n) for n in name],
            html, display_name, fatal=fatal, group='content', **kwargs)
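
    # Illustrative sketch of _html_search_meta() usage (`webpage` is assumed
    # context; the meta names are examples):
    #
    #   description = self._html_search_meta(
    #       ('description', 'og:description'), webpage, 'description')
    #   # matches e.g.  <meta name="description" content="...">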

    def _dc_search_uploader(self, html):
        return self._html_search_meta('dc.creator', html, 'uploader')

    def _rta_search(self, html):
        # See http://www.rtalabel.org/index.php?content=howtofaq#single
        if re.search(r'(?ix)<meta\s+name="rating"\s+'
                     r'     content="RTA-5042-1996-1400-1577-RTA"',
                     html):
            return 18
        return 0

    def _media_rating_search(self, html):
        # See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
        rating = self._html_search_meta('rating', html)

        if not rating:
            return None

        RATING_TABLE = {
            'safe for kids': 0,
            'general': 8,
            '14 years': 14,
            'mature': 17,
            'restricted': 19,
        }
        return RATING_TABLE.get(rating.lower())

    def _family_friendly_search(self, html):
        # See http://schema.org/VideoObject
        family_friendly = self._html_search_meta(
            'isFamilyFriendly', html, default=None)

        if not family_friendly:
            return None

        RATING_TABLE = {
            '1': 0,
            'true': 0,
            '0': 18,
            'false': 18,
        }
        return RATING_TABLE.get(family_friendly.lower())

    def _twitter_search_player(self, html):
        return self._html_search_meta('twitter:player', html,
                                      'twitter card player')

    def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
        json_ld_list = list(re.finditer(JSON_LD_RE, html))
        default = kwargs.get('default', NO_DEFAULT)
        # JSON-LD may be malformed and thus `fatal` should be respected.
        # At the same time `default` may be passed that assumes `fatal=False`
        # for _search_regex. Let's simulate the same behavior here as well.
        fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
        json_ld = []
        for mobj in json_ld_list:
            json_ld_item = self._parse_json(
                mobj.group('json_ld'), video_id, fatal=fatal)
            if not json_ld_item:
                continue
            if isinstance(json_ld_item, dict):
                json_ld.append(json_ld_item)
            elif isinstance(json_ld_item, (list, tuple)):
                json_ld.extend(json_ld_item)
        if json_ld:
            json_ld = self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
        if json_ld:
            return json_ld
        if default is not NO_DEFAULT:
            return default
        elif fatal:
            raise RegexNotFoundError('Unable to extract JSON-LD')
        else:
            self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message())
            return {}
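
    # Illustrative sketch of the markup _search_json_ld() consumes (a made-up
    # minimal example):
    #
    #   <script type="application/ld+json">
    #   {"@context": "https://schema.org", "@type": "VideoObject",
    #    "name": "Clip", "uploadDate": "2020-01-01", "duration": "PT1M30S"}
    #   </script>
    #
    #   info = self._search_json_ld(webpage, video_id,
    #                               expected_type='VideoObject', default={})
    #   # -> includes {'title': 'Clip', 'duration': 90.0,
    #   #              'timestamp': 1577836800}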

    def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
        if isinstance(json_ld, compat_str):
            json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
        if not json_ld:
            return {}
        info = {}
        if not isinstance(json_ld, (list, tuple, dict)):
            return info
        if isinstance(json_ld, dict):
            json_ld = [json_ld]

        INTERACTION_TYPE_MAP = {
            'CommentAction': 'comment',
            'AgreeAction': 'like',
            'DisagreeAction': 'dislike',
            'LikeAction': 'like',
            'DislikeAction': 'dislike',
            'ListenAction': 'view',
            'WatchAction': 'view',
            'ViewAction': 'view',
        }

        def extract_interaction_type(e):
            interaction_type = e.get('interactionType')
            if isinstance(interaction_type, dict):
                interaction_type = interaction_type.get('@type')
            return str_or_none(interaction_type)

        def extract_interaction_statistic(e):
            interaction_statistic = e.get('interactionStatistic')
            if isinstance(interaction_statistic, dict):
                interaction_statistic = [interaction_statistic]
            if not isinstance(interaction_statistic, list):
                return
            for is_e in interaction_statistic:
                if not isinstance(is_e, dict):
                    continue
                if is_e.get('@type') != 'InteractionCounter':
                    continue
                interaction_type = extract_interaction_type(is_e)
                if not interaction_type:
                    continue
                # For the interaction count some sites provide a string
                # instead of an integer (as per spec), with non-digit
                # characters (e.g. ","), so extract the count with the more
                # relaxed str_to_int
                interaction_count = str_to_int(is_e.get('userInteractionCount'))
                if interaction_count is None:
                    continue
                count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
                if not count_kind:
                    continue
                count_key = '%s_count' % count_kind
                if info.get(count_key) is not None:
                    continue
                info[count_key] = interaction_count

        def extract_video_object(e):
            assert e['@type'] == 'VideoObject'
            author = e.get('author')
            info.update({
                'url': url_or_none(e.get('contentUrl')),
                'title': unescapeHTML(e.get('name')),
                'description': unescapeHTML(e.get('description')),
                'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
                'duration': parse_duration(e.get('duration')),
                'timestamp': unified_timestamp(e.get('uploadDate')),
                # author can be an instance of the 'Organization' or 'Person'
                # types; both have a 'name' property (inherited from the
                # 'Thing' type) [1]. However, some websites use the plain
                # 'Text' type instead.
                # 1. https://schema.org/VideoObject
                'uploader': author.get('name') if isinstance(author, dict) else author if isinstance(author, compat_str) else None,
                'filesize': float_or_none(e.get('contentSize')),
                'tbr': int_or_none(e.get('bitrate')),
                'width': int_or_none(e.get('width')),
                'height': int_or_none(e.get('height')),
                'view_count': int_or_none(e.get('interactionCount')),
            })
            extract_interaction_statistic(e)

        for e in json_ld:
            if '@context' in e:
                item_type = e.get('@type')
                if expected_type is not None and expected_type != item_type:
                    continue
                if item_type in ('TVEpisode', 'Episode'):
                    episode_name = unescapeHTML(e.get('name'))
                    info.update({
                        'episode': episode_name,
                        'episode_number': int_or_none(e.get('episodeNumber')),
                        'description': unescapeHTML(e.get('description')),
                    })
                    if not info.get('title') and episode_name:
                        info['title'] = episode_name
                    part_of_season = e.get('partOfSeason')
                    if isinstance(part_of_season, dict) and part_of_season.get('@type') in ('TVSeason', 'Season', 'CreativeWorkSeason'):
                        info.update({
                            'season': unescapeHTML(part_of_season.get('name')),
                            'season_number': int_or_none(part_of_season.get('seasonNumber')),
                        })
                    part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
                    if isinstance(part_of_series, dict) and part_of_series.get('@type') in ('TVSeries', 'Series', 'CreativeWorkSeries'):
                        info['series'] = unescapeHTML(part_of_series.get('name'))
                elif item_type == 'Movie':
                    info.update({
                        'title': unescapeHTML(e.get('name')),
                        'description': unescapeHTML(e.get('description')),
                        'duration': parse_duration(e.get('duration')),
                        'timestamp': unified_timestamp(e.get('dateCreated')),
                    })
                elif item_type in ('Article', 'NewsArticle'):
                    info.update({
                        'timestamp': parse_iso8601(e.get('datePublished')),
                        'title': unescapeHTML(e.get('headline')),
                        'description': unescapeHTML(e.get('articleBody')),
                    })
                elif item_type == 'VideoObject':
                    extract_video_object(e)
                    if expected_type is None:
                        continue
                    else:
                        break
                video = e.get('video')
                if isinstance(video, dict) and video.get('@type') == 'VideoObject':
                    extract_video_object(video)
                if expected_type is None:
                    continue
                else:
                    break
        return dict((k, v) for k, v in info.items() if v is not None)

    def _search_nextjs_data(self, webpage, video_id, **kw):
        # ..., *, transform_source=None, fatal=True, default=NO_DEFAULT

        # TODO: remove this backward compat
        default = kw.get('default', NO_DEFAULT)
        if default == '{}':
            kw['default'] = {}
            kw = compat_kwargs(kw)

        return self._search_json(
            r'''<script\s[^>]*?\bid\s*=\s*('|")__NEXT_DATA__\1[^>]*>''',
            webpage, 'next.js data', video_id, end_pattern='</script>',
            **kw)
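
    # Illustrative sketch of _search_nextjs_data() usage (`webpage` is
    # assumed context; the key path is only the conventional Next.js layout,
    # not guaranteed for every site):
    #
    #   nextjs = self._search_nextjs_data(webpage, video_id, default={})
    #   video = nextjs.get('props', {}).get('pageProps', {}).get('video')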

    def _search_nuxt_data(self, webpage, video_id, *args, **kwargs):
        """Parses Nuxt.js metadata. This works as long as the function invoked to build __NUXT__ is a pure function"""

        # self, webpage, video_id, context_name='__NUXT__', *, fatal=True, traverse=('data', 0)
        context_name = args[0] if len(args) > 0 else kwargs.get('context_name', '__NUXT__')
        fatal = kwargs.get('fatal', True)
        traverse = kwargs.get('traverse', ('data', 0))

        re_ctx = re.escape(context_name)

        FUNCTION_RE = (r'\(\s*function\s*\((?P<arg_keys>[\s\S]*?)\)\s*\{\s*'
                       r'return\s+(?P<js>\{[\s\S]*?})\s*;?\s*}\s*\((?P<arg_vals>[\s\S]*?)\)')

        js, arg_keys, arg_vals = self._search_regex(
            (p.format(re_ctx, FUNCTION_RE) for p in
                (r'<script>\s*window\s*\.\s*{0}\s*=\s*{1}\s*\)\s*;?\s*</script>',
                 r'{0}\s*\([\s\S]*?{1}')),
            webpage, context_name, group=('js', 'arg_keys', 'arg_vals'),
            default=NO_DEFAULT if fatal else (None, None, None))
        if js is None:
            return {}

        args = dict(zip(arg_keys.split(','), map(json.dumps, self._parse_json(
            '[{0}]'.format(arg_vals), video_id, transform_source=js_to_json, fatal=fatal) or ())))

        ret = self._parse_json(js, video_id, transform_source=functools.partial(js_to_json, vars=args), fatal=fatal)
        return traverse_obj(ret, traverse) or {}

    @staticmethod
    def _hidden_inputs(html):
        html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
        hidden_inputs = {}
        for input in re.findall(r'(?i)(<input[^>]+>)', html):
            attrs = extract_attributes(input)
            if not input:
                continue
            if attrs.get('type') not in ('hidden', 'submit'):
                continue
            name = attrs.get('name') or attrs.get('id')
            value = attrs.get('value')
            if name and value is not None:
                hidden_inputs[name] = value
        return hidden_inputs
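
    # Illustrative sketch: given form markup like
    #
    #   <input type="hidden" name="csrf_token" value="abc123">
    #   <input type="text" name="q" value="ignored">
    #
    # _hidden_inputs() returns {'csrf_token': 'abc123'} (the visible text
    # input is skipped).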

    def _form_hidden_inputs(self, form_id, html):
        form = self._search_regex(
            r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
            html, '%s form' % form_id, group='form')
        return self._hidden_inputs(form)

    def _sort_formats(self, formats, field_preference=None):
        if not formats:
            raise ExtractorError('No video formats found')

        for f in formats:
            # Automatically determine tbr when missing based on abr and vbr (improves
            # formats sorting in some cases)
            if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
                f['tbr'] = f['abr'] + f['vbr']

        def _formats_key(f):
            # TODO remove the following workaround
            from ..utils import determine_ext
            if not f.get('ext') and 'url' in f:
                f['ext'] = determine_ext(f['url'])

            if isinstance(field_preference, (list, tuple)):
                return tuple(
                    f.get(field)
                    if f.get(field) is not None
                    else ('' if field == 'format_id' else -1)
                    for field in field_preference)

            preference = f.get('preference')
            if preference is None:
                preference = 0
                if f.get('ext') in ['f4f', 'f4m']:  # Not yet supported
                    preference -= 0.5

            protocol = f.get('protocol') or determine_protocol(f)
            proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)

            if f.get('vcodec') == 'none':  # audio only
                preference -= 50
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
                else:
                    ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
                ext_preference = 0
                try:
                    audio_ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    audio_ext_preference = -1
            else:
                if f.get('acodec') == 'none':  # video only
                    preference -= 40
                if self._downloader.params.get('prefer_free_formats'):
                    ORDER = ['flv', 'mp4', 'webm']
                else:
                    ORDER = ['webm', 'flv', 'mp4']
                try:
                    ext_preference = ORDER.index(f['ext'])
                except ValueError:
                    ext_preference = -1
                audio_ext_preference = 0

            return (
                preference,
                f.get('language_preference') if f.get('language_preference') is not None else -1,
                f.get('quality') if f.get('quality') is not None else -1,
                f.get('tbr') if f.get('tbr') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('height') if f.get('height') is not None else -1,
                f.get('width') if f.get('width') is not None else -1,
                proto_preference,
                ext_preference,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id') if f.get('format_id') is not None else '',
            )
        formats.sort(key=_formats_key)
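
    # Illustrative sketch of what _sort_formats() does (format dicts made up
    # for the example; the list ends up ordered worst-first, best-last):
    #
    #   formats = [
    #       {'format_id': '240p', 'url': 'https://example.com/240.mp4',
    #        'ext': 'mp4', 'tbr': 400, 'height': 240},
    #       {'format_id': '720p', 'url': 'https://example.com/720.mp4',
    #        'ext': 'mp4', 'tbr': 2000, 'height': 720},
    #   ]
    #   self._sort_formats(formats)
    #   # formats[-1] is now the preferred (720p) variant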

    def _check_formats(self, formats, video_id):
        if formats:
            formats[:] = filter(
                lambda f: self._is_valid_url(
                    f['url'], video_id,
                    item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
                formats)

    @staticmethod
    def _remove_duplicate_formats(formats):
        format_urls = set()
        unique_formats = []
        for f in formats:
            if f['url'] not in format_urls:
                format_urls.add(f['url'])
                unique_formats.append(f)
        formats[:] = unique_formats

    def _is_valid_url(self, url, video_id, item='video', headers={}):
        url = self._proto_relative_url(url, scheme='http:')
        # For now assume non HTTP(S) URLs are always valid
        if not (url.startswith('http://') or url.startswith('https://')):
            return True
        try:
            self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
            return True
        except ExtractorError as e:
            self.to_screen(
                '%s: %s URL is invalid, skipping: %s'
                % (video_id, item, error_to_compat_str(e.cause)))
            return False

    def http_scheme(self):
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
            else 'https:')

    def _proto_relative_url(self, url, scheme=None):
        if url is None:
            return url
        if url.startswith('//'):
            if scheme is None:
                scheme = self.http_scheme()
            return scheme + url
        else:
            return url

    def _sleep(self, timeout, video_id, msg_template=None):
        if msg_template is None:
            msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
        msg = msg_template % {'video_id': video_id, 'timeout': timeout}
        self.to_screen(msg)
        time.sleep(timeout)

    def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
                             transform_source=lambda s: fix_xml_ampersands(s).strip(),
                             fatal=True, m3u8_id=None, data=None, headers={}, query={}):
        manifest = self._download_xml(
            manifest_url, video_id, 'Downloading f4m manifest',
            'Unable to download f4m manifest',
            # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
            # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244)
            transform_source=transform_source,
            fatal=fatal, data=data, headers=headers, query=query)

        if manifest is False:
            return []

        return self._parse_f4m_formats(
            manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
            transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)

    def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
                           transform_source=lambda s: fix_xml_ampersands(s).strip(),
                           fatal=True, m3u8_id=None):
        if not isinstance(manifest, compat_etree_Element) and not fatal:
            return []

        # currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
        akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
        if akamai_pv is not None and ';' in akamai_pv.text:
            playerVerificationChallenge = akamai_pv.text.split(';')[0]
            if playerVerificationChallenge.strip() != '':
                return []

        formats = []
        manifest_version = '1.0'
        media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
        if not media_nodes:
            manifest_version = '2.0'
            media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
        # Remove unsupported DRM-protected media renditions from the final
        # formats (see https://github.com/ytdl-org/youtube-dl/issues/8573).
        media_nodes = remove_encrypted_media(media_nodes)
        if not media_nodes:
            return formats

        manifest_base_url = get_base_url(manifest)

        bootstrap_info = xpath_element(
            manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
            'bootstrap info', default=None)

        vcodec = None
        mime_type = xpath_text(
            manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
            'mime type', default=None)
        if mime_type and mime_type.startswith('audio/'):
            vcodec = 'none'

        for i, media_el in enumerate(media_nodes):
            tbr = int_or_none(media_el.attrib.get('bitrate'))
            width = int_or_none(media_el.attrib.get('width'))
            height = int_or_none(media_el.attrib.get('height'))
            format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
            # If <bootstrapInfo> is present, the specified f4m is a
            # stream-level manifest, and only set-level manifests may refer to
            # external resources. See section 11.4 and section 4 of F4M spec
            if bootstrap_info is None:
                media_url = None
                # @href is introduced in 2.0, see section 11.6 of F4M spec
                if manifest_version == '2.0':
                    media_url = media_el.attrib.get('href')
                if media_url is None:
                    media_url = media_el.attrib.get('url')
                if not media_url:
                    continue
                manifest_url = (
                    media_url if media_url.startswith('http://') or media_url.startswith('https://')
                    else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
                # If media_url is itself a f4m manifest do the recursive extraction
                # since bitrates in parent manifest (this one) and media_url manifest
                # may differ leading to inability to resolve the format by requested
                # bitrate in f4m downloader
                ext = determine_ext(manifest_url)
                if ext == 'f4m':
                    f4m_formats = self._extract_f4m_formats(
                        manifest_url, video_id, preference=preference, f4m_id=f4m_id,
                        transform_source=transform_source, fatal=fatal)
                    # Sometimes stream-level manifest contains single media entry that
                    # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
                    # At the same time parent's media entry in set-level manifest may
                    # contain it. We will copy it from parent in such cases.
                    if len(f4m_formats) == 1:
                        f = f4m_formats[0]
                        f.update({
                            'tbr': f.get('tbr') or tbr,
                            'width': f.get('width') or width,
                            'height': f.get('height') or height,
                            'format_id': f.get('format_id') if not tbr else format_id,
                            'vcodec': vcodec,
                        })
                    formats.extend(f4m_formats)
                    continue
                elif ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        manifest_url, video_id, 'mp4', preference=preference,
                        m3u8_id=m3u8_id, fatal=fatal))
                    continue
            formats.append({
                'format_id': format_id,
                'url': manifest_url,
                'manifest_url': manifest_url,
                'ext': 'flv' if bootstrap_info is not None else None,
                'protocol': 'f4m',
                'tbr': tbr,
                'width': width,
                'height': height,
                'vcodec': vcodec,
                'preference': preference,
            })
        return formats

    def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
        return {
            'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
            'url': m3u8_url,
            'ext': ext,
            'protocol': 'm3u8',
            'preference': preference - 100 if preference else -100,
            'resolution': 'multiple',
            'format_note': 'Quality selection URL',
        }

    def _report_ignoring_subs(self, name):
        self.report_warning(bug_reports_message(
            'Ignoring subtitle tracks found in the {0} manifest; '
            'if any subtitle tracks are missing,'.format(name)
        ), only_once=True)

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None,
                              m3u8_id=None, note=None, errnote=None,
                              fatal=True, live=False, data=None, headers={},
                              query={}):
        res = self._download_webpage_handle(
            m3u8_url, video_id,
            note=note or 'Downloading m3u8 information',
            errnote=errnote or 'Failed to download m3u8 information',
            fatal=fatal, data=data, headers=headers, query=query)

        if res is False:
            return []

        m3u8_doc, urlh = res
        m3u8_url = urlh.geturl()

        return self._parse_m3u8_formats(
            m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
            preference=preference, m3u8_id=m3u8_id, live=live)

    def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
                            entry_protocol='m3u8', preference=None,
                            m3u8_id=None, live=False):
        if '#EXT-X-FAXS-CM:' in m3u8_doc:  # Adobe Flash Access
            return []

        if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc):  # Apple FairPlay
            return []

        formats = []

        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        # References:
        # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
        # 2. https://github.com/ytdl-org/youtube-dl/issues/12211
        # 3. https://github.com/ytdl-org/youtube-dl/issues/18923

        # We should try extracting formats only from master playlists [1, 4.3.4],
        # i.e. playlists that describe available qualities. On the other hand
        # media playlists [1, 4.3.3] should be returned as is since they contain
        # just the media without quality renditions.
        # Fortunately, a master playlist can be easily distinguished from a media
        # playlist based on particular tags' availability. As per [1, 4.3.3, 4.3.4]
        # master playlist tags MUST NOT appear in a media playlist and vice versa.
        # As per [1, 4.3.3.1] the #EXT-X-TARGETDURATION tag is REQUIRED for every
        # media playlist and MUST NOT appear in a master playlist, thus we can
        # clearly detect a media playlist with this criterion.

        if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
            return [{
                'url': m3u8_url,
                'format_id': m3u8_id,
                'ext': ext,
                'protocol': entry_protocol,
                'preference': preference,
            }]

        groups = {}
        last_stream_inf = {}

        def extract_media(x_media_line):
            media = parse_m3u8_attributes(x_media_line)
            # As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
            media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
            if not (media_type and group_id and name):
                return
            groups.setdefault(group_id, []).append(media)
            if media_type not in ('VIDEO', 'AUDIO'):
                return
            media_url = media.get('URI')
            if media_url:
                format_id = []
                for v in (m3u8_id, group_id, name):
                    if v:
                        format_id.append(v)
                f = {
                    'format_id': '-'.join(format_id),
                    'url': format_url(media_url),
                    'manifest_url': m3u8_url,
                    'language': media.get('LANGUAGE'),
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                if media_type == 'AUDIO':
                    f['vcodec'] = 'none'
                formats.append(f)

        def build_stream_name():
            # Although the specification does not mention a NAME attribute for
            # the EXT-X-STREAM-INF tag, it still sometimes may be present (see [1]
            # or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
            # 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
            stream_name = last_stream_inf.get('NAME')
            if stream_name:
                return stream_name
            # If there is no NAME in EXT-X-STREAM-INF it will be obtained
            # from the corresponding rendition group
            stream_group_id = last_stream_inf.get('VIDEO')
            if not stream_group_id:
                return
            stream_group = groups.get(stream_group_id)
            if not stream_group:
                return stream_group_id
            rendition = stream_group[0]
            return rendition.get('NAME') or stream_group_id

        # parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
        # chance to detect video-only formats when EXT-X-STREAM-INF tags
        # precede EXT-X-MEDIA tags in an HLS manifest such as [3].
        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-MEDIA:'):
                extract_media(line)

        for line in m3u8_doc.splitlines():
            if line.startswith('#EXT-X-STREAM-INF:'):
                last_stream_inf = parse_m3u8_attributes(line)
            elif line.startswith('#') or not line.strip():
                continue
            else:
                tbr = float_or_none(
                    last_stream_inf.get('AVERAGE-BANDWIDTH')
                    or last_stream_inf.get('BANDWIDTH'), scale=1000)
                format_id = []
                if m3u8_id:
                    format_id.append(m3u8_id)
                stream_name = build_stream_name()
                # Bandwidth of live streams may differ over time thus making
                # format_id unpredictable. So it's better to keep provided
                # format_id intact.
                if not live:
                    format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
                manifest_url = format_url(line.strip())
                f = {
                    'format_id': '-'.join(format_id),
                    'url': manifest_url,
                    'manifest_url': m3u8_url,
                    'tbr': tbr,
                    'ext': ext,
                    'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                resolution = last_stream_inf.get('RESOLUTION')
                if resolution:
                    mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
                    if mobj:
                        f['width'] = int(mobj.group('width'))
                        f['height'] = int(mobj.group('height'))
                # Unified Streaming Platform
                mobj = re.search(
                    r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
                if mobj:
                    abr, vbr = mobj.groups()
                    abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
                    f.update({
                        'vbr': vbr,
                        'abr': abr,
                    })
                codecs = parse_codecs(last_stream_inf.get('CODECS'))
                f.update(codecs)
                audio_group_id = last_stream_inf.get('AUDIO')
                # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
                # references a rendition group MUST have a CODECS attribute.
                # However, this is not always respected; for example, [2]
                # contains an EXT-X-STREAM-INF tag which references an AUDIO
                # rendition group but does not have CODECS, and despite
                # referencing an audio group it represents a complete
                # (with audio and video) format. So, for such cases we will
                # ignore references to rendition groups and treat them
                # as complete formats.
                if audio_group_id and codecs and f.get('vcodec') != 'none':
                    audio_group = groups.get(audio_group_id)
                    if audio_group and audio_group[0].get('URI'):
                        # TODO: update acodec for audio only formats with
                        # the same GROUP-ID
                        f['acodec'] = 'none'
                formats.append(f)

                # for DailyMotion
                progressive_uri = last_stream_inf.get('PROGRESSIVE-URI')
                if progressive_uri:
                    http_f = f.copy()
                    del http_f['manifest_url']
                    http_f.update({
                        'format_id': f['format_id'].replace('hls-', 'http-'),
                        'protocol': 'http',
                        'url': progressive_uri,
                    })
                    formats.append(http_f)

                last_stream_inf = {}
        return formats
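
    # Illustrative sketch of the master-playlist input this parser expects
    # (a made-up two-variant manifest):
    #
    #   #EXTM3U
    #   #EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360,CODECS="avc1.4d401e,mp4a.40.2"
    #   360p/index.m3u8
    #   #EXT-X-STREAM-INF:BANDWIDTH=2500000,RESOLUTION=1280x720,CODECS="avc1.4d401f,mp4a.40.2"
    #   720p/index.m3u8
    #
    # Each variant line yields one format dict with tbr (800.0, 2500.0),
    # width/height from RESOLUTION and codecs from CODECS; relative URLs are
    # resolved against the playlist URL.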

    @staticmethod
    def _xpath_ns(path, namespace=None):
        if not namespace:
            return path
        out = []
        for c in path.split('/'):
            if not c or c == '.':
                out.append(c)
            else:
                out.append('{%s}%s' % (namespace, c))
        return '/'.join(out)

    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

        if smil is False:
            assert not fatal
            return []

        namespace = self._parse_smil_namespace(smil)

        return self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)

    def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
        smil = self._download_smil(smil_url, video_id, fatal=fatal)
        if smil is False:
            return {}
        return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

    def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
        return self._download_xml(
            smil_url, video_id, 'Downloading SMIL file',
            'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
|
2015-08-02 04:13:21 +09:00
|
|
|
|
|
|
|
|
|

    def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
        namespace = self._parse_smil_namespace(smil)

        formats = self._parse_smil_formats(
            smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
        subtitles = self._parse_smil_subtitles(smil, namespace=namespace)

        video_id = os.path.splitext(url_basename(smil_url))[0]
        title = None
        description = None
        upload_date = None
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            name = meta.attrib.get('name')
            content = meta.attrib.get('content')
            if not name or not content:
                continue
            if not title and name == 'title':
                title = content
            elif not description and name in ('description', 'abstract'):
                description = content
            elif not upload_date and name == 'date':
                upload_date = unified_strdate(content)

        thumbnails = [{
            'id': image.get('type'),
            'url': image.get('src'),
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]

        return {
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'upload_date': upload_date,
            'thumbnails': thumbnails,
            'formats': formats,
            'subtitles': subtitles,
        }

    def _parse_smil_namespace(self, smil):
        return self._search_regex(
            r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
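
    # For illustration: a document whose root tag parses as
    # '{http://www.w3.org/2001/SMIL20/Language}smil' yields the namespace
    # 'http://www.w3.org/2001/SMIL20/Language'; an un-namespaced 'smil' root
    # does not match and yields None.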

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base = smil_url
        for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
            b = meta.get('base') or meta.get('httpBase')
            if b:
                base = b
                break

        formats = []
        rtmp_count = 0
        http_count = 0
        m3u8_count = 0

        srcs = []
        media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
        for medium in media:
            src = medium.get('src')
            if not src or src in srcs:
                continue
            srcs.append(src)

            bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
            filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
            width = int_or_none(medium.get('width'))
            height = int_or_none(medium.get('height'))
            proto = medium.get('proto')
            ext = medium.get('ext')
            src_ext = determine_ext(src)
            streamer = medium.get('streamer') or base

            if proto == 'rtmp' or streamer.startswith('rtmp'):
                rtmp_count += 1
                formats.append({
                    'url': streamer,
                    'play_path': src,
                    'ext': 'flv',
                    'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })
                if transform_rtmp_url:
                    streamer, src = transform_rtmp_url(streamer, src)
                    formats[-1].update({
                        'url': streamer,
                        'play_path': src,
                    })
                continue

            src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
            src_url = src_url.strip()

            if proto == 'm3u8' or src_ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
                if len(m3u8_formats) == 1:
                    m3u8_count += 1
                    m3u8_formats[0].update({
                        'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
                        'tbr': bitrate,
                        'width': width,
                        'height': height,
                    })
                formats.extend(m3u8_formats)
            elif src_ext == 'f4m':
                f4m_url = src_url
                if not f4m_params:
                    f4m_params = {
                        'hdcore': '3.2.0',
                        'plugin': 'flowplayer-3.2.0.1',
                    }
                f4m_url += '&' if '?' in f4m_url else '?'
                f4m_url += compat_urllib_parse_urlencode(f4m_params)
                formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
            elif src_ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    src_url, video_id, mpd_id='dash', fatal=False))
            elif re.search(r'\.ism/[Mm]anifest', src_url):
                formats.extend(self._extract_ism_formats(
                    src_url, video_id, ism_id='mss', fatal=False))
            elif src_url.startswith('http') and self._is_valid_url(src, video_id):
                http_count += 1
                formats.append({
                    'url': src_url,
                    'ext': ext or src_ext or 'flv',
                    'format_id': 'http-%d' % (bitrate or http_count),
                    'tbr': bitrate,
                    'filesize': filesize,
                    'width': width,
                    'height': height,
                })

        return formats

    def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
        urls = []
        subtitles = {}
        for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
            src = textstream.get('src')
            if not src or src in urls:
                continue
            urls.append(src)
            ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
            lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
            subtitles.setdefault(lang, []).append({
                'url': src,
                'ext': ext,
            })
        return subtitles

    def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
        xspf = self._download_xml(
            xspf_url, playlist_id, 'Downloading xspf playlist',
            'Unable to download xspf manifest', fatal=fatal)
        if xspf is False:
            return []
        return self._parse_xspf(
            xspf, playlist_id, xspf_url=xspf_url,
            xspf_base_url=base_url(xspf_url))

    def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
        NS_MAP = {
            'xspf': 'http://xspf.org/ns/0/',
            's1': 'http://static.streamone.nl/player/ns/0',
        }

        entries = []
        for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
            title = xpath_text(
                track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
            description = xpath_text(
                track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
            thumbnail = xpath_text(
                track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
            duration = float_or_none(
                xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)

            formats = []
            for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
                format_url = urljoin(xspf_base_url, location.text)
                if not format_url:
                    continue
                formats.append({
                    'url': format_url,
                    'manifest_url': xspf_url,
                    'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
                    'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
                    'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
                })
            self._sort_formats(formats)

            entries.append({
                'id': playlist_id,
                'title': title,
                'description': description,
                'thumbnail': thumbnail,
                'duration': duration,
                'formats': formats,
            })
        return entries

    def _extract_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _extract_mpd_formats_and_subtitles(
            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
            fatal=True, data=None, headers=None, query=None):

        # TODO: or not? param not yet implemented
        if self.get_param('ignore_no_formats_error'):
            fatal = False

        res = self._download_xml_handle(
            mpd_url, video_id,
            note='Downloading MPD manifest' if note is None else note,
            errnote='Failed to download MPD manifest' if errnote is None else errnote,
            fatal=fatal, data=data, headers=headers or {}, query=query or {})
        if res is False:
            return [], {}
        mpd_doc, urlh = res
        if mpd_doc is None:
            return [], {}

        # We could have been redirected to a new url when we retrieved our mpd file.
        mpd_url = urlh.geturl()
        mpd_base_url = base_url(mpd_url)

        return self._parse_mpd_formats_and_subtitles(
            mpd_doc, mpd_id, mpd_base_url, mpd_url)

    def _parse_mpd_formats(self, *args, **kwargs):
        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
        if subs:
            self._report_ignoring_subs('DASH')
        return fmts

    def _parse_mpd_formats_and_subtitles(
            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
        """
        Parse formats from MPD manifest.
        References:
         1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
        """
        # TODO: param not yet implemented: default like previous yt-dl logic
        if not self.get_param('dynamic_mpd', False):
            if mpd_doc.get('type') == 'dynamic':
                return [], {}

        namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)

        def _add_ns(path):
            return self._xpath_ns(path, namespace)

        def is_drm_protected(element):
            return element.find(_add_ns('ContentProtection')) is not None

        from ..utils import YoutubeDLHandler
        fix_path = YoutubeDLHandler._fix_path

        def resolve_base_url(element, parent_base_url=None):
            # TODO: use native XML traversal when ready
            b_url = traverse_obj(element, (
                T(lambda e: e.find(_add_ns('BaseURL')).text)))
            if parent_base_url and b_url:
                if parent_base_url[-1] not in ('/', ':'):
                    parent_base_url += '/'
                b_url = compat_urlparse.urljoin(parent_base_url, b_url)
            if b_url:
                b_url = fix_path(b_url)
            return b_url or parent_base_url
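
        # For illustration (hypothetical URLs): a child <BaseURL>media/</BaseURL>
        # under a parent base of 'https://cdn.example.com/dash/' resolves to
        # 'https://cdn.example.com/dash/media/', while an element without a
        # <BaseURL> child simply inherits the parent base URL.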

        def extract_multisegment_info(element, ms_parent_info):
            ms_info = ms_parent_info.copy()
            base_url = ms_info['base_url'] = resolve_base_url(element, ms_info.get('base_url'))

            # As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
            # common attributes and elements. We will only extract what is
            # relevant for us.
            def extract_common(source):
                segment_timeline = source.find(_add_ns('SegmentTimeline'))
                if segment_timeline is not None:
                    s_e = segment_timeline.findall(_add_ns('S'))
                    if s_e:
                        ms_info['total_number'] = 0
                        ms_info['s'] = []
                        for s in s_e:
                            r = int(s.get('r', 0))
                            ms_info['total_number'] += 1 + r
                            ms_info['s'].append({
                                't': int(s.get('t', 0)),
                                # @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
                                'd': int(s.attrib['d']),
                                'r': r,
                            })
                start_number = source.get('startNumber')
                if start_number:
                    ms_info['start_number'] = int(start_number)
                timescale = source.get('timescale')
                if timescale:
                    ms_info['timescale'] = int(timescale)
                segment_duration = source.get('duration')
                if segment_duration:
                    ms_info['segment_duration'] = float(segment_duration)

            def extract_Initialization(source):
                initialization = source.find(_add_ns('Initialization'))
                if initialization is not None:
                    ms_info['initialization_url'] = initialization.get('sourceURL') or base_url
                    initialization_url_range = initialization.get('range')
                    if initialization_url_range:
                        ms_info['initialization_url_range'] = initialization_url_range
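
            # For illustration: in extract_common() above, a timeline element like
            # <S t="0" d="360000" r="2"/> counts as 1 + r = 3 segments, adding 3 to
            # 'total_number' and a single {'t': 0, 'd': 360000, 'r': 2} entry to 's'.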

            segment_list = element.find(_add_ns('SegmentList'))
            if segment_list is not None:
                extract_common(segment_list)
                extract_Initialization(segment_list)
                segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
                segment_urls = traverse_obj(segment_urls_e, (
                    Ellipsis, T(lambda e: e.attrib), 'media'))
                if segment_urls:
                    ms_info['segment_urls'] = segment_urls
                segment_urls_range = traverse_obj(segment_urls_e, (
                    Ellipsis, T(lambda e: e.attrib), 'mediaRange',
                    T(lambda r: re.findall(r'^\d+-\d+$', r)), 0))
                if segment_urls_range:
                    ms_info['segment_urls_range'] = segment_urls_range
                    if not segment_urls:
                        ms_info['segment_urls'] = [base_url for _ in segment_urls_range]
            else:
                segment_template = element.find(_add_ns('SegmentTemplate'))
                if segment_template is not None:
                    extract_common(segment_template)
                    media = segment_template.get('media')
                    if media:
                        ms_info['media'] = media
                    initialization = segment_template.get('initialization')
                    if initialization:
                        ms_info['initialization'] = initialization
                    else:
                        extract_Initialization(segment_template)
            return ms_info

        mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
        formats, subtitles = [], {}
        stream_numbers = collections.defaultdict(int)
        mpd_base_url = resolve_base_url(mpd_doc, mpd_base_url or mpd_url)
        for period in mpd_doc.findall(_add_ns('Period')):
            period_duration = parse_duration(period.get('duration')) or mpd_duration
            period_ms_info = extract_multisegment_info(period, {
                'start_number': 1,
                'timescale': 1,
                'base_url': mpd_base_url,
            })
            for adaptation_set in period.findall(_add_ns('AdaptationSet')):
                if is_drm_protected(adaptation_set):
                    continue
                adaptation_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                for representation in adaptation_set.findall(_add_ns('Representation')):
                    if is_drm_protected(representation):
                        continue
                    representation_attrib = adaptation_set.attrib.copy()
                    representation_attrib.update(representation.attrib)
                    # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                    mime_type = representation_attrib['mimeType']
                    content_type = representation_attrib.get('contentType') or mime_type.split('/')[0]
                    codec_str = representation_attrib.get('codecs', '')
                    # Some kind of binary subtitle found in some youtube livestreams
                    if mime_type == 'application/x-rawcc':
                        codecs = {'scodec': codec_str}
                    else:
                        codecs = parse_codecs(codec_str)
                    if content_type not in ('video', 'audio', 'text'):
                        if mime_type == 'image/jpeg':
                            content_type = mime_type
                        elif codecs.get('vcodec', 'none') != 'none':
                            content_type = 'video'
                        elif codecs.get('acodec', 'none') != 'none':
                            content_type = 'audio'
                        elif codecs.get('scodec', 'none') != 'none':
                            content_type = 'text'
                        elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
                            content_type = 'text'
                        else:
                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
                            continue

                    representation_id = representation_attrib.get('id')
                    lang = representation_attrib.get('lang')
                    url_el = representation.find(_add_ns('BaseURL'))
                    filesize = int_or_none(url_el.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
                    format_id = join_nonempty(representation_id or content_type, mpd_id)
                    if content_type in ('video', 'audio'):
                        f = {
                            'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
                            'manifest_url': mpd_url,
                            'ext': mimetype2ext(mime_type),
                            'width': int_or_none(representation_attrib.get('width')),
                            'height': int_or_none(representation_attrib.get('height')),
                            'tbr': float_or_none(bandwidth, 1000),
                            'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
                            'fps': int_or_none(representation_attrib.get('frameRate')),
                            'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
                            'format_note': 'DASH %s' % content_type,
                            'filesize': filesize,
                            'container': mimetype2ext(mime_type) + '_dash',
                        }
                        f.update(codecs)
                    elif content_type == 'text':
                        f = {
                            'ext': mimetype2ext(mime_type),
                            'manifest_url': mpd_url,
                            'filesize': filesize,
                        }
                    elif content_type == 'image/jpeg':
                        # See test case in VikiIE
                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
                        f = {
                            'format_id': format_id,
                            'ext': 'mhtml',
                            'manifest_url': mpd_url,
                            'format_note': 'DASH storyboards (jpeg)',
                            'acodec': 'none',
                            'vcodec': 'none',
                        }
                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
                        f['has_drm'] = True
                    representation_ms_info = extract_multisegment_info(representation, adaptation_set_ms_info)

                    def prepare_template(template_name, identifiers):
                        tmpl = representation_ms_info[template_name]
                        # First of all, % characters outside $...$ templates
                        # must be escaped by doubling for proper processing
                        # by % operator string formatting used further (see
                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
                        t = ''
                        in_template = False
                        for c in tmpl:
                            t += c
                            if c == '$':
                                in_template = not in_template
                            elif c == '%' and not in_template:
                                t += c
                        # Next, $...$ templates are translated to their
                        # %(...) counterparts to be used with % operator
                        t = t.replace('$RepresentationID$', representation_id)
                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
                        # str.replace returns a new string: assign the result so that
                        # the escaped '$$' really is collapsed back to a literal '$'
                        t = t.replace('$$', '$')
                        return t
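
                    # For illustration: a media template of 'seg-$Number%05d$.mp4'
                    # becomes 'seg-%(Number)05d.mp4', so applying % {'Number': 42}
                    # yields 'seg-00042.mp4'; a plain '$Number$' becomes '%(Number)d'.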

                    # @initialization is a regular template like @media one
                    # so it should be handled just the same way (see
                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
                    if 'initialization' in representation_ms_info:
                        initialization_template = prepare_template(
                            'initialization',
                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
                            # $Time$ shall not be included for @initialization thus
                            # only $Bandwidth$ remains
                            ('Bandwidth', ))
                        representation_ms_info['initialization_url'] = initialization_template % {
                            'Bandwidth': bandwidth,
                        }

                    def location_key(location):
                        return 'url' if re.match(r'^https?://', location) else 'path'
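
                    # For illustration: an absolute 'https://cdn.example.com/seg-1.m4s'
                    # (a made-up URL) is stored under the 'url' key, while a relative
                    # 'seg-1.m4s' is stored under 'path' and resolved against
                    # 'fragment_base_url' when the fragments are downloaded.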

                    def calc_segment_duration():
                        return float_or_none(
                            representation_ms_info['segment_duration'],
                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None

                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:

                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
                        media_location_key = location_key(media_template)

                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
                        # can't be used at the same time
                        if '%(Number' in media_template and 's' not in representation_ms_info:
                            segment_duration = None
                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
                                representation_ms_info['total_number'] = int(math.ceil(
                                    float_or_none(period_duration, segment_duration, default=0)))
                            representation_ms_info['fragments'] = [{
                                media_location_key: media_template % {
                                    'Number': segment_number,
                                    'Bandwidth': bandwidth,
                                },
                                'duration': segment_duration,
                            } for segment_number in range(
                                representation_ms_info['start_number'],
                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
                        else:
                            # $Number*$ or $Time$ in media template with S list available
                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
                            representation_ms_info['fragments'] = []
                            segment_time = 0
                            segment_d = None
                            segment_number = representation_ms_info['start_number']

                            def add_segment_url():
                                segment_url = media_template % {
                                    'Time': segment_time,
                                    'Bandwidth': bandwidth,
                                    'Number': segment_number,
                                }
                                representation_ms_info['fragments'].append({
                                    media_location_key: segment_url,
                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
                                })

                            for num, s in enumerate(representation_ms_info['s']):
                                segment_time = s.get('t') or segment_time
                                segment_d = s['d']
                                add_segment_url()
                                segment_number += 1
                                for r in range(s.get('r', 0)):
                                    segment_time += segment_d
                                    add_segment_url()
                                    segment_number += 1
                                segment_time += segment_d
                    elif 'segment_urls' in representation_ms_info:
                        fragments = []
                        if 's' in representation_ms_info:
                            # No media template
                            # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
                            # or any YouTube dashsegments video
                            segment_index = 0
                            timescale = representation_ms_info['timescale']
                            for s in representation_ms_info['s']:
                                duration = float_or_none(s['d'], timescale)
                                for r in range(s.get('r', 0) + 1):
                                    segment_uri = representation_ms_info['segment_urls'][segment_index]
                                    fragments.append({
                                        location_key(segment_uri): segment_uri,
                                        'duration': duration,
                                    })
                                    segment_index += 1
                        elif 'segment_urls_range' in representation_ms_info:
                            # Segment URLs with mediaRange
                            # Example: https://kinescope.io/200615537/master.mpd
                            # https://github.com/ytdl-org/youtube-dl/issues/30235
                            # or any mpd generated with Bento4 `mp4dash --no-split --use-segment-list`
                            segment_duration = calc_segment_duration()
                            for segment_url, segment_url_range in zip(
                                    representation_ms_info['segment_urls'], representation_ms_info['segment_urls_range']):
                                fragments.append({
                                    location_key(segment_url): segment_url,
                                    'range': segment_url_range,
                                    'duration': segment_duration,
                                })
                        else:
                            # Segment URLs with no SegmentTimeline
                            # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
                            # https://github.com/ytdl-org/youtube-dl/pull/14844
                            segment_duration = calc_segment_duration()
                            for segment_url in representation_ms_info['segment_urls']:
                                fragments.append({
                                    location_key(segment_url): segment_url,
                                    'duration': segment_duration,
                                })
                        representation_ms_info['fragments'] = fragments

                    # If there is a fragments key available then we correctly recognized fragmented media.
                    # Otherwise we will assume unfragmented media with direct access. Technically, such
                    # assumption is not necessarily correct since we may simply have no support for
                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
                    if 'fragments' in representation_ms_info:
                        base_url = representation_ms_info['base_url']
                        f.update({
                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
                            'url': mpd_url or base_url,
                            'fragment_base_url': base_url,
                            'fragments': [],
                            'protocol': 'http_dash_segments',
                        })
                        if 'initialization_url' in representation_ms_info and 'initialization_url_range' in representation_ms_info:
                            # Initialization URL with range (accompanied by Segment URLs with mediaRange above)
                            # https://github.com/ytdl-org/youtube-dl/issues/30235
                            initialization_url = representation_ms_info['initialization_url']
                            f['fragments'].append({
                                location_key(initialization_url): initialization_url,
                                'range': representation_ms_info['initialization_url_range'],
                            })
                        elif 'initialization_url' in representation_ms_info:
                            initialization_url = representation_ms_info['initialization_url']
                            if not f.get('url'):
                                f['url'] = initialization_url
                            f['fragments'].append({location_key(initialization_url): initialization_url})
                        elif 'initialization_url_range' in representation_ms_info:
                            # no Initialization URL but range (accompanied by no Segment URLs but mediaRange above)
                            # https://github.com/ytdl-org/youtube-dl/issues/27575
                            f['fragments'].append({
                                location_key(base_url): base_url,
                                'range': representation_ms_info['initialization_url_range'],
                            })
                        f['fragments'].extend(representation_ms_info['fragments'])
                        if not period_duration:
                            period_duration = sum(traverse_obj(representation_ms_info, (
                                'fragments', Ellipsis, 'duration', T(float_or_none))))
                    else:
                        # Assuming direct URL to unfragmented media.
                        f['url'] = representation_ms_info['base_url']
                    if content_type in ('video', 'audio', 'image/jpeg'):
                        f['manifest_stream_number'] = stream_numbers[f['url']]
                        stream_numbers[f['url']] += 1
                        formats.append(f)
                    elif content_type == 'text':
                        subtitles.setdefault(lang or 'und', []).append(f)

        return formats, subtitles

    def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
        res = self._download_xml_handle(
            ism_url, video_id,
            note=note or 'Downloading ISM manifest',
            errnote=errnote or 'Failed to download ISM manifest',
            fatal=fatal, data=data, headers=headers, query=query)
        if res is False:
            return []
        ism_doc, urlh = res
        if ism_doc is None:
            return []

        return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)

    def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
        """
        Parse formats from ISM manifest.
        References:
         1. [MS-SSTR]: Smooth Streaming Protocol,
            https://msdn.microsoft.com/en-us/library/ff469518.aspx
        """
        if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
            return []

        duration = int(ism_doc.attrib['Duration'])
        timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

        formats = []
        for stream in ism_doc.findall('StreamIndex'):
            stream_type = stream.get('Type')
            if stream_type not in ('video', 'audio'):
                continue
            url_pattern = stream.attrib['Url']
            stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
            stream_name = stream.get('Name')
            for track in stream.findall('QualityLevel'):
                fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
                # TODO: add support for WVC1 and WMAP
                if fourcc not in ('H264', 'AVC1', 'AACL'):
                    self.report_warning('%s is not a supported codec' % fourcc)
                    continue
                tbr = int(track.attrib['Bitrate']) // 1000
                # [1] does not mention Width and Height attributes. However,
                # they're often present while MaxWidth and MaxHeight are
                # missing, so should be used as fallbacks
                width = int_or_none(track.get('MaxWidth') or track.get('Width'))
                height = int_or_none(track.get('MaxHeight') or track.get('Height'))
                sampling_rate = int_or_none(track.get('SamplingRate'))

                track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
                track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
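
                # For illustration: with a typical Smooth Streaming pattern such as
                # 'QualityLevels({bitrate})/Fragments(video={start time})' and a track
                # Bitrate of 1500000, after {bitrate} substitution (and joining with
                # the manifest URL) the pattern reads
                # '.../QualityLevels(1500000)/Fragments(video={start time})'; the
                # {start time} placeholder is filled in per fragment below.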

                fragments = []
                fragment_ctx = {
                    'time': 0,
                }
                stream_fragments = stream.findall('c')
                for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
                    fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
                    fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
                    fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
                    if not fragment_ctx['duration']:
                        try:
                            # peek at the next <c> element to infer this fragment's duration
                            next_fragment_time = int(stream_fragments[stream_fragment_index + 1].attrib['t'])
                        except IndexError:
                            next_fragment_time = duration
                        fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
                    for _ in range(fragment_repeat):
                        fragments.append({
                            'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
                            'duration': fragment_ctx['duration'] / stream_timescale,
                        })
                        fragment_ctx['time'] += fragment_ctx['duration']

                format_id = []
                if ism_id:
                    format_id.append(ism_id)
                if stream_name:
                    format_id.append(stream_name)
                format_id.append(compat_str(tbr))

                formats.append({
                    'format_id': '-'.join(format_id),
                    'url': ism_url,
                    'manifest_url': ism_url,
                    'ext': 'ismv' if stream_type == 'video' else 'isma',
                    'width': width,
                    'height': height,
                    'tbr': tbr,
                    'asr': sampling_rate,
                    'vcodec': 'none' if stream_type == 'audio' else fourcc,
                    'acodec': 'none' if stream_type == 'video' else fourcc,
                    'protocol': 'ism',
                    'fragments': fragments,
                    '_download_params': {
                        'duration': duration,
                        'timescale': stream_timescale,
                        'width': width or 0,
                        'height': height or 0,
                        'fourcc': fourcc,
                        'codec_private_data': track.get('CodecPrivateData'),
                        'sampling_rate': sampling_rate,
                        'channels': int_or_none(track.get('Channels', 2)),
                        'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
                        'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
                    },
                })
        return formats

    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
        def absolute_url(item_url):
            return urljoin(base_url, item_url)

        def parse_content_type(content_type):
            if not content_type:
                return {}
            ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
            if ctr:
                mimetype, codecs = ctr.groups()
                f = parse_codecs(codecs)
                f['ext'] = mimetype2ext(mimetype)
                return f
            return {}
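
        # For illustration: parse_content_type('video/mp4; codecs="avc1.64001f, mp4a.40.2"')
        # returns a dict with 'ext' set to 'mp4' plus the vcodec/acodec pair parsed
        # from the codecs attribute.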

        def _media_formats(src, cur_media_type, type_info=None):
            type_info = type_info or {}
            full_url = absolute_url(src)
            ext = type_info.get('ext') or determine_ext(full_url)
            if ext == 'm3u8':
                is_plain_url = False
                formats = self._extract_m3u8_formats(
                    full_url, video_id, ext='mp4',
                    entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
                    preference=preference, fatal=False)
            elif ext == 'mpd':
                is_plain_url = False
                formats = self._extract_mpd_formats(
                    full_url, video_id, mpd_id=mpd_id, fatal=False)
            else:
                is_plain_url = True
                formats = [{
                    'url': full_url,
                    'vcodec': 'none' if cur_media_type == 'audio' else None,
                    'ext': ext,
                }]
            return is_plain_url, formats

        entries = []
        # amp-video and amp-audio are very similar to their HTML5 counterparts
        # so we will include them right here (see
        # https://www.ampproject.org/docs/reference/components/amp-video)
        # For dl8-* tags see https://delight-vr.com/documentation/dl8-video/
        _MEDIA_TAG_NAME_RE = r'(?:(?:amp|dl8(?:-live)?)-)?(video(?:-js)?|audio)'
        media_tags = [(media_tag, media_tag_name, media_type, '')
                      for media_tag, media_tag_name, media_type
                      in re.findall(r'(?s)(<(%s)[^>]*/>)' % _MEDIA_TAG_NAME_RE, webpage)]
        media_tags.extend(re.findall(
            # We only allow video|audio followed by a whitespace or '>'.
            # Allowing more characters may end up in significant slow down (see
            # https://github.com/ytdl-org/youtube-dl/issues/11979, example URL:
            # http://www.porntrex.com/maps/videositemap.xml).
            r'(?s)(<(?P<tag>%s)(?:\s+[^>]*)?>)(.*?)</(?P=tag)>' % _MEDIA_TAG_NAME_RE, webpage))
        for media_tag, _, media_type, media_content in media_tags:
            media_info = {
                'formats': [],
                'subtitles': {},
            }
            media_attributes = extract_attributes(media_tag)
            src = strip_or_none(media_attributes.get('src'))
            if src:
                f = parse_content_type(media_attributes.get('type'))
                _, formats = _media_formats(src, media_type, f)
                media_info['formats'].extend(formats)
            media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
            if media_content:
                for source_tag in re.findall(r'<source[^>]+>', media_content):
                    s_attr = extract_attributes(source_tag)
                    # data-video-src and data-src are non standard but seen
                    # several times in the wild
                    src = strip_or_none(dict_get(s_attr, ('src', 'data-video-src', 'data-src')))
                    if not src:
                        continue
                    f = parse_content_type(s_attr.get('type'))
                    is_plain_url, formats = _media_formats(src, media_type, f)
                    if is_plain_url:
                        # width, height, res, label and title attributes are
                        # all not standard but seen several times in the wild
                        labels = [
                            s_attr.get(lbl)
                            for lbl in ('label', 'title')
                            if str_or_none(s_attr.get(lbl))
                        ]
                        width = int_or_none(s_attr.get('width'))
                        height = (int_or_none(s_attr.get('height'))
                                  or int_or_none(s_attr.get('res')))
                        if not width or not height:
                            for lbl in labels:
                                resolution = parse_resolution(lbl)
                                if not resolution:
                                    continue
                                width = width or resolution.get('width')
                                height = height or resolution.get('height')
                        for lbl in labels:
                            tbr = parse_bitrate(lbl)
                            if tbr:
                                break
                        else:
                            tbr = None
                        f.update({
                            'width': width,
                            'height': height,
                            'tbr': tbr,
                            'format_id': s_attr.get('label') or s_attr.get('title'),
                        })
                        f.update(formats[0])
                        media_info['formats'].append(f)
                    else:
                        media_info['formats'].extend(formats)
                for track_tag in re.findall(r'<track[^>]+>', media_content):
                    track_attributes = extract_attributes(track_tag)
                    kind = track_attributes.get('kind')
                    if not kind or kind in ('subtitles', 'captions'):
                        src = strip_or_none(track_attributes.get('src'))
                        if not src:
                            continue
                        lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
                        media_info['subtitles'].setdefault(lang, []).append({
                            'url': absolute_url(src),
                        })
            for f in media_info['formats']:
                f.setdefault('http_headers', {})['Referer'] = base_url
            if media_info['formats'] or media_info['subtitles']:
                entries.append(media_info)
        return entries

    def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
        signed = 'hdnea=' in manifest_url
        if not signed:
            # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
            manifest_url = re.sub(
                r'(?:b=[\d,-]+|(?:__a__|attributes)=off|__b__=\d+)&?',
                '', manifest_url).strip('?')

        formats = []

        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
        if hds_host:
            f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
        if 'hdcore=' not in f4m_url:
            f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
        f4m_formats = self._extract_f4m_formats(
            f4m_url, video_id, f4m_id='hds', fatal=False)
        for entry in f4m_formats:
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)

        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
            m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
        m3u8_formats = self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False)
        formats.extend(m3u8_formats)
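
        # For illustration (made-up host): an HLS manifest URL like
        # 'https://example-vh.akamaihd.net/i/video/stream_,400,800,.mp4.csmil/master.m3u8'
        # maps to the HDS variant
        # 'https://example-vh.akamaihd.net/z/video/stream_,400,800,.mp4.csmil/manifest.f4m'
        # via the /i/ -> /z/ and master.m3u8 -> manifest.f4m rewrites above, and the
        # reverse rewrite recovers the HLS URL from an HDS input.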

        http_host = hosts.get('http')
        if http_host and m3u8_formats and not signed:
            REPL_REGEX = r'https?://[^/]+/i/([^,]+),([^/]+),([^/]+)\.csmil/.+'
            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
            qualities_length = len(qualities)
            if len(m3u8_formats) in (qualities_length, qualities_length + 1):
                i = 0
                for f in m3u8_formats:
                    if f['vcodec'] != 'none':
                        for protocol in ('http', 'https'):
                            http_f = f.copy()
                            del http_f['manifest_url']
                            http_url = re.sub(
                                REPL_REGEX, protocol + r'://%s/\g<1>%s\3' % (http_host, qualities[i]), f['url'])
                            http_f.update({
                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
                                'url': http_url,
                                'protocol': protocol,
                            })
                            formats.append(http_f)
                        i += 1

        return formats

    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
        query = compat_urlparse.urlparse(url).query
        url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
        mobj = re.search(
            r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
        url_base = mobj.group('url')
        http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
        formats = []

        def manifest_url(manifest):
            m_url = '%s/%s' % (http_base_url, manifest)
            if query:
                m_url += '?%s' % query
            return m_url
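
        # For illustration: given a (hypothetical) input of
        # 'https://wowza.example.com/live/stream/playlist.m3u8?token=abc', the
        # manifest suffix is stripped above, so manifest_url('manifest.mpd')
        # produces 'https://wowza.example.com/live/stream/manifest.mpd?token=abc'.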

        if 'm3u8' not in skip_protocols:
            formats.extend(self._extract_m3u8_formats(
                manifest_url('playlist.m3u8'), video_id, 'mp4',
                m3u8_entry_protocol, m3u8_id='hls', fatal=False))
        if 'f4m' not in skip_protocols:
            formats.extend(self._extract_f4m_formats(
                manifest_url('manifest.f4m'),
                video_id, f4m_id='hds', fatal=False))
        if 'dash' not in skip_protocols:
            formats.extend(self._extract_mpd_formats(
                manifest_url('manifest.mpd'),
                video_id, mpd_id='dash', fatal=False))
        if re.search(r'(?:/smil:|\.smil)', url_base):
            if 'smil' not in skip_protocols:
                rtmp_formats = self._extract_smil_formats(
                    manifest_url('jwplayer.smil'),
                    video_id, fatal=False)
                for rtmp_format in rtmp_formats:
                    rtsp_format = rtmp_format.copy()
                    rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
                    del rtsp_format['play_path']
                    del rtsp_format['ext']
                    rtsp_format.update({
                        'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
                        'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
                        'protocol': 'rtsp',
                    })
                    formats.extend([rtmp_format, rtsp_format])
        else:
            for protocol in ('rtmp', 'rtsp'):
                if protocol not in skip_protocols:
                    formats.append({
                        'url': '%s:%s' % (protocol, url_base),
                        'format_id': protocol,
                        'protocol': protocol,
                    })
        return formats

    def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
        return self._search_json(
            r'''(?<!-)\bjwplayer\s*\(\s*(?P<q>'|")(?!(?P=q)).+(?P=q)\s*\)(?:(?!</script>).)*?\.\s*(?:setup\s*\(|(?P<load>load)\s*\(\s*\[)''',
            webpage, 'JWPlayer data', video_id,
            # must be a {...} or sequence, ending
            contains_pattern=r'\{[\s\S]*}(?(load)(?:\s*,\s*\{[\s\S]*})*)', end_pattern=r'(?(load)\]|\))',
            transform_source=transform_source, default=None)

    def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
        # allow passing `transform_source` through to _find_jwplayer_data()
        transform_source = kwargs.pop('transform_source', None)
        kwfind = compat_kwargs({'transform_source': transform_source}) if transform_source else {}

        jwplayer_data = self._find_jwplayer_data(webpage, video_id, **kwfind)

        return self._parse_jwplayer_data(jwplayer_data, video_id, *args, **kwargs)

    def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
                             m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
        flat_pl = try_get(jwplayer_data, lambda x: x.get('playlist') or True)
        if flat_pl is None:
            # not even a dict
            return []

        # JWPlayer backward compatibility: flattened playlists
        # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
        if flat_pl is True:
            jwplayer_data = {'playlist': [jwplayer_data]}

        entries = []

        # JWPlayer backward compatibility: single playlist item
        # https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
        if not isinstance(jwplayer_data['playlist'], list):
            jwplayer_data['playlist'] = [jwplayer_data['playlist']]

        for video_data in jwplayer_data['playlist']:
            # JWPlayer backward compatibility: flattened sources
            # https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
            if 'sources' not in video_data:
                video_data['sources'] = [video_data]

            this_video_id = video_id or video_data['mediaid']

            formats = self._parse_jwplayer_formats(
                video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
                mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)

            subtitles = {}
            for track in traverse_obj(video_data, (
                    'tracks', lambda _, t: t.get('kind').lower() in ('captions', 'subtitles'))):
                track_url = urljoin(base_url, track.get('file'))
                if not track_url:
                    continue
                subtitles.setdefault(track.get('label') or 'en', []).append({
                    'url': self._proto_relative_url(track_url)
                })

            entry = {
                'id': this_video_id,
                'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
                'description': clean_html(video_data.get('description')),
                'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
                'timestamp': int_or_none(video_data.get('pubdate')),
                'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
                'subtitles': subtitles,
                'alt_title': clean_html(video_data.get('subtitle')),  # attributes used e.g. by Tele5 ...
                'genre': clean_html(video_data.get('genre')),
                'channel': clean_html(dict_get(video_data, ('category', 'channel'))),
                'season_number': int_or_none(video_data.get('season')),
                'episode_number': int_or_none(video_data.get('episode')),
                'release_year': int_or_none(video_data.get('releasedate')),
                'age_limit': int_or_none(video_data.get('age_restriction')),
            }
            # https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
            if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
                entry.update({
                    '_type': 'url_transparent',
                    'url': formats[0]['url'],
                })
            else:
                # avoid exception in case of only subtitles
                if formats:
                    self._sort_formats(formats)
                entry['formats'] = formats
            entries.append(entry)
        if len(entries) == 1:
            return entries[0]
        else:
            return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
|
|
|
|
|
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
|
2022-11-11 09:49:13 +09:00
|
|
|
|
urls = set()
|
2017-03-06 01:22:27 +09:00
|
|
|
|
formats = []
|
2017-03-06 01:28:32 +09:00
|
|
|
|
for source in jwplayer_sources_data:
|
2017-06-15 00:02:15 +09:00
|
|
|
|
if not isinstance(source, dict):
|
|
|
|
|
continue
|
2019-01-20 21:31:41 +09:00
|
|
|
|
source_url = urljoin(
|
|
|
|
|
base_url, self._proto_relative_url(source.get('file')))
|
|
|
|
|
if not source_url or source_url in urls:
|
2017-04-17 16:48:24 +09:00
|
|
|
|
continue
|
2022-11-11 09:49:13 +09:00
|
|
|
|
urls.add(source_url)
|
2017-03-06 01:22:27 +09:00
|
|
|
|
source_type = source.get('type') or ''
|
|
|
|
|
ext = mimetype2ext(source_type) or determine_ext(source_url)
|
2022-11-11 09:49:13 +09:00
|
|
|
|
if source_type == 'hls' or ext == 'm3u8' or 'format=m3u8-aapl' in source_url:
|
2017-03-06 01:22:27 +09:00
|
|
|
|
formats.extend(self._extract_m3u8_formats(
|
2017-03-06 01:25:03 +09:00
|
|
|
|
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
|
|
|
|
m3u8_id=m3u8_id, fatal=False))
|
2022-11-11 09:49:13 +09:00
|
|
|
|
elif source_type == 'dash' or ext == 'mpd' or 'format=mpd-time-csf' in source_url:
|
2017-03-06 01:22:27 +09:00
|
|
|
|
formats.extend(self._extract_mpd_formats(
|
|
|
|
|
source_url, video_id, mpd_id=mpd_id, fatal=False))
|
2017-03-16 05:30:53 +09:00
|
|
|
|
elif ext == 'smil':
|
|
|
|
|
formats.extend(self._extract_smil_formats(
|
|
|
|
|
source_url, video_id, fatal=False))
|
2017-03-06 01:22:27 +09:00
|
|
|
|
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
|
2017-03-06 01:25:03 +09:00
|
|
|
|
elif source_type.startswith('audio') or ext in (
|
|
|
|
|
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
|
2017-03-06 01:22:27 +09:00
|
|
|
|
formats.append({
|
|
|
|
|
'url': source_url,
|
|
|
|
|
'vcodec': 'none',
|
|
|
|
|
'ext': ext,
|
|
|
|
|
})
|
|
|
|
|
else:
|
2022-11-11 09:49:13 +09:00
|
|
|
|
format_id = str_or_none(source.get('label'))
|
2017-03-06 01:22:27 +09:00
|
|
|
|
height = int_or_none(source.get('height'))
|
2022-11-11 09:49:13 +09:00
|
|
|
|
if height is None and format_id:
|
2017-03-06 01:22:27 +09:00
|
|
|
|
# Often no height is provided but there is a label in
|
2017-03-06 01:25:03 +09:00
|
|
|
|
# format like "1080p", "720p SD", or 1080.
|
2022-11-11 09:49:13 +09:00
|
|
|
|
height = parse_resolution(format_id).get('height')
|
2017-03-06 01:22:27 +09:00
|
|
|
|
a_format = {
|
|
|
|
|
'url': source_url,
|
|
|
|
|
'width': int_or_none(source.get('width')),
|
|
|
|
|
'height': height,
|
2022-10-11 21:36:44 +09:00
|
|
|
|
'tbr': int_or_none(source.get('bitrate'), scale=1000),
|
2022-11-11 09:49:13 +09:00
|
|
|
|
'filesize': int_or_none(source.get('filesize')),
|
2017-03-06 01:22:27 +09:00
|
|
|
|
'ext': ext,
|
|
|
|
|
}
|
2022-11-11 09:49:13 +09:00
|
|
|
|
if format_id:
|
|
|
|
|
a_format['format_id'] = format_id
|
|
|
|
|
|
2017-03-06 01:22:27 +09:00
|
|
|
|
if source_url.startswith('rtmp'):
|
|
|
|
|
a_format['ext'] = 'flv'
|
|
|
|
|
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
|
|
|
|
|
# of jwplayer.flash.swf
|
|
|
|
|
rtmp_url_parts = re.split(
|
2024-12-12 13:46:33 +09:00
|
|
|
|
r'((?:mp4|mp3|flv):)', source_url, maxsplit=1)
|
2017-03-06 01:22:27 +09:00
|
|
|
|
if len(rtmp_url_parts) == 3:
|
|
|
|
|
rtmp_url, prefix, play_path = rtmp_url_parts
|
|
|
|
|
a_format.update({
|
|
|
|
|
'url': rtmp_url,
|
|
|
|
|
'play_path': prefix + play_path,
|
|
|
|
|
})
|
|
|
|
|
if rtmp_params:
|
|
|
|
|
a_format.update(rtmp_params)
|
|
|
|
|
formats.append(a_format)
|
|
|
|
|
return formats
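
    # Usage sketch (hypothetical source data, for illustration only):
    #   self._parse_jwplayer_formats(
    #       [{'file': '//cdn.example.com/master.m3u8', 'type': 'hls'}],
    #       video_id='42', m3u8_id='hls', base_url='https://example.com/')
    #   resolves the protocol-relative URL and delegates to
    #   _extract_m3u8_formats().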

    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime('%Y-%m-%d %H:%M')
        return name + ' ' + now_str
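
    # e.g. _live_title('Some Channel') -> 'Some Channel 2021-03-01 12:00',
    # where the suffix is the local time at extraction (illustrative date).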

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res
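
    # Usage sketch (for illustration only):
    #   self._int('42', 'view count')              -> 42
    #   self._int('n/a', 'view count')             -> None, with a warning
    #   self._int('n/a', 'view count', fatal=True) raises ExtractorError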

    def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                    path='/', secure=False, discard=False, rest={}, **kwargs):
        cookie = compat_cookiejar_Cookie(
            0, name, value, port, port is not None, domain, True,
            domain.startswith('.'), path, True, secure, expire_time,
            discard, None, None, rest)
        self._downloader.cookiejar.set_cookie(cookie)
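
    # Usage sketch (hypothetical cookie, for illustration only):
    #   self._set_cookie('.example.com', 'consent', 'yes')
    # stores a session cookie that is sent with subsequent requests to
    # example.com and its subdomains.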

    def _get_cookies(self, url):
        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
        req = sanitized_Request(url)
        self._downloader.cookiejar.add_cookie_header(req)
        return compat_cookies_SimpleCookie(req.get_header('Cookie'))
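
    # Usage sketch (hypothetical cookie name, for illustration only):
    #   cookies = self._get_cookies('https://example.com/watch')
    #   sid = cookies['sid'].value if 'sid' in cookies else None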

    def _apply_first_set_cookie_header(self, url_handle, cookie):
        """
        Apply the first Set-Cookie header instead of the last. Experimental.

        Some sites (e.g. [1-3]) may serve two cookies under the same name
        in the Set-Cookie header and expect the first (old) one to be set
        rather than the second (new) one. However, per RFC 6265 the newer
        cookie is the one that ends up in the cookie store, which is what
        actually happens. We work around this issue by manually resetting
        the cookie to the first one.
        1. https://new.vk.com/
        2. https://github.com/ytdl-org/youtube-dl/issues/9841#issuecomment-227871201
        3. https://learning.oreilly.com/
        """
        for header, cookies in url_handle.headers.items():
            if header.lower() != 'set-cookie':
                continue
            if sys.version_info[0] >= 3:
                cookies = cookies.encode('iso-8859-1')
            cookies = cookies.decode('utf-8')
            cookie_value = re.search(
                r'%s=(.+?);.*?\b[Dd]omain=(.+?)(?:[,;]|$)' % cookie, cookies)
            if cookie_value:
                value, domain = cookie_value.groups()
                self._set_cookie(domain, cookie, value)
                break

    def get_testcases(self, include_onlymatching=False):
        t = getattr(self, '_TEST', None)
        if t:
            assert not hasattr(self, '_TESTS'), \
                '%s has _TEST and _TESTS' % type(self).__name__
            tests = [t]
        else:
            tests = getattr(self, '_TESTS', [])
        for t in tests:
            if not include_onlymatching and t.get('only_matching', False):
                continue
            t['name'] = type(self).__name__[:-len('IE')]
            yield t

    def is_suitable(self, age_limit):
        """ Test whether the extractor is generally suitable for the given
        age limit (i.e. pornographic sites are not, all others usually are) """

        any_restricted = False
        for tc in self.get_testcases(include_onlymatching=False):
            if tc.get('playlist', []):
                tc = tc['playlist'][0]
            is_restricted = age_restricted(
                tc.get('info_dict', {}).get('age_limit'), age_limit)
            if not is_restricted:
                return True
            any_restricted = any_restricted or is_restricted
        return not any_restricted
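
    # For example, with age_limit=7 an extractor whose test cases all carry
    # 'age_limit': 18 is rejected, while one with any unrestricted test case
    # (or no test cases at all) is accepted.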

    def extract_subtitles(self, *args, **kwargs):
        if (self._downloader.params.get('writesubtitles', False)
                or self._downloader.params.get('listsubtitles')):
            return self._get_subtitles(*args, **kwargs)
        return {}

    def _get_subtitles(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    @staticmethod
    def _merge_subtitle_items(subtitle_list1, subtitle_list2):
        """ Merge subtitle items for one language. Items with duplicated URLs
        will be dropped. """
        list1_urls = set([item['url'] for item in subtitle_list1])
        ret = list(subtitle_list1)
        ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
        return ret
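
    # e.g. _merge_subtitle_items([{'url': 'http://a/en.vtt'}],
    #                            [{'url': 'http://a/en.vtt'}, {'url': 'http://b/en.vtt'}])
    #   -> [{'url': 'http://a/en.vtt'}, {'url': 'http://b/en.vtt'}]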

    @classmethod
    def _merge_subtitles(cls, subtitle_dict1, *subtitle_dicts, **kwargs):
        """ Merge subtitle dictionaries, language by language. """

        # ..., * , target=None
        target = kwargs.get('target') or dict(subtitle_dict1)

        for subtitle_dict in subtitle_dicts:
            for lang in subtitle_dict:
                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subtitle_dict[lang])
        return target
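
    # e.g. _merge_subtitles({'en': [...]}, {'en': [...], 'de': [...]})
    # merges the 'en' item lists (dropping duplicate URLs) and keeps 'de'.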

    def extract_automatic_captions(self, *args, **kwargs):
        if (self._downloader.params.get('writeautomaticsub', False)
                or self._downloader.params.get('listsubtitles')):
            return self._get_automatic_captions(*args, **kwargs)
        return {}

    def _get_automatic_captions(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    def mark_watched(self, *args, **kwargs):
        if (self._downloader.params.get('mark_watched', False)
                and (self._get_login_info()[0] is not None
                     or self._downloader.params.get('cookiefile') is not None)):
            self._mark_watched(*args, **kwargs)

    def _mark_watched(self, *args, **kwargs):
        raise NotImplementedError('This method must be implemented by subclasses')

    def geo_verification_headers(self):
        headers = {}
        geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
        if geo_verification_proxy:
            headers['Ytdl-request-proxy'] = geo_verification_proxy
        return headers
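
    # Usage sketch (for illustration only): pass these headers with requests
    # that are subject to geo checks so they go through
    # --geo-verification-proxy when one is configured:
    #   self._download_webpage(url, video_id, headers=self.geo_verification_headers())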

    def _generic_id(self, url):
        return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])

    def _generic_title(self, url):
        return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
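
    # e.g. _generic_id('https://example.com/media/My%20Video.mp4/') -> 'My Video'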

    def _yes_playlist(self, playlist_id, video_id, *args, **kwargs):
        # smuggled_data=None, *, playlist_label='playlist', video_label='video'
        smuggled_data = args[0] if len(args) == 1 else kwargs.get('smuggled_data')
        playlist_label = kwargs.get('playlist_label', 'playlist')
        video_label = kwargs.get('video_label', 'video')

        if not playlist_id or not video_id:
            return not video_id

        no_playlist = (smuggled_data or {}).get('force_noplaylist')
        if no_playlist is not None:
            return not no_playlist

        video_id = '' if video_id is True else ' ' + video_id
        noplaylist = self.get_param('noplaylist')
        self.to_screen(
            'Downloading just the {0}{1} because of --no-playlist'.format(video_label, video_id)
            if noplaylist else
            'Downloading {0}{1} - add --no-playlist to download just the {2}{3}'.format(
                playlist_label, '' if playlist_id is True else ' ' + playlist_id,
                video_label, video_id))
        return not noplaylist
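
    # Usage sketch (for illustration only): extractors whose URLs match both
    # a video and its containing playlist call
    #   self._yes_playlist(playlist_id, video_id, smuggled_data)
    # and extract the whole playlist on True, just the single video on False.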


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError('Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError('This method must be implemented by subclasses')

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
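
# Example (hypothetical subclass, for illustration only): with
# _SEARCH_KEY = 'examplesearch' and _MAX_RESULTS = 50, the extractor accepts
# 'examplesearch:cats' (first result), 'examplesearch5:cats' (first five
# results) and 'examplesearchall:cats' (up to 50 results).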