mirror of https://github.com/ytdl-org/youtube-dl
synced 2025-01-10 05:20:09 +09:00

Compare commits: e39dff7f57 ... 48291b3e0e (7 commits)

Commits (SHA1; author and date columns not captured):
  48291b3e0e
  1036478d13
  00ad2b8ca1
  ab7c61ca29
  3872619ed5
  264544f90e
  9af0f299bf

youtube_dl/extractor/ant1newsgr.py (new file, 188 lines)
@@ -0,0 +1,188 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_urlparse,
    compat_urlparse,
)
from ..utils import (
    HEADRequest,
    ExtractorError,
    determine_ext,
    smuggle_url,
    unsmuggle_url,
    unescapeHTML,
)


class Ant1NewsGrBaseIE(InfoExtractor):
    @staticmethod
    def _smuggle_parent_info(url, **info_dict):
        return smuggle_url(url, {'parent_info': info_dict})

    @staticmethod
    def _unsmuggle_parent_info(url):
        unsmuggled_url, data = unsmuggle_url(url, default={'parent_info': {}})
        return unsmuggled_url, data['parent_info']

    def _download_api_data(self, netloc, cid, scheme='https'):
        url_parts = (scheme, netloc, self._API_PATH, None, None, None)
        url = compat_urlparse.urlunparse(url_parts)
        query = {'cid': cid}
        return self._download_json(
            url, cid,
            'Downloading JSON',
            'Unable to download JSON',
            query=query)

    def _download_and_extract_api_data(self, video_id, *args, **kwargs):
        info = self._download_api_data(*args, **kwargs)
        try:
            source = info['url']
        except KeyError:
            raise ExtractorError('no source found for %s' % video_id)
        formats = self._extract_m3u8_formats(source, video_id, 'mp4') \
            if determine_ext(source) == 'm3u8' else [source]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': info['title'],
            'thumbnail': info['thumb'],
            'formats': formats,
        }


class Ant1NewsGrWatchIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:watch'
    IE_DESC = 'ant1news.gr videos'
    _VALID_URL = r'https?://(?:www\.)?ant1news\.gr/watch/(?P<id>\d+)/'
    _API_PATH = '/templates/data/player'

    _TEST = {
        'url': 'https://www.ant1news.gr/watch/1506168/ant1-news-09112021-stis-18-45',
        'md5': '60a984da5ffc98c9924e6d9dd46c6f04',
        'info_dict': {
            'id': '1506168',
            'ext': 'mp4',
            'title': 'md5:0ad00fa66ecf8aa233d26ab0dba7514a',
            'description': 'md5:18665af715a6dcfeac1d6153a44f16b0',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        scheme, netloc, _, _, _, _ = compat_urllib_parse_urlparse(url)
        info = self._download_and_extract_api_data(
            video_id, netloc, video_id, scheme=scheme)
        info['description'] = self._og_search_description(webpage)
        return info


class Ant1NewsGrArticleIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:article'
    IE_DESC = 'ant1news.gr articles'
    _VALID_URL = r'https?://(?:www\.)?ant1news\.gr/[^/]+/article/(?P<id>\d+)/'

    _TESTS = [{
        'url': 'https://www.ant1news.gr/afieromata/article/549468/o-tzeims-mpont-sta-meteora-oi-apeiles-kai-o-xesikomos-ton-kalogeron',
        'md5': 'eb635a194c15272c2611a751766b0200',
        'info_dict': {
            'id': '_xvg/m_cmbatw=',
            'ext': 'mp4',
            'title': 'md5:a93e8ecf2e4073bfdffcb38f59945411',
        },
        'expected_warnings': [r'^[Uu]nable to extract JSON-LD'],
    }, {
        'url': 'https://ant1news.gr/Society/article/620286/symmoria-anilikon-dikigoros-thymaton-ithelan-na-toys-apoteleiosoyn',
        'info_dict': {
            'id': '620286',
            'title': 'md5:91fe569e952e4d146485740ae927662b',
        },
        'expected_warnings': [r'^[Uu]nable to extract JSON-LD'],
        'playlist_mincount': 2,
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        info = self._search_json_ld(webpage, video_id,
                                    expected_type='NewsArticle',
                                    fatal=False)
        # workaround as _json_ld does not recognize @graph nesting
        if not info:
            info['title'] = self._og_search_title(webpage)
        embed_urls = list(Ant1NewsGrEmbedIE._extract_urls(webpage, url, **info))
        if not embed_urls:
            raise ExtractorError('no videos found for %s' % video_id)
        if len(embed_urls) == 1:
            return self.url_result(embed_urls[0], ie=Ant1NewsGrEmbedIE.ie_key(),
                                   video_title=info['title'])
        return self.playlist_from_matches(
            embed_urls, video_id, info['title'], ie=Ant1NewsGrEmbedIE.ie_key())


class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE):
    IE_NAME = 'ant1newsgr:embed'
    IE_DESC = 'ant1news.gr embedded videos'
    _VALID_URL = r'''(?x)https?://(?:[a-zA-Z0-9\-]+\.)?
        (?:antenna|ant1news)\.gr/templates/pages/player
        \?(?:(?:cid=(?P<id>[^&#]+)|[^&=#]+=[^&#]+)&?)+'''
    _API_PATH = '/news/templates/data/jsonPlayer'

    _TEST = {
        'url': 'https://www.antenna.gr/templates/pages/player?cid=3f_li_c_az_jw_y_u=&w=670&h=377',
        'md5': '12872b12af18b5dbf76528786728de8c',
        'info_dict': {
            'id': '3f_li_c_az_jw_y_u=',
            'ext': 'mp4',
            'title': 'md5:a30c93332455f53e1e84ae0724f0adf7',
        },
    }

    @classmethod
    def _extract_urls(cls, webpage, origin_url=None, **parent_info):
        # make the scheme in _VALID_URL optional
        _URL_RE = r'(?:https?:)?//' + cls._VALID_URL.split('://', 1)[1]
        # simplify the query string part of _VALID_URL; after extracting iframe
        # src, the URL will be matched again
        _URL_RE = _URL_RE.split(r'\?', 1)[0] + r'\?(?:(?!(?P=_q1)).)+'
        EMBED_RE = r'''(?x)
            <iframe[^>]+?src=(?P<_q1>%(quot_re)s)(?P<url>%(url_re)s)(?P=_q1)
        ''' % {'quot_re': r'["\']', 'url_re': _URL_RE}
        for mobj in re.finditer(EMBED_RE, webpage):
            url = unescapeHTML(mobj.group('url'))
            if url.startswith('//'):
                scheme = compat_urllib_parse_urlparse(origin_url).scheme \
                    if origin_url else 'https'
                url = '%s:%s' % (scheme, url)
            if not cls.suitable(url):
                continue
            yield cls._smuggle_parent_info(url, **parent_info)

    def _real_extract(self, url):
        url, parent_info = type(self)._unsmuggle_parent_info(url)
        video_id = self._match_id(url)

        # resolve any redirects, to derive the proper base URL for the API query
        canonical_url = self._request_webpage(
            HEADRequest(url), video_id,
            note='Resolve canonical player URL',
            errnote='Could not resolve canonical player URL').geturl()
        scheme, netloc, _, _, query, _ = compat_urllib_parse_urlparse(
            canonical_url)
        query = compat_parse_qs(query)
        cid = query['cid'][0]

        info = self._download_and_extract_api_data(
            video_id, netloc, cid, scheme=scheme)
        if 'timestamp' not in info and 'timestamp' in parent_info:
            info['timestamp'] = parent_info['timestamp']
        return info
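
The base class above hands article metadata to the embed extractor by smuggling it into the player URL fragment, so the article and generic extractors can pass a title or timestamp along without re-downloading the parent page. A minimal sketch of that round trip, assuming a youtube-dl checkout with this patch applied (the cid and metadata values are invented):

# Sketch only: how parent info is smuggled into and recovered from a player URL.
# Assumes youtube-dl with this patch applied; cid/title/timestamp are made up.
from youtube_dl.extractor.ant1newsgr import Ant1NewsGrBaseIE

player_url = 'https://www.antenna.gr/templates/pages/player?cid=example_cid&w=670&h=377'

# Pack metadata from the parent page into the URL fragment...
smuggled = Ant1NewsGrBaseIE._smuggle_parent_info(
    player_url, title='Sample title', timestamp=1636477500)

# ...and unpack it again, as Ant1NewsGrEmbedIE._real_extract() does.
clean_url, parent_info = Ant1NewsGrBaseIE._unsmuggle_parent_info(smuggled)
assert clean_url == player_url
assert parent_info == {'title': 'Sample title', 'timestamp': 1636477500}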

youtube_dl/extractor/extractors.py
@@ -1078,6 +1078,11 @@ from .rutube import (
     RutubePersonIE,
     RutubePlaylistIE,
 )
+from .ant1newsgr import (
+    Ant1NewsGrWatchIE,
+    Ant1NewsGrArticleIE,
+    Ant1NewsGrEmbedIE,
+)
 from .rutv import RUTVIE
 from .ruutu import RuutuIE
 from .ruv import RuvIE

youtube_dl/extractor/generic.py
@@ -102,6 +102,7 @@ from .ustream import UstreamIE
 from .arte import ArteTVEmbedIE
 from .videopress import VideoPressIE
 from .rutube import RutubeIE
+from .ant1newsgr import Ant1NewsGrEmbedIE
 from .limelight import LimelightBaseIE
 from .anvato import AnvatoIE
 from .washingtonpost import WashingtonPostIE
@@ -3400,6 +3401,13 @@ class GenericIE(InfoExtractor):
             return self.playlist_from_matches(
                 rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
 
+        # Look for ant1news.gr embeds
+        ant1newsgr_urls = list(Ant1NewsGrEmbedIE._extract_urls(webpage, url,
+                                                               title=video_title))
+        if ant1newsgr_urls:
+            return self.playlist_from_matches(
+                ant1newsgr_urls, video_id, video_title, ie=Ant1NewsGrEmbedIE.ie_key())
+
         # Look for WashingtonPost embeds
         wapo_urls = WashingtonPostIE._extract_urls(webpage)
         if wapo_urls:
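
For reference, a rough sketch of driving the new embed scanner directly, again assuming the patched youtube-dl is importable; the HTML snippet, article URL and title below are invented:

# Sketch only: feeding a page with an ant1news.gr player iframe to _extract_urls().
from youtube_dl.extractor.ant1newsgr import Ant1NewsGrEmbedIE

webpage = '''
<html><body>
<iframe src="https://www.antenna.gr/templates/pages/player?cid=example_cid&amp;w=670&amp;h=377"></iframe>
</body></html>
'''

# &amp; entities are undone by unescapeHTML(); a scheme-relative //... src would
# borrow its scheme from the second (origin URL) argument.
urls = list(Ant1NewsGrEmbedIE._extract_urls(
    webpage, 'https://www.ant1news.gr/some/article/1/', title='Sample article'))
# Each result is the iframe URL with the parent title smuggled into its fragment,
# ready for url_result()/playlist_from_matches() as in the hunk above.
print(urls)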

youtube_dl/extractor/youtube.py
@@ -9,6 +9,7 @@ import json
 import os.path
 import random
 import re
+import string
 import time
 import traceback
 
@@ -67,6 +68,7 @@ from ..utils import (
 
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
+
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
     _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
 
@@ -138,7 +140,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                     [2, 1, None, 1,
                      'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                      None, [], 4],
-                    1, [None, None, []], None, None, None, True
+                    1, [None, None, []], None, None, None, True,
                 ],
                 username,
             ]
@@ -160,7 +162,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 None, 1, None, [1, None, None, None, [password, None, True]],
                 [
                     None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
-                    1, [None, None, []], None, None, None, True
+                    1, [None, None, []], None, None, None, True,
                 ]]
 
             challenge_results = req(
@@ -213,7 +215,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                     user_hash, None, 2, None,
                     [
                         9, None, None, None, None, None, None, None,
-                        [None, tfa_code, True, 2]
+                        [None, tfa_code, True, 2],
                     ]]
 
                 tfa_results = req(
@@ -284,7 +286,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 'client': {
                     'clientName': 'WEB',
                     'clientVersion': '2.20201021.03.00',
-                }
+                },
             },
         }
 
@@ -385,7 +387,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 'client': {
                     'clientName': 'WEB',
                     'clientVersion': '2.20201021.03.00',
-                }
+                },
             },
             'query': query,
         }
@@ -462,7 +464,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             # (HTML, videodetails, metadata, renderers)
             'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
             'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
-                    ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl'])
+                    ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']),
         }
         if any((videodetails, metadata, renderers)):
             result = (
@@ -671,7 +673,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
                 'description': '',
                 'uploader': '8KVIDEO',
-                'title': 'UHDTV TEST 8K VIDEO.mp4'
+                'title': 'UHDTV TEST 8K VIDEO.mp4',
             },
             'params': {
                 'youtube_include_dash_manifest': True,
@@ -711,7 +713,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
                 'title': 'Burning Everyone\'s Koran',
                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
-            }
+            },
         },
         # Age-gated videos
         {
@@ -839,7 +841,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
             'expected_warnings': [
                 'DASH manifest missing',
-            ]
+            ],
         },
         # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
         {
@@ -1820,8 +1822,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
 
         # cpn generation algorithm is reverse engineered from base.js.
         # In fact it works even with dummy cpn.
-        CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
-        cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))
+        CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
+        cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
 
         # more consistent results setting it to right before the end
         qs = parse_qs(playback_url)
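
A quick aside on why this hunk is behaviour-preserving (illustrative snippet, not part of the diff): the rebuilt alphabet is byte-for-byte the old 64-character literal, and the masked random index always stays inside it.

# Illustration only: the new CPN_ALPHABET equals the old 64-character literal.
import random
import string

OLD = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
NEW = string.ascii_letters + string.digits + '-_'
assert OLD == NEW and len(NEW) == 64

# random.randint(0, 256) & 63 is always a valid index into the 64-char alphabet.
assert all(0 <= (random.randint(0, 256) & 63) < 64 for _ in range(1000))
cpn = ''.join(NEW[random.randint(0, 256) & 63] for _ in range(16))
print(cpn)  # a 16-character client playback nonce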
@@ -1881,8 +1883,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             raise ExtractorError('Invalid URL: %s' % url)
-        video_id = mobj.group(2)
-        return video_id
+        return mobj.group(2)
 
     def _extract_chapters_from_json(self, data, video_id, duration):
         chapters_list = try_get(
@@ -2035,7 +2036,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             headers = {
                 'X-YouTube-Client-Name': '85',
                 'X-YouTube-Client-Version': '2.0',
-                'Origin': 'https://www.youtube.com'
+                'Origin': 'https://www.youtube.com',
             }
 
             video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
@@ -2064,8 +2065,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
 
         search_meta = (
-            lambda x: self._html_search_meta(x, webpage, default=None)) \
-            if webpage else lambda x: None
+            (lambda x: self._html_search_meta(x, webpage, default=None))
+            if webpage else lambda _: None)
 
         video_details = player_response.get('videoDetails') or {}
         microformat = try_get(
@@ -2137,7 +2138,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 def build_fragments(f):
                     return LazyList({
                         'url': update_url_query(f['url'], {
-                            'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
+                            'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])),
                         })
                     } for range_start in range(0, f['filesize'], CHUNK_SIZE))
 
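
To make the fragment construction above concrete, a small illustration of the byte-range strings it builds; CHUNK_SIZE and the file size here are assumed example values, not taken from this diff:

# Illustration only: the per-fragment range strings produced as in build_fragments().
CHUNK_SIZE = 10 << 20          # assumed 10 MiB chunk size
filesize = 25 * 1024 * 1024    # assumed 25 MiB download

ranges = ['{0}-{1}'.format(start, min(start + CHUNK_SIZE - 1, filesize))
          for start in range(0, filesize, CHUNK_SIZE)]
print(ranges)
# ['0-10485759', '10485760-20971519', '20971520-26214400']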
@@ -2236,7 +2237,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     'protocol': 'http_dash_segments',
                     'fragments': build_fragments(dct),
                 } if dct['filesize'] else {
-                    'downloader_options': {'http_chunk_size': CHUNK_SIZE}  # No longer useful?
+                    'downloader_options': {'http_chunk_size': CHUNK_SIZE},  # No longer useful?
                 })
 
                 formats.append(dct)
@@ -2414,9 +2415,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'is_live': is_live,
         }
 
-        pctr = try_get(
+        pctr = traverse_obj(
             player_response,
-            lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
+            ('captions', 'playerCaptionsTracklistRenderer', T(dict)))
         if pctr:
             def process_language(container, base_url, lang_code, query):
                 lang_subs = []
@@ -2430,31 +2431,34 @@
                     })
                 container[lang_code] = lang_subs
 
-            subtitles = {}
-            for caption_track in (pctr.get('captionTracks') or []):
-                base_url = caption_track.get('baseUrl')
-                if not base_url:
-                    continue
-                if caption_track.get('kind') != 'asr':
-                    lang_code = caption_track.get('languageCode')
-                    if not lang_code:
+            def process_subtitles():
+                subtitles = {}
+                for caption_track in traverse_obj(pctr, (
+                        'captionTracks', lambda _, v: v.get('baseUrl'))):
+                    base_url = self._yt_urljoin(caption_track['baseUrl'])
+                    if not base_url:
                         continue
-                    process_language(
-                        subtitles, base_url, lang_code, {})
-                    continue
-                automatic_captions = {}
-                for translation_language in (pctr.get('translationLanguages') or []):
-                    translation_language_code = translation_language.get('languageCode')
-                    if not translation_language_code:
+                    if caption_track.get('kind') != 'asr':
+                        lang_code = caption_track.get('languageCode')
+                        if not lang_code:
+                            continue
+                        process_language(
+                            subtitles, base_url, lang_code, {})
                         continue
-                    process_language(
-                        automatic_captions, base_url, translation_language_code,
-                        {'tlang': translation_language_code})
-                info['automatic_captions'] = automatic_captions
-            info['subtitles'] = subtitles
+                    automatic_captions = {}
+                    for translation_language in traverse_obj(pctr, (
+                            'translationLanguages', lambda _, v: v.get('languageCode'))):
+                        translation_language_code = translation_language['languageCode']
+                        process_language(
+                            automatic_captions, base_url, translation_language_code,
+                            {'tlang': translation_language_code})
+                    info['automatic_captions'] = automatic_captions
+                info['subtitles'] = subtitles
+
+            process_subtitles()
 
         parsed_url = compat_urllib_parse_urlparse(url)
-        for component in [parsed_url.fragment, parsed_url.query]:
+        for component in (parsed_url.fragment, parsed_url.query):
             query = compat_parse_qs(component)
             for k, v in query.items():
                 for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
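
The traverse_obj calls introduced above replace the manual .get() checks with a declarative filter: the lambda keeps only the dicts that carry the required key. A plain-Python illustration of that filtering (sample data invented, not the library call itself):

# Illustration only: what the ('captionTracks', lambda _, v: v.get('baseUrl'))
# traversal effectively yields for a playerCaptionsTracklistRenderer-like dict.
pctr = {
    'captionTracks': [
        {'baseUrl': 'https://example.invalid/caps?lang=en', 'languageCode': 'en'},
        {'languageCode': 'el'},                                    # no baseUrl -> dropped
        {'baseUrl': 'https://example.invalid/caps?lang=de', 'kind': 'asr'},
    ],
}

tracks = [v for v in pctr.get('captionTracks') or [] if v.get('baseUrl')]
assert len(tracks) == 2  # only entries with a baseUrl survive, as in the new loop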
@@ -2684,7 +2688,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': 'Super Cooper Shorts - Shorts',
             'uploader': 'Super Cooper Shorts',
             'uploader_id': '@SuperCooperShorts',
-        }
+        },
     }, {
         # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
         'url': 'https://www.youtube.com/@emergencyawesome/shorts',
@@ -2738,7 +2742,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'description': 'md5:609399d937ea957b0f53cbffb747a14c',
             'uploader': 'ThirstForScience',
             'uploader_id': '@ThirstForScience',
-        }
+        },
     }, {
         'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
         'only_matching': True,
@@ -3037,7 +3041,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'uploader': '3Blue1Brown',
             'uploader_id': '@3blue1brown',
             'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
-        }
+        },
     }]
 
     @classmethod
@@ -3335,7 +3339,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'client': {
                 'clientName': 'WEB',
                 'clientVersion': client_version,
-            }
+            },
         }
         visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
 
@@ -3354,7 +3358,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             headers['x-goog-visitor-id'] = visitor_data
         data['continuation'] = continuation['continuation']
         data['clickTracking'] = {
-            'clickTrackingParams': continuation['itct']
+            'clickTrackingParams': continuation['itct'],
         }
         count = 0
         retries = 3
@@ -3613,7 +3617,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader': 'milan',
             'uploader_id': '@milan5503',
             'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
-        }
+        },
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
        'playlist_mincount': 455,
@@ -3623,7 +3627,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader': 'LBK',
             'uploader_id': '@music_king',
             'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
-        }
+        },
     }, {
         'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
         'only_matching': True,
@@ -3734,7 +3738,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
-        }
+        },
     }]
 
     def _get_n_results(self, query, n):
@@ -3754,7 +3758,7 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
         'info_dict': {
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
-        }
+        },
     }]
 
 
@@ -3769,7 +3773,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
         },
-        'params': {'playlistend': 5}
+        'params': {'playlistend': 5},
     }, {
         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
         'only_matching': True,
@@ -3785,6 +3789,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
 class YoutubeFeedsInfoExtractor(YoutubeTabIE):
     """
     Base class for feed extractors
+
     Subclasses must define the _FEED_NAME property.
     """
     _LOGIN_REQUIRED = True