Compare commits


16 Commits

Author SHA1 Message Date
Wong Yiu Hang
e5630cc4db
Merge adc6e01964 into 1036478d13 2025-01-06 10:52:22 -05:00
dirkf
1036478d13 [YouTube] Ensure subtitle URLs are complete
* WEB URLs are, MWEB not
* resolves #33017
2025-01-06 01:39:04 +00:00
dirkf
00ad2b8ca1 [YouTube] Refactor subtitle processing
* move to internal function
* use `traverse_obj()`
2025-01-06 01:24:30 +00:00
dirkf
ab7c61ca29 [YouTube] Apply code style changes, trailing commas, etc 2025-01-06 01:22:16 +00:00
Wong Yiu Hang
adc6e01964 Release 2021.12.17
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE7X9b9Gs7vtgchzaOLDk+DxipI20FAmG7ijgACgkQLDk+Dxip
 I22UFg//XeU4ZtSI8szkW98sNPd8RRk908h/tNaGkitpd9u1rsQMbspDsjX6BYOn
 hCaD8Y3aavdQo+9a1uuJVtxfB3qh/ieElAP1VGZ94S5ID3DGQrEOg+/dEvLK2Gpv
 wH+tAK/pMW4TKrbq6Nb1jRjZLoaUT8Dy8Rz9HzZzzB1w9BaWseb4McsPPOfKbKB1
 MEr0gvUViC8wdhc8k7vvL216+P+a/Orws+ClSEHHSkEgCW3aQ36oeZz2K7Cyh85A
 pOOzdwn/LYhhlyAuqXjAfwk+0pgBEmx2g15Gig/j9CkPmFgpZWAhevfUHFkOgKhH
 u/eSKoxW/g+lpkjcspJ5jsfDFtv2aAuinJdm40aBq0mmTfHuzzIWwJQvtP0/lLVP
 GrAyLgGPnZkj3R+jhWVpl/dAO7HPhPGwxuBcjk7GadCvRSq+yR9TCDbUPe9WnaFM
 /MF1AYQn22lwsZayThoH8GGltOpy4VKPF6fiN/cxFdtglRDcyWbO1shzuzOJHj6D
 7BkSH1KIZ/vNzJNNS2PMAQCL6bXC74xPp7oXsG9CnKo0OzBzWadYwYHF+oJn+nSG
 byq3SSQhsBESLS4CRDC2RbT3uKjheNIYIilqGy80R3JGOHDxG60p3Cb1oNPtRULQ
 1B5iPLj7EdtxHmQ+jqssTkbYjOvpi0jRAvIaxQFFRFGvx6ev3AY=
 =J1bE
 -----END PGP SIGNATURE-----

Merge tag '2021.12.17' into loom

Release 2021.12.17
2022-02-03 17:09:13 +08:00
Wong Yiu Hang
e218b26725 [Loom] Add url_or_none back 2021-02-25 04:11:18 +08:00
Wong Yiu Hang
1b2651ed30 [Loom] Use url_result instead 2021-02-25 03:46:55 +08:00
Wong Yiu Hang
70b804526c [Loom] Move request back into _download_json 2021-02-25 03:20:51 +08:00
Wong Yiu Hang
81bd98a03f [Loom] Add fallback to mandatory attribute 2021-02-25 03:20:51 +08:00
Wong Yiu Hang
29c4168cec [Loom] Add missing parsing function 2021-02-25 03:20:51 +08:00
Wong Yiu Hang
34e6a6b559 [Loom] Moved functions to inline
Removed if statement parentheses
2021-02-25 03:20:51 +08:00
Wong Yiu Hang
c9f3667e2e [Loom] Update: Change test case to avoid a false-positive result from test/test_unicode_literals.py 2021-02-04 00:53:18 +08:00
Wong Yiu Hang
287e710bff [Loom] Add: Additional playlist extractor for folder support 2021-02-04 00:34:05 +08:00
Wong Yiu Hang
918f4f374a [Loom] Update: Move related member functions into LoomIE 2021-02-04 00:34:05 +08:00
Wong Yiu Hang
14df8ad329 Merge branch 'master' into loom 2021-02-04 00:33:10 +08:00
Wong Yiu Hang
2302f32ced [Loom] Add new extractor 2021-02-01 16:00:24 +08:00
3 changed files with 254 additions and 51 deletions
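The two dirkf commits above (00ad2b8ca1 and 1036478d13) move the YouTube subtitle handling into a local process_subtitles() helper and complete caption baseUrl values that the MWEB client returns as relative paths; the corresponding hunks are in youtube.py below. A minimal, self-contained sketch of that URL completion, using the standard-library urljoin rather than the extractor's _yt_urljoin helper (assumed behaviour, not youtube-dl's implementation):

# Sketch only: complete a possibly-relative caption baseUrl against the site root.
from urllib.parse import urljoin

def complete_subtitle_url(base_url):
    if not base_url:
        return None
    # Absolute WEB URLs pass through unchanged; relative MWEB paths are joined.
    return urljoin('https://www.youtube.com', base_url)

print(complete_subtitle_url('/api/timedtext?v=abc&lang=en'))
# https://www.youtube.com/api/timedtext?v=abc&lang=en
print(complete_subtitle_url('https://www.youtube.com/api/timedtext?v=abc&lang=en'))
# unchanged: already absolute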

youtube_dl/extractor/extractors.py

@@ -645,6 +645,10 @@ from .livestream import (
 )
 from .lnkgo import LnkGoIE
 from .localnews8 import LocalNews8IE
+from .loom import (
+    LoomIE,
+    LoomFolderIE
+)
 from .lovehomeporn import LoveHomePornIE
 from .lrt import LRTIE
 from .lynda import (
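Registering the two classes in this import list is what makes them discoverable; youtube-dl builds its extractor list from the names imported here. A quick way to check the registration from a checkout of this branch (gen_extractors() and suitable() are existing youtube_dl.extractor APIs; the share URL is taken from the Loom test cases below):

import youtube_dl.extractor as ydl_extractor

url = 'https://www.loom.com/share/31b41727a5b24dacb6c1417a565b2ebf'
# List every registered extractor that claims this URL; 'Loom' should appear
# alongside the catch-all 'generic' extractor.
print([ie.IE_NAME for ie in ydl_extractor.gen_extractors() if ie.suitable(url)])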

youtube_dl/extractor/loom.py (new file)

@@ -0,0 +1,194 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlencode,
    compat_urllib_request
)
from ..utils import (
    int_or_none,
    js_to_json,
    try_get,
    unified_timestamp,
    url_or_none
)


class LoomBaseInfoIE(InfoExtractor):
    _BASE_URL = 'https://www.loom.com/'


class LoomIE(LoomBaseInfoIE):
    _VALID_URL = r'https?://(?:www\.)?loom\.com/share/(?!folder)(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [
        {
            'url': 'https://www.loom.com/share/31b41727a5b24dacb6c1417a565b2ebf',
            'md5': '8b94361aabff2075141dc60bd6d35453',
            'info_dict': {
                'id': '31b41727a5b24dacb6c1417a565b2ebf',
                'ext': 'mp4',
                'title': 'How to resize your camera bubble',
                'uploader': 'Allie Hitchcock',
                'upload_date': '20201007',
                'timestamp': 1602089241
            }
        },
        {
            'url': 'https://www.loom.com/share/7e5168ec3b0744cab5e08a340cc7e086',
            'md5': '47dd14aa1d8054c249b68ca57ad9963f',
            'info_dict': {
                'id': '7e5168ec3b0744cab5e08a340cc7e086',
                'ext': 'mp4',
                'title': 'How to flip your camera ',
                'uploader': 'Matthew Flores',
                'upload_date': '20200423',
                'timestamp': 1587646164
            }
        },
        {
            'url': 'https://www.loom.com/share/6670e3eba3c84dc09ada8306c7138075',
            'md5': 'bfad8181ed49d6252b10dfdeb46c535e',
            'info_dict': {
                'id': '6670e3eba3c84dc09ada8306c7138075',
                'ext': 'mp4',
                'title': 'How to record your first video on Loom',
                'uploader': 'Allie Hitchcock',
                'upload_date': '20201118',
                'timestamp': 1605729404
            }
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        info_json = self._html_search_regex(
            r'window.loomSSRVideo = (.+?);',
            webpage,
            'info')
        info = self._parse_json(info_json, 'json', js_to_json)

        formats = []
        for type in ['transcoded-url', 'raw-url']:
            json_doc = self._download_json(
                self._BASE_URL + 'api/campaigns/sessions/' + video_id + '/' + type,
                video_id, data={})
            url = url_or_none(json_doc.get('url'))
            part_credentials = json_doc.get('part_credentials')
            ext = self._search_regex(
                r'\.([a-zA-Z0-9]+)\?',
                url, 'ext', default=None)
            if ext != 'm3u8':
                formats.append({
                    'url': url,
                    'ext': ext,
                    'format_id': type,
                    'width': int_or_none(try_get(info, lambda x: x['video_properties']['width'])),
                    'height': int_or_none(try_get(info, lambda x: x['video_properties']['height']))
                })
            else:
                credentials = compat_urllib_parse_urlencode(part_credentials)
                m3u8_formats = self._extract_m3u8_formats(url, video_id)
                for item in m3u8_formats:
                    item['protocol'] = 'm3u8_native'
                    item['url'] += '?' + credentials
                    item['ext'] = 'mp4'
                    item['format_id'] = 'hls-' + str(item.get('height', 0))
                    item['extra_param_to_segment_url'] = credentials
                for i in range(len(m3u8_formats)):
                    formats.insert(
                        (-1, len(formats))[i == len(m3u8_formats) - 1],
                        m3u8_formats[i])

        return {
            'id': info.get('id') or video_id,
            'title': info.get('name'),
            'formats': formats,
            'thumbnails': [
                {
                    'id': key,
                    'url': url_or_none(self._BASE_URL + value)
                } for key, value in info.get('thumbnails').items()
            ],
            'description': info.get('description'),
            'uploader': info.get('owner_full_name'),
            'timestamp': unified_timestamp(info.get('createdAt'))
        }


class LoomFolderIE(LoomBaseInfoIE):
    _VALID_URL = r'https?://(?:www\.)?loom\.com/share/folder/(?P<id>.+)/?'
    _TESTS = [
        {
            'url': 'https://www.loom.com/share/folder/997db4db046f43e5912f10dc5f817b5c/List%20A-%20a%2C%20i%2C%20o',
            'info_dict': {
                'id': '9a8a87f6b6f546d9a400c8e7575ff7f2',
                'title': 'List A- a, i, o'
            },
            'playlist_mincount': 12
        },
        {
            'url': 'https://www.loom.com/share/folder/997db4db046f43e5912f10dc5f817b5c',
            'info_dict': {
                'id': '997db4db046f43e5912f10dc5f817b5c',
                'title': 'Blending Lessons '
            },
            'playlist_mincount': 16
        }
    ]

    def _get_real_folder_id(self, path):
        subfolders = re.match(
            r'^([a-zA-Z0-9]+)(?:\/(.+))*$',
            compat_urllib_parse_unquote(path))
        folder_names = subfolders.groups()[1:]
        parent_folder_id = subfolders.group(1)
        if(folder_names[0] is None):
            return path

        # Fetch folder id
        request = compat_urllib_request.Request(
            self._BASE_URL + 'v1/folders/by_name',
            json.dumps({
                'folder_names': folder_names,
                'parent_folder_id': parent_folder_id
            }).encode('utf-8'))
        json_doc = self._download_json(request, parent_folder_id)
        return try_get(json_doc, lambda x: x['current_folder']['id'])

    def _get_folder_info(self, folder_id):
        json_doc = self._download_json(url_or_none(self._BASE_URL + 'v1/folders/' + folder_id), folder_id)

        videos = []
        # Recursive call for subfolder
        for folder in json_doc.get('folders'):
            subfolder_info = self._get_folder_info(folder.get('id'))
            videos.extend(subfolder_info.get('entries'))
        videos.extend([val.get('id') for val in json_doc.get('videos')])

        return {
            'id': folder_id,
            'title': json_doc.get('name'),
            'description': json_doc.get('description'),
            'entries': videos
        }

    def _real_extract(self, url):
        folder_id = self._match_id(url)
        folder_id = self._get_real_folder_id(folder_id)
        folder_info = self._get_folder_info(folder_id)

        folder_info['_type'] = 'playlist'
        for i in range(len(folder_info['entries'])):
            video_id = folder_info['entries'][i]
            folder_info['entries'][i] = self.url_result(url_or_none(self._BASE_URL + 'share/' + video_id), 'Loom', video_id)

        return folder_info
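For reference, a short usage sketch driving the extractor added above through the public YoutubeDL API (the URL is the first _TESTS entry; download=False limits the run to metadata extraction, i.e. LoomIE._real_extract):

import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    info = ydl.extract_info(
        'https://www.loom.com/share/31b41727a5b24dacb6c1417a565b2ebf',
        download=False)
# id, title and formats are built by _real_extract above
print(info['id'], info['title'], len(info['formats']))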

youtube_dl/extractor/youtube.py

@@ -9,6 +9,7 @@ import json
 import os.path
 import random
 import re
+import string
 import time
 import traceback
@@ -67,6 +68,7 @@ from ..utils import (
 class YoutubeBaseInfoExtractor(InfoExtractor):
     """Provide base functions for Youtube extractors"""
     _LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
     _TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
@@ -138,7 +140,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 [2, 1, None, 1,
                  'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
                  None, [], 4],
-                1, [None, None, []], None, None, None, True
+                1, [None, None, []], None, None, None, True,
             ],
             username,
         ]
@@ -160,7 +162,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             None, 1, None, [1, None, None, None, [password, None, True]],
             [
                 None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
-                1, [None, None, []], None, None, None, True
+                1, [None, None, []], None, None, None, True,
             ]]

         challenge_results = req(
@@ -213,7 +215,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             user_hash, None, 2, None,
             [
                 9, None, None, None, None, None, None, None,
-                [None, tfa_code, True, 2]
+                [None, tfa_code, True, 2],
             ]]

         tfa_results = req(
@@ -284,7 +286,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'client': {
                 'clientName': 'WEB',
                 'clientVersion': '2.20201021.03.00',
-            }
+            },
         },
     }
@@ -385,7 +387,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
                 'client': {
                     'clientName': 'WEB',
                     'clientVersion': '2.20201021.03.00',
-                }
+                },
             },
             'query': query,
         }
@@ -462,7 +464,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             # (HTML, videodetails, metadata, renderers)
             'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
             'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
-                    ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl'])
+                    ['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']),
         }
         if any((videodetails, metadata, renderers)):
             result = (
@@ -671,7 +673,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
                 'description': '',
                 'uploader': '8KVIDEO',
-                'title': 'UHDTV TEST 8K VIDEO.mp4'
+                'title': 'UHDTV TEST 8K VIDEO.mp4',
             },
             'params': {
                 'youtube_include_dash_manifest': True,
@@ -711,7 +713,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
                 'title': 'Burning Everyone\'s Koran',
                 'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
-            }
+            },
         },
         # Age-gated videos
         {
@@ -839,7 +841,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             },
             'expected_warnings': [
                 'DASH manifest missing',
-            ]
+            ],
         },
         # Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
         {
@@ -1820,8 +1822,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         # cpn generation algorithm is reverse engineered from base.js.
         # In fact it works even with dummy cpn.
-        CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
-        cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))
+        CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
+        cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))

         # more consistent results setting it to right before the end
         qs = parse_qs(playback_url)
@@ -1881,8 +1883,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
         if mobj is None:
             raise ExtractorError('Invalid URL: %s' % url)
-        video_id = mobj.group(2)
-        return video_id
+        return mobj.group(2)

     def _extract_chapters_from_json(self, data, video_id, duration):
         chapters_list = try_get(
@@ -2035,7 +2036,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             headers = {
                 'X-YouTube-Client-Name': '85',
                 'X-YouTube-Client-Version': '2.0',
-                'Origin': 'https://www.youtube.com'
+                'Origin': 'https://www.youtube.com',
             }

             video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
@@ -2064,8 +2065,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])

         search_meta = (
-            lambda x: self._html_search_meta(x, webpage, default=None)) \
-            if webpage else lambda x: None
+            (lambda x: self._html_search_meta(x, webpage, default=None))
+            if webpage else lambda _: None)

         video_details = player_response.get('videoDetails') or {}
         microformat = try_get(
@@ -2137,7 +2138,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         def build_fragments(f):
             return LazyList({
                 'url': update_url_query(f['url'], {
-                    'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
+                    'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])),
                 })
             } for range_start in range(0, f['filesize'], CHUNK_SIZE))
@@ -2236,7 +2237,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     'protocol': 'http_dash_segments',
                     'fragments': build_fragments(dct),
                 } if dct['filesize'] else {
-                    'downloader_options': {'http_chunk_size': CHUNK_SIZE}  # No longer useful?
+                    'downloader_options': {'http_chunk_size': CHUNK_SIZE},  # No longer useful?
                 })

                 formats.append(dct)
@@ -2414,9 +2415,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             'is_live': is_live,
         }

-        pctr = try_get(
+        pctr = traverse_obj(
             player_response,
-            lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
+            ('captions', 'playerCaptionsTracklistRenderer', T(dict)))
         if pctr:
             def process_language(container, base_url, lang_code, query):
                 lang_subs = []
@@ -2430,9 +2431,11 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                     })
                 container[lang_code] = lang_subs

+            def process_subtitles():
                 subtitles = {}
-                for caption_track in (pctr.get('captionTracks') or []):
-                    base_url = caption_track.get('baseUrl')
+                for caption_track in traverse_obj(pctr, (
+                        'captionTracks', lambda _, v: v.get('baseUrl'))):
+                    base_url = self._yt_urljoin(caption_track['baseUrl'])
                     if not base_url:
                         continue
                     if caption_track.get('kind') != 'asr':
@@ -2443,18 +2446,19 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                             subtitles, base_url, lang_code, {})
                         continue
                     automatic_captions = {}
-                    for translation_language in (pctr.get('translationLanguages') or []):
-                        translation_language_code = translation_language.get('languageCode')
-                        if not translation_language_code:
-                            continue
+                    for translation_language in traverse_obj(pctr, (
+                            'translationLanguages', lambda _, v: v.get('languageCode'))):
+                        translation_language_code = translation_language['languageCode']
                         process_language(
                             automatic_captions, base_url, translation_language_code,
                             {'tlang': translation_language_code})
                     info['automatic_captions'] = automatic_captions
                 info['subtitles'] = subtitles

+            process_subtitles()

         parsed_url = compat_urllib_parse_urlparse(url)
-        for component in [parsed_url.fragment, parsed_url.query]:
+        for component in (parsed_url.fragment, parsed_url.query):
             query = compat_parse_qs(component)
             for k, v in query.items():
                 for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
@@ -2684,7 +2688,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'title': 'Super Cooper Shorts - Shorts',
             'uploader': 'Super Cooper Shorts',
             'uploader_id': '@SuperCooperShorts',
-        }
+        },
     }, {
         # Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
         'url': 'https://www.youtube.com/@emergencyawesome/shorts',
@@ -2738,7 +2742,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'description': 'md5:609399d937ea957b0f53cbffb747a14c',
             'uploader': 'ThirstForScience',
             'uploader_id': '@ThirstForScience',
-        }
+        },
     }, {
         'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
         'only_matching': True,
@@ -3037,7 +3041,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'uploader': '3Blue1Brown',
             'uploader_id': '@3blue1brown',
             'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
-        }
+        },
     }]

     @classmethod
@@ -3335,7 +3339,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             'client': {
                 'clientName': 'WEB',
                 'clientVersion': client_version,
-            }
+            },
         }

         visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
@@ -3354,7 +3358,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
             headers['x-goog-visitor-id'] = visitor_data
         data['continuation'] = continuation['continuation']
         data['clickTracking'] = {
-            'clickTrackingParams': continuation['itct']
+            'clickTrackingParams': continuation['itct'],
         }
         count = 0
         retries = 3
@@ -3613,7 +3617,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader': 'milan',
             'uploader_id': '@milan5503',
             'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
-        }
+        },
     }, {
         'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
         'playlist_mincount': 455,
@@ -3623,7 +3627,7 @@ class YoutubePlaylistIE(InfoExtractor):
             'uploader': 'LBK',
             'uploader_id': '@music_king',
             'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
-        }
+        },
     }, {
         'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
         'only_matching': True,
@@ -3734,7 +3738,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
         'info_dict': {
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
-        }
+        },
     }]

     def _get_n_results(self, query, n):
@@ -3754,7 +3758,7 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
         'info_dict': {
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
-        }
+        },
     }]
@@ -3769,7 +3773,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
             'id': 'youtube-dl test video',
             'title': 'youtube-dl test video',
         },
-        'params': {'playlistend': 5}
+        'params': {'playlistend': 5},
     }, {
         'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
         'only_matching': True,
@@ -3785,6 +3789,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
 class YoutubeFeedsInfoExtractor(YoutubeTabIE):
     """
     Base class for feed extractors
+
     Subclasses must define the _FEED_NAME property.
     """
     _LOGIN_REQUIRED = True
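The refactored subtitle code above relies on passing a callable inside the traverse_obj() path so that only caption tracks carrying a baseUrl (and only translation languages carrying a languageCode) reach the loop body. A simplified, self-contained stand-in (not youtube-dl's implementation) showing that filtering idea:

# Simplified stand-in for traverse_obj(pctr, ('captionTracks', lambda _, v: v.get('baseUrl'))):
# walk one key, then keep only the list items the predicate accepts.
def walk_and_filter(obj, key, predicate):
    return [v for i, v in enumerate((obj or {}).get(key) or []) if predicate(i, v)]

pctr = {
    'captionTracks': [
        {'baseUrl': '/api/timedtext?v=abc&lang=en', 'languageCode': 'en'},
        {'languageCode': 'fr'},  # no baseUrl: dropped, so the loop never sees it
    ],
}

for track in walk_and_filter(pctr, 'captionTracks', lambda _, v: v.get('baseUrl')):
    print(track['languageCode'], track['baseUrl'])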