Compare commits


8 Commits

Author SHA1 Message Date
Zenon Mousmoulas
4af8019166
Merge 96a0ad4778 into 1036478d13 2025-01-06 19:20:30 +02:00
dirkf
1036478d13 [YouTube] Ensure subtitle URLs are complete
* WEB URLs are, MWEB not
* resolves #33017
2025-01-06 01:39:04 +00:00
dirkf
00ad2b8ca1 [YouTube] Refactor subtitle processing
* move to internal function
* use `traverse_obj()`
2025-01-06 01:24:30 +00:00
dirkf
ab7c61ca29 [YouTube] Apply code style changes, trailing commas, etc 2025-01-06 01:22:16 +00:00
Zenon Mousmoulas
96a0ad4778 MegaTVComEmbedIE: Make canonical URL extraction more robust 2021-11-13 11:50:22 +02:00
Zenon Mousmoulas
28fddc1758 Fix copy/paste typo 2021-11-13 11:39:29 +02:00
Zenon Mousmoulas
a5ec30e106 Address PR comments about escapes 2021-11-13 08:42:15 +02:00
Zenon Mousmoulas
34c3b06402 Add MegaTVCom IEs
* Add new IEs
  * MegaTVComBaseIE: Base IE class
  * MegaTVComIE: Extract from TV VOD pages and news articles, i.e. all
    sorts of pages showing videos on megatv.com
  * MegaTVComEmbedIE: Extract iframe-embeddable megatv.com videos
* When video_id is not matched in the URL, namely for news articles,
  extract it (article_id) from a particular element on the web page
  (see the URL-matching sketch after this commit list)
* Derive metadata and sources directly from the web page, from data
  attributes of the player placeholder element and other commonly used
  elements
* Let MegaTVComEmbedIE defer to MegaTVComIE for extraction, as the
  metadata on the embeddable page are sometimes slightly different, for
  the same video
2021-11-11 15:40:14 +02:00
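The two URL shapes the new extractor accepts can be read off its _VALID_URL and tests in the diff below; a self-contained sketch of how they match (the URLs are taken from the _TESTS, everything else is illustrative):

import re

# _VALID_URL of MegaTVComIE, copied from the new extractor below.
VALID_URL = r'https?://(?:www\.)?megatv\.com/(?:(?!\d{4})[^/]+/(?P<id>\d+)/[^/]+|\d{4}/\d{2}/\d{2}/.+)'

# TV VOD page: the numeric video id is part of the URL.
m = re.match(VALID_URL, 'https://www.megatv.com/tvshows/527800/epeisodio-65-12/')
print(m.group('id'))  # 527800

# Dated news article: no id in the URL; the extractor later reads it
# from an <article id="Article_..."> element in the page (see _match_article_id).
m = re.match(VALID_URL, 'https://www.megatv.com/2021/10/23/egkainia-gia-ti-nea-skini-omega-tou-dimotikou-theatrou-peiraia/')
print(m.group('id'))  # None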
4 changed files with 270 additions and 51 deletions

youtube_dl/extractor/extractors.py

@@ -1078,6 +1078,10 @@ from .rutube import (
    RutubePersonIE,
    RutubePlaylistIE,
)
from .megatvcom import (
    MegaTVComIE,
    MegaTVComEmbedIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .ruv import RuvIE
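Registering the two classes here is what lets youtube-dl pick them for megatv.com URLs. A rough usage sketch, assuming a checkout with this branch applied is importable (gen_extractor_classes() and suitable() are existing youtube-dl APIs):

from youtube_dl.extractor import gen_extractor_classes

url = 'https://www.megatv.com/embed/?p=2020520979'  # embed URL from the tests below
matching = [ie.IE_NAME for ie in gen_extractor_classes() if ie.suitable(url)]
print(matching)  # expected to include 'megatvcom:embed' (plus the catch-all 'generic')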

youtube_dl/extractor/generic.py

@@ -102,6 +102,7 @@ from .ustream import UstreamIE
from .arte import ArteTVEmbedIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .megatvcom import MegaTVComEmbedIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE

@@ -3400,6 +3401,12 @@ class GenericIE(InfoExtractor):
            return self.playlist_from_matches(
                rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())

        # Look for megatv.com embeds
        megatvcom_urls = list(MegaTVComEmbedIE._extract_urls(webpage, url))
        if megatvcom_urls:
            return self.playlist_from_matches(
                megatvcom_urls, video_id, video_title, ie=MegaTVComEmbedIE.ie_key())

        # Look for WashingtonPost embeds
        wapo_urls = WashingtonPostIE._extract_urls(webpage)
        if wapo_urls:
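For reference, this is the kind of third-party markup the new hook discovers. A self-contained sketch using the same iframe/src pattern that MegaTVComEmbedIE._extract_urls builds from its _VALID_URL (the surrounding HTML is made up):

import re

# A made-up third-party page that embeds a megatv.com video:
webpage = '<p>Watch this:</p>\n<iframe src="https://www.megatv.com/embed/?p=2020520979" width="640" height="360"></iframe>'

# Same shape of pattern that _extract_urls() builds from _VALID_URL, with the scheme made optional:
EMBED_RE = r'<iframe[^>]+?src=(?P<_q1>["\'])(?P<url>(?:https?:)?//(?:www\.)?megatv\.com/embed/?\?p=\d+)(?P=_q1)'
print([m.group('url') for m in re.finditer(EMBED_RE, webpage)])
# ['https://www.megatv.com/embed/?p=2020520979']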

youtube_dl/extractor/megatvcom.py

@@ -0,0 +1,203 @@
# coding: utf-8
from __future__ import unicode_literals

import hashlib
import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_parse_qs,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    HEADRequest,
    ExtractorError,
    determine_ext,
    get_element_by_class,
    unified_timestamp,
    extract_attributes,
    clean_html,
    unescapeHTML,
)


class MegaTVComBaseIE(InfoExtractor):
    _PLAYER_DIV_ID = 'player_div_id'

    def _extract_player_attrs(self, webpage):
        PLAYER_DIV_RE = r'''(?x)
            <div(?:
                id=(?P<_q1>["'])(?P<%(pdi)s>%(pdi)s)(?P=_q1)|
                [^>]*?
            )+>
        ''' % {'pdi': self._PLAYER_DIV_ID}
        for mobj in re.finditer(PLAYER_DIV_RE, webpage):
            if mobj.group(self._PLAYER_DIV_ID):
                player_el = mobj.group(0)
                break
        else:
            raise ExtractorError('no <div id="%s"> element found in webpage' %
                                 self._PLAYER_DIV_ID)
        return {
            re.sub(r'^data-(?:kwik_)?', '', k): v
            for k, v in extract_attributes(player_el).items()
            if k not in ('id',)
        }


class MegaTVComIE(MegaTVComBaseIE):
    IE_NAME = 'megatvcom'
    IE_DESC = 'megatv.com videos'
    _VALID_URL = r'https?://(?:www\.)?megatv\.com/(?:(?!\d{4})[^/]+/(?P<id>\d+)/[^/]+|\d{4}/\d{2}/\d{2}/.+)'
    _TESTS = [{
        'url': 'https://www.megatv.com/2021/10/23/egkainia-gia-ti-nea-skini-omega-tou-dimotikou-theatrou-peiraia/',
        'md5': '2ebe96661cb81854889053cebb661068',
        'info_dict': {
            'id': '520979',
            'ext': 'mp4',
            'title': 'md5:70eef71a9cd2c1ecff7ee428354dded2',
            'description': 'md5:0209fa8d318128569c0d256a5c404db1',
            'timestamp': 1634975747,
            'upload_date': '20211023',
        },
    }, {
        'url': 'https://www.megatv.com/tvshows/527800/epeisodio-65-12/',
        'md5': '8ab0c9d664cea11678670202b87bb2b1',
        'info_dict': {
            'id': '527800',
            'ext': 'mp4',
            'title': 'md5:fc322cb51f682eecfe2f54cd5ab3a157',
            'description': 'md5:b2b7ed3690a78f2a0156eb790fdc00df',
            'timestamp': 1636048859,
            'upload_date': '20211104',
        },
    }]

    def _match_article_id(self, webpage):
        ART_RE = r'''(?x)
            <article(?:
                id=(?P<_q2>["'])Article_(?P<article>\d+)(?P=_q2)|
                [^>]*?
            )+>
        '''
        return compat_str(self._search_regex(ART_RE, webpage, 'article_id',
                                             group='article'))

    def _real_extract(self, url):
        video_id = self._match_id(url)
        _is_article = video_id == 'None'
        webpage = self._download_webpage(url,
                                         'N/A' if _is_article else
                                         video_id)
        if _is_article:
            video_id = self._match_article_id(webpage)
        player_attrs = self._extract_player_attrs(webpage)
        title = player_attrs.get('label') or self._og_search_title(webpage)
        description = clean_html(get_element_by_class(
            'article-wrapper' if _is_article else 'story_content',
            webpage))
        if not description:
            description = self._og_search_description(webpage)
        thumbnail = player_attrs.get('image') or \
            self._og_search_thumbnail(webpage)
        timestamp = unified_timestamp(self._html_search_meta(
            'article:published_time', webpage))
        try:
            source = player_attrs['source']
        except KeyError:
            raise ExtractorError('no source found for %s' % video_id)
        formats = self._extract_m3u8_formats(source, video_id, 'mp4') \
            if determine_ext(source) == 'm3u8' else [{'url': source}]
        self._sort_formats(formats)
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'formats': formats,
        }


class MegaTVComEmbedIE(MegaTVComBaseIE):
    IE_NAME = 'megatvcom:embed'
    IE_DESC = 'megatv.com embedded videos'
    _VALID_URL = r'https?://(?:www\.)?megatv\.com/embed/?\?p=\d+'
    _TESTS = [{
        'url': 'https://www.megatv.com/embed/?p=2020520979',
        'md5': '2ebe96661cb81854889053cebb661068',
        'info_dict': {
            'id': '520979',
            'ext': 'mp4',
            'title': 'md5:70eef71a9cd2c1ecff7ee428354dded2',
            'description': 'md5:0209fa8d318128569c0d256a5c404db1',
            'timestamp': 1634975747,
            'upload_date': '20211023',
        },
    }, {
        'url': 'https://www.megatv.com/embed/?p=2020534081',
        'md5': 'f9a15e315acbf01b128e8efa3f75aab3',
        'info_dict': {
            'id': '534081',
            'ext': 'mp4',
            'title': 'md5:062e9d5976ef854d8bdc1f5724d9b2d0',
            'description': 'md5:36dbe4c3762d2ede9513eea8d07f6d52',
            'timestamp': 1636376351,
            'upload_date': '20211108',
        },
    }]

    @classmethod
    def _extract_urls(cls, webpage, origin_url=None):
        # make the scheme in _VALID_URL optional
        _URL_RE = r'(?:https?:)?//' + cls._VALID_URL.split('://', 1)[1]
        EMBED_RE = r'''(?x)
            <iframe[^>]+?src=(?P<_q1>%(quot_re)s)
            (?P<url>%(url_re)s)(?P=_q1)
        ''' % {'quot_re': r'["\']', 'url_re': _URL_RE}
        for mobj in re.finditer(EMBED_RE, webpage):
            url = unescapeHTML(mobj.group('url'))
            if url.startswith('//'):
                scheme = compat_urllib_parse_urlparse(origin_url).scheme \
                    if origin_url else 'https'
                url = '%s:%s' % (scheme, url)
            yield url

    def _match_canonical_url(self, webpage):
        LINK_RE = r'''(?x)
            <link(?:
                rel=(?P<_q1>%(quot_re)s)(?P<canonical>canonical)(?P=_q1)|
                href=(?P<_q2>%(quot_re)s)(?P<href>(?:(?!(?P=_q2)).)+)(?P=_q2)|
                [^>]*?
            )+>
        ''' % {'quot_re': r'["\']'}
        for mobj in re.finditer(LINK_RE, webpage):
            canonical, href = mobj.group('canonical', 'href')
            if canonical and href:
                return unescapeHTML(href)

    def _real_extract(self, url):
        webpage = self._download_webpage(url, 'N/A')
        player_attrs = self._extract_player_attrs(webpage)
        canonical_url = player_attrs.get('share_url') or \
            self._match_canonical_url(webpage)
        if not canonical_url:
            raise ExtractorError('canonical URL not found')
        video_id = compat_parse_qs(compat_urllib_parse_urlparse(
            canonical_url).query)['p'][0]
        # Resolve the canonical URL, following redirects, and defer to
        # megatvcom, as the metadata extracted from the embeddable page
        # sometimes are slightly different, for the same video
        canonical_url = self._request_webpage(
            HEADRequest(canonical_url), video_id,
            note='Resolve canonical URL',
            errnote='Could not resolve canonical URL').geturl()
        return self.url_result(
            canonical_url,
            MegaTVComIE.ie_key(),
            video_id
        )
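For reference, the attribute normalisation in _extract_player_attrs above boils down to stripping an optional data-/data-kwik_ prefix from the player placeholder's attributes. A self-contained illustration; the attribute names and values below are hypothetical, only the stripping logic mirrors the code:

import re

# What extract_attributes() might return for a hypothetical
# <div id="player_div_id" data-kwik_source="..." data-label="..." data-image="..."> element.
sample_attrs = {
    'id': 'player_div_id',
    'data-kwik_source': 'https://example.invalid/stream.m3u8',  # made-up URL
    'data-label': 'Sample title',
    'data-image': 'https://example.invalid/thumb.jpg',          # made-up URL
}

player_attrs = {
    re.sub(r'^data-(?:kwik_)?', '', k): v
    for k, v in sample_attrs.items()
    if k not in ('id',)
}
print(sorted(player_attrs))  # ['image', 'label', 'source']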

youtube_dl/extractor/youtube.py

@@ -9,6 +9,7 @@ import json
import os.path
import random
import re
import string
import time
import traceback
@@ -67,6 +68,7 @@ from ..utils import (
class YoutubeBaseInfoExtractor(InfoExtractor):
"""Provide base functions for Youtube extractors"""
_LOGIN_URL = 'https://accounts.google.com/ServiceLogin'
_TWOFACTOR_URL = 'https://accounts.google.com/signin/challenge'
@@ -138,7 +140,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
[2, 1, None, 1,
'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn',
None, [], 4],
1, [None, None, []], None, None, None, True
1, [None, None, []], None, None, None, True,
],
username,
]
@@ -160,7 +162,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
None, 1, None, [1, None, None, None, [password, None, True]],
[
None, None, [2, 1, None, 1, 'https://accounts.google.com/ServiceLogin?passive=true&continue=https%3A%2F%2Fwww.youtube.com%2Fsignin%3Fnext%3D%252F%26action_handle_signin%3Dtrue%26hl%3Den%26app%3Ddesktop%26feature%3Dsign_in_button&hl=en&service=youtube&uilel=3&requestPath=%2FServiceLogin&Page=PasswordSeparationSignIn', None, [], 4],
1, [None, None, []], None, None, None, True
1, [None, None, []], None, None, None, True,
]]
challenge_results = req(
@@ -213,7 +215,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
user_hash, None, 2, None,
[
9, None, None, None, None, None, None, None,
[None, tfa_code, True, 2]
[None, tfa_code, True, 2],
]]
tfa_results = req(
@@ -284,7 +286,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
},
}
@@ -385,7 +387,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'client': {
'clientName': 'WEB',
'clientVersion': '2.20201021.03.00',
}
},
},
'query': query,
}
@@ -462,7 +464,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
# (HTML, videodetails, metadata, renderers)
'name': ('content', 'author', (('ownerChannelName', None), 'title'), ['text']),
'url': ('href', 'ownerProfileUrl', 'vanityChannelUrl',
['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl'])
['navigationEndpoint', 'browseEndpoint', 'canonicalBaseUrl']),
}
if any((videodetails, metadata, renderers)):
result = (
@@ -671,7 +673,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader_url': r're:https?://(?:www\.)?youtube\.com/user/8KVIDEO',
'description': '',
'uploader': '8KVIDEO',
'title': 'UHDTV TEST 8K VIDEO.mp4'
'title': 'UHDTV TEST 8K VIDEO.mp4',
},
'params': {
'youtube_include_dash_manifest': True,
@@ -711,7 +713,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'uploader_url': r're:https?://(?:www\.)?youtube\.com/@theamazingatheist',
'title': 'Burning Everyone\'s Koran',
'description': 'SUBSCRIBE: http://www.youtube.com/saturninefilms \r\n\r\nEven Obama has taken a stand against freedom on this issue: http://www.huffingtonpost.com/2010/09/09/obama-gma-interview-quran_n_710282.html',
}
},
},
# Age-gated videos
{
@@ -839,7 +841,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
},
'expected_warnings': [
'DASH manifest missing',
]
],
},
# Olympics (https://github.com/ytdl-org/youtube-dl/issues/4431)
{
@@ -1820,8 +1822,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# cpn generation algorithm is reverse engineered from base.js.
# In fact it works even with dummy cpn.
CPN_ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(0, 16))
CPN_ALPHABET = string.ascii_letters + string.digits + '-_'
cpn = ''.join(CPN_ALPHABET[random.randint(0, 256) & 63] for _ in range(16))
# more consistent results setting it to right before the end
qs = parse_qs(playback_url)
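The CPN_ALPHABET rewrite in the hunk above is purely cosmetic; a quick self-contained check that the two spellings are the same 64-character alphabet (so masking a random value with & 63 still indexes all of it):

import string

OLD = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_'
NEW = string.ascii_letters + string.digits + '-_'
assert OLD == NEW and len(NEW) == 64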
@@ -1881,8 +1883,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
mobj = re.match(cls._VALID_URL, url, re.VERBOSE)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(2)
return video_id
return mobj.group(2)
def _extract_chapters_from_json(self, data, video_id, duration):
chapters_list = try_get(
@@ -2035,7 +2036,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
headers = {
'X-YouTube-Client-Name': '85',
'X-YouTube-Client-Version': '2.0',
'Origin': 'https://www.youtube.com'
'Origin': 'https://www.youtube.com',
}
video_info = self._call_api('player', query, video_id, fatal=False, headers=headers)
@@ -2064,8 +2065,8 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
return ''.join([r['text'] for r in runs if isinstance(r.get('text'), compat_str)])
search_meta = (
lambda x: self._html_search_meta(x, webpage, default=None)) \
if webpage else lambda x: None
(lambda x: self._html_search_meta(x, webpage, default=None))
if webpage else lambda _: None)
video_details = player_response.get('videoDetails') or {}
microformat = try_get(
@@ -2137,7 +2138,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
def build_fragments(f):
return LazyList({
'url': update_url_query(f['url'], {
'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize']))
'range': '{0}-{1}'.format(range_start, min(range_start + CHUNK_SIZE - 1, f['filesize'])),
})
} for range_start in range(0, f['filesize'], CHUNK_SIZE))
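For context, the 'range' values built here describe fixed-size byte windows over the file; a self-contained illustration (CHUNK_SIZE and the file size are made-up numbers, the real CHUNK_SIZE is defined elsewhere in this file):

CHUNK_SIZE = 10 * 1024 * 1024  # assumed value, for illustration only
filesize = 25 * 1024 * 1024    # made-up file size
print(['{0}-{1}'.format(start, min(start + CHUNK_SIZE - 1, filesize))
       for start in range(0, filesize, CHUNK_SIZE)])
# ['0-10485759', '10485760-20971519', '20971520-26214400']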
@@ -2236,7 +2237,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'protocol': 'http_dash_segments',
'fragments': build_fragments(dct),
} if dct['filesize'] else {
'downloader_options': {'http_chunk_size': CHUNK_SIZE} # No longer useful?
'downloader_options': {'http_chunk_size': CHUNK_SIZE}, # No longer useful?
})
formats.append(dct)
@@ -2414,9 +2415,9 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'is_live': is_live,
}
pctr = try_get(
pctr = traverse_obj(
player_response,
lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
('captions', 'playerCaptionsTracklistRenderer', T(dict)))
if pctr:
def process_language(container, base_url, lang_code, query):
lang_subs = []
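The try_get to traverse_obj change above is behaviour-preserving. A rough equivalence sketch using youtube-dl's own helpers (assumes a youtube-dl checkout on the import path; the sample player_response is made up):

from youtube_dl.utils import T, traverse_obj, try_get

player_response = {'captions': {'playerCaptionsTracklistRenderer': {'captionTracks': []}}}

old = try_get(player_response,
              lambda x: x['captions']['playerCaptionsTracklistRenderer'], dict)
new = traverse_obj(player_response,
                   ('captions', 'playerCaptionsTracklistRenderer', T(dict)))
assert old == new == {'captionTracks': []}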
@@ -2430,31 +2431,34 @@
})
container[lang_code] = lang_subs
subtitles = {}
for caption_track in (pctr.get('captionTracks') or []):
base_url = caption_track.get('baseUrl')
if not base_url:
continue
if caption_track.get('kind') != 'asr':
lang_code = caption_track.get('languageCode')
if not lang_code:
def process_subtitles():
subtitles = {}
for caption_track in traverse_obj(pctr, (
'captionTracks', lambda _, v: v.get('baseUrl'))):
base_url = self._yt_urljoin(caption_track['baseUrl'])
if not base_url:
continue
process_language(
subtitles, base_url, lang_code, {})
continue
automatic_captions = {}
for translation_language in (pctr.get('translationLanguages') or []):
translation_language_code = translation_language.get('languageCode')
if not translation_language_code:
if caption_track.get('kind') != 'asr':
lang_code = caption_track.get('languageCode')
if not lang_code:
continue
process_language(
subtitles, base_url, lang_code, {})
continue
process_language(
automatic_captions, base_url, translation_language_code,
{'tlang': translation_language_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
automatic_captions = {}
for translation_language in traverse_obj(pctr, (
'translationLanguages', lambda _, v: v.get('languageCode'))):
translation_language_code = translation_language['languageCode']
process_language(
automatic_captions, base_url, translation_language_code,
{'tlang': translation_language_code})
info['automatic_captions'] = automatic_captions
info['subtitles'] = subtitles
process_subtitles()
parsed_url = compat_urllib_parse_urlparse(url)
for component in [parsed_url.fragment, parsed_url.query]:
for component in (parsed_url.fragment, parsed_url.query):
query = compat_parse_qs(component)
for k, v in query.items():
for d_k, s_ks in [('start', ('start', 't')), ('end', ('end',))]:
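The unified view of the hunk above has lost its +/- markers, so the removed lines and the refactored lines run together. For readability, here is a sketch of how the new helper reads after these commits, pieced together from the added lines: the indentation is assumed, the names self, pctr, info and process_language come from the surrounding _real_extract, and _yt_urljoin is taken to be an existing helper (not shown in this diff) that completes scheme- or host-relative caption baseUrls, per the "Ensure subtitle URLs are complete" commit.

def process_subtitles():
    subtitles = {}
    for caption_track in traverse_obj(pctr, (
            'captionTracks', lambda _, v: v.get('baseUrl'))):
        base_url = self._yt_urljoin(caption_track['baseUrl'])
        if not base_url:
            continue
        if caption_track.get('kind') != 'asr':
            lang_code = caption_track.get('languageCode')
            if not lang_code:
                continue
            process_language(
                subtitles, base_url, lang_code, {})
            continue
        automatic_captions = {}
        for translation_language in traverse_obj(pctr, (
                'translationLanguages', lambda _, v: v.get('languageCode'))):
            translation_language_code = translation_language['languageCode']
            process_language(
                automatic_captions, base_url, translation_language_code,
                {'tlang': translation_language_code})
        info['automatic_captions'] = automatic_captions
    info['subtitles'] = subtitles

process_subtitles()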
@@ -2684,7 +2688,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'title': 'Super Cooper Shorts - Shorts',
'uploader': 'Super Cooper Shorts',
'uploader_id': '@SuperCooperShorts',
}
},
}, {
# Channel that does not have a Shorts tab. Test should just download videos on Home tab instead
'url': 'https://www.youtube.com/@emergencyawesome/shorts',
@@ -2738,7 +2742,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'description': 'md5:609399d937ea957b0f53cbffb747a14c',
'uploader': 'ThirstForScience',
'uploader_id': '@ThirstForScience',
}
},
}, {
'url': 'https://www.youtube.com/c/ChristophLaimer/playlists',
'only_matching': True,
@@ -3037,7 +3041,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'uploader': '3Blue1Brown',
'uploader_id': '@3blue1brown',
'channel_id': 'UCYO_jab_esuFRV4b17AJtAw',
}
},
}]
@classmethod
@@ -3335,7 +3339,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'client': {
'clientName': 'WEB',
'clientVersion': client_version,
}
},
}
visitor_data = try_get(context, lambda x: x['client']['visitorData'], compat_str)
@@ -3354,7 +3358,7 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
headers['x-goog-visitor-id'] = visitor_data
data['continuation'] = continuation['continuation']
data['clickTracking'] = {
'clickTrackingParams': continuation['itct']
'clickTrackingParams': continuation['itct'],
}
count = 0
retries = 3
@@ -3613,7 +3617,7 @@ class YoutubePlaylistIE(InfoExtractor):
'uploader': 'milan',
'uploader_id': '@milan5503',
'channel_id': 'UCEI1-PVPcYXjB73Hfelbmaw',
}
},
}, {
'url': 'http://www.youtube.com/embed/_xDOZElKyNU?list=PLsyOSbh5bs16vubvKePAQ1x3PhKavfBIl',
'playlist_mincount': 455,
@@ -3623,7 +3627,7 @@ class YoutubePlaylistIE(InfoExtractor):
'uploader': 'LBK',
'uploader_id': '@music_king',
'channel_id': 'UC21nz3_MesPLqtDqwdvnoxA',
}
},
}, {
'url': 'TLGGrESM50VT6acwMjAyMjAxNw',
'only_matching': True,
@@ -3734,7 +3738,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubeBaseInfoExtractor):
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
},
}]
def _get_n_results(self, query, n):
@@ -3754,7 +3758,7 @@ class YoutubeSearchDateIE(YoutubeSearchIE):
'info_dict': {
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
}
},
}]
@@ -3769,7 +3773,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
'id': 'youtube-dl test video',
'title': 'youtube-dl test video',
},
'params': {'playlistend': 5}
'params': {'playlistend': 5},
}, {
'url': 'https://www.youtube.com/results?q=test&sp=EgQIBBgB',
'only_matching': True,
@@ -3785,6 +3789,7 @@ class YoutubeSearchURLIE(YoutubeBaseInfoExtractor):
class YoutubeFeedsInfoExtractor(YoutubeTabIE):
"""
Base class for feed extractors
Subclasses must define the _FEED_NAME property.
"""
_LOGIN_REQUIRED = True