Mirror of https://github.com/ytdl-org/youtube-dl (synced 2025-10-24 00:58:37 +09:00)

Compare commits: aed617e311...2021.01.03 (58 commits)
Commits in this comparison (SHA1):

- 8e953dcbb1
- f4afb9a6a8
- d5b8cf093c
- 5c6e84c0ff
- 1aaee908b9
- b2d9fd9c9f
- bc2f83b95e
- 85de33b04e
- 7dfd966848
- a25d03d7cb
- cabfd4b1f0
- 7b643d4cd0
- 1f1d01d498
- 21a42e2588
- 2df93a0c4a
- 75972e200d
- d0d838638c
- 8c17afc471
- 40d66e07df
- ab89a8678b
- 4d7d056909
- c35bc82606
- 2f56caf083
- 4066945919
- 2a84694b1e
- 4046ffe1e1
- d1d0612160
- 7b0f04ed1f
- 2e21b06ea2
- a6f75e6e89
- bd18824c2a
- bdd044e67b
- f7e95fb2a0
- 9dd674e1d2
- 9c1e164e0c
- c706fbe9fe
- ebdcf70b0d
- 5966095e65
- 9ee984fc76
- 53528e1d23
- c931c4b8dd
- 7acd042bbb
- bcfe485e01
- 479cc6d5a1
- 38286ee729
- 1a95953867
- 71febd1c52
- f1bc56c99b
- 64e419bd73
- 782ea947b4
- f27224d57b
- c007188598
- af93ecfd88
- 794771a164
- 6f2eaaf73d
- 4c7a4dbc4d
- f86b299d0e
- e474996541
6  .github/ISSUE_TEMPLATE/1_broken_site.md (vendored)
@@ -18,7 +18,7 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.01.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -26,7 +26,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a broken site support
- [ ] I've verified that I'm running youtube-dl version **2020.12.26**
- [ ] I've verified that I'm running youtube-dl version **2021.01.03**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [ ] I've searched the bugtracker for similar issues including closed ones

@@ -41,7 +41,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2020.12.26
[debug] youtube-dl version 2021.01.03
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}
.github/ISSUE_TEMPLATE/2_site_support_request.md (vendored)

@@ -19,7 +19,7 @@ labels: 'site-support-request'

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.01.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that site you are requesting is not dedicated to copyright infringement, see https://yt-dl.org/copyright-infringement. youtube-dl does not support such sites. In order for site support request to be accepted all provided example URLs should not violate any copyrights.
- Search the bugtracker for similar site support requests: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a new site support request
- [ ] I've verified that I'm running youtube-dl version **2020.12.26**
- [ ] I've verified that I'm running youtube-dl version **2021.01.03**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that none of provided URLs violate any copyrights
- [ ] I've searched the bugtracker for similar site support requests including closed ones
.github/ISSUE_TEMPLATE/3_site_feature_request.md (vendored)

@@ -18,13 +18,13 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.01.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->

- [ ] I'm reporting a site feature request
- [ ] I've verified that I'm running youtube-dl version **2020.12.26**
- [ ] I've verified that I'm running youtube-dl version **2021.01.03**
- [ ] I've searched the bugtracker for similar site feature requests including closed ones
6  .github/ISSUE_TEMPLATE/4_bug_report.md (vendored)
@@ -18,7 +18,7 @@ title: ''

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.01.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Make sure that all provided video/audio/playlist URLs (if any) are alive and playable in a browser.
- Make sure that all URLs and arguments with special characters are properly quoted or escaped as explained in http://yt-dl.org/escape.
- Search the bugtracker for similar issues: http://yt-dl.org/search-issues. DO NOT post duplicates.

@@ -27,7 +27,7 @@ Carefully read and work through this check list in order to prevent the most com
-->

- [ ] I'm reporting a broken site support issue
- [ ] I've verified that I'm running youtube-dl version **2020.12.26**
- [ ] I've verified that I'm running youtube-dl version **2021.01.03**
- [ ] I've checked that all provided URLs are alive and playable in a browser
- [ ] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [ ] I've searched the bugtracker for similar bug reports including closed ones

@@ -43,7 +43,7 @@ Add the `-v` flag to your command line you run youtube-dl with (`youtube-dl -v <
[debug] User config: []
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
[debug] Encodings: locale cp1251, fs mbcs, out cp866, pref cp1251
[debug] youtube-dl version 2020.12.26
[debug] youtube-dl version 2021.01.03
[debug] Python version 2.7.11 - Windows-2003Server-5.2.3790-SP2
[debug] exe versions: ffmpeg N-75573-g1d0487f, ffprobe N-75573-g1d0487f, rtmpdump 2.4
[debug] Proxy map: {}
4  .github/ISSUE_TEMPLATE/5_feature_request.md (vendored)
@@ -19,13 +19,13 @@ labels: 'request'

<!--
Carefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2020.12.26. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2021.01.03. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.
- Search the bugtracker for similar feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->

- [ ] I'm reporting a feature request
- [ ] I've verified that I'm running youtube-dl version **2020.12.26**
- [ ] I've verified that I'm running youtube-dl version **2021.01.03**
- [ ] I've searched the bugtracker for similar feature requests including closed ones
65  ChangeLog
@@ -1,3 +1,68 @@
version 2021.01.03

Extractors
* [nrk] Improve series metadata extraction (#27473)
+ [nrk] Extract subtitles
* [nrk] Fix age limit extraction
* [nrk] Improve video id extraction
+ [nrk] Add support for podcasts (#27634, #27635)
* [nrk] Generalize and delegate all item extractors to nrk
+ [nrk] Add support for mp3 formats
* [nrktv] Switch to playback endpoint
* [vvvvid] Fix season metadata extraction (#18130)
* [stitcher] Fix extraction (#20811, #27606)
* [acast] Fix extraction (#21444, #27612, #27613)
+ [arcpublishing] Add support for arcpublishing.com (#2298, #9340, #17200)
+ [sky] Add support for Sports News articles and Brighcove videos (#13054)
+ [vvvvid] Extract akamai formats
* [vvvvid] Skip unplayable episodes (#27599)
* [yandexvideo] Fix extraction for Python 3.4


version 2020.12.31

Core
* [utils] Accept only supported protocols in url_or_none
* [YoutubeDL] Allow format filtering using audio language (#16209)

Extractors
+ [redditr] Extract all thumbnails (#27503)
* [vvvvid] Improve info extraction
+ [vvvvid] Add support for playlists (#18130, #27574)
+ [yandexdisk] Extract info from webpage
* [yandexdisk] Fix extraction (#17861, #27131)
* [yandexvideo] Use old API call as fallback
* [yandexvideo] Fix extraction (#25000)
- [nbc] Remove CSNNE extractor
* [nbc] Fix NBCSport VPlayer URL extraction (#16640)
+ [aenetworks] Add support for biography.com (#3863)
* [uktvplay] Match new video URLs (#17909)
* [sevenplay] Detect API errors
* [tenplay] Fix format extraction (#26653)
* [brightcove] Raise error for DRM protected videos (#23467, #27568)


version 2020.12.29

Extractors
* [youtube] Improve yt initial data extraction (#27524)
* [youtube:tab] Improve URL matching #27559)
* [youtube:tab] Restore retry on browse requests (#27313, #27564)
* [aparat] Fix extraction (#22285, #22611, #23348, #24354, #24591, #24904,
  #25418, #26070, #26350, #26738, #27563)
- [brightcove] Remove sonyliv specific code
* [piksel] Improve format extraction
+ [zype] Add support for uplynk videos
+ [toggle] Add support for live.mewatch.sg (#27555)
+ [go] Add support for fxnow.fxnetworks.com (#13972, #22467, #23754, #26826)
* [teachable] Improve embed detection (#26923)
* [mitele] Fix free video extraction (#24624, #25827, #26757)
* [telecinco] Fix extraction
* [youtube] Update invidious.snopyta.org (#22667)
* [amcnetworks] Improve auth only video detection (#27548)
+ [generic] Add support for VHX Embeds (#27546)


version 2020.12.26

Extractors
README.md

@@ -678,6 +678,7 @@ Also filtering work for comparisons `=` (equals), `^=` (starts with), `$=` (ends
- `container`: Name of the container format
- `protocol`: The protocol that will be used for the actual download, lower-case (`http`, `https`, `rtsp`, `rtmp`, `rtmpe`, `mms`, `f4m`, `ism`, `http_dash_segments`, `m3u8`, or `m3u8_native`)
- `format_id`: A short description of the format
- `language`: Language code

Any string comparison may be prefixed with negation `!` in order to produce an opposite comparison, e.g. `!*=` (does not contain).
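The `language` field documented above corresponds to the 2020.12.31 ChangeLog entry "[YoutubeDL] Allow format filtering using audio language (#16209)". A minimal sketch of using the new filter through the Python API; the format string is illustrative and falls back to `best` when an extractor reports no language metadata:

```python
import youtube_dl

# Prefer a format whose audio language is reported as English,
# otherwise take the overall best format.
ydl_opts = {'format': 'best[language=en]/best'}

with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKcj'])
```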
docs/supportedsites.md

@@ -57,6 +57,7 @@
- **appletrailers**
- **appletrailers:section**
- **archive.org**: archive.org videos
- **ArcPublishing**
- **ARD**
- **ARD:mediathek**
- **ARDBetaMediathek**

@@ -104,6 +105,7 @@
- **BilibiliAudioAlbum**
- **BiliBiliPlayer**
- **BioBioChileTV**
- **Biography**
- **BIQLE**
- **BitChute**
- **BitChuteChannel**

@@ -197,7 +199,6 @@
- **CrooksAndLiars**
- **crunchyroll**
- **crunchyroll:playlist**
- **CSNNE**
- **CSpan**: C-SPAN
- **CtsNews**: 華視新聞
- **CTV**

@@ -317,7 +318,6 @@
- **Funk**
- **Fusion**
- **Fux**
- **FXNetworks**
- **Gaia**
- **GameInformer**
- **GameSpot**

@@ -350,6 +350,7 @@
- **hgtv.com:show**
- **HiDive**
- **HistoricFilms**
- **history:player**
- **history:topic**: History.com Topic
- **hitbox**
- **hitbox:live**

@@ -610,6 +611,7 @@
- **Npr**
- **NRK**
- **NRKPlaylist**
- **NRKRadioPodkast**
- **NRKSkole**: NRK Skole
- **NRKTV**: NRK TV and NRK Radio
- **NRKTVDirekte**: NRK TV Direkte and NRK Radio Direkte

@@ -813,12 +815,13 @@
- **ShowRoomLive**
- **Sina**
- **sky.it**
- **sky:news**
- **sky:sports**
- **sky:sports:news**
- **skyacademy.it**
- **SkylineWebcams**
- **SkyNews**
- **skynewsarabia:article**
- **skynewsarabia:video**
- **SkySports**
- **Slideshare**
- **SlidesLive**
- **Slutload**

@@ -1089,6 +1092,7 @@
- **vube**: Vube.com
- **VuClip**
- **VVVVID**
- **VVVVIDShow**
- **VyboryMos**
- **Vzaar**
- **Wakanim**
test/test_utils.py

@@ -554,6 +554,11 @@ class TestUtil(unittest.TestCase):
self.assertEqual(url_or_none('http$://foo.de'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('//foo.de'), '//foo.de')
self.assertEqual(url_or_none('s3://foo.de'), None)
self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')

def test_parse_age_limit(self):
self.assertEqual(parse_age_limit(None), None)
youtube_dl/YoutubeDL.py

@@ -1083,7 +1083,7 @@ class YoutubeDL(object):
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id|language)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
@@ -2,21 +2,47 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
import functools
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
try_get,
|
||||
unified_timestamp,
|
||||
OnDemandPagedList,
|
||||
parse_iso8601,
|
||||
)
|
||||
|
||||
|
||||
class ACastIE(InfoExtractor):
|
||||
class ACastBaseIE(InfoExtractor):
|
||||
def _extract_episode(self, episode, show_info):
|
||||
title = episode['title']
|
||||
info = {
|
||||
'id': episode['id'],
|
||||
'display_id': episode.get('episodeUrl'),
|
||||
'url': episode['url'],
|
||||
'title': title,
|
||||
'description': clean_html(episode.get('description') or episode.get('summary')),
|
||||
'thumbnail': episode.get('image'),
|
||||
'timestamp': parse_iso8601(episode.get('publishDate')),
|
||||
'duration': int_or_none(episode.get('duration')),
|
||||
'filesize': int_or_none(episode.get('contentLength')),
|
||||
'season_number': int_or_none(episode.get('season')),
|
||||
'episode': title,
|
||||
'episode_number': int_or_none(episode.get('episode')),
|
||||
}
|
||||
info.update(show_info)
|
||||
return info
|
||||
|
||||
def _extract_show_info(self, show):
|
||||
return {
|
||||
'creator': show.get('author'),
|
||||
'series': show.get('title'),
|
||||
}
|
||||
|
||||
def _call_api(self, path, video_id, query=None):
|
||||
return self._download_json(
|
||||
'https://feeder.acast.com/api/v1/shows/' + path, video_id, query=query)
|
||||
|
||||
|
||||
class ACastIE(ACastBaseIE):
|
||||
IE_NAME = 'acast'
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
@@ -28,15 +54,15 @@ class ACastIE(InfoExtractor):
|
||||
'''
|
||||
_TESTS = [{
|
||||
'url': 'https://www.acast.com/sparpodcast/2.raggarmordet-rosterurdetforflutna',
|
||||
'md5': '16d936099ec5ca2d5869e3a813ee8dc4',
|
||||
'md5': 'f5598f3ad1e4776fed12ec1407153e4b',
|
||||
'info_dict': {
|
||||
'id': '2a92b283-1a75-4ad8-8396-499c641de0d9',
|
||||
'ext': 'mp3',
|
||||
'title': '2. Raggarmordet - Röster ur det förflutna',
|
||||
'description': 'md5:4f81f6d8cf2e12ee21a321d8bca32db4',
|
||||
'description': 'md5:a992ae67f4d98f1c0141598f7bebbf67',
|
||||
'timestamp': 1477346700,
|
||||
'upload_date': '20161024',
|
||||
'duration': 2766.602563,
|
||||
'duration': 2766,
|
||||
'creator': 'Anton Berg & Martin Johnson',
|
||||
'series': 'Spår',
|
||||
'episode': '2. Raggarmordet - Röster ur det förflutna',
|
||||
@@ -45,7 +71,7 @@ class ACastIE(InfoExtractor):
|
||||
'url': 'http://embed.acast.com/adambuxton/ep.12-adam-joeschristmaspodcast2015',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://play.acast.com/s/rattegangspodden/s04e09-styckmordet-i-helenelund-del-22',
|
||||
'url': 'https://play.acast.com/s/rattegangspodden/s04e09styckmordetihelenelund-del2-2',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://play.acast.com/s/sparpodcast/2a92b283-1a75-4ad8-8396-499c641de0d9',
|
||||
@@ -54,40 +80,14 @@ class ACastIE(InfoExtractor):
|
||||
|
||||
def _real_extract(self, url):
|
||||
channel, display_id = re.match(self._VALID_URL, url).groups()
|
||||
s = self._download_json(
|
||||
'https://feeder.acast.com/api/v1/shows/%s/episodes/%s' % (channel, display_id),
|
||||
display_id)
|
||||
media_url = s['url']
|
||||
if re.search(r'[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}', display_id):
|
||||
episode_url = s.get('episodeUrl')
|
||||
if episode_url:
|
||||
display_id = episode_url
|
||||
else:
|
||||
channel, display_id = re.match(self._VALID_URL, s['link']).groups()
|
||||
cast_data = self._download_json(
|
||||
'https://play-api.acast.com/splash/%s/%s' % (channel, display_id),
|
||||
display_id)['result']
|
||||
e = cast_data['episode']
|
||||
title = e.get('name') or s['title']
|
||||
return {
|
||||
'id': compat_str(e['id']),
|
||||
'display_id': display_id,
|
||||
'url': media_url,
|
||||
'title': title,
|
||||
'description': e.get('summary') or clean_html(e.get('description') or s.get('description')),
|
||||
'thumbnail': e.get('image'),
|
||||
'timestamp': unified_timestamp(e.get('publishingDate') or s.get('publishDate')),
|
||||
'duration': float_or_none(e.get('duration') or s.get('duration')),
|
||||
'filesize': int_or_none(e.get('contentLength')),
|
||||
'creator': try_get(cast_data, lambda x: x['show']['author'], compat_str),
|
||||
'series': try_get(cast_data, lambda x: x['show']['name'], compat_str),
|
||||
'season_number': int_or_none(e.get('seasonNumber')),
|
||||
'episode': title,
|
||||
'episode_number': int_or_none(e.get('episodeNumber')),
|
||||
}
|
||||
episode = self._call_api(
|
||||
'%s/episodes/%s' % (channel, display_id),
|
||||
display_id, {'showInfo': 'true'})
|
||||
return self._extract_episode(
|
||||
episode, self._extract_show_info(episode.get('show') or {}))
|
||||
|
||||
|
||||
class ACastChannelIE(InfoExtractor):
|
||||
class ACastChannelIE(ACastBaseIE):
|
||||
IE_NAME = 'acast:channel'
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://
|
||||
@@ -102,34 +102,24 @@ class ACastChannelIE(InfoExtractor):
|
||||
'info_dict': {
|
||||
'id': '4efc5294-5385-4847-98bd-519799ce5786',
|
||||
'title': 'Today in Focus',
|
||||
'description': 'md5:9ba5564de5ce897faeb12963f4537a64',
|
||||
'description': 'md5:c09ce28c91002ce4ffce71d6504abaae',
|
||||
},
|
||||
'playlist_mincount': 35,
|
||||
'playlist_mincount': 200,
|
||||
}, {
|
||||
'url': 'http://play.acast.com/s/ft-banking-weekly',
|
||||
'only_matching': True,
|
||||
}]
|
||||
_API_BASE_URL = 'https://play.acast.com/api/'
|
||||
_PAGE_SIZE = 10
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
return False if ACastIE.suitable(url) else super(ACastChannelIE, cls).suitable(url)
|
||||
|
||||
def _fetch_page(self, channel_slug, page):
|
||||
casts = self._download_json(
|
||||
self._API_BASE_URL + 'channels/%s/acasts?page=%s' % (channel_slug, page),
|
||||
channel_slug, note='Download page %d of channel data' % page)
|
||||
for cast in casts:
|
||||
yield self.url_result(
|
||||
'https://play.acast.com/s/%s/%s' % (channel_slug, cast['url']),
|
||||
'ACast', cast['id'])
|
||||
|
||||
def _real_extract(self, url):
|
||||
channel_slug = self._match_id(url)
|
||||
channel_data = self._download_json(
|
||||
self._API_BASE_URL + 'channels/%s' % channel_slug, channel_slug)
|
||||
entries = OnDemandPagedList(functools.partial(
|
||||
self._fetch_page, channel_slug), self._PAGE_SIZE)
|
||||
return self.playlist_result(entries, compat_str(
|
||||
channel_data['id']), channel_data['name'], channel_data.get('description'))
|
||||
show_slug = self._match_id(url)
|
||||
show = self._call_api(show_slug, show_slug)
|
||||
show_info = self._extract_show_info(show)
|
||||
entries = []
|
||||
for episode in (show.get('episodes') or []):
|
||||
entries.append(self._extract_episode(episode, show_info))
|
||||
return self.playlist_result(
|
||||
entries, show.get('id'), show.get('title'), show.get('description'))
|
||||
|
@@ -6,6 +6,7 @@ import re
|
||||
from .theplatform import ThePlatformIE
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
GeoRestrictedError,
|
||||
int_or_none,
|
||||
update_url_query,
|
||||
urlencode_postdata,
|
||||
@@ -28,6 +29,7 @@ class AENetworksBaseIE(ThePlatformIE):
|
||||
'lifetimemovieclub.com': ('LIFETIMEMOVIECLUB', 'lmc'),
|
||||
'fyi.tv': ('FYI', 'fyi'),
|
||||
'historyvault.com': (None, 'historyvault'),
|
||||
'biography.com': (None, 'biography'),
|
||||
}
|
||||
|
||||
def _extract_aen_smil(self, smil_url, video_id, auth=None):
|
||||
@@ -54,6 +56,8 @@ class AENetworksBaseIE(ThePlatformIE):
|
||||
tp_formats, tp_subtitles = self._extract_theplatform_smil(
|
||||
m_url, video_id, 'Downloading %s SMIL data' % (q.get('switch') or q['assetTypes']))
|
||||
except ExtractorError as e:
|
||||
if isinstance(e, GeoRestrictedError):
|
||||
raise
|
||||
last_e = e
|
||||
continue
|
||||
formats.extend(tp_formats)
|
||||
@@ -67,6 +71,34 @@ class AENetworksBaseIE(ThePlatformIE):
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
def _extract_aetn_info(self, domain, filter_key, filter_value, url):
|
||||
requestor_id, brand = self._DOMAIN_MAP[domain]
|
||||
result = self._download_json(
|
||||
'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
|
||||
filter_value, query={'filter[%s]' % filter_key: filter_value})['results'][0]
|
||||
title = result['title']
|
||||
video_id = result['id']
|
||||
media_url = result['publicUrl']
|
||||
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
|
||||
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
|
||||
info = self._parse_theplatform_metadata(theplatform_metadata)
|
||||
auth = None
|
||||
if theplatform_metadata.get('AETN$isBehindWall'):
|
||||
resource = self._get_mvpd_resource(
|
||||
requestor_id, theplatform_metadata['title'],
|
||||
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
|
||||
theplatform_metadata['ratings'][0]['rating'])
|
||||
auth = self._extract_mvpd_auth(
|
||||
url, video_id, requestor_id, resource)
|
||||
info.update(self._extract_aen_smil(media_url, video_id, auth))
|
||||
info.update({
|
||||
'title': title,
|
||||
'series': result.get('seriesName'),
|
||||
'season_number': int_or_none(result.get('tvSeasonNumber')),
|
||||
'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
|
||||
})
|
||||
return info
|
||||
|
||||
|
||||
class AENetworksIE(AENetworksBaseIE):
|
||||
IE_NAME = 'aenetworks'
|
||||
@@ -139,32 +171,7 @@ class AENetworksIE(AENetworksBaseIE):
|
||||
|
||||
def _real_extract(self, url):
|
||||
domain, canonical = re.match(self._VALID_URL, url).groups()
|
||||
requestor_id, brand = self._DOMAIN_MAP[domain]
|
||||
result = self._download_json(
|
||||
'https://feeds.video.aetnd.com/api/v2/%s/videos' % brand,
|
||||
canonical, query={'filter[canonical]': '/' + canonical})['results'][0]
|
||||
title = result['title']
|
||||
video_id = result['id']
|
||||
media_url = result['publicUrl']
|
||||
theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
|
||||
r'https?://link\.theplatform\.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
|
||||
info = self._parse_theplatform_metadata(theplatform_metadata)
|
||||
auth = None
|
||||
if theplatform_metadata.get('AETN$isBehindWall'):
|
||||
resource = self._get_mvpd_resource(
|
||||
requestor_id, theplatform_metadata['title'],
|
||||
theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
|
||||
theplatform_metadata['ratings'][0]['rating'])
|
||||
auth = self._extract_mvpd_auth(
|
||||
url, video_id, requestor_id, resource)
|
||||
info.update(self._extract_aen_smil(media_url, video_id, auth))
|
||||
info.update({
|
||||
'title': title,
|
||||
'series': result.get('seriesName'),
|
||||
'season_number': int_or_none(result.get('tvSeasonNumber')),
|
||||
'episode_number': int_or_none(result.get('tvSeasonEpisodeNumber')),
|
||||
})
|
||||
return info
|
||||
return self._extract_aetn_info(domain, 'canonical', '/' + canonical, url)
|
||||
|
||||
|
||||
class AENetworksListBaseIE(AENetworksBaseIE):
|
||||
@@ -294,3 +301,42 @@ class HistoryTopicIE(AENetworksBaseIE):
|
||||
return self.url_result(
|
||||
'http://www.history.com/videos/' + display_id,
|
||||
AENetworksIE.ie_key())
|
||||
|
||||
|
||||
class HistoryPlayerIE(AENetworksBaseIE):
|
||||
IE_NAME = 'history:player'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:history|biography)\.com)/player/(?P<id>\d+)'
|
||||
_TESTS = []
|
||||
|
||||
def _real_extract(self, url):
|
||||
domain, video_id = re.match(self._VALID_URL, url).groups()
|
||||
return self._extract_aetn_info(domain, 'id', video_id, url)
|
||||
|
||||
|
||||
class BiographyIE(AENetworksBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?biography\.com/video/(?P<id>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.biography.com/video/vincent-van-gogh-full-episode-2075049808',
|
||||
'info_dict': {
|
||||
'id': '30322987',
|
||||
'ext': 'mp4',
|
||||
'title': 'Vincent Van Gogh - Full Episode',
|
||||
'description': 'A full biography about the most influential 20th century painter, Vincent Van Gogh.',
|
||||
'timestamp': 1311970571,
|
||||
'upload_date': '20110729',
|
||||
'uploader': 'AENE-NEW',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': ['ThePlatform'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
player_url = self._search_regex(
|
||||
r'<phoenix-iframe[^>]+src="(%s)' % HistoryPlayerIE._VALID_URL,
|
||||
webpage, 'player URL')
|
||||
return self.url_result(player_url, HistoryPlayerIE.ie_key())
|
||||
|
youtube_dl/extractor/aparat.py

@@ -3,6 +3,7 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
get_element_by_id,
int_or_none,
merge_dicts,
mimetype2ext,

@@ -39,23 +40,15 @@ class AparatIE(InfoExtractor):
webpage = self._download_webpage(url, video_id, fatal=False)

if not webpage:
# Note: There is an easier-to-parse configuration at
# http://www.aparat.com/video/video/config/videohash/%video_id
# but the URL in there does not work
webpage = self._download_webpage(
'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
video_id)

options = self._parse_json(
self._search_regex(
r'options\s*=\s*JSON\.parse\(\s*(["\'])(?P<value>(?:(?!\1).)+)\1\s*\)',
webpage, 'options', group='value'),
video_id)

player = options['plugins']['sabaPlayerPlugin']
options = self._parse_json(self._search_regex(
r'options\s*=\s*({.+?})\s*;', webpage, 'options'), video_id)

formats = []
for sources in player['multiSRC']:
for sources in (options.get('multiSRC') or []):
for item in sources:
if not isinstance(item, dict):
continue

@@ -85,11 +78,12 @@ class AparatIE(InfoExtractor):
info = self._search_json_ld(webpage, video_id, default={})

if not info.get('title'):
info['title'] = player['title']
info['title'] = get_element_by_id('videoTitle', webpage) or \
self._html_search_meta(['og:title', 'twitter:title', 'DC.Title', 'title'], webpage, fatal=True)

return merge_dicts(info, {
'id': video_id,
'thumbnail': url_or_none(options.get('poster')),
'duration': int_or_none(player.get('duration')),
'duration': int_or_none(options.get('duration')),
'formats': formats,
})
174  youtube_dl/extractor/arcpublishing.py (new file)
@@ -0,0 +1,174 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
parse_iso8601,
try_get,
)


class ArcPublishingIE(InfoExtractor):
_UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
_VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
_TESTS = [{
# https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'only_matching': True,
}, {
# https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
'only_matching': True,
}, {
# https://www.actionnewsjax.com/video/live-stream/
'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
'only_matching': True,
}, {
# https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
'only_matching': True,
}, {
# https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
'only_matching': True,
}, {
# https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
'only_matching': True,
}, {
# https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
'only_matching': True,
}, {
# https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
'only_matching': True,
}, {
# https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
'only_matching': True,
}, {
# https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
'only_matching': True,
}, {
# https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
'only_matching': True,
}, {
# https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
'only_matching': True,
}]
_POWA_DEFAULTS = [
(['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
([
'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
], 'video-api-cdn.%s.arcpublishing.com/api'),
]

@staticmethod
def _extract_urls(webpage):
entries = []
# https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
powa = extract_attributes(powa_el) or {}
org = powa.get('data-org')
uuid = powa.get('data-uuid')
if org and uuid:
entries.append('arcpublishing:%s:%s' % (org, uuid))
return entries

def _real_extract(self, url):
org, uuid = re.match(self._VALID_URL, url).groups()
for orgs, tmpl in self._POWA_DEFAULTS:
if org in orgs:
base_api_tmpl = tmpl
break
else:
base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
if org == 'wapo':
org = 'washpost'
video = self._download_json(
'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
uuid, query={'uuid': uuid})[0]
title = video['headlines']['basic']
is_live = video.get('status') == 'live'

urls = []
formats = []
for s in video.get('streams', []):
s_url = s.get('url')
if not s_url or s_url in urls:
continue
urls.append(s_url)
stream_type = s.get('stream_type')
if stream_type == 'smil':
smil_formats = self._extract_smil_formats(
s_url, uuid, fatal=False)
for f in smil_formats:
if f['url'].endswith('/cfx/st'):
f['app'] = 'cfx/st'
if not f['play_path'].startswith('mp4:'):
f['play_path'] = 'mp4:' + f['play_path']
if isinstance(f['tbr'], float):
f['vbr'] = f['tbr'] * 1000
del f['tbr']
f['format_id'] = 'rtmp-%d' % f['vbr']
formats.extend(smil_formats)
elif stream_type in ('ts', 'hls'):
m3u8_formats = self._extract_m3u8_formats(
s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False)
if all([f.get('acodec') == 'none' for f in m3u8_formats]):
continue
for f in m3u8_formats:
if f.get('acodec') == 'none':
f['preference'] = -40
elif f.get('vcodec') == 'none':
f['preference'] = -50
height = f.get('height')
if not height:
continue
vbr = self._search_regex(
r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
if vbr:
f['vbr'] = int(vbr)
formats.extend(m3u8_formats)
else:
vbr = int_or_none(s.get('bitrate'))
formats.append({
'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
'vbr': vbr,
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'filesize': int_or_none(s.get('filesize')),
'url': s_url,
'preference': -1,
})
self._sort_formats(
formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id'))

subtitles = {}
for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
subtitle_url = subtitle.get('url')
if subtitle_url:
subtitles.setdefault('en', []).append({'url': subtitle_url})

return {
'id': uuid,
'title': self._live_title(title) if is_live else title,
'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
'description': try_get(video, lambda x: x['subheadlines']['basic']),
'formats': formats,
'duration': int_or_none(video.get('duration'), 100),
'timestamp': parse_iso8601(video.get('created_date')),
'subtitles': subtitles,
'is_live': is_live,
}
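The tests above show the `arcpublishing:<org>:<uuid>` pseudo-URL scheme the new extractor registers, and `_extract_urls` lets the generic extractor pick up PoWa embeds on publisher pages. A minimal sketch of calling it directly through the Python API; the UUID is the adn.com example from the test list, and the options are illustrative:

```python
import youtube_dl

# Resolve an Arc Publishing (PoWa) video by organization and UUID without
# downloading it; youtube-dl routes the pseudo-URL to ArcPublishingIE.
opts = {'skip_download': True, 'forcetitle': True}

with youtube_dl.YoutubeDL(opts) as ydl:
    ydl.download(['arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab'])
```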
youtube_dl/extractor/brightcove.py

@@ -471,13 +471,18 @@ class BrightcoveNewIE(AdobePassIE):
def _parse_brightcove_metadata(self, json_data, video_id, headers={}):
title = json_data['name'].strip()

num_drm_sources = 0
formats = []
for source in json_data.get('sources', []):
sources = json_data.get('sources') or []
for source in sources:
container = source.get('container')
ext = mimetype2ext(source.get('type'))
src = source.get('src')
# https://support.brightcove.com/playback-api-video-fields-reference#key_systems_object
if ext == 'ism' or container == 'WVM' or source.get('key_systems'):
if container == 'WVM' or source.get('key_systems'):
num_drm_sources += 1
continue
elif ext == 'ism':
continue
elif ext == 'm3u8' or container == 'M2TS':
if not src:

@@ -534,20 +539,15 @@
'format_id': build_format_id('rtmp'),
})
formats.append(f)
if not formats:
# for sonyliv.com DRM protected videos
s3_source_url = json_data.get('custom_fields', {}).get('s3sourceurl')
if s3_source_url:
formats.append({
'url': s3_source_url,
'format_id': 'source',
})

errors = json_data.get('errors')
if not formats and errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
if not formats:
errors = json_data.get('errors')
if errors:
error = errors[0]
raise ExtractorError(
error.get('message') or error.get('error_subcode') or error['error_code'], expected=True)
if sources and num_drm_sources == len(sources):
raise ExtractorError('This video is DRM protected.', expected=True)

self._sort_formats(formats)
youtube_dl/extractor/extractors.py

@@ -33,6 +33,8 @@ from .aenetworks import (
AENetworksCollectionIE,
AENetworksShowIE,
HistoryTopicIE,
HistoryPlayerIE,
BiographyIE,
)
from .afreecatv import AfreecaTVIE
from .airmozilla import AirMozillaIE

@@ -54,6 +56,7 @@ from .appletrailers import (
AppleTrailersSectionIE,
)
from .archiveorg import ArchiveOrgIE
from .arcpublishing import ArcPublishingIE
from .arkena import ArkenaIE
from .ard import (
ARDBetaMediathekIE,

@@ -399,7 +402,6 @@ from .fujitv import FujiTVFODPlus7IE
from .funimation import FunimationIE
from .funk import FunkIE
from .fusion import FusionIE
from .fxnetworks import FXNetworksIE
from .gaia import GaiaIE
from .gameinformer import GameInformerIE
from .gamespot import GameSpotIE

@@ -691,7 +693,6 @@ from .nba import (
NBAChannelIE,
)
from .nbc import (
CSNNEIE,
NBCIE,
NBCNewsIE,
NBCOlympicsIE,

@@ -789,6 +790,7 @@ from .nrk import (
NRKSkoleIE,
NRKTVIE,
NRKTVDirekteIE,
NRKRadioPodkastIE,
NRKTVEpisodeIE,
NRKTVEpisodesIE,
NRKTVSeasonIE,

@@ -1052,6 +1054,7 @@ from .skynewsarabia import (
from .sky import (
SkyNewsIE,
SkySportsIE,
SkySportsNewsIE,
)
from .slideshare import SlideshareIE
from .slideslive import SlidesLiveIE

@@ -1425,7 +1428,10 @@ from .vshare import VShareIE
from .medialaan import MedialaanIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vvvvid import VVVVIDIE
from .vvvvid import (
VVVVIDIE,
VVVVIDShowIE,
)
from .vyborymos import VyboryMosIE
from .vzaar import VzaarIE
from .wakanim import WakanimIE
@@ -1,77 +0,0 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .adobepass import AdobePassIE
|
||||
from ..utils import (
|
||||
extract_attributes,
|
||||
int_or_none,
|
||||
parse_age_limit,
|
||||
smuggle_url,
|
||||
update_url_query,
|
||||
)
|
||||
|
||||
|
||||
class FXNetworksIE(AdobePassIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:fxnetworks|simpsonsworld)\.com/video/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.fxnetworks.com/video/1032565827847',
|
||||
'md5': '8d99b97b4aa7a202f55b6ed47ea7e703',
|
||||
'info_dict': {
|
||||
'id': 'dRzwHC_MMqIv',
|
||||
'ext': 'mp4',
|
||||
'title': 'First Look: Better Things - Season 2',
|
||||
'description': 'Because real life is like a fart. Watch this FIRST LOOK to see what inspired the new season of Better Things.',
|
||||
'age_limit': 14,
|
||||
'uploader': 'NEWA-FNG-FX',
|
||||
'upload_date': '20170825',
|
||||
'timestamp': 1503686274,
|
||||
'episode_number': 0,
|
||||
'season_number': 2,
|
||||
'series': 'Better Things',
|
||||
},
|
||||
'add_ie': ['ThePlatform'],
|
||||
}, {
|
||||
'url': 'http://www.simpsonsworld.com/video/716094019682',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
if 'The content you are trying to access is not available in your region.' in webpage:
|
||||
self.raise_geo_restricted()
|
||||
video_data = extract_attributes(self._search_regex(
|
||||
r'(<a.+?rel="https?://link\.theplatform\.com/s/.+?</a>)', webpage, 'video data'))
|
||||
player_type = self._search_regex(r'playerType\s*=\s*[\'"]([^\'"]+)', webpage, 'player type', default=None)
|
||||
release_url = video_data['rel']
|
||||
title = video_data['data-title']
|
||||
rating = video_data.get('data-rating')
|
||||
query = {
|
||||
'mbr': 'true',
|
||||
}
|
||||
if player_type == 'movies':
|
||||
query.update({
|
||||
'manifest': 'm3u',
|
||||
})
|
||||
else:
|
||||
query.update({
|
||||
'switch': 'http',
|
||||
})
|
||||
if video_data.get('data-req-auth') == '1':
|
||||
resource = self._get_mvpd_resource(
|
||||
video_data['data-channel'], title,
|
||||
video_data.get('data-guid'), rating)
|
||||
query['auth'] = self._extract_mvpd_auth(url, video_id, 'fx', resource)
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),
|
||||
'series': video_data.get('data-show-title'),
|
||||
'episode_number': int_or_none(video_data.get('data-episode')),
|
||||
'season_number': int_or_none(video_data.get('data-season')),
|
||||
'thumbnail': video_data.get('data-large-thumb'),
|
||||
'age_limit': parse_age_limit(rating),
|
||||
'ie_key': 'ThePlatform',
|
||||
}
|
youtube_dl/extractor/generic.py

@@ -127,6 +127,7 @@ from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
from .arcpublishing import ArcPublishingIE


class GenericIE(InfoExtractor):

@@ -2208,6 +2209,20 @@ class GenericIE(InfoExtractor):
'uploader': 'OTT Videos',
},
},
{
# ArcPublishing PoWa video player
'url': 'https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/',
'md5': 'b03b2fac8680e1e5a7cc81a5c27e71b3',
'info_dict': {
'id': '8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'ext': 'mp4',
'title': 'Senate candidates wave to voters on Anchorage streets',
'description': 'md5:91f51a6511f090617353dc720318b20e',
'timestamp': 1604378735,
'upload_date': '20201103',
'duration': 1581,
},
},
]

def report_following_redirect(self, new_url):

@@ -2574,6 +2589,10 @@ class GenericIE(InfoExtractor):
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')

arc_urls = ArcPublishingIE._extract_urls(webpage)
if arc_urls:
return self.playlist_from_matches(arc_urls, video_id, video_title, ie=ArcPublishingIE.ie_key())

# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
youtube_dl/extractor/go.py

@@ -38,13 +38,17 @@ class GoIE(AdobePassIE):
'disneynow': {
'brand': '011',
'resource_id': 'Disney',
}
},
'fxnow.fxnetworks': {
'brand': '025',
'requestor_id': 'dtci',
},
}
_VALID_URL = r'''(?x)
https?://
(?:
(?:(?P<sub_domain>%s)\.)?go|
(?P<sub_domain_2>abc|freeform|disneynow)
(?P<sub_domain_2>abc|freeform|disneynow|fxnow\.fxnetworks)
)\.com/
(?:
(?:[^/]+/)*(?P<id>[Vv][Dd][Kk][Aa]\w+)|

@@ -99,6 +103,19 @@ class GoIE(AdobePassIE):
# m3u8 download
'skip_download': True,
},
}, {
'url': 'https://fxnow.fxnetworks.com/shows/better-things/video/vdka12782841',
'info_dict': {
'id': 'VDKA12782841',
'ext': 'mp4',
'title': 'First Look: Better Things - Season 2',
'description': 'md5:fa73584a95761c605d9d54904e35b407',
},
'params': {
'geo_bypass_ip_block': '3.244.239.0/24',
# m3u8 download
'skip_download': True,
},
}, {
'url': 'http://abc.go.com/shows/the-catch/episode-guide/season-01/10-the-wedding',
'only_matching': True,
youtube_dl/extractor/mitele.py

@@ -1,15 +1,14 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .telecinco import TelecincoIE
from ..utils import (
int_or_none,
parse_iso8601,
smuggle_url,
)


class MiTeleIE(InfoExtractor):
class MiTeleIE(TelecincoIE):
IE_DESC = 'mitele.es'
_VALID_URL = r'https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player'

@@ -31,7 +30,6 @@ class MiTeleIE(InfoExtractor):
'timestamp': 1471209401,
'upload_date': '20160814',
},
'add_ie': ['Ooyala'],
}, {
# no explicit title
'url': 'http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player',

@@ -54,7 +52,6 @@ class MiTeleIE(InfoExtractor):
'params': {
'skip_download': True,
},
'add_ie': ['Ooyala'],
}, {
'url': 'http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player',
'only_matching': True,

@@ -70,16 +67,11 @@ class MiTeleIE(InfoExtractor):
r'window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=\s*({.+})',
webpage, 'Pre Player'), display_id)['prePlayer']
title = pre_player['title']
video = pre_player['video']
video_id = video['dataMediaId']
video_info = self._parse_content(pre_player['video'], url)
content = pre_player.get('content') or {}
info = content.get('info') or {}

return {
'_type': 'url_transparent',
# for some reason only HLS is supported
'url': smuggle_url('ooyala:' + video_id, {'supportedformats': 'm3u8,dash'}),
'id': video_id,
video_info.update({
'title': title,
'description': info.get('synopsis'),
'series': content.get('title'),

@@ -87,7 +79,7 @@ class MiTeleIE(InfoExtractor):
'episode': content.get('subtitle'),
'episode_number': int_or_none(info.get('episode_number')),
'duration': int_or_none(info.get('duration')),
'thumbnail': video.get('dataPoster'),
'age_limit': int_or_none(info.get('rating')),
'timestamp': parse_iso8601(pre_player.get('publishedTime')),
}
})
return video_info
@@ -158,7 +158,8 @@ class NBCIE(AdobePassIE):
|
||||
|
||||
|
||||
class NBCSportsVPlayerIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://vplayer\.nbcsports\.com/(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)'
|
||||
_VALID_URL_BASE = r'https?://(?:vplayer\.nbcsports\.com|(?:www\.)?nbcsports\.com/vplayer)/'
|
||||
_VALID_URL = _VALID_URL_BASE + r'(?:[^/]+/)+(?P<id>[0-9a-zA-Z_]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/9CsDKds0kvHI',
|
||||
@@ -174,12 +175,15 @@ class NBCSportsVPlayerIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'https://vplayer.nbcsports.com/p/BxmELC/nbcsports_embed/select/media/_hqLjQ95yx8Z',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.nbcsports.com/vplayer/p/BxmELC/nbcsports/select/PHJSaFWbrTY9?form=html&autoPlay=true',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@staticmethod
|
||||
def _extract_url(webpage):
|
||||
iframe_m = re.search(
|
||||
r'<iframe[^>]+src="(?P<url>https?://vplayer\.nbcsports\.com/[^"]+)"', webpage)
|
||||
r'<(?:iframe[^>]+|div[^>]+data-(?:mpx-)?)src="(?P<url>%s[^"]+)"' % NBCSportsVPlayerIE._VALID_URL_BASE, webpage)
|
||||
if iframe_m:
|
||||
return iframe_m.group('url')
|
||||
|
||||
@@ -192,21 +196,29 @@ class NBCSportsVPlayerIE(InfoExtractor):
|
||||
|
||||
|
||||
class NBCSportsIE(InfoExtractor):
|
||||
# Does not include https because its certificate is invalid
|
||||
_VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?nbcsports\.com//?(?!vplayer/)(?:[^/]+/)+(?P<id>[0-9a-z-]+)'
|
||||
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
# iframe src
|
||||
'url': 'http://www.nbcsports.com//college-basketball/ncaab/tom-izzo-michigan-st-has-so-much-respect-duke',
|
||||
'info_dict': {
|
||||
'id': 'PHJSaFWbrTY9',
|
||||
'ext': 'flv',
|
||||
'ext': 'mp4',
|
||||
'title': 'Tom Izzo, Michigan St. has \'so much respect\' for Duke',
|
||||
'description': 'md5:ecb459c9d59e0766ac9c7d5d0eda8113',
|
||||
'uploader': 'NBCU-SPORTS',
|
||||
'upload_date': '20150330',
|
||||
'timestamp': 1427726529,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
# data-mpx-src
|
||||
'url': 'https://www.nbcsports.com/philadelphia/philadelphia-phillies/bruce-bochy-hector-neris-hes-idiot',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# data-src
|
||||
'url': 'https://www.nbcsports.com/boston/video/report-card-pats-secondary-no-match-josh-allen',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
@@ -274,33 +286,6 @@ class NBCSportsStreamIE(AdobePassIE):
|
||||
}
|
||||
|
||||
|
||||
class CSNNEIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?csnne\.com/video/(?P<id>[0-9a-z-]+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.csnne.com/video/snc-evening-update-wright-named-red-sox-no-5-starter',
|
||||
'info_dict': {
|
||||
'id': 'yvBLLUgQ8WU0',
|
||||
'ext': 'mp4',
|
||||
'title': 'SNC evening update: Wright named Red Sox\' No. 5 starter.',
|
||||
'description': 'md5:1753cfee40d9352b19b4c9b3e589b9e3',
|
||||
'timestamp': 1459369979,
|
||||
'upload_date': '20160330',
|
||||
'uploader': 'NBCU-SPORTS',
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'ie_key': 'ThePlatform',
|
||||
'url': self._html_search_meta('twitter:player:stream', webpage),
|
||||
'display_id': display_id,
|
||||
}
|
||||
|
||||
|
||||
class NBCNewsIE(ThePlatformIE):
|
||||
_VALID_URL = r'(?x)https?://(?:www\.)?(?:nbcnews|today|msnbc)\.com/([^/]+/)*(?:.*-)?(?P<id>[^/?]+)'
|
||||
|
||||
|
youtube_dl/extractor/nhk.py

@@ -90,7 +90,7 @@ class NhkVodIE(NhkBaseIE):
_TESTS = [{
# video clip
'url': 'https://www3.nhk.or.jp/nhkworld/en/ondemand/video/9999011/',
'md5': '256a1be14f48d960a7e61e2532d95ec3',
'md5': '7a90abcfe610ec22a6bfe15bd46b30ca',
'info_dict': {
'id': 'a95j5iza',
'ext': 'mp4',
@@ -6,16 +6,13 @@ import random
import re

from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse_unquote,
)
from ..compat import compat_str
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
parse_age_limit,
parse_duration,
str_or_none,
try_get,
urljoin,
url_or_none,
@@ -63,7 +60,8 @@ class NRKBaseIE(InfoExtractor):
return self._download_json(
urljoin('http://psapi.nrk.no/', path),
video_id, note or 'Downloading %s JSON' % item,
fatal=fatal, query=query)
fatal=fatal, query=query,
headers={'Accept-Encoding': 'gzip, deflate, br'})


class NRKIE(NRKBaseIE):
@@ -116,9 +114,39 @@ class NRKIE(NRKBaseIE):
}, {
'url': 'https://www.nrk.no/video/humor/kommentatorboksen-reiser-til-sjos_d1fda11f-a4ad-437a-a374-0398bc84e999',
'only_matching': True,
}, {
# podcast
'url': 'nrk:l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
'only_matching': True,
}, {
'url': 'nrk:podcast/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
'only_matching': True,
}, {
# clip
'url': 'nrk:150533',
'only_matching': True,
}, {
'url': 'nrk:clip/150533',
'only_matching': True,
}, {
# program
'url': 'nrk:MDDP12000117',
'only_matching': True,
}, {
'url': 'nrk:program/ENRK10100318',
'only_matching': True,
}, {
# direkte
'url': 'nrk:nrk1',
'only_matching': True,
}, {
'url': 'nrk:channel/nrk1',
'only_matching': True,
}]

def _extract_from_playback(self, video_id):
def _real_extract(self, url):
video_id = self._match_id(url).split('/')[-1]

path_templ = 'playback/%s/' + video_id

def call_playback_api(item, query=None):
@@ -126,6 +154,8 @@ class NRKIE(NRKBaseIE):
# known values for preferredCdn: akamai, iponly, minicdn and telenor
manifest = call_playback_api('manifest', {'preferredCdn': 'akamai'})

video_id = try_get(manifest, lambda x: x['id'], compat_str) or video_id

if manifest.get('playability') == 'nonPlayable':
self._raise_error(manifest['nonPlayable'])

@@ -140,8 +170,15 @@ class NRKIE(NRKBaseIE):
format_url = url_or_none(asset.get('url'))
if not format_url:
continue
if asset.get('format') == 'HLS' or determine_ext(format_url) == 'm3u8':
asset_format = (asset.get('format') or '').lower()
if asset_format == 'hls' or determine_ext(format_url) == 'm3u8':
formats.extend(self._extract_nrk_formats(format_url, video_id))
elif asset_format == 'mp3':
formats.append({
'url': format_url,
'format_id': asset_format,
'vcodec': 'none',
})
self._sort_formats(formats)

data = call_playback_api('metadata')
@@ -168,31 +205,94 @@ class NRKIE(NRKBaseIE):
'height': int_or_none(image.get('pixelHeight')),
})

return {
subtitles = {}
for sub in try_get(playable, lambda x: x['subtitles'], list) or []:
if not isinstance(sub, dict):
continue
sub_url = url_or_none(sub.get('webVtt'))
if not sub_url:
continue
sub_key = str_or_none(sub.get('language')) or 'nb'
sub_type = str_or_none(sub.get('type'))
if sub_type:
sub_key += '-%s' % sub_type
subtitles.setdefault(sub_key, []).append({
'url': sub_url,
})

legal_age = try_get(
data, lambda x: x['legalAge']['body']['rating']['code'], compat_str)
# https://en.wikipedia.org/wiki/Norwegian_Media_Authority
if legal_age == 'A':
age_limit = 0
elif legal_age.isdigit():
age_limit = int_or_none(legal_age)
else:
age_limit = None

is_series = try_get(data, lambda x: x['_links']['series']['name']) == 'series'

info = {
'id': video_id,
'title': title,
'alt_title': alt_title,
'description': description,
'duration': duration,
'thumbnails': thumbnails,
'age_limit': age_limit,
'formats': formats,
'subtitles': subtitles,
}

def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_from_playback(video_id)
if is_series:
series = season_id = season_number = episode = episode_number = None
programs = self._call_api(
'programs/%s' % video_id, video_id, 'programs', fatal=False)
if programs and isinstance(programs, dict):
series = str_or_none(programs.get('seriesTitle'))
season_id = str_or_none(programs.get('seasonId'))
season_number = int_or_none(programs.get('seasonNumber'))
episode = str_or_none(programs.get('episodeTitle'))
episode_number = int_or_none(programs.get('episodeNumber'))
if not series:
series = title
if alt_title:
title += ' - %s' % alt_title
if not season_number:
season_number = int_or_none(self._search_regex(
r'Sesong\s+(\d+)', description or '', 'season number',
default=None))
if not episode:
episode = alt_title if is_series else None
if not episode_number:
episode_number = int_or_none(self._search_regex(
r'^(\d+)\.', episode or '', 'episode number',
default=None))
if not episode_number:
episode_number = int_or_none(self._search_regex(
r'\((\d+)\s*:\s*\d+\)', description or '',
'episode number', default=None))
info.update({
'title': title,
'series': series,
'season_id': season_id,
'season_number': season_number,
'episode': episode,
'episode_number': episode_number,
})

return info
class NRKTVIE(NRKBaseIE):
class NRKTVIE(InfoExtractor):
IE_DESC = 'NRK TV and NRK Radio'
_EPISODE_RE = r'(?P<id>[a-zA-Z]{4}\d{8})'
_VALID_URL = r'https?://(?:tv|radio)\.nrk(?:super)?\.no/(?:[^/]+/)*%s' % _EPISODE_RE
_API_HOSTS = ('psapi-ne.nrk.no', 'psapi-we.nrk.no')
_TESTS = [{
'url': 'https://tv.nrk.no/program/MDDP12000117',
'md5': 'c4a5960f1b00b40d47db65c1064e0ab1',
'info_dict': {
'id': 'MDDP12000117AA',
'id': 'MDDP12000117',
'ext': 'mp4',
'title': 'Alarm Trolltunga',
'description': 'md5:46923a6e6510eefcce23d5ef2a58f2ce',
@@ -203,24 +303,27 @@ class NRKTVIE(NRKBaseIE):
'url': 'https://tv.nrk.no/serie/20-spoersmaal-tv/MUHH48000314/23-05-2014',
'md5': '8d40dab61cea8ab0114e090b029a0565',
'info_dict': {
'id': 'MUHH48000314AA',
'id': 'MUHH48000314',
'ext': 'mp4',
'title': '20 spørsmål 23.05.2014',
'title': '20 spørsmål - 23. mai 2014',
'alt_title': '23. mai 2014',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'duration': 1741,
'series': '20 spørsmål',
'episode': '23.05.2014',
'episode': '23. mai 2014',
'age_limit': 0,
},
}, {
'url': 'https://tv.nrk.no/program/mdfp15000514',
'info_dict': {
'id': 'MDFP15000514CA',
'id': 'MDFP15000514',
'ext': 'mp4',
'title': 'Grunnlovsjubiléet - Stor ståhei for ingenting 24.05.2014',
'title': 'Kunnskapskanalen - Grunnlovsjubiléet - Stor ståhei for ingenting',
'description': 'md5:89290c5ccde1b3a24bb8050ab67fe1db',
'duration': 4605.08,
'series': 'Kunnskapskanalen',
'episode': '24.05.2014',
'episode': 'Grunnlovsjubiléet - Stor ståhei for ingenting',
'age_limit': 0,
},
'params': {
'skip_download': True,
@@ -229,10 +332,11 @@ class NRKTVIE(NRKBaseIE):
# single playlist video
'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015#del=2',
'info_dict': {
'id': 'MSPO40010515AH',
'id': 'MSPO40010515',
'ext': 'mp4',
'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
'description': 'md5:c03aba1e917561eface5214020551b7a',
'age_limit': 0,
},
'params': {
'skip_download': True,
@@ -242,24 +346,27 @@ class NRKTVIE(NRKBaseIE):
}, {
'url': 'https://tv.nrk.no/serie/tour-de-ski/MSPO40010515/06-01-2015',
'info_dict': {
'id': 'MSPO40010515AH',
'id': 'MSPO40010515',
'ext': 'mp4',
'title': 'Sprint fri teknikk, kvinner og menn 06.01.2015',
'description': 'md5:c03aba1e917561eface5214020551b7a',
'age_limit': 0,
},
'expected_warnings': ['Failed to download m3u8 information'],
'skip': 'Ikke tilgjengelig utenfor Norge',
}, {
'url': 'https://tv.nrk.no/serie/anno/KMTE50001317/sesong-3/episode-13',
'info_dict': {
'id': 'KMTE50001317AA',
'id': 'KMTE50001317',
'ext': 'mp4',
'title': 'Anno 13:30',
'title': 'Anno - 13. episode',
'description': 'md5:11d9613661a8dbe6f9bef54e3a4cbbfa',
'duration': 2340,
'series': 'Anno',
'episode': '13:30',
'episode': '13. episode',
'season_number': 3,
'episode_number': 13,
'age_limit': 0,
},
'params': {
'skip_download': True,
@@ -267,13 +374,14 @@ class NRKTVIE(NRKBaseIE):
}, {
'url': 'https://tv.nrk.no/serie/nytt-paa-nytt/MUHH46000317/27-01-2017',
'info_dict': {
'id': 'MUHH46000317AA',
'id': 'MUHH46000317',
'ext': 'mp4',
'title': 'Nytt på Nytt 27.01.2017',
'description': 'md5:5358d6388fba0ea6f0b6d11c48b9eb4b',
'duration': 1796,
'series': 'Nytt på nytt',
'episode': '27.01.2017',
'age_limit': 0,
},
'params': {
'skip_download': True,
@@ -290,180 +398,26 @@ class NRKTVIE(NRKBaseIE):
'only_matching': True,
}]

_api_host = None

def _extract_from_mediaelement(self, video_id):
api_hosts = (self._api_host, ) if self._api_host else self._API_HOSTS

for api_host in api_hosts:
data = self._download_json(
'http://%s/mediaelement/%s' % (api_host, video_id),
video_id, 'Downloading mediaelement JSON',
fatal=api_host == api_hosts[-1])
if not data:
continue
self._api_host = api_host
break

title = data.get('fullTitle') or data.get('mainTitle') or data['title']
video_id = data.get('id') or video_id

urls = []
entries = []

conviva = data.get('convivaStatistics') or {}
live = (data.get('mediaElementType') == 'Live'
or data.get('isLive') is True or conviva.get('isLive'))

def make_title(t):
return self._live_title(t) if live else t

media_assets = data.get('mediaAssets')
if media_assets and isinstance(media_assets, list):
def video_id_and_title(idx):
return ((video_id, title) if len(media_assets) == 1
else ('%s-%d' % (video_id, idx), '%s (Part %d)' % (title, idx)))
for num, asset in enumerate(media_assets, 1):
asset_url = asset.get('url')
if not asset_url or asset_url in urls:
continue
urls.append(asset_url)
formats = self._extract_nrk_formats(asset_url, video_id)
if not formats:
continue
self._sort_formats(formats)

entry_id, entry_title = video_id_and_title(num)
duration = parse_duration(asset.get('duration'))
subtitles = {}
for subtitle in ('webVtt', 'timedText'):
subtitle_url = asset.get('%sSubtitlesUrl' % subtitle)
if subtitle_url:
subtitles.setdefault('no', []).append({
'url': compat_urllib_parse_unquote(subtitle_url)
})
entries.append({
'id': asset.get('carrierId') or entry_id,
'title': make_title(entry_title),
'duration': duration,
'subtitles': subtitles,
'formats': formats,
'is_live': live,
})

if not entries:
media_url = data.get('mediaUrl')
if media_url and media_url not in urls:
formats = self._extract_nrk_formats(media_url, video_id)
if formats:
self._sort_formats(formats)
duration = parse_duration(data.get('duration'))
entries = [{
'id': video_id,
'title': make_title(title),
'duration': duration,
'formats': formats,
'is_live': live,
}]

if not entries:
self._raise_error(data)

series = conviva.get('seriesName') or data.get('seriesTitle')
episode = conviva.get('episodeName') or data.get('episodeNumberOrDate')

season_number = None
episode_number = None
if data.get('mediaElementType') == 'Episode':
_season_episode = data.get('scoresStatistics', {}).get('springStreamStream') or \
data.get('relativeOriginUrl', '')
EPISODENUM_RE = [
r'/s(?P<season>\d{,2})e(?P<episode>\d{,2})\.',
r'/sesong-(?P<season>\d{,2})/episode-(?P<episode>\d{,2})',
]
season_number = int_or_none(self._search_regex(
EPISODENUM_RE, _season_episode, 'season number',
default=None, group='season'))
episode_number = int_or_none(self._search_regex(
EPISODENUM_RE, _season_episode, 'episode number',
default=None, group='episode'))

thumbnails = None
images = data.get('images')
if images and isinstance(images, dict):
web_images = images.get('webImages')
if isinstance(web_images, list):
thumbnails = [{
'url': image['imageUrl'],
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in web_images if image.get('imageUrl')]

description = data.get('description')
category = data.get('mediaAnalytics', {}).get('category')

common_info = {
'description': description,
'series': series,
'episode': episode,
'season_number': season_number,
'episode_number': episode_number,
'categories': [category] if category else None,
'age_limit': parse_age_limit(data.get('legalAge')),
'thumbnails': thumbnails,
}

vcodec = 'none' if data.get('mediaType') == 'Audio' else None

for entry in entries:
entry.update(common_info)
for f in entry['formats']:
f['vcodec'] = vcodec

points = data.get('shortIndexPoints')
if isinstance(points, list):
chapters = []
for next_num, point in enumerate(points, start=1):
if not isinstance(point, dict):
continue
start_time = parse_duration(point.get('startPoint'))
if start_time is None:
continue
end_time = parse_duration(
data.get('duration')
if next_num == len(points)
else points[next_num].get('startPoint'))
if end_time is None:
continue
chapters.append({
'start_time': start_time,
'end_time': end_time,
'title': point.get('title'),
})
if chapters and len(entries) == 1:
entries[0]['chapters'] = chapters

return self.playlist_result(entries, video_id, title, description)

def _real_extract(self, url):
video_id = self._match_id(url)
return self._extract_from_mediaelement(video_id)
return self.url_result(
'nrk:%s' % video_id, ie=NRKIE.ie_key(), video_id=video_id)
class NRKTVEpisodeIE(InfoExtractor):
_VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/\d+/episode/\d+)'
_VALID_URL = r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+/sesong/(?P<season_number>\d+)/episode/(?P<episode_number>\d+))'
_TESTS = [{
'url': 'https://tv.nrk.no/serie/hellums-kro/sesong/1/episode/2',
'info_dict': {
'id': 'MUHH36005220BA',
'id': 'MUHH36005220',
'ext': 'mp4',
'title': 'Kro, krig og kjærlighet 2:6',
'description': 'md5:b32a7dc0b1ed27c8064f58b97bda4350',
'duration': 1563,
'title': 'Hellums kro - 2. Kro, krig og kjærlighet',
'description': 'md5:ad92ddffc04cea8ce14b415deef81787',
'duration': 1563.92,
'series': 'Hellums kro',
'season_number': 1,
'episode_number': 2,
'episode': '2:6',
'episode': '2. Kro, krig og kjærlighet',
'age_limit': 6,
},
'params': {
@@ -472,15 +426,16 @@ class NRKTVEpisodeIE(InfoExtractor):
}, {
'url': 'https://tv.nrk.no/serie/backstage/sesong/1/episode/8',
'info_dict': {
'id': 'MSUI14000816AA',
'id': 'MSUI14000816',
'ext': 'mp4',
'title': 'Backstage 8:30',
'title': 'Backstage - 8. episode',
'description': 'md5:de6ca5d5a2d56849e4021f2bf2850df4',
'duration': 1320,
'series': 'Backstage',
'season_number': 1,
'episode_number': 8,
'episode': '8:30',
'episode': '8. episode',
'age_limit': 0,
},
'params': {
'skip_download': True,
@@ -489,7 +444,7 @@ class NRKTVEpisodeIE(InfoExtractor):
}]

def _real_extract(self, url):
display_id = self._match_id(url)
display_id, season_number, episode_number = re.match(self._VALID_URL, url).groups()

webpage = self._download_webpage(url, display_id)

@@ -501,10 +456,12 @@ class NRKTVEpisodeIE(InfoExtractor):
assert re.match(NRKTVIE._EPISODE_RE, nrk_id)

info.update({
'_type': 'url_transparent',
'_type': 'url',
'id': nrk_id,
'url': 'nrk:%s' % nrk_id,
'ie_key': NRKIE.ie_key(),
'season_number': int(season_number),
'episode_number': int(episode_number),
})
return info

@@ -518,8 +475,6 @@ class NRKTVSerieBaseIE(NRKBaseIE):
nrk_id = episode.get('prfId') or episode.get('episodeId')
if not nrk_id or not isinstance(nrk_id, compat_str):
continue
if not re.match(NRKTVIE._EPISODE_RE, nrk_id):
continue
entries.append(self.url_result(
'nrk:%s' % nrk_id, ie=NRKIE.ie_key(), video_id=nrk_id))
return entries
@@ -531,6 +486,10 @@ class NRKTVSerieBaseIE(NRKBaseIE):
if embedded.get(asset_key):
return asset_key

@staticmethod
def _catalog_name(serie_kind):
return 'podcast' if serie_kind in ('podcast', 'podkast') else 'series'

def _entries(self, data, display_id):
for page_num in itertools.count(1):
embedded = data.get('_embedded') or data
@@ -564,7 +523,16 @@ class NRKTVSerieBaseIE(NRKBaseIE):
class NRKTVSeasonIE(NRKTVSerieBaseIE):
_VALID_URL = r'https?://(?P<domain>tv|radio)\.nrk\.no/serie/(?P<serie>[^/]+)/(?:sesong/)?(?P<id>\d+)'
_VALID_URL = r'''(?x)
https?://
(?P<domain>tv|radio)\.nrk\.no/
(?P<serie_kind>serie|pod[ck]ast)/
(?P<serie>[^/]+)/
(?:
(?:sesong/)?(?P<id>\d+)|
sesong/(?P<id_2>[^/?#&]+)
)
'''
_TESTS = [{
'url': 'https://tv.nrk.no/serie/backstage/sesong/1',
'info_dict': {
@@ -600,19 +568,34 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):
# 180 entries, single page
'url': 'https://tv.nrk.no/serie/spangas/sesong/1',
'only_matching': True,
}, {
'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/diagnose-kverulant',
'info_dict': {
'id': 'hele_historien/diagnose-kverulant',
'title': 'Diagnose kverulant',
},
'playlist_mincount': 3,
}, {
'url': 'https://radio.nrk.no/podkast/loerdagsraadet/sesong/202101',
'only_matching': True,
}]

@classmethod
def suitable(cls, url):
return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url)
return (False if NRKTVIE.suitable(url) or NRKTVEpisodeIE.suitable(url) or NRKRadioPodkastIE.suitable(url)
else super(NRKTVSeasonIE, cls).suitable(url))

def _real_extract(self, url):
domain, serie, season_id = re.match(self._VALID_URL, url).groups()
mobj = re.match(self._VALID_URL, url)
domain = mobj.group('domain')
serie_kind = mobj.group('serie_kind')
serie = mobj.group('serie')
season_id = mobj.group('id') or mobj.group('id_2')
display_id = '%s/%s' % (serie, season_id)

data = self._call_api(
'%s/catalog/series/%s/seasons/%s' % (domain, serie, season_id),
'%s/catalog/%s/%s/seasons/%s'
% (domain, self._catalog_name(serie_kind), serie, season_id),
display_id, 'season', query={'pageSize': 50})

title = try_get(data, lambda x: x['titles']['title'], compat_str) or display_id
@@ -622,7 +605,7 @@ class NRKTVSeasonIE(NRKTVSerieBaseIE):


class NRKTVSeriesIE(NRKTVSerieBaseIE):
_VALID_URL = r'https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/serie/(?P<id>[^/]+)'
_VALID_URL = r'https?://(?P<domain>(?:tv|radio)\.nrk|(?:tv\.)?nrksuper)\.no/(?P<serie_kind>serie|pod[ck]ast)/(?P<id>[^/]+)'
_TESTS = [{
# new layout, instalments
'url': 'https://tv.nrk.no/serie/groenn-glede',
@@ -682,23 +665,33 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
}, {
'url': 'https://nrksuper.no/serie/labyrint',
'only_matching': True,
}, {
'url': 'https://radio.nrk.no/podkast/ulrikkes_univers',
'info_dict': {
'id': 'ulrikkes_univers',
},
'playlist_mincount': 10,
}, {
'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/nrkno-poddkast-26588-134079-05042018030000',
'only_matching': True,
}]

@classmethod
def suitable(cls, url):
return (
False if any(ie.suitable(url)
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKTVSeasonIE))
for ie in (NRKTVIE, NRKTVEpisodeIE, NRKRadioPodkastIE, NRKTVSeasonIE))
else super(NRKTVSeriesIE, cls).suitable(url))

def _real_extract(self, url):
site, series_id = re.match(self._VALID_URL, url).groups()
site, serie_kind, series_id = re.match(self._VALID_URL, url).groups()
is_radio = site == 'radio.nrk'
domain = 'radio' if is_radio else 'tv'

size_prefix = 'p' if is_radio else 'embeddedInstalmentsP'
series = self._call_api(
'%s/catalog/series/%s' % (domain, series_id),
'%s/catalog/%s/%s'
% (domain, self._catalog_name(serie_kind), series_id),
series_id, 'serie', query={size_prefix + 'ageSize': 50})
titles = try_get(series, [
lambda x: x['titles'],
@@ -713,12 +706,14 @@ class NRKTVSeriesIE(NRKTVSerieBaseIE):
embedded_seasons = embedded.get('seasons') or []
if len(linked_seasons) > len(embedded_seasons):
for season in linked_seasons:
season_name = season.get('name')
if season_name and isinstance(season_name, compat_str):
season_url = urljoin(url, season.get('href'))
if not season_url:
season_name = season.get('name')
if season_name and isinstance(season_name, compat_str):
season_url = 'https://%s.nrk.no/serie/%s/sesong/%s' % (domain, series_id, season_name)
if season_url:
entries.append(self.url_result(
'https://%s.nrk.no/serie/%s/sesong/%s'
% (domain, series_id, season_name),
ie=NRKTVSeasonIE.ie_key(),
season_url, ie=NRKTVSeasonIE.ie_key(),
video_title=season.get('title')))
else:
for season in embedded_seasons:
@@ -743,6 +738,38 @@ class NRKTVDirekteIE(NRKTVIE):
}]


class NRKRadioPodkastIE(InfoExtractor):
_VALID_URL = r'https?://radio\.nrk\.no/pod[ck]ast/(?:[^/]+/)+(?P<id>l_[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

_TESTS = [{
'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
'md5': '8d40dab61cea8ab0114e090b029a0565',
'info_dict': {
'id': 'MUHH48000314AA',
'ext': 'mp4',
'title': '20 spørsmål 23.05.2014',
'description': 'md5:bdea103bc35494c143c6a9acdd84887a',
'duration': 1741,
'series': '20 spørsmål',
'episode': '23.05.2014',
},
}, {
'url': 'https://radio.nrk.no/podcast/ulrikkes_univers/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
'only_matching': True,
}, {
'url': 'https://radio.nrk.no/podkast/ulrikkes_univers/sesong/1/l_96f4f1b0-de54-4e6a-b4f1-b0de54fe6af8',
'only_matching': True,
}, {
'url': 'https://radio.nrk.no/podkast/hele_historien/sesong/bortfoert-i-bergen/l_774d1a2c-7aa7-4965-8d1a-2c7aa7d9652c',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)
return self.url_result(
'nrk:%s' % video_id, ie=NRKIE.ie_key(), video_id=video_id)


class NRKPlaylistBaseIE(InfoExtractor):
def _extract_description(self, webpage):
pass
@@ -6,16 +6,33 @@ import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
dict_get,
ExtractorError,
int_or_none,
unescapeHTML,
parse_iso8601,
try_get,
unescapeHTML,
)


class PikselIE(InfoExtractor):
_VALID_URL = r'https?://player\.piksel\.com/v/(?:refid/[^/]+/prefid/)?(?P<id>[a-z0-9_]+)'
_VALID_URL = r'''(?x)https?://
(?:
(?:
player\.
(?:
olympusattelecom|
vibebyvista
)|
(?:api|player)\.multicastmedia|
(?:api-ovp|player)\.piksel
)\.com|
(?:
mz-edge\.stream\.co|
movie-s\.nhk\.or
)\.jp|
vidego\.baltimorecity\.gov
)/v/(?:refid/(?P<refid>[^/]+)/prefid/)?(?P<id>[\w-]+)'''
_TESTS = [
{
'url': 'http://player.piksel.com/v/ums2867l',
@@ -56,46 +73,41 @@ class PikselIE(InfoExtractor):
if mobj:
return mobj.group('url')

def _call_api(self, app_token, resource, display_id, query, fatal=True):
response = (self._download_json(
'http://player.piksel.com/ws/ws_%s/api/%s/mode/json/apiv/5' % (resource, app_token),
display_id, query=query, fatal=fatal) or {}).get('response')
failure = try_get(response, lambda x: x['failure']['reason'])
if failure:
if fatal:
raise ExtractorError(failure, expected=True)
self.report_warning(failure)
return response

def _real_extract(self, url):
display_id = self._match_id(url)
ref_id, display_id = re.match(self._VALID_URL, url).groups()
webpage = self._download_webpage(url, display_id)
video_id = self._search_regex(
r'data-de-program-uuid=[\'"]([a-z0-9]+)',
webpage, 'program uuid', default=display_id)
app_token = self._search_regex([
r'clientAPI\s*:\s*"([^"]+)"',
r'data-de-api-key\s*=\s*"([^"]+)"'
], webpage, 'app token')
response = self._download_json(
'http://player.piksel.com/ws/ws_program/api/%s/mode/json/apiv/5' % app_token,
video_id, query={
'v': video_id
})['response']
failure = response.get('failure')
if failure:
raise ExtractorError(response['failure']['reason'], expected=True)
video_data = response['WsProgramResponse']['program']['asset']
query = {'refid': ref_id, 'prefid': display_id} if ref_id else {'v': display_id}
program = self._call_api(
app_token, 'program', display_id, query)['WsProgramResponse']['program']
video_id = program['uuid']
video_data = program['asset']
title = video_data['title']
asset_type = dict_get(video_data, ['assetType', 'asset_type'])

formats = []

m3u8_url = dict_get(video_data, [
'm3u8iPadURL',
'ipadM3u8Url',
'm3u8AndroidURL',
'm3u8iPhoneURL',
'iphoneM3u8Url'])
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))

asset_type = dict_get(video_data, ['assetType', 'asset_type'])
for asset_file in video_data.get('assetFiles', []):
def process_asset_file(asset_file):
if not asset_file:
return
# TODO: extract rtmp formats
http_url = asset_file.get('http_url')
if not http_url:
continue
return
tbr = None
vbr = int_or_none(asset_file.get('videoBitrate'), 1024)
abr = int_or_none(asset_file.get('audioBitrate'), 1024)
@@ -118,6 +130,43 @@ class PikselIE(InfoExtractor):
'filesize': int_or_none(asset_file.get('filesize')),
'tbr': tbr,
})

def process_asset_files(asset_files):
for asset_file in (asset_files or []):
process_asset_file(asset_file)

process_asset_files(video_data.get('assetFiles'))
process_asset_file(video_data.get('referenceFile'))
if not formats:
asset_id = video_data.get('assetid') or program.get('assetid')
if asset_id:
process_asset_files(try_get(self._call_api(
app_token, 'asset_file', display_id, {
'assetid': asset_id,
}, False), lambda x: x['WsAssetFileResponse']['AssetFiles']))

m3u8_url = dict_get(video_data, [
'm3u8iPadURL',
'ipadM3u8Url',
'm3u8AndroidURL',
'm3u8iPhoneURL',
'iphoneM3u8Url'])
if m3u8_url:
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))

smil_url = dict_get(video_data, ['httpSmil', 'hdSmil', 'rtmpSmil'])
if smil_url:
transform_source = None
if ref_id == 'nhkworld':
# TODO: figure out if this is something to be fixed in urljoin,
# _parse_smil_formats or keep it here
transform_source = lambda x: x.replace('src="/', 'src="').replace('/media"', '/media/"')
formats.extend(self._extract_smil_formats(
re.sub(r'/od/[^/]+/', '/od/http/', smil_url), video_id,
transform_source=transform_source, fatal=False))

self._sort_formats(formats)

subtitles = {}
@@ -8,6 +8,7 @@ from ..utils import (
int_or_none,
float_or_none,
try_get,
unescapeHTML,
url_or_none,
)

@@ -56,7 +57,8 @@ class RedditRIE(InfoExtractor):
'id': 'zv89llsvexdz',
'ext': 'mp4',
'title': 'That small heart attack.',
'thumbnail': r're:^https?://.*\.jpg$',
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
'thumbnails': 'count:4',
'timestamp': 1501941939,
'upload_date': '20170805',
'uploader': 'Antw87',
@@ -118,11 +120,34 @@ class RedditRIE(InfoExtractor):
else:
age_limit = None

thumbnails = []

def add_thumbnail(src):
if not isinstance(src, dict):
return
thumbnail_url = url_or_none(src.get('url'))
if not thumbnail_url:
return
thumbnails.append({
'url': unescapeHTML(thumbnail_url),
'width': int_or_none(src.get('width')),
'height': int_or_none(src.get('height')),
})

for image in try_get(data, lambda x: x['preview']['images']) or []:
if not isinstance(image, dict):
continue
add_thumbnail(image.get('source'))
resolutions = image.get('resolutions')
if isinstance(resolutions, list):
for resolution in resolutions:
add_thumbnail(resolution)

return {
'_type': 'url_transparent',
'url': video_url,
'title': data.get('title'),
'thumbnail': url_or_none(data.get('thumbnail')),
'thumbnails': thumbnails,
'timestamp': float_or_none(data.get('created_utc')),
'uploader': data.get('author'),
'duration': int_or_none(try_get(
@@ -4,8 +4,12 @@ from __future__ import unicode_literals
import re

from .brightcove import BrightcoveNewIE
from ..compat import compat_str
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
try_get,
update_url_query,
)
@@ -41,16 +45,22 @@ class SevenPlusIE(BrightcoveNewIE):
def _real_extract(self, url):
path, episode_id = re.match(self._VALID_URL, url).groups()

media = self._download_json(
'https://videoservice.swm.digital/playback', episode_id, query={
'appId': '7plus',
'deviceType': 'web',
'platformType': 'web',
'accountId': 5303576322001,
'referenceId': 'ref:' + episode_id,
'deliveryId': 'csai',
'videoType': 'vod',
})['media']
try:
media = self._download_json(
'https://videoservice.swm.digital/playback', episode_id, query={
'appId': '7plus',
'deviceType': 'web',
'platformType': 'web',
'accountId': 5303576322001,
'referenceId': 'ref:' + episode_id,
'deliveryId': 'csai',
'videoType': 'vod',
})['media']
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
raise ExtractorError(self._parse_json(
e.cause.read().decode(), episode_id)[0]['error_code'], expected=True)
raise

for source in media.get('sources', {}):
src = source.get('src')
@@ -1,6 +1,8 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
extract_attributes,
@@ -11,36 +13,59 @@ from ..utils import (


class SkyBaseIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_data = extract_attributes(self._search_regex(
r'(<div.+?class="[^"]*sdc-article-video__media-ooyala[^"]*"[^>]+>)',
webpage, 'video data'))
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'
_SDC_EL_REGEX = r'(?s)(<div[^>]+data-(?:component-name|fn)="sdc-(?:articl|sit)e-video"[^>]*>)'

video_url = 'ooyala:%s' % video_data['data-video-id']
if video_data.get('data-token-required') == 'true':
token_fetch_options = self._parse_json(video_data.get(
'data-token-fetch-options', '{}'), video_id, fatal=False) or {}
token_fetch_url = token_fetch_options.get('url')
if token_fetch_url:
embed_token = self._download_webpage(urljoin(
url, token_fetch_url), video_id, fatal=False)
if embed_token:
video_url = smuggle_url(
video_url, {'embed_token': embed_token.strip('"')})
def _process_ooyala_element(self, webpage, sdc_el, url):
sdc = extract_attributes(sdc_el)
provider = sdc.get('data-provider')
if provider == 'ooyala':
video_id = sdc['data-sdc-video-id']
video_url = 'ooyala:%s' % video_id
ie_key = 'Ooyala'
ooyala_el = self._search_regex(
r'(<div[^>]+class="[^"]*\bsdc-article-video__media-ooyala\b[^"]*"[^>]+data-video-id="%s"[^>]*>)' % video_id,
webpage, 'video data', fatal=False)
if ooyala_el:
ooyala_attrs = extract_attributes(ooyala_el) or {}
if ooyala_attrs.get('data-token-required') == 'true':
token_fetch_url = (self._parse_json(ooyala_attrs.get(
'data-token-fetch-options', '{}'),
video_id, fatal=False) or {}).get('url')
if token_fetch_url:
embed_token = self._download_json(urljoin(
url, token_fetch_url), video_id, fatal=False)
if embed_token:
video_url = smuggle_url(
video_url, {'embed_token': embed_token})
elif provider == 'brightcove':
video_id = sdc['data-video-id']
account_id = sdc.get('data-account-id') or '6058004172001'
player_id = sdc.get('data-player-id') or 'RC9PQUaJ6'
video_url = self.BRIGHTCOVE_URL_TEMPLATE % (account_id, player_id, video_id)
ie_key = 'BrightcoveNew'

return {
'_type': 'url_transparent',
'id': video_id,
'url': video_url,
'ie_key': ie_key,
}

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
info = self._process_ooyala_element(webpage, self._search_regex(
self._SDC_EL_REGEX, webpage, 'sdc element'), url)
info.update({
'title': self._og_search_title(webpage),
'description': strip_or_none(self._og_search_description(webpage)),
'ie_key': 'Ooyala',
}
})
return info


class SkySportsIE(SkyBaseIE):
IE_NAME = 'sky:sports'
_VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/([^/]+/)*(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine',
@@ -62,15 +87,45 @@ class SkySportsIE(SkyBaseIE):


class SkyNewsIE(SkyBaseIE):
IE_NAME = 'sky:news'
_VALID_URL = r'https?://news\.sky\.com/video/[0-9a-z-]+-(?P<id>[0-9]+)'
_TEST = {
'url': 'https://news.sky.com/video/russian-plane-inspected-after-deadly-fire-11712962',
'md5': 'd6327e581473cea9976a3236ded370cd',
'md5': '411e8893fd216c75eaf7e4c65d364115',
'info_dict': {
'id': '1ua21xaDE6lCtZDmbYfl8kwsKLooJbNM',
'id': 'ref:1ua21xaDE6lCtZDmbYfl8kwsKLooJbNM',
'ext': 'mp4',
'title': 'Russian plane inspected after deadly fire',
'description': 'The Russian Investigative Committee has released video of the wreckage of a passenger plane which caught fire near Moscow.',
'uploader_id': '6058004172001',
'timestamp': 1567112345,
'upload_date': '20190829',
},
'add_ie': ['Ooyala'],
'add_ie': ['BrightcoveNew'],
}


class SkySportsNewsIE(SkyBaseIE):
IE_NAME = 'sky:sports:news'
_VALID_URL = r'https?://(?:www\.)?skysports\.com/([^/]+/)*news/\d+/(?P<id>\d+)'
_TEST = {
'url': 'http://www.skysports.com/golf/news/12176/10871916/dustin-johnson-ready-to-conquer-players-championship-at-tpc-sawgrass',
'info_dict': {
'id': '10871916',
'title': 'Dustin Johnson ready to conquer Players Championship at TPC Sawgrass',
'description': 'Dustin Johnson is confident he can continue his dominant form in 2017 by adding the Players Championship to his list of victories.',
},
'playlist_count': 2,
}

def _real_extract(self, url):
article_id = self._match_id(url)
webpage = self._download_webpage(url, article_id)

entries = []
for sdc_el in re.findall(self._SDC_EL_REGEX, webpage):
entries.append(self._process_ooyala_element(webpage, sdc_el, url))

return self.playlist_result(
entries, article_id, self._og_search_title(webpage),
self._html_search_meta(['og:description', 'description'], webpage))
@@ -4,25 +4,28 @@ import re

from .common import InfoExtractor
from ..utils import (
determine_ext,
clean_html,
ExtractorError,
int_or_none,
js_to_json,
unescapeHTML,
str_or_none,
try_get,
)


class StitcherIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
_VALID_URL = r'https?://(?:www\.)?stitcher\.com/(?:podcast|show)/(?:[^/]+/)+e(?:pisode)?/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
_TESTS = [{
'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
'md5': 'e9635098e0da10b21a0e2b85585530f6',
'info_dict': {
'id': '40789481',
'ext': 'mp3',
'title': 'Machine Learning Mastery and Cancer Clusters',
'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
'description': 'md5:547adb4081864be114ae3831b4c2b42f',
'duration': 1604,
'thumbnail': r're:^https?://.*\.jpg',
'upload_date': '20180126',
'timestamp': 1516989316,
},
}, {
'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
@@ -38,6 +41,7 @@ class StitcherIE(InfoExtractor):
'params': {
'skip_download': True,
},
'skip': 'Page Not Found',
}, {
# escaped title
'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
@@ -45,37 +49,39 @@ class StitcherIE(InfoExtractor):
}, {
'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
'only_matching': True,
}, {
'url': 'https://www.stitcher.com/show/threedom/episode/circles-on-a-stick-200212584',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
audio_id = mobj.group('id')
display_id = mobj.group('display_id') or audio_id
display_id, audio_id = re.match(self._VALID_URL, url).groups()

webpage = self._download_webpage(url, display_id)
resp = self._download_json(
'https://api.prod.stitcher.com/episode/' + audio_id,
display_id or audio_id)
episode = try_get(resp, lambda x: x['data']['episodes'][0], dict)
if not episode:
raise ExtractorError(resp['errors'][0]['message'], expected=True)

episode = self._parse_json(
js_to_json(self._search_regex(
r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage, 'episode config')),
display_id)['config']['episode']
title = episode['title'].strip()
audio_url = episode['audio_url']

title = unescapeHTML(episode['title'])
formats = [{
'url': episode[episode_key],
'ext': determine_ext(episode[episode_key]) or 'mp3',
'vcodec': 'none',
} for episode_key in ('episodeURL',) if episode.get(episode_key)]
description = self._search_regex(
r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
duration = int_or_none(episode.get('duration'))
thumbnail = episode.get('episodeImage')
thumbnail = None
show_id = episode.get('show_id')
if show_id and episode.get('classic_id') != -1:
thumbnail = 'https://stitcher-classic.imgix.net/feedimages/%s.jpg' % show_id

return {
'id': audio_id,
'display_id': display_id,
'title': title,
'description': description,
'duration': duration,
'description': clean_html(episode.get('html_description') or episode.get('description')),
'duration': int_or_none(episode.get('duration')),
'thumbnail': thumbnail,
'formats': formats,
'url': audio_url,
'vcodec': 'none',
'timestamp': int_or_none(episode.get('date_created')),
'season_number': int_or_none(episode.get('season')),
'season_id': str_or_none(episode.get('season_id')),
}
@@ -140,7 +140,7 @@ class TeachableIE(TeachableBaseIE):
@staticmethod
def _is_teachable(webpage):
return 'teachableTracker.linker:autoLink' in webpage and re.search(
r'<link[^>]+href=["\']https?://process\.fs\.teachablecdn\.com',
r'<link[^>]+href=["\']https?://(?:process\.fs|assets)\.teachablecdn\.com',
webpage)

@staticmethod
@@ -5,14 +5,11 @@ import json
import re

from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import (
clean_html,
determine_ext,
int_or_none,
str_or_none,
try_get,
urljoin,
)


@@ -28,7 +25,7 @@ class TelecincoIE(InfoExtractor):
'description': 'md5:716caf5601e25c3c5ab6605b1ae71529',
},
'playlist': [{
'md5': 'adb28c37238b675dad0f042292f209a7',
'md5': '7ee56d665cfd241c0e6d80fd175068b0',
'info_dict': {
'id': 'JEA5ijCnF6p5W08A1rNKn7',
'ext': 'mp4',
@@ -38,7 +35,7 @@ class TelecincoIE(InfoExtractor):
}]
}, {
'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
'md5': '9468140ebc300fbb8b9d65dc6e5c4b43',
'md5': 'c86fe0d99e3bdb46b7950d38bf6ef12a',
'info_dict': {
'id': 'jn24Od1zGLG4XUZcnUnZB6',
'ext': 'mp4',
@@ -48,7 +45,7 @@ class TelecincoIE(InfoExtractor):
},
}, {
'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
'md5': 'ae2dc6b7b50b2392076a51c0f70e01f6',
'md5': 'eddb50291df704ce23c74821b995bcac',
'info_dict': {
'id': 'aywerkD2Sv1vGNqq9b85Q2',
'ext': 'mp4',
@@ -90,58 +87,24 @@ class TelecincoIE(InfoExtractor):

def _parse_content(self, content, url):
video_id = content['dataMediaId']
if content.get('dataCmsId') == 'ooyala':
return self.url_result(
'ooyala:%s' % video_id, OoyalaIE.ie_key(), video_id)
config_url = urljoin(url, content['dataConfig'])
config = self._download_json(
config_url, video_id, 'Downloading config JSON')
content['dataConfig'], video_id, 'Downloading config JSON')
title = config['info']['title']

def mmc_url(mmc_type):
return re.sub(
r'/(?:flash|html5)\.json', '/%s.json' % mmc_type,
config['services']['mmc'])

duration = None
formats = []
for mmc_type in ('flash', 'html5'):
mmc = self._download_json(
mmc_url(mmc_type), video_id,
'Downloading %s mmc JSON' % mmc_type, fatal=False)
if not mmc:
continue
if not duration:
duration = int_or_none(mmc.get('duration'))
for location in mmc['locations']:
gat = self._proto_relative_url(location.get('gat'), 'http:')
gcp = location.get('gcp')
ogn = location.get('ogn')
if None in (gat, gcp, ogn):
continue
token_data = {
'gcp': gcp,
'ogn': ogn,
'sta': 0,
}
media = self._download_json(
gat, video_id, data=json.dumps(token_data).encode('utf-8'),
headers={
'Content-Type': 'application/json;charset=utf-8',
'Referer': url,
}, fatal=False) or {}
stream = media.get('stream') or media.get('file')
if not stream:
continue
ext = determine_ext(stream)
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
stream + '&hdcore=3.2.0&plugin=aasp-3.2.0.77.18',
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
services = config['services']
caronte = self._download_json(services['caronte'], video_id)
stream = caronte['dls'][0]['stream']
headers = self.geo_verification_headers()
headers.update({
'Content-Type': 'application/json;charset=UTF-8',
'Origin': re.match(r'https?://[^/]+', url).group(0),
})
cdn = self._download_json(
caronte['cerbero'], video_id, data=json.dumps({
'bbx': caronte['bbx'],
'gbx': self._download_json(services['gbx'], video_id)['gbx'],
}).encode(), headers=headers)['tokens']['1']['cdn']
formats = self._extract_m3u8_formats(
stream + '?' + cdn, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
self._sort_formats(formats)

return {
@@ -149,7 +112,7 @@ class TelecincoIE(InfoExtractor):
'title': title,
'formats': formats,
'thumbnail': content.get('dataPoster') or config.get('poster', {}).get('imageUrl'),
'duration': duration,
'duration': int_or_none(content.get('dataDuration')),
}

def _real_extract(self, url):
@@ -3,9 +3,10 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
HEADRequest,
parse_age_limit,
parse_iso8601,
smuggle_url,
# smuggle_url,
)


@@ -24,14 +25,16 @@ class TenPlayIE(InfoExtractor):
'uploader_id': '2199827728001',
},
'params': {
'format': 'bestvideo',
# 'format': 'bestvideo',
'skip_download': True,
}
}, {
'url': 'https://10play.com.au/how-to-stay-married/web-extras/season-1/terrys-talks-ep-1-embracing-change/tpv190915ylupc',
'only_matching': True,
}]
BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s'
# BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/2199827728001/cN6vRtRQt_default/index.html?videoId=%s'
_GEO_BYPASS = False
_FASTLY_URL_TEMPL = 'https://10-selector.global.ssl.fastly.net/s/kYEXFC/media/%s?mbr=true&manifest=m3u&format=redirect'

def _real_extract(self, url):
content_id = self._match_id(url)
@@ -40,19 +43,28 @@ class TenPlayIE(InfoExtractor):
video = data.get('video') or {}
metadata = data.get('metaData') or {}
brightcove_id = video.get('videoId') or metadata['showContentVideoId']
brightcove_url = smuggle_url(
self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
{'geo_countries': ['AU']})
# brightcove_url = smuggle_url(
# self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
# {'geo_countries': ['AU']})
m3u8_url = self._request_webpage(HEADRequest(
self._FASTLY_URL_TEMPL % brightcove_id), brightcove_id).geturl()
if '10play-not-in-oz' in m3u8_url:
self.raise_geo_restricted(countries=['AU'])
formats = self._extract_m3u8_formats(m3u8_url, brightcove_id, 'mp4')
self._sort_formats(formats)

return {
'_type': 'url_transparent',
'url': brightcove_url,
'id': content_id,
'title': video.get('title') or metadata.get('pageContentName') or metadata.get('showContentName'),
# '_type': 'url_transparent',
# 'url': brightcove_url,
'formats': formats,
'id': brightcove_id,
'title': video.get('title') or metadata.get('pageContentName') or metadata['showContentName'],
'description': video.get('description'),
'age_limit': parse_age_limit(video.get('showRatingClassification') or metadata.get('showProgramClassification')),
'series': metadata.get('showName'),
'season': metadata.get('showContentSeason'),
'timestamp': parse_iso8601(metadata.get('contentPublishDate') or metadata.get('pageContentPublishDate')),
'ie_key': 'BrightcoveNew',
'thumbnail': video.get('poster'),
'uploader_id': '2199827728001',
# 'ie_key': 'BrightcoveNew',
}
@@ -200,7 +200,7 @@ class ToggleIE(InfoExtractor):

class MeWatchIE(InfoExtractor):
IE_NAME = 'mewatch'
_VALID_URL = r'https?://(?:www\.)?mewatch\.sg/watch/[^/?#&]+-(?P<id>[0-9]+)'
_VALID_URL = r'https?://(?:(?:www|live)\.)?mewatch\.sg/watch/[^/?#&]+-(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://www.mewatch.sg/watch/Recipe-Of-Life-E1-179371',
'info_dict': {
@@ -220,6 +220,9 @@ class MeWatchIE(InfoExtractor):
}, {
'url': 'https://www.mewatch.sg/watch/Little-Red-Dot-Detectives-S2-%E6%90%9C%E5%AF%86%E3%80%82%E6%89%93%E5%8D%A1%E3%80%82%E5%B0%8F%E7%BA%A2%E7%82%B9-S2-E1-176232',
'only_matching': True,
}, {
'url': 'https://live.mewatch.sg/watch/Recipe-Of-Life-E41-189759',
'only_matching': True,
}]

def _real_extract(self, url):
@@ -5,10 +5,9 @@ from .common import InfoExtractor


class UKTVPlayIE(InfoExtractor):
_VALID_URL = r'https?://uktvplay\.uktv\.co\.uk/.+?\?.*?\bvideo=(?P<id>\d+)'
_TEST = {
_VALID_URL = r'https?://uktvplay\.uktv\.co\.uk/(?:.+?\?.*?\bvideo=|([^/]+/)*watch-online/)(?P<id>\d+)'
_TESTS = [{
'url': 'https://uktvplay.uktv.co.uk/shows/world-at-war/c/200/watch-online/?video=2117008346001',
'md5': '',
'info_dict': {
'id': '2117008346001',
'ext': 'mp4',
@@ -23,7 +22,11 @@ class UKTVPlayIE(InfoExtractor):
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest']
}
}, {
'url': 'https://uktvplay.uktv.co.uk/shows/africa/watch-online/5983349675001',
'only_matching': True,
}]
# BRIGHTCOVE_URL_TEMPLATE = 'https://players.brightcove.net/1242911124001/OrCyvJ2gyL_default/index.html?videoId=%s'
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911124001/H1xnMOqP_default/index.html?videoId=%s'

def _real_extract(self, url):
@@ -12,7 +12,8 @@ from ..utils import (
|
||||
|
||||
|
||||
class VVVVIDIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?vvvvid\.it/(?:#!)?(?:show|anime|film|series)/(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)'
|
||||
_VALID_URL_BASE = r'https?://(?:www\.)?vvvvid\.it/(?:#!)?(?:show|anime|film|series)/'
|
||||
_VALID_URL = r'%s(?P<show_id>\d+)/[^/]+/(?P<season_id>\d+)/(?P<id>[0-9]+)' % _VALID_URL_BASE
|
||||
_TESTS = [{
|
||||
# video_type == 'video/vvvvid'
|
||||
'url': 'https://www.vvvvid.it/#!show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048/ping-pong',
|
||||
@@ -21,6 +22,15 @@ class VVVVIDIE(InfoExtractor):
|
||||
'id': '489048',
|
||||
'ext': 'mp4',
|
||||
'title': 'Ping Pong',
|
||||
'duration': 239,
|
||||
'series': '"Perché dovrei guardarlo?" di Dario Moccia',
|
||||
'season_id': '437',
|
||||
'episode': 'Ping Pong',
|
||||
'episode_number': 1,
|
||||
'episode_id': '3334',
|
||||
'view_count': int,
|
||||
'like_count': int,
'repost_count': int,
},
'params': {
'skip_download': True,
@@ -37,6 +47,9 @@ class VVVVIDIE(InfoExtractor):
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.vvvvid.it/show/434/perche-dovrei-guardarlo-di-dario-moccia/437/489048',
'only_matching': True
}]
_conn_id = None

@@ -45,20 +58,35 @@ class VVVVIDIE(InfoExtractor):
'https://www.vvvvid.it/user/login',
None, headers=self.geo_verification_headers())['data']['conn_id']

def _real_extract(self, url):
show_id, season_id, video_id = re.match(self._VALID_URL, url).groups()
def _download_info(self, show_id, path, video_id, fatal=True):
response = self._download_json(
'https://www.vvvvid.it/vvvvid/ondemand/%s/season/%s' % (show_id, season_id),
'https://www.vvvvid.it/vvvvid/ondemand/%s/%s' % (show_id, path),
video_id, headers=self.geo_verification_headers(), query={
'conn_id': self._conn_id,
})
if response['result'] == 'error':
}, fatal=fatal)
if not (response or fatal):
return
if response.get('result') == 'error':
raise ExtractorError('%s said: %s' % (
self.IE_NAME, response['message']), expected=True)
return response['data']

def _extract_common_video_info(self, video_data):
return {
'thumbnail': video_data.get('thumbnail'),
'episode_id': str_or_none(video_data.get('id')),
}

def _real_extract(self, url):
show_id, season_id, video_id = re.match(self._VALID_URL, url).groups()

response = self._download_info(
show_id, 'season/%s' % season_id, video_id)

vid = int(video_id)
video_data = list(filter(
lambda episode: episode.get('video_id') == vid, response['data']))[0]
lambda episode: episode.get('video_id') == vid, response))[0]
title = video_data['title']
formats = []

# vvvvid embed_info decryption algorithm is reverse engineered from function $ds(h) at vvvvid.js
@@ -115,6 +143,17 @@ class VVVVIDIE(InfoExtractor):

return d

info = {}

def metadata_from_url(r_url):
if not info and r_url:
mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url)
if mobj:
info['episode_number'] = int(mobj.group(2))
season_number = mobj.group(1)
if season_number:
info['season_number'] = int(season_number)

for quality in ('_sd', ''):
embed_code = video_data.get('embed_info' + quality)
if not embed_code:
@@ -122,7 +161,6 @@ class VVVVIDIE(InfoExtractor):
embed_code = ds(embed_code)
video_type = video_data.get('video_type')
if video_type in ('video/rcs', 'video/kenc'):
embed_code = re.sub(r'https?://([^/]+)/z/', r'https://\1/i/', embed_code).replace('/manifest.f4m', '/master.m3u8')
if video_type == 'video/kenc':
kenc = self._download_json(
'https://www.vvvvid.it/kenc', video_id, query={
@@ -133,26 +171,75 @@ class VVVVIDIE(InfoExtractor):
kenc_message = kenc.get('message')
if kenc_message:
embed_code += '?' + ds(kenc_message)
formats.extend(self._extract_m3u8_formats(
embed_code, video_id, 'mp4',
m3u8_id='hls', fatal=False))
formats.extend(self._extract_akamai_formats(embed_code, video_id))
else:
formats.extend(self._extract_wowza_formats(
'http://sb.top-ix.org/videomg/_definst_/mp4:%s/playlist.m3u8' % embed_code, video_id))
metadata_from_url(embed_code)

self._sort_formats(formats)

return {
metadata_from_url(video_data.get('thumbnail'))
info.update(self._extract_common_video_info(video_data))
info.update({
'id': video_id,
'title': video_data['title'],
'title': title,
'formats': formats,
'thumbnail': video_data.get('thumbnail'),
'duration': int_or_none(video_data.get('length')),
'series': video_data.get('show_title'),
'season_id': season_id,
'season_number': video_data.get('season_number'),
'episode_id': str_or_none(video_data.get('id')),
'episode_number': int_or_none(video_data.get('number')),
'episode_title': video_data['title'],
'episode': title,
'view_count': int_or_none(video_data.get('views')),
'like_count': int_or_none(video_data.get('video_likes')),
}
'repost_count': int_or_none(video_data.get('video_shares')),
})
return info


class VVVVIDShowIE(VVVVIDIE):
_VALID_URL = r'(?P<base_url>%s(?P<id>\d+)(?:/(?P<show_title>[^/?&#]+))?)/?(?:[?#&]|$)' % VVVVIDIE._VALID_URL_BASE
_TESTS = [{
'url': 'https://www.vvvvid.it/show/156/psyco-pass',
'info_dict': {
'id': '156',
'title': 'Psycho-Pass',
'description': 'md5:94d572c0bd85894b193b8aebc9a3a806',
},
'playlist_count': 46,
}, {
'url': 'https://www.vvvvid.it/show/156',
'only_matching': True,
}]

def _real_extract(self, url):
base_url, show_id, show_title = re.match(self._VALID_URL, url).groups()

seasons = self._download_info(
show_id, 'seasons/', show_title)

show_info = self._download_info(
show_id, 'info/', show_title, fatal=False)

entries = []
for season in (seasons or []):
episodes = season.get('episodes') or []
for episode in episodes:
if episode.get('playable') is False:
continue
season_id = str_or_none(episode.get('season_id'))
video_id = str_or_none(episode.get('video_id'))
if not (season_id and video_id):
continue
info = self._extract_common_video_info(episode)
info.update({
'_type': 'url',
'ie_key': VVVVIDIE.ie_key(),
'url': '/'.join([base_url, season_id, video_id]),
'title': episode.get('title'),
'description': episode.get('description'),
'season_id': season_id,
})
entries.append(info)

return self.playlist_result(
entries, show_id, show_info.get('title'), show_info.get('description'))
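# Illustrative sketch (not part of the patch) of what the metadata_from_url()
# helper added above does: it pulls season/episode numbers out of URLs that
# embed them as "_S<season>Ep<episode>". The function name and sample URL
# below are made up for the example.
import re

def episode_info_from_url(r_url):
    info = {}
    mobj = re.search(r'_(?:S(\d+))?Ep(\d+)', r_url)
    if mobj:
        info['episode_number'] = int(mobj.group(2))
        if mobj.group(1):
            info['season_number'] = int(mobj.group(1))
    return info

print(episode_info_from_url('https://example.invalid/anime/ShowName_S2Ep05_sd.m3u8'))
# -> {'episode_number': 5, 'season_number': 2}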
@@ -4,17 +4,13 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import (
int_or_none,
strip_jsonp,
)


class WashingtonPostIE(InfoExtractor):
IE_NAME = 'washingtonpost'
_VALID_URL = r'(?:washingtonpost:|https?://(?:www\.)?washingtonpost\.com/video/(?:[^/]+/)*)(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_VALID_URL = r'(?:washingtonpost:|https?://(?:www\.)?washingtonpost\.com/(?:video|posttv)/(?:[^/]+/)*)(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'
_EMBED_URL = r'https?://(?:www\.)?washingtonpost\.com/video/c/embed/[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12}'
_TEST = {
_TESTS = [{
'url': 'https://www.washingtonpost.com/video/c/video/480ba4ee-1ec7-11e6-82c2-a7dcb313287d',
'md5': '6f537e1334b714eb15f9563bd4b9cdfa',
'info_dict': {
@@ -23,10 +19,15 @@ class WashingtonPostIE(InfoExtractor):
'title': 'Egypt finds belongings, debris from plane crash',
'description': 'md5:a17ceee432f215a5371388c1f680bd86',
'upload_date': '20160520',
'uploader': 'Reuters',
'timestamp': 1463778452,
'timestamp': 1463775187,
},
}
}, {
'url': 'https://www.washingtonpost.com/video/world/egypt-finds-belongings-debris-from-plane-crash/2016/05/20/480ba4ee-1ec7-11e6-82c2-a7dcb313287d_video.html',
'only_matching': True,
}, {
'url': 'https://www.washingtonpost.com/posttv/world/iraq-to-track-down-antiquities-after-islamic-state-museum-rampage/2015/02/28/7c57e916-bf86-11e4-9dfb-03366e719af8_video.html',
'only_matching': True,
}]

@classmethod
def _extract_urls(cls, webpage):
@@ -35,73 +36,8 @@ class WashingtonPostIE(InfoExtractor):

def _real_extract(self, url):
video_id = self._match_id(url)
video_data = self._download_json(
'http://www.washingtonpost.com/posttv/c/videojson/%s?resType=jsonp' % video_id,
video_id, transform_source=strip_jsonp)[0]['contentConfig']
title = video_data['title']

urls = []
formats = []
for s in video_data.get('streams', []):
s_url = s.get('url')
if not s_url or s_url in urls:
continue
urls.append(s_url)
video_type = s.get('type')
if video_type == 'smil':
continue
elif video_type in ('ts', 'hls') and ('_master.m3u8' in s_url or '_mobile.m3u8' in s_url):
m3u8_formats = self._extract_m3u8_formats(
s_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False)
for m3u8_format in m3u8_formats:
width = m3u8_format.get('width')
if not width:
continue
vbr = self._search_regex(
r'%d_%d_(\d+)' % (width, m3u8_format['height']), m3u8_format['url'], 'vbr', default=None)
if vbr:
m3u8_format.update({
'vbr': int_or_none(vbr),
})
formats.extend(m3u8_formats)
else:
width = int_or_none(s.get('width'))
vbr = int_or_none(s.get('bitrate'))
has_width = width != 0
formats.append({
'format_id': (
'%s-%d-%d' % (video_type, width, vbr)
if width
else video_type),
'vbr': vbr if has_width else None,
'width': width,
'height': int_or_none(s.get('height')),
'acodec': s.get('audioCodec'),
'vcodec': s.get('videoCodec') if has_width else 'none',
'filesize': int_or_none(s.get('fileSize')),
'url': s_url,
'ext': 'mp4',
'protocol': 'm3u8_native' if video_type in ('ts', 'hls') else None,
})
source_media_url = video_data.get('sourceMediaURL')
if source_media_url:
formats.append({
'format_id': 'source_media',
'url': source_media_url,
})
self._sort_formats(
formats, ('width', 'height', 'vbr', 'filesize', 'tbr', 'format_id'))

return {
'id': video_id,
'title': title,
'description': video_data.get('blurb'),
'uploader': video_data.get('credits', {}).get('source'),
'formats': formats,
'duration': int_or_none(video_data.get('videoDuration'), 100),
'timestamp': int_or_none(
video_data.get('dateConfig', {}).get('dateFirstPublished'), 1000),
}
return self.url_result(
'arcpublishing:wapo:' + video_id, 'ArcPublishing', video_id)


class WashingtonPostArticleIE(InfoExtractor):
@@ -121,9 +57,8 @@ class WashingtonPostArticleIE(InfoExtractor):
'title': 'Breaking Points: The Paper Mine',
'duration': 1290,
'description': 'Overly complicated paper pushing is nothing new to government bureaucracy. But the way federal retirement applications are filed may be the most outdated. David Fahrenthold explains.',
'uploader': 'The Washington Post',
'timestamp': 1395527908,
'upload_date': '20140322',
'timestamp': 1395440416,
'upload_date': '20140321',
},
}, {
'md5': '1fff6a689d8770966df78c8cb6c8c17c',
@@ -133,9 +68,8 @@ class WashingtonPostArticleIE(InfoExtractor):
'title': 'The town bureaucracy sustains',
'description': 'Underneath the friendly town of Boyers is a sea of government paperwork. In a disused limestone mine, hundreds of locals now track, file and process retirement applications for the federal government. We set out to find out what it\'s like to do paperwork 230 feet underground.',
'duration': 2220,
'timestamp': 1395528005,
'upload_date': '20140322',
'uploader': 'The Washington Post',
'timestamp': 1395441819,
'upload_date': '20140321',
},
}],
}, {
@@ -151,8 +85,7 @@ class WashingtonPostArticleIE(InfoExtractor):
'ext': 'mp4',
'description': 'Washington Post transportation reporter Ashley Halsey III explains why a plane\'s black box needs to be recovered from a crash site instead of having its information streamed in real time throughout the flight.',
'upload_date': '20141230',
'uploader': 'The Washington Post',
'timestamp': 1419974765,
'timestamp': 1419972442,
'title': 'Why black boxes don’t transmit data in real time',
}
}]
@@ -1,23 +1,43 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
determine_ext,
float_or_none,
int_or_none,
mimetype2ext,
try_get,
urlencode_postdata,
urljoin,
)


class YandexDiskIE(InfoExtractor):
_VALID_URL = r'https?://yadi\.sk/[di]/(?P<id>[^/?#&]+)'
_VALID_URL = r'''(?x)https?://
(?P<domain>
yadi\.sk|
disk\.yandex\.
(?:
az|
by|
co(?:m(?:\.(?:am|ge|tr))?|\.il)|
ee|
fr|
k[gz]|
l[tv]|
md|
t[jm]|
u[az]|
ru
)
)/(?:[di]/|public.*?\bhash=)(?P<id>[^/?#&]+)'''

_TESTS = [{
'url': 'https://yadi.sk/i/VdOeDou8eZs6Y',
'md5': '33955d7ae052f15853dc41f35f17581c',
'md5': 'a4a8d52958c8fddcf9845935070402ae',
'info_dict': {
'id': 'VdOeDou8eZs6Y',
'ext': 'mp4',
@@ -27,92 +47,101 @@ class YandexDiskIE(InfoExtractor):
'uploader_id': '300043621',
'view_count': int,
},
'expected_warnings': ['Unable to download JSON metadata'],
}, {
'url': 'https://yadi.sk/d/h3WAXvDS3Li3Ce',
'only_matching': True,
}, {
'url': 'https://yadi.sk/public?hash=5DZ296JK9GWCLp02f6jrObjnctjRxMs8L6%2B%2FuhNqk38%3D',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)

status = self._download_webpage(
'https://disk.yandex.com/auth/status', video_id, query={
'urlOrigin': url,
'source': 'public',
'md5': 'false',
})

sk = self._search_regex(
r'(["\'])sk(?:External)?\1\s*:\s*(["\'])(?P<value>(?:(?!\2).)+)\2',
status, 'sk', group='value')
domain, video_id = re.match(self._VALID_URL, url).groups()

webpage = self._download_webpage(url, video_id)
store = self._parse_json(self._search_regex(
r'<script[^>]+id="store-prefetch"[^>]*>\s*({.+?})\s*</script>',
webpage, 'store'), video_id)
resource = store['resources'][store['rootResourceId']]

models = self._parse_json(
self._search_regex(
r'<script[^>]+id=["\']models-client[^>]+>\s*(\[.+?\])\s*</script',
webpage, 'video JSON'),
video_id)
title = resource['name']
meta = resource.get('meta') or {}

data = next(
model['data'] for model in models
if model.get('model') == 'resource')
public_url = meta.get('short_url')
if public_url:
video_id = self._match_id(public_url)

video_hash = data['id']
title = data['name']
source_url = (self._download_json(
'https://cloud-api.yandex.net/v1/disk/public/resources/download',
video_id, query={'public_key': url}, fatal=False) or {}).get('href')
video_streams = resource.get('videoStreams') or {}
video_hash = resource.get('hash') or url
environment = store.get('environment') or {}
sk = environment.get('sk')
yandexuid = environment.get('yandexuid')
if sk and yandexuid and not (source_url and video_streams):
self._set_cookie(domain, 'yandexuid', yandexuid)

models = self._download_json(
'https://disk.yandex.com/models/', video_id,
data=urlencode_postdata({
'_model.0': 'videoInfo',
'id.0': video_hash,
'_model.1': 'do-get-resource-url',
'id.1': video_hash,
'version': '13.6',
'sk': sk,
}), query={'_m': 'videoInfo'})['models']

videos = try_get(models, lambda x: x[0]['data']['videos'], list) or []
source_url = try_get(
models, lambda x: x[1]['data']['file'], compat_str)
def call_api(action):
return (self._download_json(
urljoin(url, '/public/api/') + action, video_id, data=json.dumps({
'hash': video_hash,
'sk': sk,
}).encode(), headers={
'Content-Type': 'text/plain',
}, fatal=False) or {}).get('data') or {}
if not source_url:
# TODO: figure out how to detect if download limit has
# been reached and then avoid unnecessary source format
# extraction requests
source_url = call_api('download-url').get('url')
if not video_streams:
video_streams = call_api('get-video-streams')

formats = []
if source_url:
formats.append({
'url': source_url,
'format_id': 'source',
'ext': determine_ext(title, 'mp4'),
'ext': determine_ext(title, meta.get('ext') or mimetype2ext(meta.get('mime_type')) or 'mp4'),
'quality': 1,
'filesize': int_or_none(meta.get('size'))
})
for video in videos:

for video in (video_streams.get('videos') or []):
format_url = video.get('url')
if not format_url:
continue
if determine_ext(format_url) == 'm3u8':
if video.get('dimension') == 'adaptive':
formats.extend(self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
format_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
size = video.get('size') or {}
height = int_or_none(size.get('height'))
format_id = 'hls'
if height:
format_id += '-%dp' % height
formats.append({
'ext': 'mp4',
'format_id': format_id,
'height': height,
'protocol': 'm3u8_native',
'url': format_url,
'width': int_or_none(size.get('width')),
})
self._sort_formats(formats)

duration = float_or_none(try_get(
models, lambda x: x[0]['data']['duration']), 1000)
uploader = try_get(
data, lambda x: x['user']['display_name'], compat_str)
uploader_id = try_get(
data, lambda x: x['user']['uid'], compat_str)
view_count = int_or_none(try_get(
data, lambda x: x['meta']['views_counter']))
uid = resource.get('uid')
display_name = try_get(store, lambda x: x['users'][uid]['displayName'])

return {
'id': video_id,
'title': title,
'duration': duration,
'uploader': uploader,
'uploader_id': uploader_id,
'view_count': view_count,
'duration': float_or_none(video_streams.get('duration'), 1000),
'uploader': display_name,
'uploader_id': uid,
'view_count': int_or_none(meta.get('views_counter')),
'formats': formats,
}
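# Illustrative sketch (not from the patch) of the store-prefetch step the new
# YandexDiskIE code relies on: the public page embeds a JSON blob in
# <script id="store-prefetch">, and rootResourceId points at the entry that
# carries the name/meta/hash used above. The HTML snippet here is made up.
import json
import re

webpage = ('<script type="application/json" id="store-prefetch">'
           '{"rootResourceId": "r1", "resources": {"r1": {"name": "clip.mp4", '
           '"hash": "abc", "meta": {"size": 123}}}}</script>')
store = json.loads(re.search(
    r'<script[^>]+id="store-prefetch"[^>]*>\s*({.+?})\s*</script>',
    webpage).group(1))
resource = store['resources'][store['rootResourceId']]
print(resource['name'], resource['meta']['size'])  # clip.mp4 123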
@@ -5,6 +5,7 @@ from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
try_get,
url_or_none,
)

@@ -13,26 +14,30 @@ class YandexVideoIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
yandex\.ru(?:/portal/(?:video|efir))?/?\?.*?stream_id=|
yandex\.ru(?:/(?:portal/(?:video|efir)|efir))?/?\?.*?stream_id=|
frontend\.vh\.yandex\.ru/player/
)
(?P<id>[\da-f]+)
(?P<id>(?:[\da-f]{32}|[\w-]{12}))
'''
_TESTS = [{
'url': 'https://yandex.ru/portal/video?stream_id=4dbb262b4fe5cf15a215de4f34eee34d',
'md5': '33955d7ae052f15853dc41f35f17581c',
'url': 'https://yandex.ru/portal/video?stream_id=4dbb36ec4e0526d58f9f2dc8f0ecf374',
'md5': 'e02a05bfaf0d9615ef07ae3a10f4faf4',
'info_dict': {
'id': '4dbb262b4fe5cf15a215de4f34eee34d',
'id': '4dbb36ec4e0526d58f9f2dc8f0ecf374',
'ext': 'mp4',
'title': 'В Нью-Йорке баржи и теплоход оторвались от причала и расплылись по Гудзону',
'description': '',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': 0,
'duration': 30,
'title': 'Русский Вудсток - главный рок-фест в истории СССР / вДудь',
'description': 'md5:7d6b8d4bc4a3b9a56499916c1ea5b5fa',
'thumbnail': r're:^https?://',
'timestamp': 1549972939,
'duration': 5575,
'age_limit': 18,
'upload_date': '20190212',
'view_count': int,
'like_count': int,
'dislike_count': int,
},
}, {
'url': 'https://yandex.ru/portal/efir?stream_id=4dbb36ec4e0526d58f9f2dc8f0ecf374&from=morda',
'url': 'https://yandex.ru/portal/efir?stream_id=4dbb262b4fe5cf15a215de4f34eee34d&from=morda',
'only_matching': True,
}, {
'url': 'https://yandex.ru/?stream_id=4dbb262b4fe5cf15a215de4f34eee34d',
@@ -52,53 +57,88 @@ class YandexVideoIE(InfoExtractor):
# DASH with DRM
'url': 'https://yandex.ru/portal/video?from=morda&stream_id=485a92d94518d73a9d0ff778e13505f8',
'only_matching': True,
}, {
'url': 'https://yandex.ru/efir?stream_active=watching&stream_id=v7a2dZ-v5mSI&from_block=efir_newtab',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)

content = self._download_json(
'https://frontend.vh.yandex.ru/v22/player/%s.json' % video_id,
video_id, query={
'stream_options': 'hires',
'disable_trackings': 1,
})['content']
player = try_get((self._download_json(
'https://frontend.vh.yandex.ru/graphql', video_id, data=('''{
player(content_id: "%s") {
computed_title
content_url
description
dislikes
duration
likes
program_title
release_date
release_date_ut
release_year
restriction_age
season
start_time
streams
thumbnail
title
views_count
}
}''' % video_id).encode(), fatal=False)), lambda x: x['player']['content'])
if not player or player.get('error'):
player = self._download_json(
'https://frontend.vh.yandex.ru/v23/player/%s.json' % video_id,
video_id, query={
'stream_options': 'hires',
'disable_trackings': 1,
})
content = player['content']

content_url = url_or_none(content.get('content_url')) or url_or_none(
content['streams'][0]['url'])
title = content.get('title') or content.get('computed_title')
title = content.get('title') or content['computed_title']

ext = determine_ext(content_url)

if ext == 'm3u8':
formats = self._extract_m3u8_formats(
content_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls')
elif ext == 'mpd':
formats = self._extract_mpd_formats(
content_url, video_id, mpd_id='dash')
else:
formats = [{'url': content_url}]
formats = []
streams = content.get('streams') or []
streams.append({'url': content.get('content_url')})
for stream in streams:
content_url = url_or_none(stream.get('url'))
if not content_url:
continue
ext = determine_ext(content_url)
if ext == 'ismc':
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
content_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
elif ext == 'mpd':
formats.extend(self._extract_mpd_formats(
content_url, video_id, mpd_id='dash', fatal=False))
else:
formats.append({'url': content_url})

self._sort_formats(formats)

description = content.get('description')
thumbnail = content.get('thumbnail')
timestamp = (int_or_none(content.get('release_date'))
or int_or_none(content.get('release_date_ut'))
or int_or_none(content.get('start_time')))
duration = int_or_none(content.get('duration'))
series = content.get('program_title')
age_limit = int_or_none(content.get('restriction_age'))
season = content.get('season') or {}

return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'description': content.get('description'),
'thumbnail': content.get('thumbnail'),
'timestamp': timestamp,
'duration': duration,
'series': series,
'age_limit': age_limit,
'duration': int_or_none(content.get('duration')),
'series': content.get('program_title'),
'age_limit': int_or_none(content.get('restriction_age')),
'view_count': int_or_none(content.get('views_count')),
'like_count': int_or_none(content.get('likes')),
'dislike_count': int_or_none(content.get('dislikes')),
'season_number': int_or_none(season.get('season_number')),
'season_id': season.get('id'),
'release_year': int_or_none(content.get('release_year')),
'formats': formats,
}
@@ -16,6 +16,7 @@ from ..jsinterp import JSInterpreter
from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_HTTPError,
compat_parse_qs,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
@@ -279,6 +280,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):

_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_PLAYER_RESPONSE_RE = r'ytInitialPlayerResponse\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

def _call_api(self, ep, query, video_id):
data = self._DEFAULT_API_DATA.copy()
@@ -296,7 +298,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
def _extract_yt_initial_data(self, video_id, webpage):
return self._parse_json(
self._search_regex(
(r'%s\s*\n' % self._YT_INITIAL_DATA_RE,
(r'%s\s*%s' % (self._YT_INITIAL_DATA_RE, self._YT_INITIAL_BOUNDARY_RE),
self._YT_INITIAL_DATA_RE), webpage, 'yt initial data'),
video_id)

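# Illustrative sketch (not from the patch) of why the boundary regex matters:
# ytInitialData can itself contain "};", so the JSON is only accepted when it
# is followed by one of the boundary tokens (var meta / </script / newline).
# The toy page fragment below is made up; the two patterns are copied from above.
import json
import re

_YT_INITIAL_DATA_RE = r'(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;'
_YT_INITIAL_BOUNDARY_RE = r'(?:var\s+meta|</script|\n)'

webpage = 'ytInitialData = {"a": "};", "b": 1};</script>'
mobj = re.search(r'%s\s*%s' % (_YT_INITIAL_DATA_RE, _YT_INITIAL_BOUNDARY_RE), webpage)
print(json.loads(mobj.group(1))['b'])  # 1 (the whole object, not just up to the embedded "};")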
@@ -321,7 +323,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# Invidious instances taken from https://github.com/omarroth/invidious/wiki/Invidious-Instances
(?:(?:www|dev)\.)?invidio\.us/|
(?:(?:www|no)\.)?invidiou\.sh/|
(?:(?:www|fi|de)\.)?invidious\.snopyta\.org/|
(?:(?:www|fi)\.)?invidious\.snopyta\.org/|
(?:www\.)?invidious\.kabi\.tk/|
(?:www\.)?invidious\.13ad\.de/|
(?:www\.)?invidious\.mastodon\.host/|
@@ -1102,6 +1104,15 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'skip_download': True,
},
},
{
# another example of '};' in ytInitialData
'url': 'https://www.youtube.com/watch?v=gVfgbahppCY',
'only_matching': True,
},
{
'url': 'https://www.youtube.com/watch_popup?v=63RmMXCd_bQ',
'only_matching': True,
},
]

def __init__(self, *args, **kwargs):
@@ -1701,7 +1712,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if not video_info and not player_response:
player_response = extract_player_response(
self._search_regex(
(r'%s\s*(?:var\s+meta|</script|\n)' % self._YT_INITIAL_PLAYER_RESPONSE_RE,
(r'%s\s*%s' % (self._YT_INITIAL_PLAYER_RESPONSE_RE, self._YT_INITIAL_BOUNDARY_RE),
self._YT_INITIAL_PLAYER_RESPONSE_RE), video_webpage,
'initial player response', default='{}'),
video_id)
@@ -2729,6 +2740,11 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
'only_matching': True,
}]

@classmethod
def suitable(cls, url):
return False if YoutubeIE.suitable(url) else super(
YoutubeTabIE, cls).suitable(url)

def _extract_channel_id(self, webpage):
channel_id = self._html_search_meta(
'channelId', webpage, 'channel id', default=None)
@@ -3009,10 +3025,24 @@ class YoutubeTabIE(YoutubeBaseInfoExtractor):
for page_num in itertools.count(1):
if not continuation:
break
browse = self._download_json(
'https://www.youtube.com/browse_ajax', None,
'Downloading page %d' % page_num,
headers=headers, query=continuation, fatal=False)
count = 0
retries = 3
while count <= retries:
try:
# Downloading page may result in intermittent 5xx HTTP error
# that is usually worked around with a retry
browse = self._download_json(
'https://www.youtube.com/browse_ajax', None,
'Downloading page %d%s'
% (page_num, ' (retry #%d)' % count if count else ''),
headers=headers, query=continuation)
break
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code in (500, 503):
count += 1
if count <= retries:
continue
raise
if not browse:
break
response = try_get(browse, lambda x: x[1]['response'], dict)
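# Illustrative sketch (not from the patch) of the retry pattern added above for
# the browse_ajax pagination: retry a bounded number of times on intermittent
# HTTP 500/503 responses and re-raise anything else. Plain urllib stands in for
# youtube-dl's _download_json here; the URL is whatever page is being fetched.
from urllib.error import HTTPError
from urllib.request import urlopen

def fetch_with_retries(url, retries=3):
    count = 0
    while count <= retries:
        try:
            # intermittent 5xx errors are usually worked around with a retry
            return urlopen(url).read()
        except HTTPError as e:
            if e.code in (500, 503):
                count += 1
                if count <= retries:
                    continue
            raise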
@@ -85,7 +85,13 @@ class ZypeIE(InfoExtractor):
else:
m3u8_url = self._search_regex(
r'(["\'])(?P<url>(?:(?!\1).)+\.m3u8(?:(?!\1).)*)\1',
body, 'm3u8 url', group='url')
body, 'm3u8 url', group='url', default=None)
if not m3u8_url:
source = self._parse_json(self._search_regex(
r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', body,
'source'), video_id, js_to_json)
if source.get('integration') == 'verizon-media':
m3u8_url = 'https://content.uplynk.com/%s.m3u8' % source['id']
formats = self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native', m3u8_id='hls')
text_tracks = self._search_regex(
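# Illustrative sketch (not from the patch) of the new ZypeIE fallback above:
# when no .m3u8 URL is present in the page body, parse the first JS "sources"
# object with js_to_json and, for the verizon-media integration, derive the
# Uplynk master playlist URL. The body snippet is made up for the example.
import json
import re

from youtube_dl.utils import js_to_json

body = "sources : [ { integration: 'verizon-media', id: 'abc123' } ]"
source = json.loads(js_to_json(re.search(
    r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', body).group(1)))
if source.get('integration') == 'verizon-media':
    m3u8_url = 'https://content.uplynk.com/%s.m3u8' % source['id']
print(m3u8_url)  # https://content.uplynk.com/abc123.m3u8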
@@ -3640,7 +3640,7 @@ def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:[a-zA-Z][\da-zA-Z.+-]*:)?//', url) else None
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None


def parse_duration(s):
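# Illustrative sketch (not from the patch) of the effect of the tightened
# url_or_none() scheme check above: only http(s), rtmp*/rtsp*, mms and ftp(s)
# schemes (or scheme-relative URLs) are accepted, instead of any scheme at all.
import re

SCHEME_RE = r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//'

assert re.match(SCHEME_RE, 'https://example.com/video.mp4')
assert re.match(SCHEME_RE, '//cdn.example.com/master.m3u8')   # scheme-relative
assert re.match(SCHEME_RE, 'rtmp://media.example.com/live/stream')
assert re.match(SCHEME_RE, 'mms://media.example.com/stream')
# these matched the old [a-zA-Z][\da-zA-Z.+-]*: pattern but are now rejected:
assert not re.match(SCHEME_RE, 'javascript://example.com/%0aalert(1)')
assert not re.match(SCHEME_RE, 'file:///etc/passwd')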
@@ -1,3 +1,3 @@
from __future__ import unicode_literals

__version__ = '2020.12.26'
__version__ = '2021.01.03'