2014-01-03 21:24:29 +09:00
|
|
|
from __future__ import unicode_literals
|
|
|
|
|
2013-12-26 17:48:24 +09:00
|
|
|
import re
|
|
|
|
import json
|
|
|
|
|
|
|
|
from .common import InfoExtractor
|
2014-12-13 20:24:42 +09:00
|
|
|
from ..compat import (
|
|
|
|
compat_str,
|
2014-01-12 07:26:35 +09:00
|
|
|
compat_urllib_parse,
|
|
|
|
compat_urllib_request,
|
2014-12-13 20:24:42 +09:00
|
|
|
)
|
|
|
|
from ..utils import (
|
2014-03-07 08:56:48 +09:00
|
|
|
ExtractorError,
|
2015-08-15 01:07:02 +09:00
|
|
|
clean_html,
|
2014-03-07 08:56:48 +09:00
|
|
|
int_or_none,
|
2014-01-12 07:26:35 +09:00
|
|
|
)
|
2013-12-26 17:48:24 +09:00
|
|
|
|
|
|
|
|
2015-03-03 01:12:10 +09:00
|
|
|
class LyndaBaseIE(InfoExtractor):
    """Shared base for lynda.com extractors: handles account login/logout."""

    _LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
    _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _NETRC_MACHINE = 'lynda'

    def _real_initialize(self):
        # Log in eagerly so member-only videos are accessible during extraction.
        self._login()

    def _login(self):
        """Log in to lynda.com with configured credentials.

        No-op when no username is configured. Raises ExtractorError
        (expected=True) on bad credentials or a site-reported login error.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username.encode('utf-8'),
            'password': password.encode('utf-8'),
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        login_page = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # Not (yet) logged in
        m = re.search(r'loginResultJson\s*=\s*\'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
                raise ExtractorError(
                    'Unable to login, incorrect username and/or password',
                    expected=True)

            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(
                    self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
                login_page = self._download_webpage(
                    request, None,
                    'Confirming log in and log out from another device')

        # Fix: the first pattern was not a raw string ('isLoggedIn\s*:\s*true'),
        # making \s an invalid escape sequence (DeprecationWarning, future
        # SyntaxError). All three patterns are raw strings now; the regex
        # behavior is unchanged.
        if all(not re.search(p, login_page) for p in (r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
            if 'login error' in login_page:
                mobj = re.search(
                    r'(?s)<h1[^>]+class="topmost">(?P<title>[^<]+)</h1>\s*<div>(?P<description>.+?)</div>',
                    login_page)
                if mobj:
                    raise ExtractorError(
                        'lynda returned error: %s - %s'
                        % (mobj.group('title'), clean_html(mobj.group('description'))),
                        expected=True)
            raise ExtractorError('Unable to log in')

    def _logout(self):
        # Best-effort logout; failure is non-fatal by design.
        self._download_webpage(
            'http://www.lynda.com/ajax/logout.aspx', None,
            'Logging out', 'Unable to log out', fatal=False)
|
|
|
|
|
2015-03-03 01:12:10 +09:00
|
|
|
|
|
|
|
class LyndaIE(LyndaBaseIE):
    """Extractor for individual lynda.com videos."""

    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
    # NOTE: duplicate _NETRC_MACHINE = 'lynda' removed — it is already
    # inherited from LyndaBaseIE with the same value.

    # Matches transcript timecodes of the form "[H:MM:SS.mmm]" (or with a comma).
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    _TESTS = [{
        'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
        'info_dict': {
            'id': '114408',
            'ext': 'mp4',
            'title': 'Using the exercise files',
            'duration': 68
        }
    }, {
        'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the video JSON, build formats and subtitles."""
        video_id = self._match_id(url)

        page = self._download_webpage(
            'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
            video_id, 'Downloading video JSON')
        video_json = json.loads(page)

        # A 'Status' key signals an API-level error with a 'Message'.
        if 'Status' in video_json:
            raise ExtractorError(
                'lynda returned error: %s' % video_json['Message'], expected=True)

        if video_json['HasAccess'] is False:
            self.raise_login_required('Video %s is only available for members' % video_id)

        video_id = compat_str(video_json['ID'])
        duration = video_json['DurationInSeconds']
        title = video_json['Title']

        formats = []

        fmts = video_json.get('Formats')
        if fmts:
            formats.extend([
                {
                    'url': fmt['Url'],
                    'ext': fmt['Extension'],
                    'width': fmt['Width'],
                    'height': fmt['Height'],
                    'filesize': fmt['FileSize'],
                    'format_id': str(fmt['Resolution'])
                } for fmt in fmts])

        # 'PrioritizedStreams' maps a stream group id to {width: url}.
        prioritized_streams = video_json.get('PrioritizedStreams')
        if prioritized_streams:
            for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
                formats.extend([
                    {
                        'url': video_url,
                        'width': int_or_none(format_id),
                        'format_id': '%s-%s' % (prioritized_stream_id, format_id),
                    } for format_id, video_url in prioritized_stream.items()
                ])

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        subtitles = self.extract_subtitles(video_id, page)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats
        }

    def _fix_subtitles(self, subs):
        """Convert lynda's transcript JSON entries into an SRT document.

        Each entry needs a parsable 'Timecode'; an entry's end time is the
        next entry's start time, so the final entry is never emitted.
        Returns the SRT string, or None (implicitly) when no captions
        were produced.
        """
        # Fix: accumulate chunks in a list and join once instead of
        # quadratic `srt += ...` string concatenation. Output is identical.
        chunks = []
        seq_counter = 0
        for pos in range(0, len(subs) - 1):
            seq_current = subs[pos]
            m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
            if m_current is None:
                continue
            seq_next = subs[pos + 1]
            m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
            if m_next is None:
                continue
            appear_time = m_current.group('timecode')
            disappear_time = m_next.group('timecode')
            text = seq_current['Caption'].strip()
            if text:
                seq_counter += 1
                chunks.append('%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text))
        if chunks:
            return ''.join(chunks)

    def _get_subtitles(self, video_id, webpage):
        """Fetch the transcript and return it as English SRT subtitles."""
        url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        subs = self._download_json(url, None, False)
        if subs:
            return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
        else:
            return {}
|
2013-12-26 17:48:24 +09:00
|
|
|
|
|
|
|
|
2015-03-03 01:12:10 +09:00
|
|
|
class LyndaCourseIE(LyndaBaseIE):
    """Extractor that expands a lynda.com course page into a playlist."""

    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

    # Course link equals to welcome/introduction video link of same course
    # We will recognize it as course link
    _VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        course_page = self._download_webpage(
            'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
            course_id, 'Downloading course JSON')
        course = json.loads(course_page)

        # Drop the session so further extraction runs unauthenticated.
        self._logout()

        if course.get('Status') == 'NotFound':
            raise ExtractorError(
                'Course %s does not exist' % course_id, expected=True)

        locked_count = 0
        video_ids = []

        # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
        # by single video API anymore
        for chapter in course['Chapters']:
            for video in chapter['Videos']:
                if video['HasAccess'] is False:
                    locked_count += 1
                else:
                    video_ids.append(video['ID'])

        if locked_count > 0:
            self._downloader.report_warning(
                '%s videos are only available for members (or paid members) and will not be downloaded. '
                % locked_count + self._ACCOUNT_CREDENTIALS_HINT)

        entries = [
            self.url_result(
                'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
                'Lynda')
            for video_id in video_ids]

        return self.playlist_result(entries, course_id, course['Title'])
|