2013-06-19 05:14:21 +09:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
2014-01-05 09:52:03 +09:00
from __future__ import absolute_import , unicode_literals
2013-06-19 05:14:21 +09:00
2013-12-10 06:00:42 +09:00
import collections
2014-03-13 23:30:25 +09:00
import datetime
2013-10-06 11:27:09 +09:00
import errno
2013-06-19 05:14:21 +09:00
import io
2013-11-20 14:18:24 +09:00
import json
2014-03-30 13:02:41 +09:00
import locale
2013-06-19 05:14:21 +09:00
import os
2013-11-23 03:57:52 +09:00
import platform
2013-06-19 05:14:21 +09:00
import re
import shutil
2013-11-23 03:57:52 +09:00
import subprocess
2013-06-19 05:14:21 +09:00
import socket
import sys
import time
import traceback
2013-11-17 19:39:52 +09:00
if os . name == ' nt ' :
import ctypes
2013-11-18 00:47:52 +09:00
from . utils import (
2013-11-23 03:57:52 +09:00
compat_cookiejar ,
2014-10-01 00:27:53 +09:00
compat_expanduser ,
2013-11-18 00:47:52 +09:00
compat_http_client ,
compat_str ,
compat_urllib_error ,
compat_urllib_request ,
2014-09-13 22:59:16 +09:00
escape_url ,
2013-11-18 00:47:52 +09:00
ContentTooShortError ,
date_from_str ,
DateRange ,
2014-04-30 17:02:03 +09:00
DEFAULT_OUTTMPL ,
2013-11-18 00:47:52 +09:00
determine_ext ,
DownloadError ,
encodeFilename ,
ExtractorError ,
2013-11-25 11:12:26 +09:00
format_bytes ,
2013-12-16 12:15:10 +09:00
formatSeconds ,
2013-12-10 02:29:07 +09:00
get_term_width ,
2013-11-18 00:47:52 +09:00
locked_file ,
2013-11-23 03:57:52 +09:00
make_HTTPS_handler ,
2013-11-18 00:47:52 +09:00
MaxDownloadsReached ,
2014-01-20 19:36:47 +09:00
PagedList ,
2013-11-18 00:47:52 +09:00
PostProcessingError ,
2013-11-23 03:57:52 +09:00
platform_name ,
2013-11-18 00:47:52 +09:00
preferredencoding ,
SameFileError ,
sanitize_filename ,
subtitles_filename ,
takewhile_inclusive ,
UnavailableVideoError ,
2013-12-17 12:13:36 +09:00
url_basename ,
2013-11-18 00:47:52 +09:00
write_json_file ,
write_string ,
2013-11-23 03:57:52 +09:00
YoutubeDLHandler ,
2014-01-04 21:13:51 +09:00
prepend_extension ,
2013-11-18 00:47:52 +09:00
)
2014-09-03 19:41:05 +09:00
from . cache import Cache
2013-06-28 06:51:06 +09:00
from . extractor import get_info_extractor , gen_extractors
2013-09-24 00:59:27 +09:00
from . downloader import get_suitable_downloader
2014-10-27 00:31:52 +09:00
from . postprocessor import FFmpegMergerPP , FFmpegPostProcessor
2013-11-23 03:57:52 +09:00
from . version import __version__
2013-06-19 05:14:21 +09:00
class YoutubeDL ( object ) :
""" YoutubeDL class.
YoutubeDL objects are the ones responsible of downloading the
actual video file and writing it to disk if the user has requested
it , among some other tasks . In most cases there should be one per
program . As , given a video URL , the downloader doesn ' t know how to
extract all the needed information , task that InfoExtractors do , it
has to pass the URL to one of them .
For this , YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order . When it is passed
a URL , the YoutubeDL object handles it to the first InfoExtractor it
finds that reports being able to handle it . The InfoExtractor extracts
all the information about the video or videos the URL refers to , and
YoutubeDL process the extracted information , possibly using a File
Downloader to download the video .
YoutubeDL objects accept a lot of parameters . In order not to saturate
the object constructor with arguments , it receives a dictionary of
options instead . These options are available through the params
attribute for the InfoExtractors to use . The YoutubeDL also
registers itself as the downloader in charge for the InfoExtractors
that are added to it , so this is a " mutual registration " .
Available options :
username : Username for authentication purposes .
password : Password for authentication purposes .
2013-06-26 05:22:32 +09:00
videopassword:     Password for accessing a video.
2013-06-19 05:14:21 +09:00
usenetrc : Use netrc for authentication instead .
verbose : Print additional info to stdout .
quiet : Do not print messages to stdout .
2014-03-26 08:43:46 +09:00
no_warnings : Do not print out anything for warnings .
2013-06-19 05:14:21 +09:00
forceurl : Force printing final URL .
forcetitle : Force printing title .
forceid : Force printing ID .
forcethumbnail : Force printing thumbnail URL .
forcedescription : Force printing description .
forcefilename : Force printing final filename .
2013-12-16 12:15:10 +09:00
forceduration : Force printing duration .
2013-11-20 14:18:24 +09:00
forcejson : Force printing info_dict as JSON .
2014-10-25 07:30:57 +09:00
dump_single_json : Force printing the info_dict of the whole playlist
( or video ) as a single JSON line .
2013-06-19 05:14:21 +09:00
simulate : Do not download the video files .
format : Video format code .
format_limit : Highest quality format to try .
outtmpl : Template for output names .
restrictfilenames : Do not allow " & " and spaces in file names
ignoreerrors : Do not stop on download errors .
nooverwrites : Prevent overwriting files .
playliststart : Playlist item to start at .
playlistend : Playlist item to end at .
matchtitle : Download only matching titles .
rejecttitle : Reject downloads for matching titles .
2013-11-24 14:08:11 +09:00
logger : Log messages to a logging . Logger instance .
2013-06-19 05:14:21 +09:00
logtostderr : Log messages to stderr instead of stdout .
writedescription : Write the video description to a . description file
writeinfojson : Write the video description to a . info . json file
2013-10-14 14:18:58 +09:00
writeannotations : Write the video annotations to a . annotations . xml file
2013-06-19 05:14:21 +09:00
writethumbnail : Write the thumbnail image to a file
writesubtitles : Write the video subtitles to a file
2013-06-26 06:45:16 +09:00
writeautomaticsub : Write the automatic subtitles to a file
2013-06-19 05:14:21 +09:00
allsubtitles : Downloads all the subtitles of the video
2013-09-14 18:14:40 +09:00
( requires writesubtitles or writeautomaticsub )
2013-06-19 05:14:21 +09:00
listsubtitles : Lists all available subtitles for the video
2013-06-26 18:59:29 +09:00
subtitlesformat : Subtitle format [ srt / sbv / vtt ] ( default = srt )
2013-08-24 01:34:57 +09:00
subtitleslangs : List of languages of the subtitles to download
2013-06-19 05:14:21 +09:00
keepvideo : Keep the video file after post - processing
daterange : A DateRange object , download only if the upload_date is in the range .
skip_download : Skip the actual download of the video file
2013-09-22 18:09:25 +09:00
cachedir : Location of the cache files in the filesystem .
2014-09-03 19:41:05 +09:00
False to disable filesystem cache .
2013-10-01 05:26:25 +09:00
noplaylist : Download single video instead of a playlist if in doubt .
2013-10-06 13:06:30 +09:00
age_limit : An integer representing the user ' s age in years.
Unsuitable videos for the given age are skipped .
2013-12-16 11:09:49 +09:00
min_views : An integer representing the minimum view count the video
must have in order to not be skipped .
Videos without view count information are always
downloaded . None for no limit .
max_views : An integer representing the maximum view count .
Videos that are more popular than that are not
downloaded .
Videos without view count information are always
downloaded . None for no limit .
download_archive : File name of a file where all downloads are recorded .
2013-10-06 11:27:09 +09:00
Videos already present in the file are not downloaded
again .
2013-11-23 03:57:52 +09:00
cookiefile : File name where cookies should be read from and dumped to .
2013-11-24 23:03:25 +09:00
nocheckcertificate : Do not verify SSL certificates
2014-03-21 08:33:53 +09:00
prefer_insecure : Use HTTP instead of HTTPS to retrieve information .
At the moment , this is only supported by YouTube .
2013-11-24 23:03:25 +09:00
proxy : URL of the proxy server to use
2013-12-01 19:42:02 +09:00
socket_timeout : Time to wait for unresponsive hosts , in seconds
2013-12-09 12:08:51 +09:00
bidi_workaround : Work around buggy terminals without bidirectional text
support, using fribidi
2013-12-29 23:28:32 +09:00
debug_printtraffic : Print out sent and received HTTP traffic
2014-01-21 10:09:49 +09:00
include_ads : Download ads as well
2014-01-22 22:16:43 +09:00
default_search : Prepend this string if an input url is not valid .
' auto ' for elaborate guessing
2014-03-30 13:02:41 +09:00
encoding : Use this encoding instead of the system - specified .
2014-08-21 18:52:07 +09:00
extract_flat : Do not resolve URLs , return the immediate result .
2014-10-24 21:48:12 +09:00
Pass in ' in_playlist ' to only show this behavior for
playlist items .
2013-10-22 21:49:34 +09:00
2013-06-19 05:14:21 +09:00
The following parameters are not used by YoutubeDL itself , they are used by
the FileDownloader :
nopart , updatetime , buffersize , ratelimit , min_filesize , max_filesize , test ,
noresizebuffer , retries , continuedl , noprogress , consoletitle
2014-01-09 01:53:34 +09:00
The following options are used by the post processors :
prefer_ffmpeg : If True , use ffmpeg instead of avconv if both are available ,
otherwise prefer avconv .
2014-08-25 17:18:01 +09:00
exec_cmd : Arbitrary command to run after downloading
2013-06-19 05:14:21 +09:00
"""
params = None
_ies = [ ]
_pps = [ ]
_download_retcode = None
_num_downloads = None
_screen_file = None
2013-11-29 23:25:09 +09:00
def __init__ ( self , params = None ) :
2013-06-19 05:14:21 +09:00
""" Create a FileDownloader object with the given options. """
2013-12-31 21:34:52 +09:00
if params is None :
params = { }
2013-06-19 05:14:21 +09:00
self . _ies = [ ]
2013-07-08 22:14:27 +09:00
self . _ies_instances = { }
2013-06-19 05:14:21 +09:00
self . _pps = [ ]
2013-12-23 18:37:27 +09:00
self . _progress_hooks = [ ]
2013-06-19 05:14:21 +09:00
self . _download_retcode = 0
self . _num_downloads = 0
self . _screen_file = [ sys . stdout , sys . stderr ] [ params . get ( ' logtostderr ' , False ) ]
2013-12-09 12:08:51 +09:00
self . _err_file = sys . stderr
2013-12-31 21:34:52 +09:00
self . params = params
2014-09-03 19:41:05 +09:00
self . cache = Cache ( self )
2013-09-21 18:48:07 +09:00
2013-12-09 12:08:51 +09:00
if params . get ( ' bidi_workaround ' , False ) :
2013-12-10 02:29:07 +09:00
try :
import pty
master , slave = pty . openpty ( )
width = get_term_width ( )
if width is None :
width_args = [ ]
else :
width_args = [ ' -w ' , str ( width ) ]
2013-12-23 12:19:20 +09:00
sp_kwargs = dict (
2013-12-10 02:29:07 +09:00
stdin = subprocess . PIPE ,
stdout = slave ,
stderr = self . _err_file )
2013-12-23 12:19:20 +09:00
try :
self . _output_process = subprocess . Popen (
[ ' bidiv ' ] + width_args , * * sp_kwargs
)
except OSError :
self . _output_process = subprocess . Popen (
[ ' fribidi ' , ' -c ' , ' UTF-8 ' ] + width_args , * * sp_kwargs )
self . _output_channel = os . fdopen ( master , ' rb ' )
2013-12-10 02:29:07 +09:00
except OSError as ose :
if ose . errno == 2 :
2014-01-05 09:52:03 +09:00
self . report_warning ( ' Could not find fribidi executable, ignoring --bidi-workaround . Make sure that fribidi is an executable file in one of the directories in your $PATH. ' )
2013-12-10 02:29:07 +09:00
else :
raise
2013-12-09 12:08:51 +09:00
2013-09-21 18:48:07 +09:00
if ( sys . version_info > = ( 3 , ) and sys . platform != ' win32 ' and
sys . getfilesystemencoding ( ) in [ ' ascii ' , ' ANSI_X3.4-1968 ' ]
2014-10-10 17:33:41 +09:00
and not params . get ( ' restrictfilenames ' , False ) ) :
2013-09-21 18:48:07 +09:00
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self . report_warning (
2014-01-05 09:52:03 +09:00
' Assuming --restrict-filenames since file system encoding '
2014-10-10 00:00:24 +09:00
' cannot encode all characters. '
2014-01-05 09:52:03 +09:00
' Set the LC_ALL environment variable to fix this. ' )
2013-11-27 02:53:36 +09:00
self . params [ ' restrictfilenames ' ] = True
2013-09-21 18:48:07 +09:00
2013-11-26 05:55:20 +09:00
if ' %(stitle)s ' in self . params . get ( ' outtmpl ' , ' ' ) :
2014-01-05 09:52:03 +09:00
self . report_warning ( ' %(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead. ' )
2013-06-19 05:14:21 +09:00
2013-11-23 03:57:52 +09:00
self . _setup_opener ( )
2013-06-19 05:14:21 +09:00
def add_info_extractor ( self , ie ) :
""" Add an InfoExtractor object to the end of the list. """
self . _ies . append ( ie )
2013-07-08 22:14:27 +09:00
self . _ies_instances [ ie . ie_key ( ) ] = ie
2013-06-19 05:14:21 +09:00
ie . set_downloader ( self )
2013-07-08 22:14:27 +09:00
def get_info_extractor ( self , ie_key ) :
"""
Get an instance of an IE with name ie_key , it will try to get one from
the _ies list , if there ' s no instance it will create a new one and add
it to the extractor list .
"""
ie = self . _ies_instances . get ( ie_key )
if ie is None :
ie = get_info_extractor ( ie_key ) ( )
self . add_info_extractor ( ie )
return ie
2013-06-28 06:51:06 +09:00
def add_default_info_extractors ( self ) :
"""
Add the InfoExtractors returned by gen_extractors to the end of the list
"""
for ie in gen_extractors ( ) :
self . add_info_extractor ( ie )
2013-06-19 05:14:21 +09:00
def add_post_processor ( self , pp ) :
""" Add a PostProcessor object to the end of the chain. """
self . _pps . append ( pp )
pp . set_downloader ( self )
2013-12-23 18:37:27 +09:00
def add_progress_hook ( self , ph ) :
""" Add the progress hook (currently only for the file downloader) """
self . _progress_hooks . append ( ph )
2013-09-24 01:09:28 +09:00
2013-12-10 02:29:07 +09:00
def _bidi_workaround ( self , message ) :
2013-12-23 12:19:20 +09:00
if not hasattr ( self , ' _output_channel ' ) :
2013-12-10 02:29:07 +09:00
return message
2013-12-23 12:19:20 +09:00
assert hasattr ( self , ' _output_process ' )
2014-07-26 06:37:32 +09:00
assert isinstance ( message , compat_str )
2014-01-05 09:52:03 +09:00
line_count = message . count ( ' \n ' ) + 1
self . _output_process . stdin . write ( ( message + ' \n ' ) . encode ( ' utf-8 ' ) )
2013-12-23 12:19:20 +09:00
self . _output_process . stdin . flush ( )
2014-01-05 09:52:03 +09:00
res = ' ' . join ( self . _output_channel . readline ( ) . decode ( ' utf-8 ' )
2013-12-10 02:29:07 +09:00
for _ in range ( line_count ) )
2014-01-05 09:52:03 +09:00
return res [ : - len ( ' \n ' ) ]
2013-12-10 02:29:07 +09:00
2013-06-19 05:14:21 +09:00
def to_screen ( self , message , skip_eol = False ) :
2013-12-09 12:08:51 +09:00
""" Print message to stdout if not in quiet mode. """
return self . to_stdout ( message , skip_eol , check_quiet = True )
2014-04-08 02:57:42 +09:00
def _write_string ( self , s , out = None ) :
2014-04-08 05:48:13 +09:00
write_string ( s , out = out , encoding = self . params . get ( ' encoding ' ) )
2014-04-08 02:57:42 +09:00
2013-12-09 12:08:51 +09:00
def to_stdout ( self , message , skip_eol = False , check_quiet = False ) :
2013-06-19 05:14:21 +09:00
""" Print message to stdout if not in quiet mode. """
2013-11-24 14:08:11 +09:00
if self . params . get ( ' logger ' ) :
2013-11-23 17:22:18 +09:00
self . params [ ' logger ' ] . debug ( message )
2013-12-09 12:08:51 +09:00
elif not check_quiet or not self . params . get ( ' quiet ' , False ) :
2013-12-10 02:29:07 +09:00
message = self . _bidi_workaround ( message )
2014-01-05 09:52:03 +09:00
terminator = [ ' \n ' , ' ' ] [ skip_eol ]
2013-06-19 05:14:21 +09:00
output = message + terminator
2013-12-10 02:29:07 +09:00
2014-04-08 02:57:42 +09:00
self . _write_string ( output , self . _screen_file )
2013-06-19 05:14:21 +09:00
def to_stderr ( self , message ) :
""" Print message to stderr. """
2014-07-26 06:37:32 +09:00
assert isinstance ( message , compat_str )
2013-11-24 14:08:11 +09:00
if self . params . get ( ' logger ' ) :
2013-11-23 17:22:18 +09:00
self . params [ ' logger ' ] . error ( message )
else :
2013-12-10 02:29:07 +09:00
message = self . _bidi_workaround ( message )
2014-01-05 09:52:03 +09:00
output = message + ' \n '
2014-04-08 02:57:42 +09:00
self . _write_string ( output , self . _err_file )
2013-06-19 05:14:21 +09:00
2013-11-17 19:39:52 +09:00
def to_console_title ( self , message ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if os . name == ' nt ' and ctypes . windll . kernel32 . GetConsoleWindow ( ) :
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes . windll . kernel32 . SetConsoleTitleW ( ctypes . c_wchar_p ( message ) )
elif ' TERM ' in os . environ :
2014-04-08 02:57:42 +09:00
self . _write_string ( ' \033 ]0; %s \007 ' % message , self . _screen_file )
2013-11-17 19:39:52 +09:00
2013-11-18 05:05:14 +09:00
def save_console_title ( self ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if ' TERM ' in os . environ :
2013-11-19 00:35:41 +09:00
# Save the title on stack
2014-04-08 02:57:42 +09:00
self . _write_string ( ' \033 [22;0t ' , self . _screen_file )
2013-11-18 05:05:14 +09:00
def restore_console_title ( self ) :
if not self . params . get ( ' consoletitle ' , False ) :
return
if ' TERM ' in os . environ :
2013-11-19 00:35:41 +09:00
# Restore the title from stack
2014-04-08 02:57:42 +09:00
self . _write_string ( ' \033 [23;0t ' , self . _screen_file )
2013-11-18 05:05:14 +09:00
def __enter__ ( self ) :
self . save_console_title ( )
return self
def __exit__ ( self , * args ) :
self . restore_console_title ( )
2014-01-25 20:02:43 +09:00
2013-11-23 03:57:52 +09:00
if self . params . get ( ' cookiefile ' ) is not None :
self . cookiejar . save ( )
2013-11-18 05:05:14 +09:00
2013-06-19 05:14:21 +09:00
def trouble ( self , message = None , tb = None ) :
""" Determine action to take when a download problem appears.
Depending on if the downloader has been configured to ignore
download errors or not , this method may throw an exception or
not when errors are found , after printing the message .
tb , if given , is additional traceback information .
"""
if message is not None :
self . to_stderr ( message )
if self . params . get ( ' verbose ' ) :
if tb is None :
if sys . exc_info ( ) [ 0 ] : # if .trouble has been called from an except block
2014-01-05 09:52:03 +09:00
tb = ' '
2013-06-19 05:14:21 +09:00
if hasattr ( sys . exc_info ( ) [ 1 ] , ' exc_info ' ) and sys . exc_info ( ) [ 1 ] . exc_info [ 0 ] :
2014-01-05 09:52:03 +09:00
tb + = ' ' . join ( traceback . format_exception ( * sys . exc_info ( ) [ 1 ] . exc_info ) )
2013-06-19 05:14:21 +09:00
tb + = compat_str ( traceback . format_exc ( ) )
else :
tb_data = traceback . format_list ( traceback . extract_stack ( ) )
2014-01-05 09:52:03 +09:00
tb = ' ' . join ( tb_data )
2013-06-19 05:14:21 +09:00
self . to_stderr ( tb )
if not self . params . get ( ' ignoreerrors ' , False ) :
if sys . exc_info ( ) [ 0 ] and hasattr ( sys . exc_info ( ) [ 1 ] , ' exc_info ' ) and sys . exc_info ( ) [ 1 ] . exc_info [ 0 ] :
exc_info = sys . exc_info ( ) [ 1 ] . exc_info
else :
exc_info = sys . exc_info ( )
raise DownloadError ( message , exc_info )
self . _download_retcode = 1
def report_warning ( self , message ) :
'''
Print the message to stderr , it will be prefixed with ' WARNING: '
If stderr is a tty file the ' WARNING: ' will be colored
'''
2014-03-09 22:53:07 +09:00
if self . params . get ( ' logger ' ) is not None :
self . params [ ' logger ' ] . warning ( message )
2013-06-19 05:14:21 +09:00
else :
2014-03-26 08:43:46 +09:00
if self . params . get ( ' no_warnings ' ) :
return
2014-03-09 22:53:07 +09:00
if self . _err_file . isatty ( ) and os . name != ' nt ' :
_msg_header = ' \033 [0;33mWARNING: \033 [0m '
else :
_msg_header = ' WARNING: '
warning_message = ' %s %s ' % ( _msg_header , message )
self . to_stderr ( warning_message )
2013-06-19 05:14:21 +09:00
def report_error ( self , message , tb = None ) :
'''
Do the same as trouble , but prefixes the message with ' ERROR: ' , colored
in red if stderr is a tty file .
'''
2013-12-09 12:08:51 +09:00
if self . _err_file . isatty ( ) and os . name != ' nt ' :
2014-01-05 09:52:03 +09:00
_msg_header = ' \033 [0;31mERROR: \033 [0m '
2013-06-19 05:14:21 +09:00
else :
2014-01-05 09:52:03 +09:00
_msg_header = ' ERROR: '
error_message = ' %s %s ' % ( _msg_header , message )
2013-06-19 05:14:21 +09:00
self . trouble ( error_message , tb )
def report_file_already_downloaded ( self , file_name ) :
""" Report file has already been fully downloaded. """
try :
2014-01-05 09:52:03 +09:00
self . to_screen ( ' [download] %s has already been downloaded ' % file_name )
2013-11-18 00:47:52 +09:00
except UnicodeEncodeError :
2014-01-05 09:52:03 +09:00
self . to_screen ( ' [download] The file has already been downloaded ' )
2013-06-19 05:14:21 +09:00
def prepare_filename ( self , info_dict ) :
""" Generate the output filename. """
try :
template_dict = dict ( info_dict )
template_dict [ ' epoch ' ] = int ( time . time ( ) )
autonumber_size = self . params . get ( ' autonumber_size ' )
if autonumber_size is None :
autonumber_size = 5
2014-01-05 09:52:03 +09:00
autonumber_templ = ' % 0 ' + str ( autonumber_size ) + ' d '
2013-06-19 05:14:21 +09:00
template_dict [ ' autonumber ' ] = autonumber_templ % self . _num_downloads
2013-10-29 06:01:37 +09:00
if template_dict . get ( ' playlist_index ' ) is not None :
2014-08-25 01:49:04 +09:00
template_dict [ ' playlist_index ' ] = ' %0*d ' % ( len ( str ( template_dict [ ' n_entries ' ] ) ) , template_dict [ ' playlist_index ' ] )
2014-03-04 11:49:33 +09:00
if template_dict . get ( ' resolution ' ) is None :
if template_dict . get ( ' width ' ) and template_dict . get ( ' height ' ) :
template_dict [ ' resolution ' ] = ' %d x %d ' % ( template_dict [ ' width ' ] , template_dict [ ' height ' ] )
elif template_dict . get ( ' height ' ) :
2014-03-10 18:29:25 +09:00
template_dict [ ' resolution ' ] = ' %s p ' % template_dict [ ' height ' ]
2014-03-04 11:49:33 +09:00
elif template_dict . get ( ' width ' ) :
2014-03-10 18:29:25 +09:00
template_dict [ ' resolution ' ] = ' ?x %d ' % template_dict [ ' width ' ]
2013-06-19 05:14:21 +09:00
2013-10-23 05:28:19 +09:00
sanitize = lambda k , v : sanitize_filename (
2013-12-10 19:23:35 +09:00
compat_str ( v ) ,
2013-06-19 05:14:21 +09:00
restricted = self . params . get ( ' restrictfilenames ' ) ,
2014-01-05 09:52:03 +09:00
is_id = ( k == ' id ' ) )
2013-10-23 05:28:19 +09:00
template_dict = dict ( ( k , sanitize ( k , v ) )
2013-12-10 19:23:35 +09:00
for k , v in template_dict . items ( )
if v is not None )
2014-01-05 09:52:03 +09:00
template_dict = collections . defaultdict ( lambda : ' NA ' , template_dict )
2013-06-19 05:14:21 +09:00
2014-04-30 17:02:03 +09:00
outtmpl = self . params . get ( ' outtmpl ' , DEFAULT_OUTTMPL )
2014-10-01 00:27:53 +09:00
tmpl = compat_expanduser ( outtmpl )
2013-10-23 05:28:19 +09:00
filename = tmpl % template_dict
2013-06-19 05:14:21 +09:00
return filename
except ValueError as err :
2014-01-05 09:52:03 +09:00
self . report_error ( ' Error in output template: ' + str ( err ) + ' (encoding: ' + repr ( preferredencoding ( ) ) + ' ) ' )
2013-06-19 05:14:21 +09:00
return None
def _match_entry ( self , info_dict ) :
""" Returns None iff the file should be downloaded """
2014-01-05 09:52:03 +09:00
video_title = info_dict . get ( ' title ' , info_dict . get ( ' id ' , ' video ' ) )
2013-11-23 06:46:46 +09:00
if ' title ' in info_dict :
# This can happen when we're just evaluating the playlist
title = info_dict [ ' title ' ]
matchtitle = self . params . get ( ' matchtitle ' , False )
if matchtitle :
if not re . search ( matchtitle , title , re . IGNORECASE ) :
2014-01-05 09:52:03 +09:00
return ' " ' + title + ' " title did not match pattern " ' + matchtitle + ' " '
2013-11-23 06:46:46 +09:00
rejecttitle = self . params . get ( ' rejecttitle ' , False )
if rejecttitle :
if re . search ( rejecttitle , title , re . IGNORECASE ) :
2014-01-05 09:52:03 +09:00
return ' " ' + title + ' " title matched reject pattern " ' + rejecttitle + ' " '
2013-06-19 05:14:21 +09:00
date = info_dict . get ( ' upload_date ' , None )
if date is not None :
dateRange = self . params . get ( ' daterange ' , DateRange ( ) )
if date not in dateRange :
2014-01-05 09:52:03 +09:00
return ' %s upload date is not in range %s ' % ( date_from_str ( date ) . isoformat ( ) , dateRange )
2013-12-16 11:09:49 +09:00
view_count = info_dict . get ( ' view_count ' , None )
if view_count is not None :
min_views = self . params . get ( ' min_views ' )
if min_views is not None and view_count < min_views :
2014-01-05 09:52:03 +09:00
return ' Skipping %s , because it has not reached minimum view count ( %d / %d ) ' % ( video_title , view_count , min_views )
2013-12-16 11:09:49 +09:00
max_views = self . params . get ( ' max_views ' )
if max_views is not None and view_count > max_views :
2014-01-05 09:52:03 +09:00
return ' Skipping %s , because it has exceeded the maximum view count ( %d / %d ) ' % ( video_title , view_count , max_views )
2013-10-06 13:06:30 +09:00
age_limit = self . params . get ( ' age_limit ' )
if age_limit is not None :
2014-08-23 00:46:57 +09:00
actual_age_limit = info_dict . get ( ' age_limit ' )
if actual_age_limit is None :
actual_age_limit = 0
if age_limit < actual_age_limit :
2014-01-05 09:52:03 +09:00
return ' Skipping " ' + title + ' " because it is age restricted '
2013-10-06 11:27:09 +09:00
if self . in_download_archive ( info_dict ) :
2014-01-05 09:52:03 +09:00
return ' %s has already been recorded in archive ' % video_title
2013-06-19 05:14:21 +09:00
return None
2013-10-22 21:49:34 +09:00
2013-11-03 19:56:45 +09:00
@staticmethod
def add_extra_info ( info_dict , extra_info ) :
''' Set the keys from extra_info in info dict if they are missing '''
for key , value in extra_info . items ( ) :
info_dict . setdefault ( key , value )
2013-12-05 22:29:08 +09:00
def extract_info ( self , url , download = True , ie_key = None , extra_info = { } ,
process = True ) :
2013-06-19 05:14:21 +09:00
'''
Returns a list with a dictionary for each video we find .
If ' download ' , also downloads the videos .
extra_info is a dict containing the extra values to add to each result
'''
2013-10-22 21:49:34 +09:00
2013-06-19 05:14:21 +09:00
if ie_key :
2013-07-08 22:14:27 +09:00
ies = [ self . get_info_extractor ( ie_key ) ]
2013-06-19 05:14:21 +09:00
else :
ies = self . _ies
for ie in ies :
if not ie . suitable ( url ) :
continue
if not ie . working ( ) :
2014-01-05 09:52:03 +09:00
self . report_warning ( ' The program functionality for this site has been marked as broken, '
' and will probably not work. ' )
2013-06-19 05:14:21 +09:00
try :
ie_result = ie . extract ( url )
if ie_result is None : # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance ( ie_result , list ) :
# Backwards compatibility: old IE result format
ie_result = {
' _type ' : ' compat_list ' ,
' entries ' : ie_result ,
}
2014-03-24 00:06:03 +09:00
self . add_default_extra_info ( ie_result , ie , url )
2013-12-05 22:29:08 +09:00
if process :
return self . process_ie_result ( ie_result , download , extra_info )
else :
return ie_result
2013-06-19 05:14:21 +09:00
except ExtractorError as de : # An error we somewhat expected
self . report_error ( compat_str ( de ) , de . format_traceback ( ) )
break
2014-01-23 18:36:47 +09:00
except MaxDownloadsReached :
raise
2013-06-19 05:14:21 +09:00
except Exception as e :
if self . params . get ( ' ignoreerrors ' , False ) :
self . report_error ( compat_str ( e ) , tb = compat_str ( traceback . format_exc ( ) ) )
break
else :
raise
else :
2014-03-21 00:33:42 +09:00
self . report_error ( ' no suitable InfoExtractor for URL %s ' % url )
2013-10-22 21:49:34 +09:00
2014-03-24 00:06:03 +09:00
def add_default_extra_info ( self , ie_result , ie , url ) :
self . add_extra_info ( ie_result , {
' extractor ' : ie . IE_NAME ,
' webpage_url ' : url ,
' webpage_url_basename ' : url_basename ( url ) ,
' extractor_key ' : ie . ie_key ( ) ,
} )
2013-06-19 05:14:21 +09:00
def process_ie_result ( self , ie_result , download = True , extra_info = { } ) :
"""
Take the result of the ie ( may be modified ) and resolve all unresolved
references ( URLs , playlist items ) .
It will also download the videos if ' download ' .
Returns the resolved ie_result .
"""
2014-08-21 18:52:07 +09:00
result_type = ie_result . get ( ' _type ' , ' video ' )
2014-10-24 21:48:12 +09:00
if result_type in ( ' url ' , ' url_transparent ' ) :
extract_flat = self . params . get ( ' extract_flat ' , False )
if ( ( extract_flat == ' in_playlist ' and ' playlist ' in extra_info ) or
extract_flat is True ) :
if self . params . get ( ' forcejson ' , False ) :
self . to_stdout ( json . dumps ( ie_result ) )
2014-08-21 18:52:07 +09:00
return ie_result
2013-06-19 05:14:21 +09:00
if result_type == ' video ' :
2013-11-03 19:56:45 +09:00
self . add_extra_info ( ie_result , extra_info )
2013-11-15 19:04:26 +09:00
return self . process_video_result ( ie_result , download = download )
2013-06-19 05:14:21 +09:00
elif result_type == ' url ' :
# We have to add extra_info to the results because it may be
# contained in a playlist
return self . extract_info ( ie_result [ ' url ' ] ,
download ,
ie_key = ie_result . get ( ' ie_key ' ) ,
extra_info = extra_info )
2013-12-05 22:29:08 +09:00
elif result_type == ' url_transparent ' :
# Use the information from the embedding page
info = self . extract_info (
ie_result [ ' url ' ] , ie_key = ie_result . get ( ' ie_key ' ) ,
extra_info = extra_info , download = False , process = False )
def make_result ( embedded_info ) :
new_result = ie_result . copy ( )
for f in ( ' _type ' , ' url ' , ' ext ' , ' player_url ' , ' formats ' ,
2013-12-23 23:48:00 +09:00
' entries ' , ' ie_key ' , ' duration ' ,
2013-12-06 17:15:04 +09:00
' subtitles ' , ' annotations ' , ' format ' ,
' thumbnail ' , ' thumbnails ' ) :
2013-12-05 22:29:08 +09:00
if f in new_result :
del new_result [ f ]
if f in embedded_info :
new_result [ f ] = embedded_info [ f ]
return new_result
new_result = make_result ( info )
assert new_result . get ( ' _type ' ) != ' url_transparent '
if new_result . get ( ' _type ' ) == ' compat_list ' :
new_result [ ' entries ' ] = [
make_result ( e ) for e in new_result [ ' entries ' ] ]
return self . process_ie_result (
new_result , download = download , extra_info = extra_info )
2013-06-19 05:14:21 +09:00
elif result_type == ' playlist ' :
# We process each entry in the playlist
playlist = ie_result . get ( ' title ' , None ) or ie_result . get ( ' id ' , None )
2014-01-05 09:52:03 +09:00
self . to_screen ( ' [download] Downloading playlist: %s ' % playlist )
2013-06-19 05:14:21 +09:00
playlist_results = [ ]
playliststart = self . params . get ( ' playliststart ' , 1 ) - 1
2013-12-16 21:16:20 +09:00
playlistend = self . params . get ( ' playlistend ' , None )
# For backwards compatibility, interpret -1 as whole list
2013-06-19 05:14:21 +09:00
if playlistend == - 1 :
2013-12-16 21:16:20 +09:00
playlistend = None
2013-06-19 05:14:21 +09:00
2014-01-20 19:36:47 +09:00
if isinstance ( ie_result [ ' entries ' ] , list ) :
n_all_entries = len ( ie_result [ ' entries ' ] )
entries = ie_result [ ' entries ' ] [ playliststart : playlistend ]
n_entries = len ( entries )
self . to_screen (
" [ %s ] playlist %s : Collected %d video ids (downloading %d of them) " %
( ie_result [ ' extractor ' ] , playlist , n_all_entries , n_entries ) )
else :
assert isinstance ( ie_result [ ' entries ' ] , PagedList )
entries = ie_result [ ' entries ' ] . getslice (
playliststart , playlistend )
n_entries = len ( entries )
self . to_screen (
" [ %s ] playlist %s : Downloading %d videos " %
( ie_result [ ' extractor ' ] , playlist , n_entries ) )
2013-06-19 05:14:21 +09:00
2013-10-22 21:49:34 +09:00
for i , entry in enumerate ( entries , 1 ) :
2014-01-05 09:52:03 +09:00
self . to_screen ( ' [download] Downloading video # %s of %s ' % ( i , n_entries ) )
2013-06-19 05:14:21 +09:00
extra = {
2014-08-25 01:49:04 +09:00
' n_entries ' : n_entries ,
2013-10-22 21:49:34 +09:00
' playlist ' : playlist ,
' playlist_index ' : i + playliststart ,
2013-11-03 19:56:45 +09:00
' extractor ' : ie_result [ ' extractor ' ] ,
2013-11-03 20:11:13 +09:00
' webpage_url ' : ie_result [ ' webpage_url ' ] ,
2013-12-17 12:13:36 +09:00
' webpage_url_basename ' : url_basename ( ie_result [ ' webpage_url ' ] ) ,
2013-11-03 20:14:44 +09:00
' extractor_key ' : ie_result [ ' extractor_key ' ] ,
2013-10-22 21:49:34 +09:00
}
2013-11-23 06:46:46 +09:00
reason = self . _match_entry ( entry )
if reason is not None :
2014-01-05 09:52:03 +09:00
self . to_screen ( ' [download] ' + reason )
2013-11-23 06:46:46 +09:00
continue
2013-06-19 05:14:21 +09:00
entry_result = self . process_ie_result ( entry ,
download = download ,
extra_info = extra )
playlist_results . append ( entry_result )
ie_result [ ' entries ' ] = playlist_results
return ie_result
elif result_type == ' compat_list ' :
def _fixup ( r ) :
2013-11-03 19:56:45 +09:00
self . add_extra_info ( r ,
2013-11-03 20:11:13 +09:00
{
' extractor ' : ie_result [ ' extractor ' ] ,
' webpage_url ' : ie_result [ ' webpage_url ' ] ,
2013-12-17 12:13:36 +09:00
' webpage_url_basename ' : url_basename ( ie_result [ ' webpage_url ' ] ) ,
2013-11-03 20:14:44 +09:00
' extractor_key ' : ie_result [ ' extractor_key ' ] ,
2013-11-03 20:11:13 +09:00
} )
2013-06-19 05:14:21 +09:00
return r
ie_result [ ' entries ' ] = [
2013-11-03 19:56:45 +09:00
self . process_ie_result ( _fixup ( r ) , download , extra_info )
2013-06-19 05:14:21 +09:00
for r in ie_result [ ' entries ' ]
]
return ie_result
else :
raise Exception ( ' Invalid result type: %s ' % result_type )
2013-10-21 20:19:58 +09:00
def select_format(self, format_spec, available_formats):
    """Pick one format dict out of *available_formats* per *format_spec*.

    *available_formats* is assumed to be ordered worst-to-best, so the
    last element is the best format.  Returns the chosen format dict, or
    None when nothing matches the spec.

    Recognized specs: None/'best', 'worst', 'bestaudio', 'worstaudio',
    'bestvideo', 'worstvideo', a bare extension (e.g. 'mp4'), or an
    explicit format_id.
    """
    if format_spec is None or format_spec == 'best':
        return available_formats[-1]
    if format_spec == 'worst':
        return available_formats[0]

    # The four audio/video selectors share the same structure: filter to
    # single-stream formats, then take the best (last) or worst (first).
    if format_spec in ('bestaudio', 'worstaudio', 'bestvideo', 'worstvideo'):
        # Audio-only formats have vcodec == 'none'; video-only ones have
        # acodec == 'none'.
        missing_codec = 'vcodec' if format_spec.endswith('audio') else 'acodec'
        candidates = [
            f for f in available_formats
            if f.get(missing_codec) == 'none']
        if not candidates:
            # Same as the original fall-through: no single-stream format
            # of the requested kind exists.
            return None
        return candidates[-1] if format_spec.startswith('best') else candidates[0]

    # Otherwise the spec is either a container extension or a format_id.
    extensions = ('mp4', 'flv', 'webm', '3gp', 'm4a')
    if format_spec in extensions:
        matches = [f for f in available_formats if f['ext'] == format_spec]
    else:
        matches = [f for f in available_formats if f['format_id'] == format_spec]
    if matches:
        # Prefer the best (last) of several matches.
        return matches[-1]
    return None
2013-07-02 17:08:58 +09:00
def process_video_result(self, info_dict, download=True):
    """Normalize a single video result, pick the requested format(s) and
    optionally hand them off to process_info() for downloading.

    info_dict: the extractor result (must carry 'id' and 'title').
    download:  when True, actually download the selected format(s).
    Returns the (mutated) info_dict, updated with the best selected
    format for backwards compatibility.
    Raises ExtractorError on missing mandatory fields, empty format
    lists, or when no requested format is available.
    """
    assert info_dict.get('_type', 'video') == 'video'

    # Mandatory fields — fail loudly if the extractor forgot them.
    if 'id' not in info_dict:
        raise ExtractorError('Missing "id" field in extractor result')
    if 'title' not in info_dict:
        raise ExtractorError('Missing "title" field in extractor result')

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    # Order thumbnails worst-to-best by area so [-1] is the largest;
    # None width/height sort first.
    thumbnails = info_dict.get('thumbnails')
    if thumbnails:
        thumbnails.sort(key=lambda t: (
            t.get('width'), t.get('height'), t.get('url')))
        for t in thumbnails:
            if 'width' in t and 'height' in t:
                t['resolution'] = '%dx%d' % (t['width'], t['height'])

    if thumbnails and 'thumbnail' not in info_dict:
        # Default single thumbnail: the best (last) one.
        info_dict['thumbnail'] = thumbnails[-1]['url']

    if 'display_id' not in info_dict and 'id' in info_dict:
        info_dict['display_id'] = info_dict['id']

    # Derive upload_date (YYYYMMDD, UTC) from a raw POSIX timestamp if
    # the extractor supplied only the latter.
    if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
        upload_date = datetime.datetime.utcfromtimestamp(
            info_dict['timestamp'])
        info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

    # These extractors handle format selection themselves
    if info_dict['extractor'] in ['Youku']:
        if download:
            self.process_info(info_dict)
        return info_dict

    # We now pick which formats have to be downloaded
    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    if not formats:
        raise ExtractorError('No video formats found!')

    # We check that all the formats have the format and format_id fields
    for i, format in enumerate(formats):
        if 'url' not in format:
            raise ExtractorError('Missing "url" key in result (index %d)' % i)
        if format.get('format_id') is None:
            # Fall back to the list index as a synthetic format_id.
            format['format_id'] = compat_str(i)
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
            )
        # Automatically determine file extension if missing
        if 'ext' not in format:
            format['ext'] = determine_ext(format['url']).lower()

    # --format-limit: drop everything better than the named format
    # (takewhile_inclusive keeps the limit format itself).
    format_limit = self.params.get('format_limit', None)
    if format_limit:
        formats = list(takewhile_inclusive(
            lambda f: f['format_id'] != format_limit, formats
        ))

    # TODO Central sorting goes here

    if formats[0] is not info_dict:
        # only set the 'formats' field if the original info_dict lists them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats
    if self.params.get('listformats', None):
        self.list_formats(info_dict)
        return

    req_format = self.params.get('format')
    if req_format is None:
        req_format = 'best'
    formats_to_download = []
    # The -1 is for supporting YoutubeIE
    if req_format in ('-1', 'all'):
        formats_to_download = formats
    else:
        # A comma-separated spec downloads several formats; each element
        # may itself be a '/'-separated preference chain.
        for rfstr in req_format.split(','):
            # We can accept formats requested in the format: 34/5/best, we pick
            # the first that is available, starting from left
            req_formats = rfstr.split('/')
            for rf in req_formats:
                if re.match(r'.+?\+.+?', rf) is not None:
                    # Two formats have been requested like '137+139'
                    format_1, format_2 = rf.split('+')
                    formats_info = (self.select_format(format_1, formats),
                        self.select_format(format_2, formats))
                    if all(formats_info):
                        # Merged download: both halves are passed along in
                        # 'requested_formats' for the merger postprocessor.
                        selected_format = {
                            'requested_formats': formats_info,
                            'format': rf,
                            'ext': formats_info[0]['ext'],
                        }
                    else:
                        selected_format = None
                else:
                    selected_format = self.select_format(rf, formats)
                if selected_format is not None:
                    formats_to_download.append(selected_format)
                    break
    if not formats_to_download:
        raise ExtractorError('requested format not available',
            expected=True)

    if download:
        if len(formats_to_download) > 1:
            self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
        for format in formats_to_download:
            new_info = dict(info_dict)
            new_info.update(format)
            self.process_info(new_info)
    # We update the info dict with the best quality format (backwards compatibility)
    info_dict.update(formats_to_download[-1])
    return info_dict
2013-06-19 05:14:21 +09:00
def process_info(self, info_dict):
    """Process a single resolved IE result.

    Handles forced stdout printing, sidecar files (description,
    annotations, subtitles, .info.json, thumbnail), the actual media
    download (including multi-format downloads to be merged), and
    postprocessing.  Respects simulate/skip_download/nooverwrites and
    records the video in the download archive on success.
    """

    assert info_dict.get('_type', 'video') == 'video'

    # Honor --max-downloads before doing any work on this entry.
    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None:
        if self._num_downloads >= int(max_downloads):
            raise MaxDownloadsReached()

    info_dict['fulltitle'] = info_dict['title']
    if len(info_dict['title']) > 200:
        # Keep filenames manageable; 'fulltitle' preserves the original.
        info_dict['title'] = info_dict['title'][:197] + '...'

    # Keep for backwards compatibility
    info_dict['stitle'] = info_dict['title']

    if 'format' not in info_dict:
        info_dict['format'] = info_dict['ext']

    # Matching filters (--match-title, --date, archive, ...) may skip
    # this entry entirely.
    reason = self._match_entry(info_dict)
    if reason is not None:
        self.to_screen('[download] ' + reason)
        return

    self._num_downloads += 1

    filename = self.prepare_filename(info_dict)

    # Forced printings (--get-title, --get-id, --get-url, ...).
    if self.params.get('forcetitle', False):
        self.to_stdout(info_dict['fulltitle'])
    if self.params.get('forceid', False):
        self.to_stdout(info_dict['id'])
    if self.params.get('forceurl', False):
        # For RTMP URLs, also include the playpath
        self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
    if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
        self.to_stdout(info_dict['thumbnail'])
    if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
        self.to_stdout(info_dict['description'])
    if self.params.get('forcefilename', False) and filename is not None:
        self.to_stdout(filename)
    if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    if self.params.get('forceformat', False):
        self.to_stdout(info_dict['format'])
    if self.params.get('forcejson', False):
        info_dict['_filename'] = filename
        self.to_stdout(json.dumps(info_dict))
    if self.params.get('dump_single_json', False):
        # The JSON itself is emitted once by download(), after all
        # entries have been processed.
        info_dict['_filename'] = filename

    # Do nothing else if in simulate mode
    if self.params.get('simulate', False):
        return

    if filename is None:
        return

    try:
        dn = os.path.dirname(encodeFilename(filename))
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
    except (OSError, IOError) as err:
        self.report_error('unable to create directory ' + compat_str(err))
        return

    # --write-description: sidecar <filename>.description file.
    if self.params.get('writedescription', False):
        descfn = filename + '.description'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
            self.to_screen('[info] Video description is already present')
        else:
            try:
                self.to_screen('[info] Writing video description to: ' + descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (KeyError, TypeError):
                # Missing or non-string description: warn, don't abort.
                self.report_warning('There\'s no description to write.')
            except (OSError, IOError):
                self.report_error('Cannot write description file ' + descfn)
                return

    # --write-annotations: sidecar <filename>.annotations.xml file.
    if self.params.get('writeannotations', False):
        annofn = filename + '.annotations.xml'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
            self.to_screen('[info] Video annotations are already present')
        else:
            try:
                self.to_screen('[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning('There are no annotations to write.')
            except (OSError, IOError):
                self.report_error('Cannot write annotations file: ' + annofn)
                return

    subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                   self.params.get('writeautomaticsub')])

    if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
        # subtitles download errors are already managed as troubles in relevant IE
        # that way it will silently go on when used with unsupporting IE
        subtitles = info_dict['subtitles']
        sub_format = self.params.get('subtitlesformat', 'srt')
        for sub_lang in subtitles.keys():
            sub = subtitles[sub_lang]
            if sub is None:
                continue
            try:
                sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
                    self.to_screen('[info] Video subtitle %s.%s is already_present' % (sub_lang, sub_format))
                else:
                    self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                        subfile.write(sub)
            except (OSError, IOError):
                self.report_error('Cannot write subtitles file ' + sub_filename)
                return

    # --write-info-json: full metadata dump next to the media file.
    if self.params.get('writeinfojson', False):
        infofn = os.path.splitext(filename)[0] + '.info.json'
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
            self.to_screen('[info] Video description metadata is already present')
        else:
            self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
            try:
                write_json_file(info_dict, encodeFilename(infofn))
            except (OSError, IOError):
                self.report_error('Cannot write metadata to JSON file ' + infofn)
                return

    # --write-thumbnail: fetch the thumbnail URL; failures only warn.
    if self.params.get('writethumbnail', False):
        if info_dict.get('thumbnail') is not None:
            thumb_format = determine_ext(info_dict['thumbnail'], 'jpg')
            thumb_filename = os.path.splitext(filename)[0] + '.' + thumb_format
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
                self.to_screen('[%s] %s: Thumbnail is already present' %
                               (info_dict['extractor'], info_dict['id']))
            else:
                self.to_screen('[%s] %s: Downloading thumbnail ...' %
                               (info_dict['extractor'], info_dict['id']))
                try:
                    uf = self.urlopen(info_dict['thumbnail'])
                    with open(thumb_filename, 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    self.to_screen('[%s] %s: Writing thumbnail to: %s' %
                                   (info_dict['extractor'], info_dict['id'], thumb_filename))
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_warning('Unable to download thumbnail "%s": %s' %
                                        (info_dict['thumbnail'], compat_str(err)))

    if not self.params.get('skip_download', False):
        if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
            success = True
        else:
            try:
                def dl(name, info):
                    # Run one download with the appropriate FileDownloader
                    # and the registered progress hooks attached.
                    fd = get_suitable_downloader(info)(self, self.params)
                    for ph in self._progress_hooks:
                        fd.add_progress_hook(ph)
                    if self.params.get('verbose'):
                        self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
                    return fd.download(name, info)
                if info_dict.get('requested_formats') is not None:
                    # '137+139'-style request: download each half to its
                    # own 'f<format_id>' file and merge afterwards.
                    downloaded = []
                    success = True
                    merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
                    if not merger._get_executable():
                        postprocessors = []
                        self.report_warning('You have requested multiple '
                            'formats but ffmpeg or avconv are not installed.'
                            ' The formats won\'t be merged')
                    else:
                        postprocessors = [merger]
                    for f in info_dict['requested_formats']:
                        new_info = dict(info_dict)
                        new_info.update(f)
                        fname = self.prepare_filename(new_info)
                        fname = prepend_extension(fname, 'f%s' % f['format_id'])
                        downloaded.append(fname)
                        partial_success = dl(fname, new_info)
                        success = success and partial_success
                    info_dict['__postprocessors'] = postprocessors
                    info_dict['__files_to_merge'] = downloaded
                else:
                    # Just a single file
                    success = dl(filename, info_dict)
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                self.report_error('unable to download video data: %s' % str(err))
                return
            except (OSError, IOError) as err:
                raise UnavailableVideoError(err)
            except (ContentTooShortError, ) as err:
                self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                return

        if success:
            try:
                self.post_process(filename, info_dict)
            except (PostProcessingError) as err:
                self.report_error('postprocessing: %s' % str(err))
                return

    # Recorded even with --skip-download, as long as the entry was not
    # filtered out above.
    self.record_download_archive(info_dict)
2013-06-19 05:14:21 +09:00
def download(self, url_list):
    """Download a given list of URLs."""
    outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)

    # A fixed (non-templated) output name cannot hold more than one
    # video, unless --max-downloads 1 makes the conflict impossible.
    several_urls = len(url_list) > 1
    template_is_static = '%' not in outtmpl
    if several_urls and template_is_static and self.params.get('max_downloads') != 1:
        raise SameFileError(outtmpl)

    for url in url_list:
        try:
            # extract_info also performs the downloads themselves.
            res = self.extract_info(url)
        except UnavailableVideoError:
            self.report_error('unable to download video')
        except MaxDownloadsReached:
            # Stop the whole run, not just this URL.
            self.to_screen('[info] Maximum number of downloaded files reached.')
            raise
        else:
            if self.params.get('dump_single_json', False):
                self.to_stdout(json.dumps(res))

    return self._download_retcode
2013-11-22 22:57:53 +09:00
def download_with_info_file(self, info_filename):
    """Replay a download from a previously saved .info.json file.

    Falls back to a fresh extraction of the recorded webpage_url when
    the saved info turns out to be stale (DownloadError).
    """
    with io.open(info_filename, 'r', encoding='utf-8') as info_file:
        info = json.load(info_file)

    try:
        self.process_ie_result(info, download=True)
    except DownloadError:
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            # Nothing to retry with; propagate the failure.
            raise
        self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
        return self.download([webpage_url])

    return self._download_retcode
2013-11-22 22:57:53 +09:00
2013-06-19 05:14:21 +09:00
def post_process(self, filename, ie_info):
    """Run all the postprocessors on the given file.

    filename: path of the downloaded media file.
    ie_info:  the info dict; per-download postprocessors may be attached
              under '__postprocessors' (e.g. the ffmpeg merger) and run
              before the globally registered self._pps.
    Deletes the original file afterwards unless some postprocessor (or
    --keep-video) asked to keep it.
    """
    # Work on a copy so postprocessors can't corrupt the caller's dict.
    info = dict(ie_info)
    info['filepath'] = filename
    keep_video = None
    pps_chain = []
    if ie_info.get('__postprocessors') is not None:
        pps_chain.extend(ie_info['__postprocessors'])
    pps_chain.extend(self._pps)
    for pp in pps_chain:
        try:
            # Each postprocessor returns (keep_video_wish, new_info).
            keep_video_wish, new_info = pp.run(info)
            if keep_video_wish is not None:
                if keep_video_wish:
                    # Any explicit "keep" wins over earlier "delete".
                    keep_video = keep_video_wish
                elif keep_video is None:
                    # No clear decision yet, let IE decide
                    keep_video = keep_video_wish
        except PostProcessingError as e:
            # Report and continue with the remaining postprocessors.
            self.report_error(e.msg)
    if keep_video is False and not self.params.get('keepvideo', False):
        try:
            self.to_screen('Deleting original file %s (pass -k to keep)' % filename)
            os.remove(encodeFilename(filename))
        except (IOError, OSError):
            self.report_warning('Unable to remove downloaded video file')
2013-10-06 11:27:09 +09:00
2013-11-25 23:46:54 +09:00
def _make_archive_id ( self , info_dict ) :
# Future-proof against any change in case
# and backwards compatibility with prior versions
2013-11-26 06:57:15 +09:00
extractor = info_dict . get ( ' extractor_key ' )
2013-11-23 06:46:46 +09:00
if extractor is None :
if ' id ' in info_dict :
extractor = info_dict . get ( ' ie_key ' ) # key in a playlist
if extractor is None :
2013-11-25 23:46:54 +09:00
return None # Incomplete video information
2014-01-05 09:52:03 +09:00
return extractor . lower ( ) + ' ' + info_dict [ ' id ' ]
2013-11-25 23:46:54 +09:00
def in_download_archive(self, info_dict):
    """Return True if this video is already listed in the download archive."""
    archive_fn = self.params.get('download_archive')
    if archive_fn is None:
        return False

    archive_id = self._make_archive_id(info_dict)
    if archive_id is None:
        # Incomplete video information
        return False

    try:
        with locked_file(archive_fn, 'r', encoding='utf-8') as archive:
            return any(entry.strip() == archive_id for entry in archive)
    except IOError as ioe:
        # A missing archive file just means nothing has been recorded yet.
        if ioe.errno != errno.ENOENT:
            raise
    return False
def record_download_archive(self, info_dict):
    """Append this video's archive id to the download archive, if one is configured."""
    archive_fn = self.params.get('download_archive')
    if archive_fn is None:
        return

    archive_id = self._make_archive_id(info_dict)
    # Callers only reach this point with complete video information.
    assert archive_id
    with locked_file(archive_fn, 'a', encoding='utf-8') as archive:
        archive.write(archive_id + '\n')
2013-07-02 17:08:58 +09:00
2013-10-21 21:09:38 +09:00
@staticmethod
2013-10-28 19:31:12 +09:00
def format_resolution ( format , default = ' unknown ' ) :
2013-11-26 06:34:56 +09:00
if format . get ( ' vcodec ' ) == ' none ' :
return ' audio only '
2013-12-24 19:56:02 +09:00
if format . get ( ' resolution ' ) is not None :
return format [ ' resolution ' ]
2013-10-21 21:09:38 +09:00
if format . get ( ' height ' ) is not None :
if format . get ( ' width ' ) is not None :
2014-01-05 09:52:03 +09:00
res = ' %s x %s ' % ( format [ ' width ' ] , format [ ' height ' ] )
2013-10-21 21:09:38 +09:00
else :
2014-01-05 09:52:03 +09:00
res = ' %s p ' % format [ ' height ' ]
2013-12-24 19:56:02 +09:00
elif format . get ( ' width ' ) is not None :
2014-01-05 09:52:03 +09:00
res = ' ?x %d ' % format [ ' width ' ]
2013-10-21 21:09:38 +09:00
else :
2013-10-28 19:31:12 +09:00
res = default
2013-10-21 21:09:38 +09:00
return res
2014-04-30 09:02:41 +09:00
def _format_note(self, fdict):
    """Build the free-form 'note' column for one format in --list-formats.

    Accumulates, in order: unsupported-container marker, the extractor's
    format_note, total bitrate, container, video codec (+bitrate), audio
    codec (+bitrate, sample rate), and file size.  Comma separators are
    inserted only between already-present pieces.
    """
    res = ''
    if fdict.get('ext') in ['f4f', 'f4m']:
        # f4f/f4m fragments need an HDS-capable downloader.
        res += '(unsupported) '
    if fdict.get('format_note') is not None:
        res += fdict['format_note'] + ' '
    if fdict.get('tbr') is not None:
        res += '%4dk ' % fdict['tbr']
    if fdict.get('container') is not None:
        if res:
            res += ', '
        res += '%s container' % fdict['container']
    if (fdict.get('vcodec') is not None and
            fdict.get('vcodec') != 'none'):
        if res:
            res += ', '
        res += fdict['vcodec']
        if fdict.get('vbr') is not None:
            # The bitrate value itself is appended below.
            res += '@'
    elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
        # Codec unknown but separate video/audio bitrates exist.
        res += 'video@'
    if fdict.get('vbr') is not None:
        res += '%4dk' % fdict['vbr']
    if fdict.get('acodec') is not None:
        if res:
            res += ', '
        if fdict['acodec'] == 'none':
            res += 'video only'
        else:
            res += '%-5s' % fdict['acodec']
    elif fdict.get('abr') is not None:
        if res:
            res += ', '
        res += 'audio'
    if fdict.get('abr') is not None:
        res += '@%3dk' % fdict['abr']
    if fdict.get('asr') is not None:
        res += ' (%5dHz)' % fdict['asr']
    if fdict.get('filesize') is not None:
        if res:
            res += ', '
        res += format_bytes(fdict['filesize'])
    elif fdict.get('filesize_approx') is not None:
        if res:
            res += ', '
        res += '~' + format_bytes(fdict['filesize_approx'])
    return res
2013-11-16 09:08:43 +09:00
2014-04-30 09:02:41 +09:00
def list_formats(self, info_dict):
    """Print a table of all available formats for --list-formats.

    Columns: format code, extension, resolution, note.  The first/last
    data rows are tagged '(worst)'/'(best)' when several formats exist.
    """
    def line(format, idlen=20):
        # One table row; idlen sizes the format-code column.
        return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
            format['format_id'],
            format['ext'],
            self.format_resolution(format),
            self._format_note(format),
        ))

    # A result without a 'formats' list is itself the single format.
    formats = info_dict.get('formats', [info_dict])
    idlen = max(len('format code'),
                max(len(f['format_id']) for f in formats))
    formats_s = [line(f, idlen) for f in formats]
    if len(formats) > 1:
        # Only add a separating space when a note is already present.
        formats_s[0] += (' ' if self._format_note(formats[0]) else '') + '(worst)'
        formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'

    header_line = line({
        'format_id': 'format code', 'ext': 'extension',
        'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
    self.to_screen('[info] Available formats for %s:\n%s\n%s' %
                   (info_dict['id'], header_line, '\n'.join(formats_s)))
2013-11-23 03:57:52 +09:00
def urlopen(self, req):
    """Start an HTTP download.

    req: either a URL string or a urllib Request object.  Returns the
    opened response from the shared opener, honoring the configured
    socket timeout.
    """

    # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
    # always respected by websites, some tend to give out URLs with non percent-encoded
    # non-ASCII characters (see telemb.py, ard.py [#3412])
    # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
    # To work around aforementioned issue we will replace request's original URL with
    # percent-encoded one
    # NOTE: on Python 2 string URLs may be str or unicode, hence basestring;
    # the conditional keeps the name unevaluated on Python 3.
    req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
    url = req if req_is_string else req.get_full_url()
    url_escaped = escape_url(url)

    # Substitute URL if any change after escaping
    if url != url_escaped:
        if req_is_string:
            req = url_escaped
        else:
            # Rebuild the Request, carrying over body, headers and the
            # cookie-policy attributes of the original.
            req = compat_urllib_request.Request(
                url_escaped, data=req.data, headers=req.headers,
                origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)

    return self._opener.open(req, timeout=self._socket_timeout)
2013-11-23 03:57:52 +09:00
def print_debug_header(self):
    """Write the '[debug] ...' diagnostics block (version, git HEAD,
    Python/platform, external binaries, proxies) when --verbose is set."""
    if not self.params.get('verbose'):
        return

    if type('') is not compat_str:
        # Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
        self.report_warning(
            'Your Python is broken! Update to a newer and supported version')

    encoding_str = (
        '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            sys.stdout.encoding,
            self.get_encoding()))
    write_string(encoding_str, encoding=None)

    self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
    # Best-effort: report the git commit when running from a checkout.
    try:
        sp = subprocess.Popen(
            ['git', 'rev-parse', '--short', 'HEAD'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            cwd=os.path.dirname(os.path.abspath(__file__)))
        out, err = sp.communicate()
        out = out.decode().strip()
        if re.match('[0-9a-f]+', out):
            self._write_string('[debug] Git HEAD: ' + out + '\n')
    except:
        # Deliberately broad: any failure here must not break the run.
        # sys.exc_clear only exists on Python 2.
        try:
            sys.exc_clear()
        except:
            pass
    self._write_string('[debug] Python version %s - %s\n' % (
        platform.python_version(), platform_name()))

    exe_versions = FFmpegPostProcessor.get_versions()
    exe_str = ', '.join(
        '%s %s' % (exe, v)
        for exe, v in sorted(exe_versions.items())
        if v
    )
    if not exe_str:
        exe_str = 'none'
    self._write_string('[debug] exe versions: %s\n' % exe_str)

    # Collect the effective proxy settings from every installed handler.
    proxy_map = {}
    for handler in self._opener.handlers:
        if hasattr(handler, 'proxies'):
            proxy_map.update(handler.proxies)
    self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
2013-11-23 03:57:52 +09:00
2013-12-01 19:42:02 +09:00
def _setup_opener(self):
    """Build and install the urllib opener used for all HTTP(S) requests.

    Configures, in order: the socket timeout, the cookie jar (in-memory or
    Mozilla-format file), the proxy map, certificate checking and traffic
    debugging, then stores the assembled opener on ``self._opener``.
    """
    timeout_val = self.params.get('socket_timeout')
    # Fall back to a generous 10-minute timeout when none was requested.
    self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

    cookie_file = self.params.get('cookiefile')
    if cookie_file is None:
        # In-memory cookies only; nothing is persisted between runs.
        self.cookiejar = compat_cookiejar.CookieJar()
    else:
        self.cookiejar = compat_cookiejar.MozillaCookieJar(cookie_file)
        if os.access(cookie_file, os.R_OK):
            self.cookiejar.load()
    cookie_processor = compat_urllib_request.HTTPCookieProcessor(
        self.cookiejar)

    proxy_opt = self.params.get('proxy')
    if proxy_opt is None:
        proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given
        # (https://github.com/rg3/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
    elif proxy_opt == '':
        # An explicitly empty --proxy disables all proxying.
        proxies = {}
    else:
        proxies = {'http': proxy_opt, 'https': proxy_opt}
    proxy_handler = compat_urllib_request.ProxyHandler(proxies)

    debuglevel = 1 if self.params.get('debug_printtraffic') else 0
    https_handler = make_HTTPS_handler(
        self.params.get('nocheckcertificate', False), debuglevel=debuglevel)
    ydlh = YoutubeDLHandler(debuglevel=debuglevel)
    opener = compat_urllib_request.build_opener(
        https_handler, proxy_handler, cookie_processor, ydlh)
    # Delete the default user-agent header, which would otherwise apply in
    # cases where our custom HTTP handler doesn't come into play
    # (See https://github.com/rg3/youtube-dl/issues/1309 for details)
    opener.addheaders = []
    self._opener = opener
2014-03-30 13:02:41 +09:00
def encode(self, s):
    """Encode *s* with the configured output encoding.

    ``bytes`` input is returned untouched; on failure the
    UnicodeEncodeError is annotated with a user-facing hint and re-raised.
    """
    if isinstance(s, bytes):
        # Already encoded
        return s
    try:
        return s.encode(self.get_encoding())
    except UnicodeEncodeError as err:
        # Enrich the error with a remediation hint before propagating it.
        err.reason += '. Check your system encoding configuration or use the --encoding option.'
        raise
def get_encoding(self):
    """Return the output encoding: the 'encoding' option if set, else the
    locale's preferred encoding (only ``None`` triggers the fallback)."""
    configured = self.params.get('encoding')
    return preferredencoding() if configured is None else configured