Compare commits

..

33 Commits

Author SHA1 Message Date
Ricardo Garcia c34e358456 Bump version number 2010-10-31 11:28:45 +01:00
Ricardo Garcia a6a61601de Update User-Agent string 2010-10-31 11:28:45 +01:00
Ricardo Garcia e0c982c8d0 Load cookies if the cookie file exists when starting the program 2010-10-31 11:28:45 +01:00
Ricardo Garcia 331ce0a05d Use stderr for output when the output file is "-" (fixes issue #216) 2010-10-31 11:28:45 +01:00
Ricardo Garcia 80066952bc Add new --cookies option to be able to save cookies to disk (fixes issue #208) 2010-10-31 11:28:45 +01:00
Ricardo Garcia e08878f498 Set stdout to binary mode under Windows (fixes issue #218) 2010-10-31 11:28:45 +01:00
Ricardo Garcia a949a3ae6b Support "https" in YouTube video URLs (fixes issue #215) 2010-10-31 11:28:45 +01:00
Ricardo Garcia 7df4635faf Use HTTPS for the login URL (fixes issue #163) 2010-10-31 11:28:45 +01:00
Ricardo Garcia f79007e542 Bump version number 2010-10-31 11:28:41 +01:00
Ricardo Garcia ac249f421f Retry on any 5xx server error 2010-10-31 11:28:41 +01:00
Ricardo Garcia e86e9474bf Treat HTTP error 500 the same way as 503 (fixes issue #209) 2010-10-31 11:28:41 +01:00
Ricardo Garcia bbd4bb037a Support the -nocookie suffix in youtube domain name (fixes issue #200) 2010-10-31 11:28:41 +01:00
Ricardo Garcia 5c44af1875 Do not print file name on warning message (fixes issue #197) 2010-10-31 11:28:41 +01:00
Ricardo Garcia 33407be7d6 Fix "unable to extract uploader nickname" error with Dailymotion 2010-10-31 11:28:41 +01:00
Ricardo Garcia 8e686771af Decode the reason given on YouTube errors to avoid crashes (fixes issue #193) 2010-10-31 11:28:41 +01:00
Ricardo Garcia 2933532c5b Allow the #! notation for YouTube URLs found in many links in their website 2010-10-31 11:28:41 +01:00
Ricardo Garcia 6b57e8c5ac Extract the video extension from the media URL in metacafe.com 2010-10-31 11:28:41 +01:00
Ricardo Garcia c6c555cf8a Fix metacafe.com downloads for some videos (fixes issue #189) 2010-10-31 11:28:41 +01:00
Ricardo Garcia db7e31b853 Use unicode strings for several error messages that were missing the "u" 2010-10-31 11:28:41 +01:00
Ricardo Garcia d67e097462 Abort download in case of error writing file data to disk 2010-10-31 11:28:41 +01:00
Ricardo Garcia 38ed13444a Improve error message on invalid output template and abort execution 2010-10-31 11:28:40 +01:00
Ricardo Garcia 8a9f53bebf Fix typo in report_resuming_byte doc string (fixes issue #188) 2010-10-31 11:28:40 +01:00
Ricardo Garcia 80cc23304f Bump version number 2010-10-31 11:28:36 +01:00
Ricardo Garcia 813962f85a Update user-agent string 2010-10-31 11:28:36 +01:00
Ricardo Garcia 109626fcc0 Fix metacafe.com code not working due to gdaKey again (fixes issue #185) 2010-10-31 11:28:36 +01:00
Ricardo Garcia 204c9398ab Merge Gavin van Lelyveld's patch for --playlist-start option 2010-10-31 11:28:36 +01:00
Ricardo Garcia 2962317dea Put back -b option as a placeholder with a warning message 2010-10-31 11:28:36 +01:00
Ricardo Garcia 268fb2bdd8 Consider the file downloaded if the size differs in less than 100 bytes (fixes issue #175) 2010-10-31 11:28:36 +01:00
Ricardo Garcia 101e0d1e91 Reorganize request code to make it a bit more robust 2010-10-31 11:28:36 +01:00
Ricardo Garcia f95f29fd25 Properly detect YouTube error messages to print them on screen (fixes issue #172) 2010-10-31 11:28:36 +01:00
Ricardo Garcia 06f34701fe Bump version number 2010-10-31 11:28:33 +01:00
Ricardo Garcia 5ce7d172d7 Restore support for the get_video method, fixing many issues 2010-10-31 11:28:33 +01:00
Ricardo Garcia 2e3a32e4ac Restore proper support for webm formats (fixes issue #166) 2010-10-31 11:28:32 +01:00
2 changed files with 225 additions and 142 deletions

View File: LATEST_VERSION

@@ -1 +1 @@
2010.07.22
2010.10.24

View File: youtube-dl

@@ -4,6 +4,7 @@
# Author: Danny Colligan
# Author: Benjamin Johnson
# License: Public domain code
import cookielib
import htmlentitydefs
import httplib
import locale
@@ -27,7 +28,7 @@ except ImportError:
from cgi import parse_qs
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.7) Gecko/20100720 Firefox/3.6.7',
'User-Agent': 'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.11) Gecko/20101019 Firefox/3.6.11',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-us,en;q=0.5',
@@ -94,6 +95,9 @@ def sanitize_open(filename, open_mode):
"""
try:
if filename == u'-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout, filename)
stream = open(filename, open_mode)
return (stream, filename)
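
A note on the new Windows branch above: text-mode stdout translates '\n' into '\r\n', which corrupts binary video data when the output file is "-". A minimal standalone sketch of the same fix (the 'video.flv' input is hypothetical):

import os
import sys

if sys.platform == 'win32':
    # Windows opens stdout in text mode by default and rewrites '\n'
    # as '\r\n', mangling binary data; switch the fd to binary mode.
    import msvcrt
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
sys.stdout.write(open('video.flv', 'rb').read())
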
@@ -181,22 +185,26 @@ class FileDownloader(object):
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
usenetrc: Use netrc for authentication instead.
quiet: Do not print messages to stdout.
forceurl: Force printing final URL.
forcetitle: Force printing title.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
outtmpl: Template for output names.
ignoreerrors: Do not stop on download errors.
ratelimit: Download speed limit, in bytes/sec.
nooverwrites: Prevent overwriting files.
retries: Number of times to retry for HTTP error 503
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
username: Username for authentication purposes.
password: Password for authentication purposes.
usenetrc: Use netrc for authentication instead.
quiet: Do not print messages to stdout.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
outtmpl: Template for output names.
ignoreerrors: Do not stop on download errors.
ratelimit: Download speed limit, in bytes/sec.
nooverwrites: Prevent overwriting files.
retries: Number of times to retry for HTTP error 5xx
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
playliststart: Playlist item to start at.
logtostderr: Log messages to stderr instead of stdout.
"""
params = None
@@ -204,6 +212,7 @@ class FileDownloader(object):
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params):
"""Create a FileDownloader object with the given options."""
@@ -211,6 +220,7 @@ class FileDownloader(object):
self._pps = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self.params = params
@staticmethod
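
The new _screen_file assignment uses boolean indexing: False selects element 0 (stdout) and True selects element 1 (stderr). Combined with the 'logtostderr': opts.outtmpl == '-' setting further down, it keeps status messages off stdout when the video itself is written there. A small illustration:

import sys

logtostderr = True   # set when the output template is '-'
# False indexes element 0 (stdout), True indexes element 1 (stderr)
screen_file = [sys.stdout, sys.stderr][logtostderr]
print >>screen_file, '[download] progress goes here'   # lands on stderr
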
@@ -287,16 +297,6 @@ class FileDownloader(object):
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return long(round(number * multiplier))
@staticmethod
def verify_url(url):
"""Verify a URL is valid and data could be downloaded. Return real data URL."""
request = urllib2.Request(url, None, std_headers)
data = urllib2.urlopen(request)
data.read(1)
url = data.geturl()
data.close()
return url
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
@@ -307,12 +307,13 @@ class FileDownloader(object):
self._pps.append(pp)
pp.set_downloader(self)
def to_stdout(self, message, skip_eol=False, ignore_encoding_errors=False):
def to_screen(self, message, skip_eol=False, ignore_encoding_errors=False):
"""Print message to stdout if not in quiet mode."""
try:
if not self.params.get('quiet', False):
print (u'%s%s' % (message, [u'\n', u''][skip_eol])).encode(preferredencoding()),
sys.stdout.flush()
terminator = [u'\n', u''][skip_eol]
print >>self._screen_file, (u'%s%s' % (message, terminator)).encode(preferredencoding()),
self._screen_file.flush()
except (UnicodeEncodeError), err:
if not ignore_encoding_errors:
raise
@@ -353,40 +354,40 @@ class FileDownloader(object):
def report_destination(self, filename):
"""Report destination filename."""
self.to_stdout(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
self.to_screen(u'[download] Destination: %s' % filename, ignore_encoding_errors=True)
def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
"""Report download progress."""
if self.params.get('noprogress', False):
return
self.to_stdout(u'\r[download] %s of %s at %s ETA %s' %
self.to_screen(u'\r[download] %s of %s at %s ETA %s' %
(percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
def report_resuming_byte(self, resume_len):
"""Report attemtp to resume at given byte."""
self.to_stdout(u'[download] Resuming download at byte %s' % resume_len)
"""Report attempt to resume at given byte."""
self.to_screen(u'[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 503"""
self.to_stdout(u'[download] Got HTTP error 503. Retrying (attempt %d of %d)...' % (count, retries))
"""Report retry in case of HTTP error 5xx"""
self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_stdout(u'[download] %s has already been downloaded' % file_name)
self.to_screen(u'[download] %s has already been downloaded' % file_name)
except (UnicodeEncodeError), err:
self.to_stdout(u'[download] The file has already been downloaded')
self.to_screen(u'[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_stdout(u'[download] Unable to resume')
self.to_screen(u'[download] Unable to resume')
def report_finish(self):
"""Report download finished."""
if self.params.get('noprogress', False):
self.to_stdout(u'[download] Download completed')
self.to_screen(u'[download] Download completed')
else:
self.to_stdout(u'')
self.to_screen(u'')
def increment_downloads(self):
"""Increment the ordinal that assigns a number to each file."""
@@ -396,13 +397,6 @@ class FileDownloader(object):
"""Process a single dictionary returned by an InfoExtractor."""
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
# Verify URL if it's an HTTP one
if info_dict['url'].startswith('http'):
try:
self.verify_url(info_dict['url'].encode('utf-8')).decode('utf-8')
except (OSError, IOError, urllib2.URLError, httplib.HTTPException, socket.error), err:
raise UnavailableVideoError
# Forced printings
if self.params.get('forcetitle', False):
print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
@@ -421,15 +415,16 @@ class FileDownloader(object):
template_dict['ord'] = unicode('%05d' % self._num_downloads)
filename = self.params['outtmpl'] % template_dict
except (ValueError, KeyError), err:
self.trouble('ERROR: invalid output template or system charset: %s' % str(err))
self.trouble(u'ERROR: invalid system charset or erroneous output template')
return
if self.params.get('nooverwrites', False) and os.path.exists(filename):
self.to_stderr(u'WARNING: file exists: %s; skipping' % filename)
self.to_stderr(u'WARNING: file exists and will be skipped')
return
try:
self.pmkdir(filename)
except (OSError, IOError), err:
self.trouble('ERROR: unable to create directories: %s' % str(err))
self.trouble(u'ERROR: unable to create directories: %s' % str(err))
return
try:
@@ -437,17 +432,17 @@ class FileDownloader(object):
except (OSError, IOError), err:
raise UnavailableVideoError
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self.trouble('ERROR: unable to download video data: %s' % str(err))
self.trouble(u'ERROR: unable to download video data: %s' % str(err))
return
except (ContentTooShortError, ), err:
self.trouble('ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success:
try:
self.post_process(filename, info_dict)
except (PostProcessingError), err:
self.trouble('ERROR: postprocessing: %s' % str(err))
self.trouble(u'ERROR: postprocessing: %s' % str(err))
return
def download(self, url_list):
@@ -472,7 +467,7 @@ class FileDownloader(object):
break
if not suitable_found:
self.trouble('ERROR: no suitable InfoExtractor: %s' % url)
self.trouble(u'ERROR: no suitable InfoExtractor: %s' % url)
return self._download_retcode
@@ -502,17 +497,17 @@ class FileDownloader(object):
retval = subprocess.call(basic_args + [[], ['-e', '-k', '1']][self.params.get('continuedl', False)])
while retval == 2 or retval == 1:
prevsize = os.path.getsize(filename)
self.to_stdout(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
time.sleep(5.0) # This seems to be needed
retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
cursize = os.path.getsize(filename)
if prevsize == cursize and retval == 1:
break
if retval == 0:
self.to_stdout(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
self.to_screen(u'\r[rtmpdump] %s bytes' % os.path.getsize(filename))
return True
else:
self.trouble('\nERROR: rtmpdump exited with code %d' % retval)
self.trouble(u'\nERROR: rtmpdump exited with code %d' % retval)
return False
def _do_download(self, filename, url, player_url):
@@ -539,32 +534,50 @@ class FileDownloader(object):
count = 0
retries = self.params.get('retries', 0)
while True:
while count <= retries:
# Establish connection
try:
data = urllib2.urlopen(request)
break
except (urllib2.HTTPError, ), err:
if err.code == 503:
# Retry in case of HTTP error 503
count += 1
if count <= retries:
self.report_retry(count, retries)
continue
if err.code != 416: # 416 is 'Requested range not satisfiable'
if (err.code < 500 or err.code >= 600) and err.code != 416:
# Unexpected HTTP error
raise
# Unable to resume
data = urllib2.urlopen(basic_request)
content_length = data.info()['Content-Length']
elif err.code == 416:
# Unable to resume (requested range not satisfiable)
try:
# Open the connection again without the range header
data = urllib2.urlopen(basic_request)
content_length = data.info()['Content-Length']
except (urllib2.HTTPError, ), err:
if err.code < 500 or err.code >= 600:
raise
else:
# Examine the reported length
if (content_length is not None and
(resume_len - 100 < long(content_length) < resume_len + 100)):
# The file had already been fully downloaded.
# Explanation to the above condition: in issue #175 it was revealed that
# YouTube sometimes adds or removes a few bytes from the end of the file,
# changing the file size slightly and causing problems for some users. So
# I decided to implement a suggested change and consider the file
# completely downloaded if the file size differs less than 100 bytes from
# the one in the hard drive.
self.report_file_already_downloaded(filename)
return True
else:
# The length does not match, we start the download over
self.report_unable_to_resume()
open_mode = 'wb'
break
# Retry
count += 1
if count <= retries:
self.report_retry(count, retries)
if content_length is not None and long(content_length) == resume_len:
# Because the file had already been fully downloaded
self.report_file_already_downloaded(filename)
return True
else:
# Because the server didn't let us
self.report_unable_to_resume()
open_mode = 'wb'
if count > retries:
self.trouble(u'ERROR: giving up after %s retries' % retries)
return False
data_len = data.info().get('Content-length', None)
data_len_str = self.format_bytes(data_len)
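
The reworked loop retries on any 5xx error rather than only 503, treats 416 (requested range not satisfiable) as a failed resume, and, per the issue #175 comment, considers the file complete when the local size is within 100 bytes of the reported Content-Length (so a 1,000,000-byte partial file against a Content-Length of 999,990 counts as done). A condensed sketch of that control flow, assuming request and basic_request are prepared urllib2.Request objects:

import urllib2

def establish_connection(request, basic_request, resume_len, retries=10):
    """Sketch: retry any 5xx, handle 416 as a resume failure."""
    count = 0
    while count <= retries:
        try:
            return urllib2.urlopen(request)
        except urllib2.HTTPError, err:
            if (err.code < 500 or err.code >= 600) and err.code != 416:
                raise   # unexpected HTTP error: do not retry
            if err.code == 416:
                # Resume impossible: re-open without the Range header
                data = urllib2.urlopen(basic_request)
                content_length = data.info()['Content-Length']
                if (content_length is not None and
                        resume_len - 100 < long(content_length) < resume_len + 100):
                    return None   # already fully downloaded, give or take
                return data       # length differs: caller restarts with 'wb'
            count += 1            # 5xx server error: retry
    raise IOError('giving up after %s retries' % retries)
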
@@ -587,12 +600,13 @@ class FileDownloader(object):
(stream, filename) = sanitize_open(filename, open_mode)
self.report_destination(filename)
except (OSError, IOError), err:
self.trouble('ERROR: unable to open for writing: %s' % str(err))
self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
return False
try:
stream.write(data_block)
except (IOError, OSError), err:
self.trouble('\nERROR: unable to write data: %s' % str(err))
self.trouble(u'\nERROR: unable to write data: %s' % str(err))
return False
block_size = self.best_block_size(after - before, data_block_len)
# Progress message
@@ -683,9 +697,9 @@ class InfoExtractor(object):
class YoutubeIE(InfoExtractor):
"""Information extractor for youtube.com."""
_VALID_URL = r'^((?:http://)?(?:youtu\.be/|(?:\w+\.)?youtube\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?[\?#](?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'
_LANG_URL = r'http://www.youtube.com/?hl=en&persist_hl=1&gl=US&persist_gl=1&opt_out_ackd=1'
_LOGIN_URL = 'http://www.youtube.com/signup?next=/&gl=US&hl=en'
_LOGIN_URL = 'https://www.youtube.com/signup?next=/&gl=US&hl=en'
_AGE_URL = 'http://www.youtube.com/verify_age?next_url=/&gl=US&hl=en'
_NETRC_MACHINE = 'youtube'
# Listed in order of quality
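
The widened _VALID_URL pattern now accepts https, the youtube-nocookie.com domain, and the #! notation used in links across the site. A quick check against illustrative URLs:

import re

_VALID_URL = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/(?:(?:v/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=))))?([0-9A-Za-z_-]+)(?(1).+)?$'

for url in ('https://www.youtube.com/watch?v=dQw4w9WgXcQ',
            'http://www.youtube.com/#!v=dQw4w9WgXcQ',
            'http://www.youtube-nocookie.com/v/dQw4w9WgXcQ'):
    print url, bool(re.match(_VALID_URL, url))   # all three match
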
@@ -707,35 +721,35 @@ class YoutubeIE(InfoExtractor):
def report_lang(self):
"""Report attempt to set language."""
self._downloader.to_stdout(u'[youtube] Setting language')
self._downloader.to_screen(u'[youtube] Setting language')
def report_login(self):
"""Report attempt to log in."""
self._downloader.to_stdout(u'[youtube] Logging in')
self._downloader.to_screen(u'[youtube] Logging in')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self._downloader.to_stdout(u'[youtube] Confirming age')
self._downloader.to_screen(u'[youtube] Confirming age')
def report_video_webpage_download(self, video_id):
"""Report attempt to download video webpage."""
self._downloader.to_stdout(u'[youtube] %s: Downloading video webpage' % video_id)
self._downloader.to_screen(u'[youtube] %s: Downloading video webpage' % video_id)
def report_video_info_webpage_download(self, video_id):
"""Report attempt to download video info webpage."""
self._downloader.to_stdout(u'[youtube] %s: Downloading video info webpage' % video_id)
self._downloader.to_screen(u'[youtube] %s: Downloading video info webpage' % video_id)
def report_information_extraction(self, video_id):
"""Report attempt to extract video information."""
self._downloader.to_stdout(u'[youtube] %s: Extracting video information' % video_id)
self._downloader.to_screen(u'[youtube] %s: Extracting video information' % video_id)
def report_unavailable_format(self, video_id, format):
"""Report extracted video URL."""
self._downloader.to_stdout(u'[youtube] %s: Format %s not available' % (video_id, format))
self._downloader.to_screen(u'[youtube] %s: Format %s not available' % (video_id, format))
def report_rtmp_download(self):
"""Indicate the download will use the RTMP protocol."""
self._downloader.to_stdout(u'[youtube] RTMP download detected')
self._downloader.to_screen(u'[youtube] RTMP download detected')
def _real_initialize(self):
if self._downloader is None:
@@ -844,6 +858,14 @@ class YoutubeIE(InfoExtractor):
except (urllib2.URLError, httplib.HTTPException, socket.error), err:
self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % str(err))
return
if 'token' not in video_info:
if 'reason' in video_info:
self._downloader.trouble(u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8'))
else:
self._downloader.trouble(u'ERROR: "token" parameter not in video info for unknown reason')
return
# Start extracting information
self.report_information_extraction(video_id)
# uploader
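
video_info is the parse_qs dictionary of the get_video_info response, so a failed lookup arrives as status=fail&reason=...; the new branch prints that reason instead of crashing on the missing token. With a synthetic failure response:

from cgi import parse_qs

video_info = parse_qs('status=fail&errorcode=150&reason=This+video+is+private.')
if 'token' not in video_info:
    if 'reason' in video_info:
        # prints: ERROR: YouTube said: This video is private.
        print u'ERROR: YouTube said: %s' % video_info['reason'][0].decode('utf-8')
    else:
        print u'ERROR: "token" parameter not in video info for unknown reason'
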
@@ -878,7 +900,13 @@ class YoutubeIE(InfoExtractor):
if mobj is not None:
video_description = mobj.group(1)
# token
video_token = urllib.unquote_plus(video_info['token'][0])
# Decide which formats to download
requested_format = self._downloader.params.get('format', None)
get_video_template = 'http://www.youtube.com/get_video?video_id=%s&t=%s&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token)
if 'fmt_url_map' in video_info:
url_map = dict(tuple(pair.split('|')) for pair in video_info['fmt_url_map'][0].split(','))
format_limit = self._downloader.params.get('format_limit', None)
@@ -890,19 +918,17 @@ class YoutubeIE(InfoExtractor):
if len(existing_formats) == 0:
self._downloader.trouble(u'ERROR: no known formats available for video')
return
requested_format = self._downloader.params.get('format', None)
if requested_format is None:
video_url_list = [(existing_formats[0], url_map[existing_formats[0]])] # Best quality
video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])] # Best quality
elif requested_format == '-1':
video_url_list = url_map.items() # All formats
video_url_list = [(f, get_video_template % f) for f in existing_formats] # All formats
else:
if requested_format not in existing_formats:
self._downloader.trouble(u'ERROR: format not available for video')
return
video_url_list = [(requested_format, url_map[requested_format])] # Specific format
video_url_list = [(requested_format, get_video_template % requested_format)] # Specific format
elif 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
self.report_rtmp_download()
video_url_list = [(None, video_info['conn'][0])]
else:
self._downloader.trouble(u'ERROR: no fmt_url_map or conn information found in video info')
return
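
With this change every download is routed through the get_video endpoint: fmt_url_map is only consulted for the list of available format codes, and one URL per format is built from the template. A sketch of the selection logic with hypothetical values:

video_id, video_token = 'abc123', 'some-token'   # hypothetical
get_video_template = ('http://www.youtube.com/get_video?video_id=%s&t=%s'
                      '&eurl=&el=&ps=&asv=&fmt=%%s' % (video_id, video_token))
existing_formats = ['37', '22', '35', '18']      # best quality first

requested_format = None   # as if no -f option were given
if requested_format is None:
    video_url_list = [(existing_formats[0], get_video_template % existing_formats[0])]   # Best quality
elif requested_format == '-1':
    video_url_list = [(f, get_video_template % f) for f in existing_formats]   # All formats
else:
    video_url_list = [(requested_format, get_video_template % requested_format)]   # Specific format
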
@@ -930,7 +956,7 @@ class YoutubeIE(InfoExtractor):
'player_url': player_url,
})
except UnavailableVideoError, err:
self._downloader.trouble(u'ERROR: unable to download video')
self._downloader.trouble(u'ERROR: unable to download video (format may not be available)')
class MetacafeIE(InfoExtractor):
@@ -951,19 +977,19 @@ class MetacafeIE(InfoExtractor):
def report_disclaimer(self):
"""Report disclaimer retrieval."""
self._downloader.to_stdout(u'[metacafe] Retrieving disclaimer')
self._downloader.to_screen(u'[metacafe] Retrieving disclaimer')
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self._downloader.to_stdout(u'[metacafe] Confirming age')
self._downloader.to_screen(u'[metacafe] Confirming age')
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'[metacafe] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'[metacafe] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[metacafe] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[metacafe] %s: Extracting information' % video_id)
def _real_initialize(self):
# Retrieve disclaimer
@@ -1007,7 +1033,6 @@ class MetacafeIE(InfoExtractor):
self._downloader.increment_downloads()
simple_title = mobj.group(2).decode('utf-8')
video_extension = 'flv'
# Retrieve video webpage to extract further information
request = urllib2.Request('http://www.metacafe.com/watch/%s/' % video_id)
@@ -1021,20 +1046,33 @@ class MetacafeIE(InfoExtractor):
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = urllib.unquote(mobj.group(1))
#mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
#if mobj is None:
# self._downloader.trouble(u'ERROR: unable to extract gdaKey')
# return
#gdaKey = mobj.group(1)
#
#video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
video_url = mediaURL
if mobj is not None:
mediaURL = urllib.unquote(mobj.group(1))
video_extension = mediaURL[-3:]
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
else:
mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
vardict = parse_qs(mobj.group(1))
if 'mediaData' not in vardict:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mobj = re.search(r'"mediaURL":"(http.*?)","key":"(.*?)"', vardict['mediaData'][0])
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract media URL')
return
mediaURL = mobj.group(1).replace('\\/', '/')
video_extension = mediaURL[-3:]
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group(2))
mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
if mobj is None:
@@ -1079,11 +1117,11 @@ class DailymotionIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'[dailymotion] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'[dailymotion] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[dailymotion] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[dailymotion] %s: Extracting information' % video_id)
def _real_initialize(self):
return
@@ -1131,7 +1169,7 @@ class DailymotionIE(InfoExtractor):
video_title = mobj.group(1).decode('utf-8')
video_title = sanitize_title(video_title)
mobj = re.search(r'(?im)<div class="dmco_html owner">.*?<a class="name" href="/.+?">(.+?)</a></div>', webpage)
mobj = re.search(r'(?im)<div class="dmco_html owner">.*?<a class="name" href="/.+?">(.+?)</a>', webpage)
if mobj is None:
self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
return
@@ -1166,11 +1204,11 @@ class GoogleIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'[video.google] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'[video.google] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[video.google] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[video.google] %s: Extracting information' % video_id)
def _real_initialize(self):
return
@@ -1276,11 +1314,11 @@ class PhotobucketIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'[photobucket] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'[photobucket] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[photobucket] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[photobucket] %s: Extracting information' % video_id)
def _real_initialize(self):
return
@@ -1360,11 +1398,11 @@ class YahooIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'[video.yahoo] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'[video.yahoo] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[video.yahoo] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[video.yahoo] %s: Extracting information' % video_id)
def _real_initialize(self):
return
@@ -1512,12 +1550,12 @@ class GenericIE(InfoExtractor):
def report_download_webpage(self, video_id):
"""Report webpage download."""
self._downloader.to_stdout(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_stdout(u'[generic] %s: Downloading webpage' % video_id)
self._downloader.to_screen(u'WARNING: Falling back on generic information extractor.')
self._downloader.to_screen(u'[generic] %s: Downloading webpage' % video_id)
def report_extraction(self, video_id):
"""Report information extraction."""
self._downloader.to_stdout(u'[generic] %s: Extracting information' % video_id)
self._downloader.to_screen(u'[generic] %s: Extracting information' % video_id)
def _real_initialize(self):
return
@@ -1619,7 +1657,7 @@ class YoutubeSearchIE(InfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_stdout(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
self._downloader.to_screen(u'[youtube] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._youtube_ie.initialize()
@@ -1710,7 +1748,7 @@ class GoogleSearchIE(InfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_stdout(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
self._downloader.to_screen(u'[video.google] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._google_ie.initialize()
@@ -1801,7 +1839,7 @@ class YahooSearchIE(InfoExtractor):
def report_download_page(self, query, pagenum):
"""Report attempt to download playlist page with given number."""
query = query.decode(preferredencoding())
self._downloader.to_stdout(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
self._downloader.to_screen(u'[video.yahoo] query "%s": Downloading page %s' % (query, pagenum))
def _real_initialize(self):
self._yahoo_ie.initialize()
@@ -1891,7 +1929,7 @@ class YoutubePlaylistIE(InfoExtractor):
def report_download_page(self, playlist_id, pagenum):
"""Report attempt to download playlist page with given number."""
self._downloader.to_stdout(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
self._downloader.to_screen(u'[youtube] PL %s: Downloading page #%s' % (playlist_id, pagenum))
def _real_initialize(self):
self._youtube_ie.initialize()
@@ -1928,6 +1966,11 @@ class YoutubePlaylistIE(InfoExtractor):
break
pagenum = pagenum + 1
playliststart = self._downloader.params.get('playliststart', 1)
playliststart -= 1 #our arrays are zero-based but the playlist is 1-based
if playliststart > 0:
video_ids = video_ids[playliststart:]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
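
--playlist-start is 1-based on the command line, so it is shifted to a 0-based slice index, and the default of 1 drops nothing. Equivalently:

video_ids = ['id1', 'id2', 'id3', 'id4', 'id5']   # hypothetical playlist
playliststart = 3     # as if the user passed --playlist-start 3
playliststart -= 1    # our arrays are zero-based but the playlist is 1-based
if playliststart > 0:
    video_ids = video_ids[playliststart:]
print video_ids       # ['id3', 'id4', 'id5']
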
@@ -1950,7 +1993,7 @@ class YoutubeUserIE(InfoExtractor):
def report_download_page(self, username):
"""Report attempt to download user page."""
self._downloader.to_stdout(u'[youtube] user %s: Downloading page ' % (username))
self._downloader.to_screen(u'[youtube] user %s: Downloading page ' % (username))
def _real_initialize(self):
self._youtube_ie.initialize()
@@ -1983,6 +2026,11 @@ class YoutubeUserIE(InfoExtractor):
ids_in_page.append(mobj.group(1))
video_ids.extend(ids_in_page)
playliststart = self._downloader.params.get('playliststart', 1)
playliststart = playliststart-1 #our arrays are zero-based but the playlist is 1-based
if playliststart > 0:
video_ids = video_ids[playliststart:]
for id in video_ids:
self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
return
@@ -2046,7 +2094,7 @@ if __name__ == '__main__':
if not os.access (filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
downloader.to_stdout('Updating to latest stable version...')
downloader.to_screen('Updating to latest stable version...')
latest_url = 'http://bitbucket.org/rg3/youtube-dl/raw/tip/LATEST_VERSION'
latest_version = urllib.urlopen(latest_url).read().strip()
prog_url = 'http://bitbucket.org/rg3/youtube-dl/raw/%s/youtube-dl' % latest_version
@@ -2054,17 +2102,12 @@ if __name__ == '__main__':
stream = open(filename, 'w')
stream.write(newcontent)
stream.close()
downloader.to_stdout('Updated to version %s' % latest_version)
# General configuration
urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler()))
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
downloader.to_screen('Updated to version %s' % latest_version)
# Parse command line
parser = optparse.OptionParser(
usage='Usage: %prog [options] url...',
version='2010.07.22',
version='2010.10.24',
conflict_handler='resolve',
)
@@ -2080,6 +2123,8 @@ if __name__ == '__main__':
dest='ratelimit', metavar='LIMIT', help='download rate limit (e.g. 50k or 44.6m)')
parser.add_option('-R', '--retries',
dest='retries', metavar='RETRIES', help='number of retries (default is 10)', default=10)
parser.add_option('--playlist-start',
dest='playliststart', metavar='NUMBER', help='playlist video to start at (default is 1)', default=1)
authentication = optparse.OptionGroup(parser, 'Authentication Options')
authentication.add_option('-u', '--username',
@@ -2099,6 +2144,8 @@ if __name__ == '__main__':
action='store_const', dest='format', help='download all available video formats', const='-1')
video_format.add_option('--max-quality',
action='store', dest='format_limit', metavar='FORMAT', help='highest quality format to download')
video_format.add_option('-b', '--best-quality',
action='store_true', dest='bestquality', help='download the best video quality (DEPRECATED)')
parser.add_option_group(video_format)
verbosity = optparse.OptionGroup(parser, 'Verbosity / Simulation Options')
@@ -2131,10 +2178,29 @@ if __name__ == '__main__':
action='store_true', dest='nooverwrites', help='do not overwrite files', default=False)
filesystem.add_option('-c', '--continue',
action='store_true', dest='continue_dl', help='resume partially downloaded files', default=False)
filesystem.add_option('--cookies',
dest='cookiefile', metavar='FILE', help='file to dump cookie jar to')
parser.add_option_group(filesystem)
(opts, args) = parser.parse_args()
# Open appropriate CookieJar
if opts.cookiefile is None:
jar = cookielib.CookieJar()
else:
try:
jar = cookielib.MozillaCookieJar(opts.cookiefile)
if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
jar.load()
except (IOError, OSError), err:
sys.exit(u'ERROR: unable to open cookie file')
# General configuration
cookie_processor = urllib2.HTTPCookieProcessor(jar)
urllib2.install_opener(urllib2.build_opener(urllib2.ProxyHandler()))
urllib2.install_opener(urllib2.build_opener(cookie_processor))
socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
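
MozillaCookieJar stores cookies in the Netscape cookies.txt format; the jar is only loaded when the file already exists (commit e0c982c8d0) and is saved back once downloading finishes. A minimal round trip with a hypothetical cookies.txt path:

import os
import cookielib
import urllib2

cookiefile = 'cookies.txt'   # hypothetical --cookies argument
jar = cookielib.MozillaCookieJar(cookiefile)
if os.path.isfile(cookiefile) and os.access(cookiefile, os.R_OK):
    jar.load()               # reuse cookies from a previous run
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor(jar)))
urllib2.urlopen('http://www.youtube.com/').read(1)   # requests now carry cookies
jar.save()                   # dump the jar back to disk
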
# Batch file verification
batchurls = []
if opts.batchfile is not None:
@@ -2151,6 +2217,8 @@ if __name__ == '__main__':
all_urls = batchurls + args
# Conflicting, missing and erroneous options
if opts.bestquality:
print >>sys.stderr, u'\nWARNING: -b/--best-quality IS DEPRECATED AS IT IS THE DEFAULT BEHAVIOR NOW\n'
if opts.usenetrc and (opts.username is not None or opts.password is not None):
parser.error(u'using .netrc conflicts with giving username/password')
if opts.password is not None and opts.username is None:
@@ -2171,6 +2239,11 @@ if __name__ == '__main__':
opts.retries = long(opts.retries)
except (TypeError, ValueError), err:
parser.error(u'invalid retry count specified')
if opts.playliststart is not None:
try:
opts.playliststart = long(opts.playliststart)
except (TypeError, ValueError), err:
parser.error(u'invalid playlist page specified')
# Information extractors
youtube_ie = YoutubeIE()
@@ -2212,6 +2285,8 @@ if __name__ == '__main__':
'retries': opts.retries,
'continuedl': opts.continue_dl,
'noprogress': opts.noprogress,
'playliststart': opts.playliststart,
'logtostderr': opts.outtmpl == '-',
})
fd.add_info_extractor(youtube_search_ie)
fd.add_info_extractor(youtube_pl_ie)
@@ -2240,6 +2315,14 @@ if __name__ == '__main__':
else:
sys.exit()
retcode = fd.download(all_urls)
# Dump cookie jar if requested
if opts.cookiefile is not None:
try:
jar.save()
except (IOError, OSError), err:
sys.exit(u'ERROR: unable to save cookie jar')
sys.exit(retcode)
except DownloadError: