Compare commits
31 Commits
2014.01.22
...
2014.01.22
Author | SHA1 | Date | |
---|---|---|---|
|
780083dbc6 | ||
|
4919603f66 | ||
|
dd26ced164 | ||
|
bd2d82a5d3 | ||
|
c4cd138b92 | ||
|
65697b3bf3 | ||
|
50317b111d | ||
|
d7975ea287 | ||
|
714d709a31 | ||
|
11577ec054 | ||
|
79bf58f9b5 | ||
|
cd8a562267 | ||
|
de3ef3ed58 | ||
|
8908741806 | ||
|
ba7678f9cc | ||
|
a70c83768e | ||
|
04b4d394d9 | ||
|
130f12985a | ||
|
4ca5d43cd8 | ||
|
4bbf139aa7 | ||
|
47739636a9 | ||
|
407ae733ab | ||
|
c39f7013e1 | ||
|
a4a028323e | ||
|
780ee4e501 | ||
|
d7b51547c0 | ||
|
43030f36db | ||
|
48c63f1653 | ||
|
90f479b6d5 | ||
|
b7ab059084 | ||
|
dd27fd1739 |
377
README.md
377
README.md
@@ -14,194 +14,227 @@ your Unix box, on Windows or on Mac OS X. It is released to the public domain,
|
||||
which means you can modify it, redistribute it or use it however you like.
|
||||
|
||||
# OPTIONS
|
||||
-h, --help print this help text and exit
|
||||
--version print program version and exit
|
||||
-U, --update update this program to latest version. Make sure
|
||||
that you have sufficient permissions (run with
|
||||
sudo if needed)
|
||||
-i, --ignore-errors continue on download errors, for example to to
|
||||
skip unavailable videos in a playlist
|
||||
--abort-on-error Abort downloading of further videos (in the
|
||||
playlist or the command line) if an error occurs
|
||||
--dump-user-agent display the current browser identification
|
||||
--user-agent UA specify a custom user agent
|
||||
--referer REF specify a custom referer, use if the video access
|
||||
is restricted to one domain
|
||||
--list-extractors List all supported extractors and the URLs they
|
||||
would handle
|
||||
--extractor-descriptions Output descriptions of all supported extractors
|
||||
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in an
|
||||
empty string (--proxy "") for direct connection
|
||||
--no-check-certificate Suppress HTTPS certificate validation.
|
||||
--cache-dir DIR Location in the filesystem where youtube-dl can
|
||||
store some downloaded information permanently. By
|
||||
default $XDG_CACHE_HOME/youtube-dl or ~/.cache
|
||||
/youtube-dl . At the moment, only YouTube player
|
||||
files (for videos with obfuscated signatures) are
|
||||
cached, but that may change.
|
||||
--no-cache-dir Disable filesystem caching
|
||||
--socket-timeout None Time to wait before giving up, in seconds
|
||||
--bidi-workaround Work around terminals that lack bidirectional
|
||||
text support. Requires bidiv or fribidi
|
||||
executable in PATH
|
||||
-h, --help print this help text and exit
|
||||
--version print program version and exit
|
||||
-U, --update update this program to latest version. Make
|
||||
sure that you have sufficient permissions
|
||||
(run with sudo if needed)
|
||||
-i, --ignore-errors continue on download errors, for example to
|
||||
to skip unavailable videos in a playlist
|
||||
--abort-on-error Abort downloading of further videos (in the
|
||||
playlist or the command line) if an error
|
||||
occurs
|
||||
--dump-user-agent display the current browser identification
|
||||
--user-agent UA specify a custom user agent
|
||||
--referer REF specify a custom referer, use if the video
|
||||
access is restricted to one domain
|
||||
--list-extractors List all supported extractors and the URLs
|
||||
they would handle
|
||||
--extractor-descriptions Output descriptions of all supported
|
||||
extractors
|
||||
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in
|
||||
an empty string (--proxy "") for direct
|
||||
connection
|
||||
--no-check-certificate Suppress HTTPS certificate validation.
|
||||
--cache-dir DIR Location in the filesystem where youtube-dl
|
||||
can store some downloaded information
|
||||
permanently. By default $XDG_CACHE_HOME
|
||||
/youtube-dl or ~/.cache/youtube-dl . At the
|
||||
moment, only YouTube player files (for
|
||||
videos with obfuscated signatures) are
|
||||
cached, but that may change.
|
||||
--no-cache-dir Disable filesystem caching
|
||||
--socket-timeout None Time to wait before giving up, in seconds
|
||||
--bidi-workaround Work around terminals that lack
|
||||
bidirectional text support. Requires bidiv
|
||||
or fribidi executable in PATH
|
||||
--default-search PREFIX Use this prefix for unqualified URLs. For
|
||||
example "gvsearch2:" downloads two videos
|
||||
from google videos for youtube-dl "large
|
||||
apple". By default (with value "auto")
|
||||
youtube-dl guesses.
|
||||
|
||||
## Video Selection:
|
||||
--playlist-start NUMBER playlist video to start at (default is 1)
|
||||
--playlist-end NUMBER playlist video to end at (default is last)
|
||||
--match-title REGEX download only matching titles (regex or caseless
|
||||
sub-string)
|
||||
--reject-title REGEX skip download for matching titles (regex or
|
||||
caseless sub-string)
|
||||
--max-downloads NUMBER Abort after downloading NUMBER files
|
||||
--min-filesize SIZE Do not download any videos smaller than SIZE
|
||||
(e.g. 50k or 44.6m)
|
||||
--max-filesize SIZE Do not download any videos larger than SIZE (e.g.
|
||||
50k or 44.6m)
|
||||
--date DATE download only videos uploaded in this date
|
||||
--datebefore DATE download only videos uploaded on or before this
|
||||
date (i.e. inclusive)
|
||||
--dateafter DATE download only videos uploaded on or after this
|
||||
date (i.e. inclusive)
|
||||
--min-views COUNT Do not download any videos with less than COUNT
|
||||
views
|
||||
--max-views COUNT Do not download any videos with more than COUNT
|
||||
views
|
||||
--no-playlist download only the currently playing video
|
||||
--age-limit YEARS download only videos suitable for the given age
|
||||
--download-archive FILE Download only videos not listed in the archive
|
||||
file. Record the IDs of all downloaded videos in
|
||||
it.
|
||||
--include-ads Download advertisements as well (experimental)
|
||||
--playlist-start NUMBER playlist video to start at (default is 1)
|
||||
--playlist-end NUMBER playlist video to end at (default is last)
|
||||
--match-title REGEX download only matching titles (regex or
|
||||
caseless sub-string)
|
||||
--reject-title REGEX skip download for matching titles (regex or
|
||||
caseless sub-string)
|
||||
--max-downloads NUMBER Abort after downloading NUMBER files
|
||||
--min-filesize SIZE Do not download any videos smaller than
|
||||
SIZE (e.g. 50k or 44.6m)
|
||||
--max-filesize SIZE Do not download any videos larger than SIZE
|
||||
(e.g. 50k or 44.6m)
|
||||
--date DATE download only videos uploaded in this date
|
||||
--datebefore DATE download only videos uploaded on or before
|
||||
this date (i.e. inclusive)
|
||||
--dateafter DATE download only videos uploaded on or after
|
||||
this date (i.e. inclusive)
|
||||
--min-views COUNT Do not download any videos with less than
|
||||
COUNT views
|
||||
--max-views COUNT Do not download any videos with more than
|
||||
COUNT views
|
||||
--no-playlist download only the currently playing video
|
||||
--age-limit YEARS download only videos suitable for the given
|
||||
age
|
||||
--download-archive FILE Download only videos not listed in the
|
||||
archive file. Record the IDs of all
|
||||
downloaded videos in it.
|
||||
--include-ads Download advertisements as well
|
||||
(experimental)
|
||||
|
||||
## Download Options:
|
||||
-r, --rate-limit LIMIT maximum download rate in bytes per second (e.g.
|
||||
50K or 4.2M)
|
||||
-R, --retries RETRIES number of retries (default is 10)
|
||||
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
|
||||
(default is 1024)
|
||||
--no-resize-buffer do not automatically adjust the buffer size. By
|
||||
default, the buffer size is automatically resized
|
||||
from an initial value of SIZE.
|
||||
-r, --rate-limit LIMIT maximum download rate in bytes per second
|
||||
(e.g. 50K or 4.2M)
|
||||
-R, --retries RETRIES number of retries (default is 10)
|
||||
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
|
||||
(default is 1024)
|
||||
--no-resize-buffer do not automatically adjust the buffer
|
||||
size. By default, the buffer size is
|
||||
automatically resized from an initial value
|
||||
of SIZE.
|
||||
|
||||
## Filesystem Options:
|
||||
-t, --title use title in file name (default)
|
||||
--id use only video ID in file name
|
||||
-l, --literal [deprecated] alias of --title
|
||||
-A, --auto-number number downloaded files starting from 00000
|
||||
-o, --output TEMPLATE output filename template. Use %(title)s to get
|
||||
the title, %(uploader)s for the uploader name,
|
||||
%(uploader_id)s for the uploader nickname if
|
||||
different, %(autonumber)s to get an automatically
|
||||
incremented number, %(ext)s for the filename
|
||||
extension, %(format)s for the format description
|
||||
(like "22 - 1280x720" or "HD"), %(format_id)s for
|
||||
the unique id of the format (like Youtube's
|
||||
itags: "137"), %(upload_date)s for the upload
|
||||
date (YYYYMMDD), %(extractor)s for the provider
|
||||
(youtube, metacafe, etc), %(id)s for the video
|
||||
id, %(playlist)s for the playlist the video is
|
||||
in, %(playlist_index)s for the position in the
|
||||
playlist and %% for a literal percent. Use - to
|
||||
output to stdout. Can also be used to download to
|
||||
a different directory, for example with -o '/my/d
|
||||
ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
|
||||
--autonumber-size NUMBER Specifies the number of digits in %(autonumber)s
|
||||
when it is present in output filename template or
|
||||
--auto-number option is given
|
||||
--restrict-filenames Restrict filenames to only ASCII characters, and
|
||||
avoid "&" and spaces in filenames
|
||||
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
|
||||
--load-info FILE json file containing the video information
|
||||
(created with the "--write-json" option)
|
||||
-w, --no-overwrites do not overwrite files
|
||||
-c, --continue force resume of partially downloaded files. By
|
||||
default, youtube-dl will resume downloads if
|
||||
possible.
|
||||
--no-continue do not resume partially downloaded files (restart
|
||||
from beginning)
|
||||
--cookies FILE file to read cookies from and dump cookie jar in
|
||||
--no-part do not use .part files
|
||||
--no-mtime do not use the Last-modified header to set the
|
||||
file modification time
|
||||
--write-description write video description to a .description file
|
||||
--write-info-json write video metadata to a .info.json file
|
||||
--write-annotations write video annotations to a .annotation file
|
||||
--write-thumbnail write thumbnail image to disk
|
||||
-t, --title use title in file name (default)
|
||||
--id use only video ID in file name
|
||||
-l, --literal [deprecated] alias of --title
|
||||
-A, --auto-number number downloaded files starting from 00000
|
||||
-o, --output TEMPLATE output filename template. Use %(title)s to
|
||||
get the title, %(uploader)s for the
|
||||
uploader name, %(uploader_id)s for the
|
||||
uploader nickname if different,
|
||||
%(autonumber)s to get an automatically
|
||||
incremented number, %(ext)s for the
|
||||
filename extension, %(format)s for the
|
||||
format description (like "22 - 1280x720" or
|
||||
"HD"), %(format_id)s for the unique id of
|
||||
the format (like Youtube's itags: "137"),
|
||||
%(upload_date)s for the upload date
|
||||
(YYYYMMDD), %(extractor)s for the provider
|
||||
(youtube, metacafe, etc), %(id)s for the
|
||||
video id, %(playlist)s for the playlist the
|
||||
video is in, %(playlist_index)s for the
|
||||
position in the playlist and %% for a
|
||||
literal percent. Use - to output to stdout.
|
||||
Can also be used to download to a different
|
||||
directory, for example with -o '/my/downloa
|
||||
ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
|
||||
--autonumber-size NUMBER Specifies the number of digits in
|
||||
%(autonumber)s when it is present in output
|
||||
filename template or --auto-number option
|
||||
is given
|
||||
--restrict-filenames Restrict filenames to only ASCII
|
||||
characters, and avoid "&" and spaces in
|
||||
filenames
|
||||
-a, --batch-file FILE file containing URLs to download ('-' for
|
||||
stdin)
|
||||
--load-info FILE json file containing the video information
|
||||
(created with the "--write-json" option)
|
||||
-w, --no-overwrites do not overwrite files
|
||||
-c, --continue force resume of partially downloaded files.
|
||||
By default, youtube-dl will resume
|
||||
downloads if possible.
|
||||
--no-continue do not resume partially downloaded files
|
||||
(restart from beginning)
|
||||
--cookies FILE file to read cookies from and dump cookie
|
||||
jar in
|
||||
--no-part do not use .part files
|
||||
--no-mtime do not use the Last-modified header to set
|
||||
the file modification time
|
||||
--write-description write video description to a .description
|
||||
file
|
||||
--write-info-json write video metadata to a .info.json file
|
||||
--write-annotations write video annotations to a .annotation
|
||||
file
|
||||
--write-thumbnail write thumbnail image to disk
|
||||
|
||||
## Verbosity / Simulation Options:
|
||||
-q, --quiet activates quiet mode
|
||||
-s, --simulate do not download the video and do not write
|
||||
anything to disk
|
||||
--skip-download do not download the video
|
||||
-g, --get-url simulate, quiet but print URL
|
||||
-e, --get-title simulate, quiet but print title
|
||||
--get-id simulate, quiet but print id
|
||||
--get-thumbnail simulate, quiet but print thumbnail URL
|
||||
--get-description simulate, quiet but print video description
|
||||
--get-duration simulate, quiet but print video length
|
||||
--get-filename simulate, quiet but print output filename
|
||||
--get-format simulate, quiet but print output format
|
||||
-j, --dump-json simulate, quiet but print JSON information
|
||||
--newline output progress bar as new lines
|
||||
--no-progress do not print progress bar
|
||||
--console-title display progress in console titlebar
|
||||
-v, --verbose print various debugging information
|
||||
--dump-intermediate-pages print downloaded pages to debug problems (very
|
||||
verbose)
|
||||
--write-pages Write downloaded intermediary pages to files in
|
||||
the current directory to debug problems
|
||||
--youtube-include-dash-manifest Try to download the DASH manifest on
|
||||
YouTube videos (experimental)
|
||||
-q, --quiet activates quiet mode
|
||||
-s, --simulate do not download the video and do not write
|
||||
anything to disk
|
||||
--skip-download do not download the video
|
||||
-g, --get-url simulate, quiet but print URL
|
||||
-e, --get-title simulate, quiet but print title
|
||||
--get-id simulate, quiet but print id
|
||||
--get-thumbnail simulate, quiet but print thumbnail URL
|
||||
--get-description simulate, quiet but print video description
|
||||
--get-duration simulate, quiet but print video length
|
||||
--get-filename simulate, quiet but print output filename
|
||||
--get-format simulate, quiet but print output format
|
||||
-j, --dump-json simulate, quiet but print JSON information
|
||||
--newline output progress bar as new lines
|
||||
--no-progress do not print progress bar
|
||||
--console-title display progress in console titlebar
|
||||
-v, --verbose print various debugging information
|
||||
--dump-intermediate-pages print downloaded pages to debug problems
|
||||
(very verbose)
|
||||
--write-pages Write downloaded intermediary pages to
|
||||
files in the current directory to debug
|
||||
problems
|
||||
--print-traffic Display sent and read HTTP traffic
|
||||
|
||||
## Video Format Options:
|
||||
-f, --format FORMAT video format code, specify the order of
|
||||
preference using slashes: "-f 22/17/18". "-f mp4"
|
||||
and "-f flv" are also supported
|
||||
--all-formats download all available video formats
|
||||
--prefer-free-formats prefer free video formats unless a specific one
|
||||
is requested
|
||||
--max-quality FORMAT highest quality format to download
|
||||
-F, --list-formats list all available formats
|
||||
-f, --format FORMAT video format code, specify the order of
|
||||
preference using slashes: "-f 22/17/18".
|
||||
"-f mp4" and "-f flv" are also supported.
|
||||
You can also use the special names "best",
|
||||
"bestaudio", "worst", and "worstaudio"
|
||||
--all-formats download all available video formats
|
||||
--prefer-free-formats prefer free video formats unless a specific
|
||||
one is requested
|
||||
--max-quality FORMAT highest quality format to download
|
||||
-F, --list-formats list all available formats
|
||||
|
||||
## Subtitle Options:
|
||||
--write-sub write subtitle file
|
||||
--write-auto-sub write automatic subtitle file (youtube only)
|
||||
--all-subs downloads all the available subtitles of the
|
||||
video
|
||||
--list-subs lists all available subtitles for the video
|
||||
--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt] youtube
|
||||
only)
|
||||
--sub-lang LANGS languages of the subtitles to download (optional)
|
||||
separated by commas, use IETF language tags like
|
||||
'en,pt'
|
||||
--write-sub write subtitle file
|
||||
--write-auto-sub write automatic subtitle file (youtube
|
||||
only)
|
||||
--all-subs downloads all the available subtitles of
|
||||
the video
|
||||
--list-subs lists all available subtitles for the video
|
||||
--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt]
|
||||
youtube only)
|
||||
--sub-lang LANGS languages of the subtitles to download
|
||||
(optional) separated by commas, use IETF
|
||||
language tags like 'en,pt'
|
||||
|
||||
## Authentication Options:
|
||||
-u, --username USERNAME account username
|
||||
-p, --password PASSWORD account password
|
||||
-n, --netrc use .netrc authentication data
|
||||
--video-password PASSWORD video password (vimeo, smotri)
|
||||
-u, --username USERNAME account username
|
||||
-p, --password PASSWORD account password
|
||||
-n, --netrc use .netrc authentication data
|
||||
--video-password PASSWORD video password (vimeo, smotri)
|
||||
|
||||
## Post-processing Options:
|
||||
-x, --extract-audio convert video files to audio-only files (requires
|
||||
ffmpeg or avconv and ffprobe or avprobe)
|
||||
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or
|
||||
"wav"; best by default
|
||||
--audio-quality QUALITY ffmpeg/avconv audio quality specification, insert
|
||||
a value between 0 (better) and 9 (worse) for VBR
|
||||
or a specific bitrate like 128K (default 5)
|
||||
--recode-video FORMAT Encode the video to another format if necessary
|
||||
(currently supported: mp4|flv|ogg|webm)
|
||||
-k, --keep-video keeps the video file on disk after the post-
|
||||
processing; the video is erased by default
|
||||
--no-post-overwrites do not overwrite post-processed files; the post-
|
||||
processed files are overwritten by default
|
||||
--embed-subs embed subtitles in the video (only for mp4
|
||||
videos)
|
||||
--add-metadata write metadata to the video file
|
||||
--xattrs write metadata to the video file's xattrs (using
|
||||
dublin core and xdg standards)
|
||||
--prefer-avconv Prefer avconv over ffmpeg for running the
|
||||
postprocessors (default)
|
||||
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
|
||||
postprocessors
|
||||
-x, --extract-audio convert video files to audio-only files
|
||||
(requires ffmpeg or avconv and ffprobe or
|
||||
avprobe)
|
||||
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
|
||||
"opus", or "wav"; best by default
|
||||
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
|
||||
insert a value between 0 (better) and 9
|
||||
(worse) for VBR or a specific bitrate like
|
||||
128K (default 5)
|
||||
--recode-video FORMAT Encode the video to another format if
|
||||
necessary (currently supported:
|
||||
mp4|flv|ogg|webm)
|
||||
-k, --keep-video keeps the video file on disk after the
|
||||
post-processing; the video is erased by
|
||||
default
|
||||
--no-post-overwrites do not overwrite post-processed files; the
|
||||
post-processed files are overwritten by
|
||||
default
|
||||
--embed-subs embed subtitles in the video (only for mp4
|
||||
videos)
|
||||
--add-metadata write metadata to the video file
|
||||
--xattrs write metadata to the video file's xattrs
|
||||
(using dublin core and xdg standards)
|
||||
--prefer-avconv Prefer avconv over ffmpeg for running the
|
||||
postprocessors (default)
|
||||
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
|
||||
postprocessors
|
||||
|
||||
# CONFIGURATION
|
||||
|
||||
|
@@ -1,5 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
@@ -30,125 +32,155 @@ class TestFormatSelection(unittest.TestCase):
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = True
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 460},
|
||||
{u'ext': u'mp4', u'height': 460},
|
||||
{'ext': 'webm', 'height': 460},
|
||||
{'ext': 'mp4', 'height': 460},
|
||||
]
|
||||
info_dict = {u'formats': formats, u'extractor': u'test'}
|
||||
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||
yie = YoutubeIE(ydl)
|
||||
yie._sort_formats(info_dict['formats'])
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'webm')
|
||||
self.assertEqual(downloaded['ext'], 'webm')
|
||||
|
||||
# Different resolution => download best quality (mp4)
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = True
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 720},
|
||||
{u'ext': u'mp4', u'height': 1080},
|
||||
{'ext': 'webm', 'height': 720},
|
||||
{'ext': 'mp4', 'height': 1080},
|
||||
]
|
||||
info_dict[u'formats'] = formats
|
||||
info_dict['formats'] = formats
|
||||
yie = YoutubeIE(ydl)
|
||||
yie._sort_formats(info_dict['formats'])
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'mp4')
|
||||
self.assertEqual(downloaded['ext'], 'mp4')
|
||||
|
||||
# No prefer_free_formats => prefer mp4 and flv for greater compatibilty
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = False
|
||||
formats = [
|
||||
{u'ext': u'webm', u'height': 720},
|
||||
{u'ext': u'mp4', u'height': 720},
|
||||
{u'ext': u'flv', u'height': 720},
|
||||
{'ext': 'webm', 'height': 720},
|
||||
{'ext': 'mp4', 'height': 720},
|
||||
{'ext': 'flv', 'height': 720},
|
||||
]
|
||||
info_dict[u'formats'] = formats
|
||||
info_dict['formats'] = formats
|
||||
yie = YoutubeIE(ydl)
|
||||
yie._sort_formats(info_dict['formats'])
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'mp4')
|
||||
self.assertEqual(downloaded['ext'], 'mp4')
|
||||
|
||||
ydl = YDL()
|
||||
ydl.params['prefer_free_formats'] = False
|
||||
formats = [
|
||||
{u'ext': u'flv', u'height': 720},
|
||||
{u'ext': u'webm', u'height': 720},
|
||||
{'ext': 'flv', 'height': 720},
|
||||
{'ext': 'webm', 'height': 720},
|
||||
]
|
||||
info_dict[u'formats'] = formats
|
||||
info_dict['formats'] = formats
|
||||
yie = YoutubeIE(ydl)
|
||||
yie._sort_formats(info_dict['formats'])
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'ext'], u'flv')
|
||||
self.assertEqual(downloaded['ext'], 'flv')
|
||||
|
||||
def test_format_limit(self):
|
||||
formats = [
|
||||
{u'format_id': u'meh', u'url': u'http://example.com/meh', 'preference': 1},
|
||||
{u'format_id': u'good', u'url': u'http://example.com/good', 'preference': 2},
|
||||
{u'format_id': u'great', u'url': u'http://example.com/great', 'preference': 3},
|
||||
{u'format_id': u'excellent', u'url': u'http://example.com/exc', 'preference': 4},
|
||||
{'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1},
|
||||
{'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2},
|
||||
{'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
|
||||
{'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
|
||||
]
|
||||
info_dict = {
|
||||
u'formats': formats, u'extractor': u'test', 'id': 'testvid'}
|
||||
'formats': formats, 'extractor': 'test', 'id': 'testvid'}
|
||||
|
||||
ydl = YDL()
|
||||
ydl.process_ie_result(info_dict)
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'excellent')
|
||||
self.assertEqual(downloaded['format_id'], 'excellent')
|
||||
|
||||
ydl = YDL({'format_limit': 'good'})
|
||||
assert ydl.params['format_limit'] == 'good'
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'good')
|
||||
self.assertEqual(downloaded['format_id'], 'good')
|
||||
|
||||
ydl = YDL({'format_limit': 'great', 'format': 'all'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
self.assertEqual(ydl.downloaded_info_dicts[0][u'format_id'], u'meh')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[1][u'format_id'], u'good')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[2][u'format_id'], u'great')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good')
|
||||
self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great')
|
||||
self.assertTrue('3' in ydl.msgs[0])
|
||||
|
||||
ydl = YDL()
|
||||
ydl.params['format_limit'] = 'excellent'
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded[u'format_id'], u'excellent')
|
||||
self.assertEqual(downloaded['format_id'], 'excellent')
|
||||
|
||||
def test_format_selection(self):
|
||||
formats = [
|
||||
{u'format_id': u'35', u'ext': u'mp4', 'preference': 1},
|
||||
{u'format_id': u'45', u'ext': u'webm', 'preference': 2},
|
||||
{u'format_id': u'47', u'ext': u'webm', 'preference': 3},
|
||||
{u'format_id': u'2', u'ext': u'flv', 'preference': 4},
|
||||
{'format_id': '35', 'ext': 'mp4', 'preference': 1},
|
||||
{'format_id': '45', 'ext': 'webm', 'preference': 2},
|
||||
{'format_id': '47', 'ext': 'webm', 'preference': 3},
|
||||
{'format_id': '2', 'ext': 'flv', 'preference': 4},
|
||||
]
|
||||
info_dict = {u'formats': formats, u'extractor': u'test'}
|
||||
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||
|
||||
ydl = YDL({'format': u'20/47'})
|
||||
ydl = YDL({'format': '20/47'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'47')
|
||||
self.assertEqual(downloaded['format_id'], '47')
|
||||
|
||||
ydl = YDL({'format': u'20/71/worst'})
|
||||
ydl = YDL({'format': '20/71/worst'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'35')
|
||||
self.assertEqual(downloaded['format_id'], '35')
|
||||
|
||||
ydl = YDL()
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'2')
|
||||
self.assertEqual(downloaded['format_id'], '2')
|
||||
|
||||
ydl = YDL({'format': u'webm/mp4'})
|
||||
ydl = YDL({'format': 'webm/mp4'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'47')
|
||||
self.assertEqual(downloaded['format_id'], '47')
|
||||
|
||||
ydl = YDL({'format': u'3gp/40/mp4'})
|
||||
ydl = YDL({'format': '3gp/40/mp4'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], u'35')
|
||||
self.assertEqual(downloaded['format_id'], '35')
|
||||
|
||||
def test_format_selection_audio(self):
|
||||
formats = [
|
||||
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none'},
|
||||
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none'},
|
||||
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none'},
|
||||
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4},
|
||||
]
|
||||
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||
|
||||
ydl = YDL({'format': 'bestaudio'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], 'audio-high')
|
||||
|
||||
ydl = YDL({'format': 'worstaudio'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], 'audio-low')
|
||||
|
||||
formats = [
|
||||
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1},
|
||||
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2},
|
||||
]
|
||||
info_dict = {'formats': formats, 'extractor': 'test'}
|
||||
|
||||
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
|
||||
ydl.process_ie_result(info_dict.copy())
|
||||
downloaded = ydl.downloaded_info_dicts[0]
|
||||
self.assertEqual(downloaded['format_id'], 'vid-high')
|
||||
|
||||
def test_youtube_format_selection(self):
|
||||
order = [
|
||||
@@ -200,17 +232,17 @@ class TestFormatSelection(unittest.TestCase):
|
||||
|
||||
def test_prepare_filename(self):
|
||||
info = {
|
||||
u'id': u'1234',
|
||||
u'ext': u'mp4',
|
||||
u'width': None,
|
||||
'id': '1234',
|
||||
'ext': 'mp4',
|
||||
'width': None,
|
||||
}
|
||||
def fname(templ):
|
||||
ydl = YoutubeDL({'outtmpl': templ})
|
||||
return ydl.prepare_filename(info)
|
||||
self.assertEqual(fname(u'%(id)s.%(ext)s'), u'1234.mp4')
|
||||
self.assertEqual(fname(u'%(id)s-%(width)s.%(ext)s'), u'1234-NA.mp4')
|
||||
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
|
||||
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
|
||||
# Replace missing fields with 'NA'
|
||||
self.assertEqual(fname(u'%(uploader_date)s-%(id)s.%(ext)s'), u'NA-1234.mp4')
|
||||
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
@@ -19,6 +19,7 @@ from youtube_dl.utils import (
|
||||
fix_xml_ampersands,
|
||||
get_meta_content,
|
||||
orderedSet,
|
||||
PagedList,
|
||||
parse_duration,
|
||||
sanitize_filename,
|
||||
shell_quote,
|
||||
@@ -214,5 +215,26 @@ class TestUtil(unittest.TestCase):
|
||||
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
|
||||
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
|
||||
|
||||
def test_paged_list(self):
|
||||
def testPL(size, pagesize, sliceargs, expected):
|
||||
def get_page(pagenum):
|
||||
firstid = pagenum * pagesize
|
||||
upto = min(size, pagenum * pagesize + pagesize)
|
||||
for i in range(firstid, upto):
|
||||
yield i
|
||||
|
||||
pl = PagedList(get_page, pagesize)
|
||||
got = pl.getslice(*sliceargs)
|
||||
self.assertEqual(got, expected)
|
||||
|
||||
testPL(5, 2, (), [0, 1, 2, 3, 4])
|
||||
testPL(5, 2, (1,), [1, 2, 3, 4])
|
||||
testPL(5, 2, (2,), [2, 3, 4])
|
||||
testPL(5, 2, (4,), [4])
|
||||
testPL(5, 2, (0, 3), [0, 1, 2])
|
||||
testPL(5, 2, (1, 4), [1, 2, 3])
|
||||
testPL(5, 2, (2, 99), [2, 3, 4])
|
||||
testPL(5, 2, (20, 99), [])
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
@@ -39,6 +39,7 @@ from .utils import (
|
||||
locked_file,
|
||||
make_HTTPS_handler,
|
||||
MaxDownloadsReached,
|
||||
PagedList,
|
||||
PostProcessingError,
|
||||
platform_name,
|
||||
preferredencoding,
|
||||
@@ -152,6 +153,8 @@ class YoutubeDL(object):
|
||||
support, using fridibi
|
||||
debug_printtraffic:Print out sent and received HTTP traffic
|
||||
include_ads: Download ads as well
|
||||
default_search: Prepend this string if an input url is not valid.
|
||||
'auto' for elaborate guessing
|
||||
|
||||
The following parameters are not used by YoutubeDL itself, they are used by
|
||||
the FileDownloader:
|
||||
@@ -576,19 +579,27 @@ class YoutubeDL(object):
|
||||
|
||||
playlist_results = []
|
||||
|
||||
n_all_entries = len(ie_result['entries'])
|
||||
playliststart = self.params.get('playliststart', 1) - 1
|
||||
playlistend = self.params.get('playlistend', None)
|
||||
# For backwards compatibility, interpret -1 as whole list
|
||||
if playlistend == -1:
|
||||
playlistend = None
|
||||
|
||||
entries = ie_result['entries'][playliststart:playlistend]
|
||||
n_entries = len(entries)
|
||||
|
||||
self.to_screen(
|
||||
"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
|
||||
(ie_result['extractor'], playlist, n_all_entries, n_entries))
|
||||
if isinstance(ie_result['entries'], list):
|
||||
n_all_entries = len(ie_result['entries'])
|
||||
entries = ie_result['entries'][playliststart:playlistend]
|
||||
n_entries = len(entries)
|
||||
self.to_screen(
|
||||
"[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
|
||||
(ie_result['extractor'], playlist, n_all_entries, n_entries))
|
||||
else:
|
||||
assert isinstance(ie_result['entries'], PagedList)
|
||||
entries = ie_result['entries'].getslice(
|
||||
playliststart, playlistend)
|
||||
n_entries = len(entries)
|
||||
self.to_screen(
|
||||
"[%s] playlist %s: Downloading %d videos" %
|
||||
(ie_result['extractor'], playlist, n_entries))
|
||||
|
||||
for i, entry in enumerate(entries, 1):
|
||||
self.to_screen('[download] Downloading video #%s of %s' % (i, n_entries))
|
||||
@@ -635,6 +646,18 @@ class YoutubeDL(object):
|
||||
return available_formats[-1]
|
||||
elif format_spec == 'worst':
|
||||
return available_formats[0]
|
||||
elif format_spec == 'bestaudio':
|
||||
audio_formats = [
|
||||
f for f in available_formats
|
||||
if f.get('vcodec') == 'none']
|
||||
if audio_formats:
|
||||
return audio_formats[-1]
|
||||
elif format_spec == 'worstaudio':
|
||||
audio_formats = [
|
||||
f for f in available_formats
|
||||
if f.get('vcodec') == 'none']
|
||||
if audio_formats:
|
||||
return audio_formats[0]
|
||||
else:
|
||||
extensions = ['mp4', 'flv', 'webm', '3gp']
|
||||
if format_spec in extensions:
|
||||
@@ -699,7 +722,7 @@ class YoutubeDL(object):
|
||||
self.list_formats(info_dict)
|
||||
return
|
||||
|
||||
req_format = self.params.get('format', 'best')
|
||||
req_format = self.params.get('format')
|
||||
if req_format is None:
|
||||
req_format = 'best'
|
||||
formats_to_download = []
|
||||
@@ -1092,6 +1115,8 @@ class YoutubeDL(object):
|
||||
res += 'audio'
|
||||
if fdict.get('abr') is not None:
|
||||
res += '@%3dk' % fdict['abr']
|
||||
if fdict.get('asr') is not None:
|
||||
res += ' (%5dHz)' % fdict['asr']
|
||||
if fdict.get('filesize') is not None:
|
||||
if res:
|
||||
res += ', '
|
||||
|
@@ -199,7 +199,9 @@ def parseOpts(overrideArguments=None):
|
||||
general.add_option(
|
||||
'--bidi-workaround', dest='bidi_workaround', action='store_true',
|
||||
help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
|
||||
|
||||
general.add_option('--default-search',
|
||||
dest='default_search', metavar='PREFIX',
|
||||
help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
|
||||
|
||||
selection.add_option(
|
||||
'--playlist-start',
|
||||
@@ -242,6 +244,10 @@ def parseOpts(overrideArguments=None):
|
||||
'--include-ads', dest='include_ads',
|
||||
action='store_true',
|
||||
help='Download advertisements as well (experimental)')
|
||||
verbosity.add_option(
|
||||
'--youtube-include-dash-manifest', action='store_true',
|
||||
dest='youtube_include_dash_manifest', default=False,
|
||||
help='Try to download the DASH manifest on YouTube videos (experimental)')
|
||||
|
||||
authentication.add_option('-u', '--username',
|
||||
dest='username', metavar='USERNAME', help='account username')
|
||||
@@ -254,8 +260,8 @@ def parseOpts(overrideArguments=None):
|
||||
|
||||
|
||||
video_format.add_option('-f', '--format',
|
||||
action='store', dest='format', metavar='FORMAT', default='best',
|
||||
help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported')
|
||||
action='store', dest='format', metavar='FORMAT', default=None,
|
||||
help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio"')
|
||||
video_format.add_option('--all-formats',
|
||||
action='store_const', dest='format', help='download all available video formats', const='all')
|
||||
video_format.add_option('--prefer-free-formats',
|
||||
@@ -346,7 +352,8 @@ def parseOpts(overrideArguments=None):
|
||||
help=optparse.SUPPRESS_HELP)
|
||||
verbosity.add_option('--print-traffic',
|
||||
dest='debug_printtraffic', action='store_true', default=False,
|
||||
help=optparse.SUPPRESS_HELP)
|
||||
help='Display sent and read HTTP traffic')
|
||||
|
||||
|
||||
filesystem.add_option('-t', '--title',
|
||||
action='store_true', dest='usetitle', help='use title in file name (default)', default=False)
|
||||
@@ -619,6 +626,12 @@ def _real_main(argv=None):
|
||||
date = DateRange.day(opts.date)
|
||||
else:
|
||||
date = DateRange(opts.dateafter, opts.datebefore)
|
||||
if opts.default_search not in ('auto', None) and ':' not in opts.default_search:
|
||||
parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')
|
||||
|
||||
# Do not download videos when there are audio-only formats
|
||||
if opts.extractaudio and not opts.keepvideo and opts.format is None:
|
||||
opts.format = 'bestaudio/best'
|
||||
|
||||
# --all-sub automatically sets --write-sub if --write-auto-sub is not given
|
||||
# this was the old behaviour if only --all-sub was given.
|
||||
@@ -720,6 +733,8 @@ def _real_main(argv=None):
|
||||
'debug_printtraffic': opts.debug_printtraffic,
|
||||
'prefer_ffmpeg': opts.prefer_ffmpeg,
|
||||
'include_ads': opts.include_ads,
|
||||
'default_search': opts.default_search,
|
||||
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
|
||||
}
|
||||
|
||||
with YoutubeDL(ydl_opts) as ydl:
|
||||
|
@@ -1,3 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
@@ -15,30 +17,22 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
|
||||
_VALID_URL = r'''(?x)https?://(?:www.)?comedycentral.com/
|
||||
(video-clips|episodes|cc-studios|video-collections)
|
||||
/(?P<title>.*)'''
|
||||
_FEED_URL = u'http://comedycentral.com/feeds/mrss/'
|
||||
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
|
||||
u'md5': u'4167875aae411f903b751a21f357f1ee',
|
||||
u'info_dict': {
|
||||
u'id': u'cef0cbb3-e776-4bc9-b62e-8016deccb354',
|
||||
u'ext': u'mp4',
|
||||
u'title': u'Uncensored - Greg Fitzsimmons - Too Good of a Mother',
|
||||
u'description': u'After a certain point, breastfeeding becomes c**kblocking.',
|
||||
'url': 'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
|
||||
'md5': '4167875aae411f903b751a21f357f1ee',
|
||||
'info_dict': {
|
||||
'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
|
||||
'ext': 'mp4',
|
||||
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
|
||||
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
title = mobj.group('title')
|
||||
webpage = self._download_webpage(url, title)
|
||||
mgid = self._search_regex(r'data-mgid="(?P<mgid>mgid:.*?)"',
|
||||
webpage, u'mgid')
|
||||
return self._get_videos_info(mgid)
|
||||
|
||||
|
||||
class ComedyCentralShowsIE(InfoExtractor):
|
||||
IE_DESC = u'The Daily Show / Colbert Report'
|
||||
IE_DESC = 'The Daily Show / Colbert Report'
|
||||
# urls can be abbreviations like :thedailyshow or :colbert
|
||||
# urls for episodes like:
|
||||
# or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
|
||||
@@ -55,14 +49,14 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?)))
|
||||
$"""
|
||||
_TEST = {
|
||||
u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
|
||||
u'file': u'422212.mp4',
|
||||
u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d',
|
||||
u'info_dict': {
|
||||
u"upload_date": u"20121214",
|
||||
u"description": u"Kristen Stewart",
|
||||
u"uploader": u"thedailyshow",
|
||||
u"title": u"thedailyshow-kristen-stewart part 1"
|
||||
'url': 'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
|
||||
'file': '422212.mp4',
|
||||
'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
|
||||
'info_dict': {
|
||||
"upload_date": "20121214",
|
||||
"description": "Kristen Stewart",
|
||||
"uploader": "thedailyshow",
|
||||
"title": "thedailyshow-kristen-stewart part 1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,20 +88,20 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
def _transform_rtmp_url(rtmp_video_url):
|
||||
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
|
||||
if not m:
|
||||
raise ExtractorError(u'Cannot transform RTMP url')
|
||||
raise ExtractorError('Cannot transform RTMP url')
|
||||
base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
|
||||
return base + m.group('finalid')
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid URL: %s' % url)
|
||||
raise ExtractorError('Invalid URL: %s' % url)
|
||||
|
||||
if mobj.group('shortname'):
|
||||
if mobj.group('shortname') in ('tds', 'thedailyshow'):
|
||||
url = u'http://www.thedailyshow.com/full-episodes/'
|
||||
url = 'http://www.thedailyshow.com/full-episodes/'
|
||||
else:
|
||||
url = u'http://www.colbertnation.com/full-episodes/'
|
||||
url = 'http://www.colbertnation.com/full-episodes/'
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
assert mobj is not None
|
||||
|
||||
@@ -133,9 +127,9 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
url = htmlHandle.geturl()
|
||||
mobj = re.match(self._VALID_URL, url, re.VERBOSE)
|
||||
if mobj is None:
|
||||
raise ExtractorError(u'Invalid redirected URL: ' + url)
|
||||
raise ExtractorError('Invalid redirected URL: ' + url)
|
||||
if mobj.group('episode') == '':
|
||||
raise ExtractorError(u'Redirected URL is still not specific: ' + url)
|
||||
raise ExtractorError('Redirected URL is still not specific: ' + url)
|
||||
epTitle = mobj.group('episode')
|
||||
|
||||
mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)
|
||||
@@ -147,15 +141,15 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
|
||||
altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
|
||||
if len(altMovieParams) == 0:
|
||||
raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
|
||||
raise ExtractorError('unable to find Flash URL in webpage ' + url)
|
||||
else:
|
||||
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
|
||||
|
||||
uri = mMovieParams[0][1]
|
||||
indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
|
||||
idoc = self._download_xml(indexUrl, epTitle,
|
||||
u'Downloading show index',
|
||||
u'unable to download episode index')
|
||||
'Downloading show index',
|
||||
'unable to download episode index')
|
||||
|
||||
results = []
|
||||
|
||||
@@ -170,7 +164,7 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
|
||||
compat_urllib_parse.urlencode({'uri': mediaId}))
|
||||
cdoc = self._download_xml(configUrl, epTitle,
|
||||
u'Downloading configuration for %s' % shortMediaId)
|
||||
'Downloading configuration for %s' % shortMediaId)
|
||||
|
||||
turls = []
|
||||
for rendition in cdoc.findall('.//rendition'):
|
||||
@@ -178,7 +172,7 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
turls.append(finfo)
|
||||
|
||||
if len(turls) == 0:
|
||||
self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
|
||||
self._downloader.report_error('unable to download ' + mediaId + ': No videos found')
|
||||
continue
|
||||
|
||||
formats = []
|
||||
@@ -192,7 +186,7 @@ class ComedyCentralShowsIE(InfoExtractor):
|
||||
'width': w,
|
||||
})
|
||||
|
||||
effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum+1)
|
||||
effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1)
|
||||
results.append({
|
||||
'id': shortMediaId,
|
||||
'formats': formats,
|
||||
|
@@ -63,6 +63,7 @@ class InfoExtractor(object):
|
||||
* tbr Average bitrate of audio and video in KBit/s
|
||||
* abr Average audio bitrate in KBit/s
|
||||
* acodec Name of the audio codec in use
|
||||
* asr Audio sampling rate in Hertz
|
||||
* vbr Average video bitrate in KBit/s
|
||||
* vcodec Name of the video codec in use
|
||||
* filesize The number of bytes, if known in advance
|
||||
|
@@ -10,7 +10,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class CSpanIE(InfoExtractor):
|
||||
_VALID_URL = r'http://www\.c-spanvideo\.org/program/(.*)'
|
||||
_VALID_URL = r'http://(?:www\.)?c-spanvideo\.org/program/(?P<name>.*)'
|
||||
IE_DESC = 'C-SPAN'
|
||||
_TEST = {
|
||||
'url': 'http://www.c-spanvideo.org/program/HolderonV',
|
||||
@@ -20,13 +20,14 @@ class CSpanIE(InfoExtractor):
|
||||
'title': 'Attorney General Eric Holder on Voting Rights Act Decision',
|
||||
'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in [Shelby County v. Holder] in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
|
||||
},
|
||||
'skip': 'Regularly fails on travis, for unknown reasons',
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
prog_name = mobj.group(1)
|
||||
prog_name = mobj.group('name')
|
||||
webpage = self._download_webpage(url, prog_name)
|
||||
video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
|
||||
video_id = self._search_regex(r'prog(?:ram)?id=(.*?)&', webpage, 'video id')
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'<!-- title -->\n\s*<h1[^>]*>(.*?)</h1>', webpage, 'title')
|
||||
|
@@ -1,22 +1,25 @@
|
||||
# encoding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .canalplus import CanalplusIE
|
||||
|
||||
|
||||
class D8IE(CanalplusIE):
|
||||
_VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
|
||||
_VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
|
||||
IE_NAME = u'd8.tv'
|
||||
IE_NAME = 'd8.tv'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
|
||||
u'file': u'966289.flv',
|
||||
u'info_dict': {
|
||||
u'title': u'Campagne intime - Documentaire exceptionnel',
|
||||
u'description': u'md5:d2643b799fb190846ae09c61e59a859f',
|
||||
u'upload_date': u'20131108',
|
||||
'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
|
||||
'file': '966289.flv',
|
||||
'info_dict': {
|
||||
'title': 'Campagne intime - Documentaire exceptionnel',
|
||||
'description': 'md5:d2643b799fb190846ae09c61e59a859f',
|
||||
'upload_date': '20131108',
|
||||
},
|
||||
u'params': {
|
||||
'params': {
|
||||
# rtmp
|
||||
u'skip_download': True,
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'videos get deleted after a while',
|
||||
}
|
||||
|
@@ -1,7 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .mtv import MTVServicesInfoExtractor
|
||||
|
||||
|
||||
@@ -18,12 +16,3 @@ class GametrailersIE(MTVServicesInfoExtractor):
|
||||
}
|
||||
|
||||
_FEED_URL = 'http://www.gametrailers.com/feeds/mrss'
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"',
|
||||
r'data-contentId=\'(?P<mgid>mgid:.*?)\''],
|
||||
webpage, 'mgid')
|
||||
return self._get_videos_info(mgid)
|
||||
|
@@ -162,8 +162,19 @@ class GenericIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
parsed_url = compat_urlparse.urlparse(url)
|
||||
if not parsed_url.scheme:
|
||||
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
|
||||
return self.url_result('http://' + url)
|
||||
default_search = self._downloader.params.get('default_search')
|
||||
if default_search is None:
|
||||
default_search = 'auto'
|
||||
|
||||
if default_search == 'auto':
|
||||
if '/' in url:
|
||||
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
|
||||
return self.url_result('http://' + url)
|
||||
else:
|
||||
return self.url_result('ytsearch:' + url)
|
||||
else:
|
||||
assert ':' in default_search
|
||||
return self.url_result(default_search + url)
|
||||
video_id = os.path.splitext(url.split('/')[-1])[0]
|
||||
|
||||
self.to_screen('%s: Requesting header' % video_id)
|
||||
|
@@ -67,23 +67,16 @@ class ImdbListIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
list_id = mobj.group('id')
|
||||
|
||||
# RSS XML is sometimes malformed
|
||||
rss = self._download_webpage('http://rss.imdb.com/list/%s' % list_id, list_id, 'Downloading list RSS')
|
||||
list_title = self._html_search_regex(r'<title>(.*?)</title>', rss, 'list title')
|
||||
|
||||
# Export is independent of actual author_id, but returns 404 if no author_id is provided.
|
||||
# However, passing dummy author_id seems to be enough.
|
||||
csv = self._download_webpage('http://www.imdb.com/list/export?list_id=%s&author_id=ur00000000' % list_id,
|
||||
list_id, 'Downloading list CSV')
|
||||
|
||||
entries = []
|
||||
for item in csv.split('\n')[1:]:
|
||||
cols = item.split(',')
|
||||
if len(cols) < 2:
|
||||
continue
|
||||
item_id = cols[1][1:-1]
|
||||
if item_id.startswith('vi'):
|
||||
entries.append(self.url_result('http://www.imdb.com/video/imdb/%s' % item_id, 'Imdb'))
|
||||
|
||||
|
||||
webpage = self._download_webpage(url, list_id)
|
||||
list_code = self._search_regex(
|
||||
r'(?s)<div\s+class="list\sdetail">(.*?)class="see-more"',
|
||||
webpage, 'list code')
|
||||
entries = [
|
||||
self.url_result('http://www.imdb.com' + m, 'Imdb')
|
||||
for m in re.findall(r'href="(/video/imdb/vi[^"]+)"', webpage)]
|
||||
|
||||
list_title = self._html_search_regex(
|
||||
r'<h1 class="header">(.*?)</h1>', webpage, 'list title')
|
||||
|
||||
return self.playlist_result(entries, list_id, list_title)
|
||||
|
@@ -89,6 +89,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
|
||||
title = title_el.text
|
||||
if title is None:
|
||||
raise ExtractorError('Could not find video title')
|
||||
title = title.strip()
|
||||
|
||||
return {
|
||||
'title': title,
|
||||
@@ -111,9 +112,12 @@ class MTVServicesInfoExtractor(InfoExtractor):
|
||||
title = url_basename(url)
|
||||
webpage = self._download_webpage(url, title)
|
||||
try:
|
||||
# the url is in the format http://media.mtvnservices.com/fb/{mgid}.swf
|
||||
fb_url = self._og_search_video_url(webpage)
|
||||
mgid = url_basename(fb_url).rpartition('.')[0]
|
||||
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
|
||||
# or http://media.mtvnservices.com/{mgid}
|
||||
og_url = self._og_search_video_url(webpage)
|
||||
mgid = url_basename(og_url)
|
||||
if mgid.endswith('.swf'):
|
||||
mgid = mgid[:-4]
|
||||
except RegexNotFoundError:
|
||||
mgid = self._search_regex(r'data-mgid="(.*?)"', webpage, u'mgid')
|
||||
return self._get_videos_info(mgid)
|
||||
|
@@ -1,3 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
@@ -8,12 +10,12 @@ from ..utils import determine_ext
|
||||
class NewgroundsIE(InfoExtractor):
|
||||
_VALID_URL = r'(?:https?://)?(?:www\.)?newgrounds\.com/audio/listen/(?P<id>\d+)'
|
||||
_TEST = {
|
||||
u'url': u'http://www.newgrounds.com/audio/listen/549479',
|
||||
u'file': u'549479.mp3',
|
||||
u'md5': u'fe6033d297591288fa1c1f780386f07a',
|
||||
u'info_dict': {
|
||||
u"title": u"B7 - BusMode",
|
||||
u"uploader": u"Burn7",
|
||||
'url': 'http://www.newgrounds.com/audio/listen/549479',
|
||||
'file': '549479.mp3',
|
||||
'md5': 'fe6033d297591288fa1c1f780386f07a',
|
||||
'info_dict': {
|
||||
"title": "B7 - BusMode",
|
||||
"uploader": "Burn7",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,17 +24,19 @@ class NewgroundsIE(InfoExtractor):
|
||||
music_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, music_id)
|
||||
|
||||
title = self._html_search_regex(r',"name":"([^"]+)",', webpage, u'music title')
|
||||
uploader = self._html_search_regex(r',"artist":"([^"]+)",', webpage, u'music uploader')
|
||||
title = self._html_search_regex(
|
||||
r',"name":"([^"]+)",', webpage, 'music title')
|
||||
uploader = self._html_search_regex(
|
||||
r',"artist":"([^"]+)",', webpage, 'music uploader')
|
||||
|
||||
music_url_json_string = self._html_search_regex(r'({"url":"[^"]+"),', webpage, u'music url') + '}'
|
||||
music_url_json_string = self._html_search_regex(
|
||||
r'({"url":"[^"]+"),', webpage, 'music url') + '}'
|
||||
music_url_json = json.loads(music_url_json_string)
|
||||
music_url = music_url_json['url']
|
||||
|
||||
return {
|
||||
'id': music_id,
|
||||
'title': title,
|
||||
'url': music_url,
|
||||
'id': music_id,
|
||||
'title': title,
|
||||
'url': music_url,
|
||||
'uploader': uploader,
|
||||
'ext': determine_ext(music_url),
|
||||
}
|
||||
|
@@ -19,7 +19,8 @@ class NovamovIE(InfoExtractor):
|
||||
'info_dict': {
|
||||
'title': 'search engine optimization',
|
||||
'description': 'search engine optimization is used to rank the web page in the google search engine'
|
||||
}
|
||||
},
|
||||
'skip': '"Invalid token" errors abound (in web interface as well as youtube-dl, there is nothing we can do about it.)'
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
@@ -18,14 +18,6 @@ class SouthParkStudiosIE(MTVServicesInfoExtractor):
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
url = u'http://www.' + mobj.group(u'url')
|
||||
video_id = mobj.group('id')
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
mgid = self._search_regex(r'swfobject.embedSWF\(".*?(mgid:.*?)"',
|
||||
webpage, u'mgid')
|
||||
return self._get_videos_info(mgid)
|
||||
|
||||
class SouthparkDeIE(SouthParkStudiosIE):
|
||||
IE_NAME = u'southpark.de'
|
||||
|
@@ -6,7 +6,7 @@ from ..utils import RegexNotFoundError, ExtractorError
|
||||
|
||||
|
||||
class SpaceIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html'
|
||||
_VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html'
|
||||
_TEST = {
|
||||
u'add_ie': ['Brightcove'],
|
||||
u'url': u'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html',
|
||||
|
@@ -1,3 +1,5 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -9,12 +11,12 @@ from ..utils import (
class XVideosIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?xvideos\.com/video([0-9]+)(?:.*)'
    _TEST = {
        u'url': u'http://www.xvideos.com/video939581/funny_porns_by_s_-1',
        u'file': u'939581.flv',
        u'md5': u'1d0c835822f0a71a7bf011855db929d0',
        u'info_dict': {
            u"title": u"Funny Porns By >>>>S<<<<<< -1",
            u"age_limit": 18,
        'url': 'http://www.xvideos.com/video939581/funny_porns_by_s_-1',
        'file': '939581.flv',
        'md5': '1d0c835822f0a71a7bf011855db929d0',
        'info_dict': {
            "title": "Funny Porns By >>>>S<<<<<< -1",
            "age_limit": 18,
        }
    }

@@ -27,18 +29,18 @@ class XVideosIE(InfoExtractor):
        self.report_extraction(video_id)

        # Extract video URL
        video_url = compat_urllib_parse.unquote(self._search_regex(r'flv_url=(.+?)&',
            webpage, u'video URL'))
        video_url = compat_urllib_parse.unquote(
            self._search_regex(r'flv_url=(.+?)&', webpage, 'video URL'))

        # Extract title
        video_title = self._html_search_regex(r'<title>(.*?)\s+-\s+XVID',
            webpage, u'title')
        video_title = self._html_search_regex(
            r'<title>(.*?)\s+-\s+XVID', webpage, 'title')

        # Extract video thumbnail
        video_thumbnail = self._search_regex(r'http://(?:img.*?\.)xvideos.com/videos/thumbs/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/[a-fA-F0-9]+/([a-fA-F0-9.]+jpg)',
            webpage, u'thumbnail', fatal=False)
        video_thumbnail = self._search_regex(
            r'url_bigthumb=(.+?)&', webpage, 'thumbnail', fatal=False)

        info = {
        return {
            'id': video_id,
            'url': video_url,
            'uploader': None,
@@ -49,5 +51,3 @@ class XVideosIE(InfoExtractor):
            'description': None,
            'age_limit': 18,
        }

        return [info]

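The XVideos hunks drop the `u''` literals (the module now imports `unicode_literals`), reformat the long `_search_regex` calls, switch the thumbnail lookup from a brittle full-URL regex to the `url_bigthumb=` parameter, and return the info dict directly instead of wrapping it in a list. The flash variables are a percent-encoded query string, so both the video URL and the thumbnail come out of it with a lazy regex plus URL-unquoting. A small hedged sketch of that pattern over a made-up query string (using the stdlib `unquote` rather than youtube-dl's compat wrapper):

    try:
        from urllib.parse import unquote  # Python 3
    except ImportError:
        from urllib import unquote  # Python 2 fallback

    import re

    def extract_flash_vars(page):
        # flv_url and url_bigthumb are percent-encoded parameters inside the
        # embedded player configuration; decode the video URL after matching.
        video_url = unquote(re.search(r'flv_url=(.+?)&', page).group(1))
        thumbnail = re.search(r'url_bigthumb=(.+?)&', page).group(1)
        return video_url, thumbnail

    page = 'flv_url=http%3A%2F%2Fcdn.example%2F939581.flv&url_bigthumb=http://img.example/939581/thumb.jpg&other=1'
    print(extract_flash_vars(page))
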
@@ -27,6 +27,8 @@ from ..utils import (
    get_element_by_id,
    get_element_by_attribute,
    ExtractorError,
    int_or_none,
    PagedList,
    RegexNotFoundError,
    unescapeHTML,
    unified_strdate,
@@ -270,6 +272,22 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                u"uploader_id": u"setindia"
            }
        },
        {
            u"url": u"http://www.youtube.com/watch?v=a9LDPn-MO4I",
            u"file": u"a9LDPn-MO4I.m4a",
            u"note": u"256k DASH audio (format 141) via DASH manifest",
            u"info_dict": {
                u"upload_date": "20121002",
                u"uploader_id": "8KVIDEO",
                u"description": "No description available.",
                u"uploader": "8KVIDEO",
                u"title": "UHDTV TEST 8K VIDEO.mp4"
            },
            u"params": {
                u"youtube_include_dash_manifest": True,
                u"format": "141",
            },
        },
    ]


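The new test exercises the DASH-manifest code path added further down: with `youtube_include_dash_manifest` enabled, the 256k DASH audio stream (itag 141) becomes selectable like any other format. A hedged sketch of driving the same options from the Python API (the embedding API may differ between versions; the URL is the one from the test):

    from youtube_dl import YoutubeDL

    # Same parameters as the new test case: fetch the DASH manifest and
    # request the 256k DASH audio stream (itag 141).
    opts = {
        'youtube_include_dash_manifest': True,
        'format': '141',
    }
    ydl = YoutubeDL(opts)
    ydl.add_default_info_extractors()  # needed on releases of this vintage
    ydl.download(['http://www.youtube.com/watch?v=a9LDPn-MO4I'])
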
@@ -1067,18 +1085,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
            video_id = mobj.group(2)
        return video_id

    def _get_video_url_list(self, url_map):
        """
        Transform a dictionary in the format {itag:url} to a list of (itag, url)
        with the requested formats.
        """
        existing_formats = [x for x in self._formats if x in url_map]
        if len(existing_formats) == 0:
            raise ExtractorError(u'no known formats available for video')
        video_url_list = [(f, url_map[f]) for f in existing_formats] # All formats
        video_url_list.reverse() # order worst to best
        return video_url_list

    def _extract_from_m3u8(self, manifest_url, video_id):
        url_map = {}
        def _get_urls(_manifest):
@@ -1252,7 +1258,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
        video_annotations = self._extract_annotations(video_id)

        # Decide which formats to download

        try:
            mobj = re.search(r';ytplayer.config = ({.*?});', video_webpage)
            if not mobj:
@@ -1277,9 +1282,26 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
        except ValueError:
            pass

        def _map_to_format_list(urlmap):
            formats = []
            for itag, video_real_url in urlmap.items():
                dct = {
                    'format_id': itag,
                    'url': video_real_url,
                    'player_url': player_url,
                }
                dct.update(self._formats[itag])
                formats.append(dct)
            return formats

        if 'conn' in video_info and video_info['conn'][0].startswith('rtmp'):
            self.report_rtmp_download()
            video_url_list = [('_rtmp', video_info['conn'][0])]
            formats = [{
                'format_id': '_rtmp',
                'protocol': 'rtmp',
                'url': video_info['conn'][0],
                'player_url': player_url,
            }]
        elif len(video_info.get('url_encoded_fmt_stream_map', [])) >= 1 or len(video_info.get('adaptive_fmts', [])) >= 1:
            encoded_url_map = video_info.get('url_encoded_fmt_stream_map', [''])[0] + ',' + video_info.get('adaptive_fmts',[''])[0]
            if 'rtmpe%3Dyes' in encoded_url_map:

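`_map_to_format_list` replaces the old itag/url tuple list: every entry of the `{itag: url}` map is turned into a format dict up front and enriched from the static per-itag table (`self._formats`), so later code only deals with a flat list of formats. A stripped-down sketch of the same mapping with a hypothetical itag table:

    # Hypothetical excerpt of a per-itag metadata table, in the spirit of
    # YoutubeIE._formats (not the real table).
    ITAG_TABLE = {
        '22': {'ext': 'mp4', 'height': 720},
        '140': {'ext': 'm4a', 'format_note': 'DASH audio', 'abr': 128},
    }

    def map_to_format_list(url_map, player_url=None):
        formats = []
        for itag, video_real_url in url_map.items():
            dct = {
                'format_id': itag,
                'url': video_real_url,
                'player_url': player_url,
            }
            # Merge in whatever static metadata is known for this itag.
            dct.update(ITAG_TABLE.get(itag, {}))
            formats.append(dct)
        return formats

    url_map = {'22': 'http://example.invalid/v22', '140': 'http://example.invalid/v140'}
    for f in map_to_format_list(url_map):
        print(f['format_id'], f.get('ext'), f['url'])
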
@@ -1324,23 +1346,50 @@ class YoutubeIE(YoutubeBaseInfoExtractor, SubtitlesInfoExtractor):
                    if 'ratebypass' not in url:
                        url += '&ratebypass=yes'
                    url_map[url_data['itag'][0]] = url
            video_url_list = self._get_video_url_list(url_map)
            formats = _map_to_format_list(url_map)
        elif video_info.get('hlsvp'):
            manifest_url = video_info['hlsvp'][0]
            url_map = self._extract_from_m3u8(manifest_url, video_id)
            video_url_list = self._get_video_url_list(url_map)
            formats = _map_to_format_list(url_map)
        else:
            raise ExtractorError(u'no conn, hlsvp or url_encoded_fmt_stream_map information found in video info')

        formats = []
        for itag, video_real_url in video_url_list:
            dct = {
                'format_id': itag,
                'url': video_real_url,
                'player_url': player_url,
            }
            dct.update(self._formats[itag])
            formats.append(dct)
        # Look for the DASH manifest
        dash_manifest_url_lst = video_info.get('dashmpd')
        if (dash_manifest_url_lst and dash_manifest_url_lst[0] and
                self._downloader.params.get('youtube_include_dash_manifest', False)):
            try:
                dash_doc = self._download_xml(
                    dash_manifest_url_lst[0], video_id,
                    note=u'Downloading DASH manifest',
                    errnote=u'Could not download DASH manifest')
                for r in dash_doc.findall(u'.//{urn:mpeg:DASH:schema:MPD:2011}Representation'):
                    url_el = r.find('{urn:mpeg:DASH:schema:MPD:2011}BaseURL')
                    if url_el is None:
                        continue
                    format_id = r.attrib['id']
                    video_url = url_el.text
                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength'))
                    f = {
                        'format_id': format_id,
                        'url': video_url,
                        'width': int_or_none(r.attrib.get('width')),
                        'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),
                        'asr': int_or_none(r.attrib.get('audioSamplingRate')),
                        'filesize': filesize,
                    }
                    try:
                        existing_format = next(
                            fo for fo in formats
                            if fo['format_id'] == format_id)
                    except StopIteration:
                        f.update(self._formats.get(format_id, {}))
                        formats.append(f)
                    else:
                        existing_format.update(f)

            except (ExtractorError, KeyError) as e:
                self.report_warning(u'Skipping DASH manifest: %s' % e, video_id)

        self._sort_formats(formats)

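The DASH branch downloads the MPD manifest and walks its `Representation` elements: each one with a `BaseURL` child becomes a format, with `bandwidth` scaled from bits per second to the kbit/s that `tbr` expects (hence the new `scale` argument to `int_or_none`), and entries that collide with an already-known itag update that format in place. A self-contained sketch of the same XML walk over a tiny made-up manifest:

    import xml.etree.ElementTree as ET

    MPD_NS = '{urn:mpeg:DASH:schema:MPD:2011}'

    MANIFEST = """<?xml version="1.0"?>
    <MPD xmlns="urn:mpeg:DASH:schema:MPD:2011">
      <Period><AdaptationSet>
        <Representation id="141" bandwidth="256000" audioSamplingRate="44100">
          <BaseURL>http://example.invalid/audio-141</BaseURL>
        </Representation>
      </AdaptationSet></Period>
    </MPD>"""

    def int_or_none(v, scale=1):
        return v if v is None else (int(v) // scale)

    formats = []
    doc = ET.fromstring(MANIFEST)
    for r in doc.findall('.//%sRepresentation' % MPD_NS):
        url_el = r.find('%sBaseURL' % MPD_NS)
        if url_el is None:
            continue
        formats.append({
            'format_id': r.attrib['id'],
            'url': url_el.text,
            'width': int_or_none(r.attrib.get('width')),
            'tbr': int_or_none(r.attrib.get('bandwidth'), 1000),  # bps -> kbit/s
            'asr': int_or_none(r.attrib.get('audioSamplingRate')),
        })

    print(formats)
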
@@ -1580,44 +1629,35 @@ class YoutubeUserIE(InfoExtractor):
        # page by page until there are no video ids - it means we got
        # all of them.

        url_results = []

        for pagenum in itertools.count(0):
        def download_page(pagenum):
            start_index = pagenum * self._GDATA_PAGE_SIZE + 1

            gdata_url = self._GDATA_URL % (username, self._GDATA_PAGE_SIZE, start_index)
            page = self._download_webpage(gdata_url, username,
                u'Downloading video ids from %d to %d' % (start_index, start_index + self._GDATA_PAGE_SIZE))
            page = self._download_webpage(
                gdata_url, username,
                u'Downloading video ids from %d to %d' % (
                    start_index, start_index + self._GDATA_PAGE_SIZE))

            try:
                response = json.loads(page)
            except ValueError as err:
                raise ExtractorError(u'Invalid JSON in API response: ' + compat_str(err))
            if 'entry' not in response['feed']:
                # Number of videos is a multiple of self._MAX_RESULTS
                break
                return

            # Extract video identifiers
            entries = response['feed']['entry']
            for entry in entries:
                title = entry['title']['$t']
                video_id = entry['id']['$t'].split('/')[-1]
                url_results.append({
                yield {
                    '_type': 'url',
                    'url': video_id,
                    'ie_key': 'Youtube',
                    'id': 'video_id',
                    'title': title,
                })

            # A little optimization - if current page is not
            # "full", ie. does not contain PAGE_SIZE video ids then
            # we can assume that this page is the last one - there
            # are no more ids on further pages - no need to query
            # again.

            if len(entries) < self._GDATA_PAGE_SIZE:
                break
                }
        url_results = PagedList(download_page, self._GDATA_PAGE_SIZE)

        return self.playlist_result(url_results, playlist_title=username)

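In the user extractor the eager page loop becomes a `download_page` generator: each page still yields `'_type': 'url'` entries, which defer the actual video extraction to the extractor named by `ie_key`, but pages are now fetched lazily through the new `PagedList` wrapper instead of being accumulated into a list up front. A hedged sketch of the producer side with a stand-in page fetcher:

    def download_page(pagenum, page_size=2):
        # Stand-in for the GData request; in the real extractor each entry
        # comes from the user's upload feed.
        fake_ids = ['abc%d' % (pagenum * page_size + i) for i in range(page_size)]
        for video_id in fake_ids:
            # A '_type': 'url' entry is a deferred reference: the downloader
            # hands it to the extractor named by 'ie_key' later on.
            yield {
                '_type': 'url',
                'url': video_id,
                'ie_key': 'Youtube',
                'id': video_id,
                'title': 'video %s' % video_id,
            }

    # First two pages' worth of deferred entries:
    for entry in list(download_page(0)) + list(download_page(1)):
        print(entry['url'], entry['title'])
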
@@ -6,6 +6,7 @@ import datetime
import email.utils
import errno
import gzip
import itertools
import io
import json
import locale
@@ -1131,8 +1132,8 @@ class HEADRequest(compat_urllib_request.Request):
        return "HEAD"


def int_or_none(v):
    return v if v is None else int(v)
def int_or_none(v, scale=1):
    return v if v is None else (int(v) // scale)


def parse_duration(s):

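`int_or_none` gains a `scale` divisor so callers can convert units while staying `None`-safe; the DASH code above uses it to turn a `bandwidth` attribute in bits per second into the kbit/s value stored in `tbr`. A couple of illustrative calls:

    def int_or_none(v, scale=1):
        return v if v is None else (int(v) // scale)

    print(int_or_none('256000', 1000))  # 256  (bps -> kbit/s)
    print(int_or_none('44100'))         # 44100
    print(int_or_none(None, 1000))      # None -- missing attributes stay None
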
@@ -1164,3 +1165,50 @@ def check_executable(exe, args=[]):
    except OSError:
        return False
    return exe


class PagedList(object):
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue

            page_results = list(self._pagefunc(pagenum))

            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)

            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)

            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)

            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break

            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res

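`PagedList` is what lets the YouTube user extractor above stay lazy: `getslice(start, end)` only calls the page function for the pages that overlap the requested range, trims the first and last page, and stops early on a short page or once the range is exhausted. A toy demonstration with a page function that records which pages were actually fetched (a condensed copy of the logic above, without `__len__`, so the example runs on its own):

    import itertools

    class PagedList(object):
        def __init__(self, pagefunc, pagesize):
            self._pagefunc = pagefunc
            self._pagesize = pagesize

        def getslice(self, start=0, end=None):
            res = []
            for pagenum in itertools.count(start // self._pagesize):
                firstid = pagenum * self._pagesize
                nextfirstid = firstid + self._pagesize
                if start >= nextfirstid:
                    continue
                page_results = list(self._pagefunc(pagenum))
                startv = start % self._pagesize if firstid <= start < nextfirstid else 0
                endv = (((end - 1) % self._pagesize) + 1
                        if (end is not None and firstid <= end <= nextfirstid) else None)
                if startv != 0 or endv is not None:
                    page_results = page_results[startv:endv]
                res.extend(page_results)
                # Short page means there is nothing beyond it; a range that ends
                # exactly at the page boundary also lets us stop early.
                if len(page_results) + startv < self._pagesize:
                    break
                if end == nextfirstid:
                    break
            return res

    fetched = []

    def pagefunc(pagenum):
        fetched.append(pagenum)
        # Ten items per page: 0-9, 10-19, 20-29, ...
        return range(pagenum * 10, pagenum * 10 + 10)

    pl = PagedList(pagefunc, 10)
    print(pl.getslice(25, 32))  # [25, 26, 27, 28, 29, 30, 31]
    print(fetched)              # only pages 2 and 3 were requested
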
@@ -1,2 +1,2 @@

__version__ = '2014.01.22.1'
__version__ = '2014.01.22.5'