Compare commits: 2015.02.17...2015.03.15
348 commits (SHA1):

1de4ac1385
39aa42ffbb
ec1b9577ba
3b4444f99a
613b2d9dc6
8f4cc22455
7c42327e0e
873383e9bd
8508557e77
4d1652484f
88cf6fb368
e7db87f700
082b1155a3
9202b1b787
a7e01c438d
05be67e77d
85741b9986
f247a199fe
29171bc2d2
7be5a62ed7
3647136f24
13598940e3
0eb365868e
28c6411e49
bba3fc7960
fcd877013e
ba1d4c0488
517bcca299
1b53778175
b7a0304d92
545315a985
3f4327520c
4a34f69ea6
fb7e68833c
486dd09e0b
054b99a330
65c5e044c7
11984c7467
3946864c8a
b84037013e
1dbfc62d75
d7d79106c7
1138491631
71705fa70d
602814adab
3a77719c5a
7e195d0e92
e04793401d
a3fbd18824
c6052b8c14
c792b5011f
32aaeca775
1593194c63
614a7e1e23
2ebfeacabc
f5d8f58a17
937daef4a7
dd77f14c64
c36cbe5a8a
41b2194f86
d1e2e8f583
47fe42e1ab
4c60393854
f848215dfc
dcca581967
d475b3384c
dd7831fe94
cc08b11d16
8bba753cca
43d6280d0a
e5a11a2293
f18ef2d144
1bb5c511a5
d55de57b67
a2aaf4dbc6
bdf6eee0ae
8b910bda0c
24993e3b39
11101076a1
f838875726
28778d6bae
1132eae56d
d34e79492d
ab205b9dc8
7dcad95d4f
8a48223a7b
d47ae7f620
135c9c42bf
0bf79ac455
98998cded6
14137b5781
23ba76bc0e
61e00a9775
d1508cd68d
9c85b5376d
3c6f245083
f207019ce5
bd05aa4e24
8dc9d361c2
d0e958c71c
a0bb7c5593
7feddd9fc7
9609f02e3c
c2ebea6580
12a129ec6d
f28fe66970
123397317c
dc570c4951
22d3628319
50c9949d7a
376817c6d4
e0d0572b73
7fde87c77d
938c3f65b6
2461f79d2a
499bfcbfd0
07490f8017
91410c9bfa
a7440261c5
76c73715fb
c75f0b361a
295df4edb9
562ceab13d
2f0f6578c3
30cbd4e0d6
549e58069c
7594be85ff
3630034609
4e01501bbf
1aa5172f56
f7e2ee8fa6
66dc9a3701
31bd39256b
003c69a84b
0134901108
eee6293d57
8237bec4f0
29cad7ad13
0d103de3b0
a0090691d0
6c87c2eea8
58c2ec6ab3
df5ae3eb16
efda2d7854
e143f5dae9
48218cdb97
e9fade72f3
0f2c0d335b
40b077bc7e
a931092cb3
bd3749ed69
4ffbf77886
781a7ef60a
5b2949ee0b
a0d646135a
7862ad88b7
f3bff94cf9
0eba1e1782
e3216b82bf
da419e2332
0d97ef43be
1a2313a6f2
250a9bdfe2
6317a3e9da
7ab7c9e932
e129c5bc0d
2e241242a3
9724e5d336
63a562f95e
5c340b0387
1c6510f57a
2a15a98a6a
72a406e7aa
feccc3ff37
265bfa2c79
8faf9b9b41
84be7c230c
3e675fabe0
cd5b4b0bc2
7ef822021b
9a48926a57
13cd97f3df
183139340b
1c69bca258
c10ea454dc
9504fc21b5
13d8fbef30
b8988b63a6
5eaaeb7c31
c4f8c453ae
6f4ba54079
637570326b
37f885650c
c8c34ccb20
e765ed3a9c
677063594e
59c7cbd482
570311610e
41b264e77c
df4bd0d53f
7f09a662a0
4f3b21e1c7
54233c9080
db8e13ef71
5a42414b9c
9c665ab72e
b665ba6aa6
ec5913b5cd
25ac63ed71
99209c2916
1fbaa0a521
3037b91e05
ffdf972b91
459e5fbd5f
bfc993cc91
4432db35d9
591ab1dff9
5bca2424bc
bd61a9e770
3438e7acd2
09c200acf2
716889cab1
409693984f
04e8c11080
80af2b73ab
3cc57f9645
a65d4e7f14
b531cfc019
543ec2136b
93b5071f73
ddc369f073
1b40dc92eb
fcc3e6138b
9fe6ef7ab2
c010af6f19
35b7982303
f311cfa231
80970e531b
b7bb76df05
98c70d6fc7
ab84349b16
03091e372f
4d17184817
e086e0eb6c
314368c822
c5181ab410
ea5152cae1
255fca5eea
4aeccadf4e
93540ee10e
8fb3ac3649
77b2986b5b
62b013df0d
fad6768bd1
a78125f925
a00a8bcc8a
1e9a9e167d
3da0db62e6
e14ced7918
ab9d02f53b
a461a11989
1bd838608f
365577f567
50efb383f0
5da6bd0083
5e9a033e6e
fb7cb6823e
dd0a58f5f0
a21420389e
6140baf4e1
8fc642eb5b
e66e1a0046
d5c69f1da4
f13b1e7d7f
5c8a3f862a
8807f1277f
a3b9157f49
b88ba05356
b74d505577
9e2d7dca87
d236b37ac9
e880c66bd8
383456aa29
1a13940c8d
3d54788495
71d53ace2f
f37e3f99f0
bd03ffc16e
1ac1af9b47
3bf5705316
1c2528c8a3
7bd15b1a03
6b961a85fd
7707004043
a025d3c5a5
c460bdd56b
b81a359eb6
d61aefb24c
d305dd73a3
93a16ba238
4f7cea6c53
afbdd3acc3
85d5866177
9789d7535d
d8443cd3f7
d47c26e168
01561da142
0af25f784b
b9b42f2ea0
311c393838
18c1c42405
37dd5d4629
81975f4693
b8b928d5cb
3eff81fbf7
785521bf4f
6d1a55a521
9cad27008b
11e611a7fa
72c1f8de06
6e99868e4c
4d278fde64
f21e915fb9
6f53c63df6
1def5f359e
15ec669374
a3fa5da496
30965ac66a
09ab40b7d1
edab9dbf4d
9868ea4936
85920dd01d
fa15607773
a91a2c1a83
16e7711e22
5cda4eda72
98f000409f
bd7fe0cf66
48246541da
4a8d4a53b1
4cd95bcbc3
360e1ca5cc
a1f2a06b34
c84dd8a90d
65469a7f8b
6b597516c1
b5857f62e2
a504ced097
.travis.yml
@@ -2,6 +2,7 @@ language: python
python:
- "2.6"
- "2.7"
- "3.2"
- "3.3"
- "3.4"
before_install:
AUTHORS
@@ -111,3 +111,7 @@ Paul Hartmann
Frans de Jonge
Robin de Rooij
Ryan Schmidt
+Leslie P. Polzer
+Duncan Keall
+Alexander Mamay
+Devin J. Pohly
CONTRIBUTING.md
@@ -18,7 +18,9 @@ If your report is shorter than two lines, it is almost certainly missing some of

For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

-Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.

### Are you using the latest version?
Makefile
@@ -1,7 +1,8 @@
all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish supportedsites

clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
	find -name "*.pyc" -delete

PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin

@@ -43,7 +44,7 @@ test:
ot: offlinetest

offlinetest: codetest
-	nosetests --verbose test --exclude test_download --exclude test_age_restriction --exclude test_subtitles --exclude test_write_annotations --exclude test_youtube_lists
+	nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py

tar: youtube-dl.tar.gz
README.md
@@ -47,209 +47,109 @@ which means you can modify it, redistribute it or use it however you like.
# OPTIONS
-h, --help print this help text and exit
--version print program version and exit
-U, --update update this program to latest version. Make
sure that you have sufficient permissions
(run with sudo if needed)
-i, --ignore-errors continue on download errors, for example to
skip unavailable videos in a playlist
--abort-on-error Abort downloading of further videos (in the
playlist or the command line) if an error
occurs
-U, --update update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)
-i, --ignore-errors continue on download errors, for example to skip unavailable videos in a playlist
--abort-on-error Abort downloading of further videos (in the playlist or the command line) if an error occurs
--dump-user-agent display the current browser identification
--list-extractors List all supported extractors and the URLs
they would handle
--extractor-descriptions Output descriptions of all supported
extractors
--default-search PREFIX Use this prefix for unqualified URLs. For
example "gvsearch2:" downloads two videos
from google videos for youtube-dl "large
apple". Use the value "auto" to let
youtube-dl guess ("auto_warning" to emit a
warning when guessing). "error" just throws
an error. The default value "fixup_error"
repairs broken URLs, but emits an error if
this is not possible instead of searching.
--ignore-config Do not read configuration files. When given
in the global configuration file /etc
/youtube-dl.conf: Do not read the user
configuration in ~/.config/youtube-
dl/config (%APPDATA%/youtube-dl/config.txt
on Windows)
--flat-playlist Do not extract the videos of a playlist,
only list them.
--list-extractors List all supported extractors and the URLs they would handle
--extractor-descriptions Output descriptions of all supported extractors
--default-search PREFIX Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple".
Use the value "auto" to let youtube-dl guess ("auto_warning" to emit a warning when guessing). "error" just throws an error. The
default value "fixup_error" repairs broken URLs, but emits an error if this is not possible instead of searching.
--ignore-config Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: Do not read the user configuration
in ~/.config/youtube-dl/config (%APPDATA%/youtube-dl/config.txt on Windows)
--flat-playlist Do not extract the videos of a playlist, only list them.
--no-color Do not emit color codes in output.

## Network Options:
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in
an empty string (--proxy "") for direct
connection
--proxy URL Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection
--socket-timeout SECONDS Time to wait before giving up, in seconds
--source-address IP Client-side IP address to bind to
(experimental)
-4, --force-ipv4 Make all connections via IPv4
(experimental)
-6, --force-ipv6 Make all connections via IPv6
(experimental)
--source-address IP Client-side IP address to bind to (experimental)
-4, --force-ipv4 Make all connections via IPv4 (experimental)
-6, --force-ipv6 Make all connections via IPv6 (experimental)
--cn-verification-proxy URL Use this proxy to verify the IP address for some Chinese sites. The default proxy specified by --proxy (or none, if the options is
not present) is used for the actual downloading. (experimental)

## Video Selection:
--playlist-start NUMBER playlist video to start at (default is 1)
--playlist-end NUMBER playlist video to end at (default is last)
--playlist-items ITEM_SPEC playlist video items to download. Specify
indices of the videos in the playlist
seperated by commas like: "--playlist-items
1,2,5,8" if you want to download videos
indexed 1, 2, 5, 8 in the playlist. You can
specify range: "--playlist-items
1-3,7,10-13", it will download the videos
at index 1, 2, 3, 7, 10, 11, 12 and 13.
--match-title REGEX download only matching titles (regex or
caseless sub-string)
--reject-title REGEX skip download for matching titles (regex or
caseless sub-string)
--playlist-items ITEM_SPEC playlist video items to download. Specify indices of the videos in the playlist seperated by commas like: "--playlist-items 1,2,5,8"
if you want to download videos indexed 1, 2, 5, 8 in the playlist. You can specify range: "--playlist-items 1-3,7,10-13", it will
download the videos at index 1, 2, 3, 7, 10, 11, 12 and 13.
--match-title REGEX download only matching titles (regex or caseless sub-string)
--reject-title REGEX skip download for matching titles (regex or caseless sub-string)
--max-downloads NUMBER Abort after downloading NUMBER files
--min-filesize SIZE Do not download any videos smaller than
SIZE (e.g. 50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE
(e.g. 50k or 44.6m)
--min-filesize SIZE Do not download any videos smaller than SIZE (e.g. 50k or 44.6m)
--max-filesize SIZE Do not download any videos larger than SIZE (e.g. 50k or 44.6m)
--date DATE download only videos uploaded in this date
--datebefore DATE download only videos uploaded on or before
this date (i.e. inclusive)
--dateafter DATE download only videos uploaded on or after
this date (i.e. inclusive)
--min-views COUNT Do not download any videos with less than
COUNT views
--max-views COUNT Do not download any videos with more than
COUNT views
--match-filter FILTER (Experimental) Generic video filter.
Specify any key (see help for -o for a list
of available keys) to match if the key is
present, !key to check if the key is not
present,key > NUMBER (like "comment_count >
12", also works with >=, <, <=, !=, =) to
compare against a number, and & to require
multiple matches. Values which are not
known are excluded unless you put a
question mark (?) after the operator.For
example, to only match videos that have
been liked more than 100 times and disliked
less than 50 times (or the dislike
functionality is not available at the given
service), but who also have a description,
use --match-filter "like_count > 100 &
--datebefore DATE download only videos uploaded on or before this date (i.e. inclusive)
--dateafter DATE download only videos uploaded on or after this date (i.e. inclusive)
--min-views COUNT Do not download any videos with less than COUNT views
--max-views COUNT Do not download any videos with more than COUNT views
--match-filter FILTER (Experimental) Generic video filter. Specify any key (see help for -o for a list of available keys) to match if the key is present,
!key to check if the key is not present,key > NUMBER (like "comment_count > 12", also works with >=, <, <=, !=, =) to compare against
a number, and & to require multiple matches. Values which are not known are excluded unless you put a question mark (?) after the
operator.For example, to only match videos that have been liked more than 100 times and disliked less than 50 times (or the dislike
functionality is not available at the given service), but who also have a description, use --match-filter "like_count > 100 &
dislike_count <? 50 & description" .
--no-playlist If the URL refers to a video and a
playlist, download only the video.
--age-limit YEARS download only videos suitable for the given
age
--download-archive FILE Download only videos not listed in the
archive file. Record the IDs of all
downloaded videos in it.
--include-ads Download advertisements as well
(experimental)
--no-playlist If the URL refers to a video and a playlist, download only the video.
--yes-playlist If the URL refers to a video and a playlist, download the playlist.
--age-limit YEARS download only videos suitable for the given age
--download-archive FILE Download only videos not listed in the archive file. Record the IDs of all downloaded videos in it.
--include-ads Download advertisements as well (experimental)

## Download Options:
-r, --rate-limit LIMIT maximum download rate in bytes per second
(e.g. 50K or 4.2M)
-R, --retries RETRIES number of retries (default is 10), or
"infinite".
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K)
(default is 1024)
--no-resize-buffer do not automatically adjust the buffer
size. By default, the buffer size is
automatically resized from an initial value
of SIZE.
-r, --rate-limit LIMIT maximum download rate in bytes per second (e.g. 50K or 4.2M)
-R, --retries RETRIES number of retries (default is 10), or "infinite".
--buffer-size SIZE size of download buffer (e.g. 1024 or 16K) (default is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By default, the buffer size is automatically resized from an initial value of SIZE.
--playlist-reverse Download playlist videos in reverse order
--xattr-set-filesize (experimental) set file xattribute
ytdl.filesize with expected filesize
--hls-prefer-native (experimental) Use the native HLS
downloader instead of ffmpeg.
--external-downloader COMMAND (experimental) Use the specified external
downloader. Currently supports
aria2c,curl,wget
--xattr-set-filesize (experimental) set file xattribute ytdl.filesize with expected filesize
--hls-prefer-native (experimental) Use the native HLS downloader instead of ffmpeg.
--external-downloader COMMAND Use the specified external downloader. Currently supports aria2c,curl,wget
--external-downloader-args ARGS Give these arguments to the external downloader.

## Filesystem Options:
-a, --batch-file FILE file containing URLs to download ('-' for
stdin)
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
--id use only video ID in file name
-o, --output TEMPLATE output filename template. Use %(title)s to
get the title, %(uploader)s for the
uploader name, %(uploader_id)s for the
uploader nickname if different,
%(autonumber)s to get an automatically
incremented number, %(ext)s for the
filename extension, %(format)s for the
format description (like "22 - 1280x720" or
"HD"), %(format_id)s for the unique id of
the format (like Youtube's itags: "137"),
%(upload_date)s for the upload date
(YYYYMMDD), %(extractor)s for the provider
(youtube, metacafe, etc), %(id)s for the
video id, %(playlist_title)s,
%(playlist_id)s, or %(playlist)s (=title if
present, ID otherwise) for the playlist the
video is in, %(playlist_index)s for the
position in the playlist. %(height)s and
%(width)s for the width and height of the
video format. %(resolution)s for a textual
description of the resolution of the video
format. %% for a literal percent. Use - to
output to stdout. Can also be used to
download to a different directory, for
example with -o '/my/downloads/%(uploader)s
/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in
%(autonumber)s when it is present in output
filename template or --auto-number option
is given
--restrict-filenames Restrict filenames to only ASCII
characters, and avoid "&" and spaces in
filenames
-A, --auto-number [deprecated; use -o
"%(autonumber)s-%(title)s.%(ext)s" ] number
downloaded files starting from 00000
-t, --title [deprecated] use title in file name
(default)
-o, --output TEMPLATE output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader
nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(format)s for
the format description (like "22 - 1280x720" or "HD"), %(format_id)s for the unique id of the format (like Youtube's itags: "137"),
%(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id,
%(playlist_title)s, %(playlist_id)s, or %(playlist)s (=title if present, ID otherwise) for the playlist the video is in,
%(playlist_index)s for the position in the playlist. %(height)s and %(width)s for the width and height of the video format.
%(resolution)s for a textual description of the resolution of the video format. %% for a literal percent. Use - to output to stdout.
Can also be used to download to a different directory, for example with -o '/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
--autonumber-size NUMBER Specifies the number of digits in %(autonumber)s when it is present in output filename template or --auto-number option is given
--restrict-filenames Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames
-A, --auto-number [deprecated; use -o "%(autonumber)s-%(title)s.%(ext)s" ] number downloaded files starting from 00000
-t, --title [deprecated] use title in file name (default)
-l, --literal [deprecated] alias of --title
-w, --no-overwrites do not overwrite files
-c, --continue force resume of partially downloaded files.
By default, youtube-dl will resume
downloads if possible.
--no-continue do not resume partially downloaded files
(restart from beginning)
--no-part do not use .part files - write directly
into output file
--no-mtime do not use the Last-modified header to set
the file modification time
--write-description write video description to a .description
file
-c, --continue force resume of partially downloaded files. By default, youtube-dl will resume downloads if possible.
--no-continue do not resume partially downloaded files (restart from beginning)
--no-part do not use .part files - write directly into output file
--no-mtime do not use the Last-modified header to set the file modification time
--write-description write video description to a .description file
--write-info-json write video metadata to a .info.json file
--write-annotations write video annotations to a .annotation
file
--load-info FILE json file containing the video information
(created with the "--write-json" option)
--cookies FILE file to read cookies from and dump cookie
jar in
--cache-dir DIR Location in the filesystem where youtube-dl
can store some downloaded information
permanently. By default $XDG_CACHE_HOME
/youtube-dl or ~/.cache/youtube-dl . At the
moment, only YouTube player files (for
videos with obfuscated signatures) are
cached, but that may change.
--write-annotations write video annotations to a .annotation file
--load-info FILE json file containing the video information (created with the "--write-json" option)
--cookies FILE file to read cookies from and dump cookie jar in
--cache-dir DIR Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl
or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may
change.
--no-cache-dir Disable filesystem caching
--rm-cache-dir Delete all filesystem cache files

## Thumbnail images:
--write-thumbnail write thumbnail image to disk
--write-all-thumbnails write all thumbnail image formats to disk
--list-thumbnails Simulate and list all available thumbnail
formats
--list-thumbnails Simulate and list all available thumbnail formats

## Verbosity / Simulation Options:
-q, --quiet activates quiet mode
--no-warnings Ignore warnings
-s, --simulate do not download the video and do not write
anything to disk
-s, --simulate do not download the video and do not write anything to disk
--skip-download do not download the video
-g, --get-url simulate, quiet but print URL
-e, --get-title simulate, quiet but print title
@@ -259,153 +159,87 @@ which means you can modify it, redistribute it or use it however you like.
--get-duration simulate, quiet but print video length
--get-filename simulate, quiet but print output filename
--get-format simulate, quiet but print output format
-j, --dump-json simulate, quiet but print JSON information.
See --output for a description of available
keys.
-J, --dump-single-json simulate, quiet but print JSON information
for each command-line argument. If the URL
refers to a playlist, dump the whole
playlist information in a single line.
--print-json Be quiet and print the video information as
JSON (video is still being downloaded).
-j, --dump-json simulate, quiet but print JSON information. See --output for a description of available keys.
-J, --dump-single-json simulate, quiet but print JSON information for each command-line argument. If the URL refers to a playlist, dump the whole playlist
information in a single line.
--print-json Be quiet and print the video information as JSON (video is still being downloaded).
--newline output progress bar as new lines
--no-progress do not print progress bar
--console-title display progress in console titlebar
-v, --verbose print various debugging information
--dump-intermediate-pages print downloaded pages to debug problems
(very verbose)
--write-pages Write downloaded intermediary pages to
files in the current directory to debug
problems
--dump-pages print downloaded pages to debug problems (very verbose)
--write-pages Write downloaded intermediary pages to files in the current directory to debug problems
--print-traffic Display sent and read HTTP traffic
-C, --call-home Contact the youtube-dl server for
debugging.
--no-call-home Do NOT contact the youtube-dl server for
debugging.
-C, --call-home Contact the youtube-dl server for debugging.
--no-call-home Do NOT contact the youtube-dl server for debugging.

## Workarounds:
--encoding ENCODING Force the specified encoding (experimental)
--no-check-certificate Suppress HTTPS certificate validation.
--prefer-insecure Use an unencrypted connection to retrieve
information about the video. (Currently
supported only for YouTube)
--prefer-insecure Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)
--user-agent UA specify a custom user agent
--referer URL specify a custom referer, use if the video
access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value,
separated by a colon ':'. You can use this
option multiple times
--bidi-workaround Work around terminals that lack
bidirectional text support. Requires bidiv
or fribidi executable in PATH
--sleep-interval SECONDS Number of seconds to sleep before each
download.
--referer URL specify a custom referer, use if the video access is restricted to one domain
--add-header FIELD:VALUE specify a custom HTTP header and its value, separated by a colon ':'. You can use this option multiple times
--bidi-workaround Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH
--sleep-interval SECONDS Number of seconds to sleep before each download.

## Video Format Options:
-f, --format FORMAT video format code, specify the order of
preference using slashes, as in -f 22/17/18
. Instead of format codes, you can select
by extension for the extensions aac, m4a,
mp3, mp4, ogg, wav, webm. You can also use
the special names "best", "bestvideo",
"bestaudio", "worst". You can filter the
video results by putting a condition in
brackets, as in -f "best[height=720]" (or
-f "[filesize>10M]"). This works for
filesize, height, width, tbr, abr, vbr,
asr, and fps and the comparisons <, <=, >,
>=, =, != and for ext, acodec, vcodec,
container, and protocol and the comparisons
=, != . Formats for which the value is not
known are excluded unless you put a
question mark (?) after the operator. You
can combine format filters, so -f "[height
<=? 720][tbr>500]" selects up to 720p
videos (or videos where the height is not
known) with a bitrate of at least 500
KBit/s. By default, youtube-dl will pick
the best quality. Use commas to download
multiple audio formats, such as -f
136/137/mp4/bestvideo,140/m4a/bestaudio.
You can merge the video and audio of two
formats into a single file using -f <video-
format>+<audio-format> (requires ffmpeg or
avconv), for example -f
-f, --format FORMAT video format code, specify the order of preference using slashes, as in -f 22/17/18 . Instead of format codes, you can select by
extension for the extensions aac, m4a, mp3, mp4, ogg, wav, webm. You can also use the special names "best", "bestvideo", "bestaudio",
"worst". You can filter the video results by putting a condition in brackets, as in -f "best[height=720]" (or -f "[filesize>10M]").
This works for filesize, height, width, tbr, abr, vbr, asr, and fps and the comparisons <, <=, >, >=, =, != and for ext, acodec,
vcodec, container, and protocol and the comparisons =, != . Formats for which the value is not known are excluded unless you put a
question mark (?) after the operator. You can combine format filters, so -f "[height <=? 720][tbr>500]" selects up to 720p videos
(or videos where the height is not known) with a bitrate of at least 500 KBit/s. By default, youtube-dl will pick the best quality.
Use commas to download multiple audio formats, such as -f 136/137/mp4/bestvideo,140/m4a/bestaudio. You can merge the video and audio
of two formats into a single file using -f <video-format>+<audio-format> (requires ffmpeg or avconv), for example -f
bestvideo+bestaudio.
--all-formats download all available video formats
--prefer-free-formats prefer free video formats unless a specific
one is requested
--prefer-free-formats prefer free video formats unless a specific one is requested
--max-quality FORMAT highest quality format to download
-F, --list-formats list all available formats
--youtube-skip-dash-manifest Do not download the DASH manifest on
YouTube videos
--merge-output-format FORMAT If a merge is required (e.g.
bestvideo+bestaudio), output to given
container format. One of mkv, mp4, ogg,
webm, flv.Ignored if no merge is required
--youtube-skip-dash-manifest Do not download the DASH manifest on YouTube videos
--merge-output-format FORMAT If a merge is required (e.g. bestvideo+bestaudio), output to given container format. One of mkv, mp4, ogg, webm, flv.Ignored if no
merge is required

## Subtitle Options:
--write-sub write subtitle file
--write-auto-sub write automatic subtitle file (youtube
only)
--all-subs downloads all the available subtitles of
the video
--write-auto-sub write automatic subtitle file (youtube only)
--all-subs downloads all the available subtitles of the video
--list-subs lists all available subtitles for the video
--sub-format FORMAT subtitle format (default=srt) ([sbv/vtt]
youtube only)
--sub-lang LANGS languages of the subtitles to download
(optional) separated by commas, use IETF
language tags like 'en,pt'
--sub-format FORMAT subtitle format, accepts formats preference, for example: "ass/srt/best"
--sub-lang LANGS languages of the subtitles to download (optional) separated by commas, use IETF language tags like 'en,pt'

## Authentication Options:
-u, --username USERNAME login with this account ID
-p, --password PASSWORD account password. If this option is left
out, youtube-dl will ask interactively.
-p, --password PASSWORD account password. If this option is left out, youtube-dl will ask interactively.
-2, --twofactor TWOFACTOR two-factor auth code
-n, --netrc use .netrc authentication data
--video-password PASSWORD video password (vimeo, smotri)

## Post-processing Options:
-x, --extract-audio convert video files to audio-only files
(requires ffmpeg or avconv and ffprobe or
avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a",
"opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification,
insert a value between 0 (better) and 9
(worse) for VBR or a specific bitrate like
128K (default 5)
--recode-video FORMAT Encode the video to another format if
necessary (currently supported:
mp4|flv|ogg|webm|mkv)
-k, --keep-video keeps the video file on disk after the
post-processing; the video is erased by
default
--no-post-overwrites do not overwrite post-processed files; the
post-processed files are overwritten by
default
--embed-subs embed subtitles in the video (only for mp4
videos)
-x, --extract-audio convert video files to audio-only files (requires ffmpeg or avconv and ffprobe or avprobe)
--audio-format FORMAT "best", "aac", "vorbis", "mp3", "m4a", "opus", or "wav"; "best" by default
--audio-quality QUALITY ffmpeg/avconv audio quality specification, insert a value between 0 (better) and 9 (worse) for VBR or a specific bitrate like 128K
(default 5)
--recode-video FORMAT Encode the video to another format if necessary (currently supported: mp4|flv|ogg|webm|mkv)
-k, --keep-video keeps the video file on disk after the post-processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-processed files are overwritten by default
--embed-subs embed subtitles in the video (only for mp4 videos)
--embed-thumbnail embed thumbnail in the audio as cover art
--add-metadata write metadata to the video file
--xattrs write metadata to the video file's xattrs
(using dublin core and xdg standards)
--fixup POLICY Automatically correct known faults of the
file. One of never (do nothing), warn (only
emit a warning), detect_or_warn(the
default; fix file if we can, warn
otherwise)
--prefer-avconv Prefer avconv over ffmpeg for running the
postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the
postprocessors
--ffmpeg-location PATH Location of the ffmpeg/avconv binary;
either the path to the binary or its
containing directory.
--exec CMD Execute a command on the file after
downloading, similar to find's -exec
syntax. Example: --exec 'adb push {}
/sdcard/Music/ && rm {}'
--metadata-from-title FORMAT parse additional metadata like song title / artist from the video title. The format syntax is the same as --output, the parsed
parameters replace existing values. Additional templates: %(album), %(artist). Example: --metadata-from-title "%(artist)s -
%(title)s" matches a title like "Coldplay - Paradise"
--xattrs write metadata to the video file's xattrs (using dublin core and xdg standards)
--fixup POLICY Automatically correct known faults of the file. One of never (do nothing), warn (only emit a warning), detect_or_warn(the default;
fix file if we can, warn otherwise)
--prefer-avconv Prefer avconv over ffmpeg for running the postprocessors (default)
--prefer-ffmpeg Prefer ffmpeg over avconv for running the postprocessors
--ffmpeg-location PATH Location of the ffmpeg/avconv binary; either the path to the binary or its containing directory.
--exec CMD Execute a command on the file after downloading, similar to find's -exec syntax. Example: --exec 'adb push {} /sdcard/Music/ && rm
{}'
--convert-subtitles FORMAT Convert the subtitles to other format (currently supported: srt|ass|vtt)

# CONFIGURATION

@@ -515,11 +349,19 @@ If you want to play the video on a machine that is not running youtube-dl, you c

### ERROR: no fmt_url_map or conn information found in video info

-youtube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
+YouTube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.

### ERROR: unable to download video ###

-youtube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
+YouTube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
+
+### ExtractorError: Could not find JS function u'OF'
+
+In February 2015, the new YouTube player contained a character sequence in a string that was misinterpreted by old versions of youtube-dl. See [above](#how-do-i-update-youtube-dl) for how to update youtube-dl.
+
+### HTTP Error 429: Too Many Requests or 402: Payment Required
+
+These two error codes indicate that the service is blocking your IP address because of overuse. Contact the service and ask them to unblock your IP address, or - if you have acquired a whitelisted IP address already - use the [`--proxy` or `--network-address` options](#network-options) to select another IP address.

### SyntaxError: Non-ASCII character ###
@@ -565,9 +407,21 @@ A note on the service that they don't host the infringing content, but just link

Support requests for services that **do** purchase the rights to distribute their content are perfectly fine though. If in doubt, you can simply include a source that mentions the legitimate purchase of content.

+### How can I speed up work on my issue?
+
+(Also known as: Help, my important issue not being solved!) The youtube-dl core developer team is quite small. While we do our best to solve as many issues as possible, sometimes that can take quite a while. To speed up your issue, here's what you can do:
+
+First of all, please do report the issue [at our issue tracker](https://yt-dl.org/bugs). That allows us to coordinate all efforts by users and developers, and serves as a unified point. Unfortunately, the youtube-dl project has grown too large to use personal email as an effective communication channel.
+
+Please read the [bug reporting instructions](#bugs) below. A lot of bugs lack all the necessary information. If you can, offer proxy, VPN, or shell access to the youtube-dl developers. If you are able to, test the issue from multiple computers in multiple countries to exclude local censorship or misconfiguration issues.
+
+If nobody is interested in solving your issue, you are welcome to take matters into your own hands and submit a pull request (or coerce/pay somebody else to do so).
+
+Feel free to bump the issue from time to time by writing a small comment ("Issue is still present in youtube-dl version ...from France, but fixed from Belgium"), but please not more than once a month. Please do not declare your issue as `important` or `urgent`.
+
### How can I detect whether a given URL is supported by youtube-dl?

-For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/v/1234567 to http://example.com/v/1234567 ) and youtube-dl reports an URL of a service in that list as unsupported. In that case, simply report a bug.
+For one, have a look at the [list of supported sites](docs/supportedsites.md). Note that it can sometimes happen that the site changes its URL scheme (say, from http://example.com/video/1234567 to http://example.com/v/1234567 ) and youtube-dl reports an URL of a service in that list as unsupported. In that case, simply report a bug.

It is *not* possible to detect whether a URL is supported or not. That's because youtube-dl contains a generic extractor which matches **all** URLs. You may be tempted to disable, exclude, or remove the generic extractor, but the generic extractor not only allows users to extract videos from lots of websites that embed a video from another service, but may also be used to extract video from a service that it's hosting itself. Therefore, we neither recommend nor support disabling, excluding, or removing the generic extractor.
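The FAQ answer above maps directly onto youtube-dl's public extractor API. As a hedged sketch (the helper function and the URL are illustrative, not part of the diff), one can at least list every non-generic extractor that claims a given URL:

```python
import youtube_dl


def matching_extractors(url):
    # Every extractor whose URL regex matches the given URL; GenericIE
    # (IE_NAME 'generic') is skipped because it matches *all* URLs, which
    # is exactly why "is this URL supported?" cannot be answered mechanically.
    return [ie.IE_NAME for ie in youtube_dl.extractor.gen_extractors()
            if ie.suitable(url) and ie.IE_NAME != 'generic']


print(matching_extractors('http://www.youtube.com/watch?v=BaW_jenozKc'))
```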
@@ -664,6 +518,7 @@ youtube-dl makes the best effort to be a good command-line program, and thus sho
From a Python program, you can embed youtube-dl in a more powerful fashion, like this:

```python
+from __future__ import unicode_literals
import youtube_dl

ydl_opts = {}
```
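The hunk cuts the README's example off at `ydl_opts = {}`. For orientation, a minimal self-contained version of the documented embedding pattern (the URL is the README's own test video) looks like this:

```python
from __future__ import unicode_literals
import youtube_dl

ydl_opts = {}
# YoutubeDL is usable as a context manager; download() takes a list of URLs.
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```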
@@ -676,6 +531,7 @@ Most likely, you'll want to use various options. For a list of what can be done,
Here's a more complete example of a program that outputs only errors (and a short message after the download is finished), and downloads/converts the video to an mp3 file:

```python
+from __future__ import unicode_literals
import youtube_dl
```
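Again the hunk shows only the first lines of that mp3 example. A sketch of the full pattern the paragraph describes — silencing routine output with a custom logger, reporting completion via a progress hook, and converting with the FFmpegExtractAudio postprocessor (options dictionary keys as documented for youtube_dl):

```python
from __future__ import unicode_literals
import youtube_dl


class MyLogger(object):
    def debug(self, msg):
        pass  # suppress routine messages

    def warning(self, msg):
        pass  # suppress warnings

    def error(self, msg):
        print(msg)  # only errors reach the console


def my_hook(d):
    # Progress hooks receive a status dict for every download event.
    if d['status'] == 'finished':
        print('Done downloading, now converting ...')


ydl_opts = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '192',
    }],
    'logger': MyLogger(),
    'progress_hooks': [my_hook],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```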
@@ -733,7 +589,9 @@ If your report is shorter than two lines, it is almost certainly missing some of

For bug reports, this means that your report should contain the *complete* output of youtube-dl when called with the -v flag. The error message you get for (most) bugs even says so, but you would not believe how many of our bug reports do not contain this information.

-Site support requests **must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.
+If your server has multiple IPs or you suspect censorship, adding --call-home may be a good idea to get more diagnostics. If the error is `ERROR: Unable to extract ...` and you cannot reproduce it from multiple countries, add `--dump-pages` (warning: this will yield a rather large output, redirect it to the file `log.txt` by adding `>log.txt 2>&1` to your command-line) or upload the `.dump` files you get when you add `--write-pages` [somewhere](https://gist.github.com/).
+
+**Site support requests must contain an example URL**. An example URL is a URL you might want to download, like http://www.youtube.com/watch?v=BaW_jenozKc . There should be an obvious video present. Except under very special circumstances, the main page of a video service (e.g. http://www.youtube.com/ ) is *not* an example URL.

### Are you using the latest version?
@@ -45,12 +45,12 @@ for test in get_testcases():

    RESULT = ('.' + domain + '\n' in LIST or '\n' + domain + '\n' in LIST)

-    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict']
-                   or test['info_dict']['age_limit'] != 18):
+    if RESULT and ('info_dict' not in test or 'age_limit' not in test['info_dict'] or
+                   test['info_dict']['age_limit'] != 18):
        print('\nPotential missing age_limit check: {0}'.format(test['name']))

-    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict']
-                         and test['info_dict']['age_limit'] == 18):
+    elif not RESULT and ('info_dict' in test and 'age_limit' in test['info_dict'] and
+                         test['info_dict']['age_limit'] == 18):
        print('\nPotential false negative: {0}'.format(test['name']))

    else:
docs/supportedsites.md
@@ -17,6 +17,7 @@
- **AdultSwim**
- **Aftenposten**
- **Aftonbladet**
+- **AirMozilla**
- **AlJazeera**
- **Allocine**
- **AlphaPorno**

@@ -46,6 +47,7 @@
- **Bandcamp**
- **Bandcamp:album**
- **bbc.co.uk**: BBC iPlayer
+- **BeatportPro**
- **Beeg**
- **BehindKink**
- **Bet**

@@ -68,9 +70,12 @@
- **Canalplus**: canalplus.fr, piwiplus.fr and d8.tv
- **CBS**
- **CBSNews**: CBS News
+- **CBSSports**
- **CeskaTelevize**
- **channel9**: Channel 9
- **Chilloutzone**
+- **chirbit**
+- **chirbit:profile**
- **Cinchcast**
- **Cinemassacre**
- **clipfish**

@@ -113,6 +118,7 @@
- **DRTV**
- **Dump**
- **dvtv**: http://video.aktualne.cz/
+- **EaglePlatform**
- **EbaumsWorld**
- **EchoMsk**
- **eHow**

@@ -121,6 +127,7 @@
- **EllenTV**
- **EllenTV:clips**
- **ElPais**: El País
+- **Embedly**
- **EMPFlix**
- **Engadget**
- **Eporner**

@@ -139,6 +146,7 @@
- **Firstpost**
- **Flickr**
- **Folketinget**: Folketinget (ft.dk; Danish parliament)
+- **FootyRoom**
- **Foxgay**
- **FoxNews**
- **france2.fr:generation-quoi**

@@ -156,6 +164,7 @@
- **GameSpot**
- **GameStar**
- **Gametrailers**
+- **Gazeta**
- **GDCVault**
- **generic**: Generic downloader that works on some sites
- **GiantBomb**

@@ -190,6 +199,7 @@
- **ign.com**
- **imdb**: Internet Movie Database trailers
- **imdb:list**: Internet Movie Database lists
+- **Imgur**
- **Ina**
- **InfoQ**
- **Instagram**

@@ -204,6 +214,8 @@
- **Jove**
- **jpopsuki.tv**
- **Jukebox**
+- **Kaltura**
+- **KanalPlay**: Kanal 5/9/11 Play
- **Kankan**
- **Karaoketv**
- **keek**

@@ -215,6 +227,9 @@
- **Ku6**
- **la7.tv**
- **Laola1Tv**
+- **Letv**
+- **LetvPlaylist**
+- **LetvTv**
- **lifenews**: LIFE | NEWS
- **LiveLeak**
- **livestream**

@@ -262,6 +277,7 @@
- **myvideo**
- **MyVidster**
- **n-tv.de**
+- **NationalGeographic**
- **Naver**
- **NBA**
- **NBC**

@@ -298,11 +314,13 @@
- **Nuvid**
- **NYTimes**
- **ocw.mit.edu**
+- **Odnoklassniki**
- **OktoberfestTV**
- **on.aol.com**
- **Ooyala**
- **OpenFilm**
- **orf:fm4**: radio FM4
+- **orf:iptv**: iptv.ORF.at
- **orf:oe1**: Radio Österreich 1
- **orf:tvthek**: ORF TVthek
- **parliamentlive.tv**: UK parliament videos

@@ -310,21 +328,26 @@
- **PBS**
- **Phoenix**
- **Photobucket**
+- **Pladform**
- **PlanetaPlay**
- **play.fm**
- **played.to**
- **Playvid**
+- **Playwire**
- **plus.google**: Google Plus
- **pluzz.francetv.fr**
- **podomatic**
- **PornHd**
- **PornHub**
+- **PornHubPlaylist**
- **Pornotube**
- **PornoXO**
- **PromptFile**
- **prosiebensat1**: ProSiebenSat.1 Digital
+- **Puls4**
- **Pyvideo**
- **QuickVid**
+- **R7**
- **radio.de**
- **radiobremen**
- **radiofrance**

@@ -352,6 +375,7 @@
- **rutube:movie**: Rutube movies
- **rutube:person**: Rutube person videos
- **RUTV**: RUTV.RU
+- **Sandia**: Sandia National Laboratories
- **Sapo**: SAPO Vídeos
- **savefrom.net**
- **SBS**: sbs.com.au

@@ -379,7 +403,8 @@
- **soundcloud:playlist**
- **soundcloud:set**
- **soundcloud:user**
-- **Soundgasm**
+- **soundgasm**
+- **soundgasm:profile**
- **southpark.cc.com**
- **southpark.de**
- **Space**

@@ -392,13 +417,14 @@
- **SportBox**
- **SportDeutschland**
- **SRMediathek**: Saarländischer Rundfunk
+- **SSA**
- **stanfordoc**: Stanford Open ClassRoom
- **Steam**
- **streamcloud.eu**
- **StreamCZ**
- **StreetVoice**
- **SunPorno**
-- **SVTPlay**
+- **SVTPlay**: SVT Play and Öppet arkiv
- **SWRMediathek**
- **Syfy**
- **SztvHu**

@@ -445,6 +471,7 @@
- **Turbo**
- **Tutv**
- **tv.dfb.de**
+- **TV4**: tv4.se and tv4play.se
- **tvigle**: Интернет-телевидение Tvigle.ru
- **tvp.pl**
- **tvp.pl:Series**

@@ -487,6 +514,7 @@
- **Vidzi**
- **vier**
- **vier:videos**
+- **Viewster**
- **viki**
- **vimeo**
- **vimeo:album**

@@ -533,6 +561,9 @@
- **XXXYMovies**
- **Yahoo**: Yahoo screen and movies
- **Yam**
+- **yandexmusic:album**: Яндекс.Музыка - Альбом
+- **yandexmusic:playlist**: Яндекс.Музыка - Плейлист
+- **yandexmusic:track**: Яндекс.Музыка - Трек
- **YesJapan**
- **Ynet**
- **YouJizz**

@@ -552,6 +583,7 @@
- **youtube:subscriptions**: YouTube.com subscriptions feed, "ytsubs" keyword (requires authentication)
- **youtube:user**: YouTube.com user videos (URL or "ytuser" keyword)
- **youtube:watch_later**: Youtube watch later list, ":ytwatchlater" for short (requires authentication)
+- **Zapiks**
- **ZDF**
- **ZDFChannel**
- **zingmp3:album**: mp3.zing.vn albums
test/helper.py
@@ -113,6 +113,16 @@ def expect_info_dict(self, got_dict, expected_dict):
            self.assertTrue(
                got.startswith(start_str),
                'field %s (value: %r) should start with %r' % (info_field, got, start_str))
+        elif isinstance(expected, compat_str) and expected.startswith('contains:'):
+            got = got_dict.get(info_field)
+            contains_str = expected[len('contains:'):]
+            self.assertTrue(
+                isinstance(got, compat_str),
+                'Expected a %s object, but got %s for field %s' % (
+                    compat_str.__name__, type(got).__name__, info_field))
+            self.assertTrue(
+                contains_str in got,
+                'field %s (value: %r) should contain %r' % (info_field, got, contains_str))
        elif isinstance(expected, type):
            got = got_dict.get(info_field)
            self.assertTrue(isinstance(got, expected),
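The new `contains:` branch joins the existing prefix checks (such as `re:` and `startswith:`) that `expect_info_dict` applies to string expectations. A hypothetical test definition (field values invented for illustration, not taken from the diff) that would exercise it:

```python
# Sketch of a test-case dict as used throughout the youtube-dl test suite.
_TEST = {
    'url': 'http://www.example.com/video/1234567',
    'info_dict': {
        'id': '1234567',
        'ext': 'mp4',
        # New in this diff: passes if the substring occurs anywhere in the field.
        'description': 'contains:some expected substring',
        # Pre-existing regex-style check, for comparison.
        'title': 're:^Example .* video$',
    },
}
```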
@@ -163,12 +173,14 @@ def expect_info_dict(self, got_dict, expected_dict):
        info_dict_str += ''.join(
            ' %s: %s,\n' % (_repr(k), _repr(v))
            for k, v in test_info_dict.items() if k not in missing_keys)
-        info_dict_str += '\n'
+
+        if info_dict_str:
+            info_dict_str += '\n'
        info_dict_str += ''.join(
            ' %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
            for k in missing_keys)
        write_string(
-            '\n\'info_dict\': {\n' + info_dict_str + '}\n', out=sys.stderr)
+            '\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
        self.assertFalse(
            missing_keys,
            'Missing keys in test definition: %s' % (
test/parameters.json
@@ -28,7 +28,7 @@
    "retries": 10,
    "simulate": false,
    "subtitleslang": null,
-    "subtitlesformat": "srt",
+    "subtitlesformat": "best",
    "test": true,
    "updatetime": true,
    "usenetrc": false,
test/test_YoutubeDL.py
@@ -15,6 +15,8 @@ from youtube_dl import YoutubeDL
from youtube_dl.extractor import YoutubeIE
from youtube_dl.postprocessor.common import PostProcessor

+TEST_URL = 'http://localhost/sample.mp4'
+

class YDL(FakeYDL):
    def __init__(self, *args, **kwargs):
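Replacing the placeholder `'_'` URLs below with the URL-shaped `TEST_URL` constant keeps the fake format dictionaries realistic without touching the network. As a sketch of the pattern these tests follow (this particular test is not in the diff; `YDL`, `_make_result`, and `downloaded_info_dicts` are the module's existing helpers):

```python
def test_explicit_preference(self):
    # Sketch only: mirrors the surrounding tests in this module.
    ydl = YDL()
    formats = [
        {'format_id': 'low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
        {'format_id': 'high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
    ]
    info_dict = _make_result(formats)
    ydl.process_ie_result(info_dict)
    downloaded = ydl.downloaded_info_dicts[0]
    # With nothing else to distinguish the formats, higher 'preference' wins.
    self.assertEqual(downloaded['format_id'], 'high')
```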
@@ -46,8 +48,8 @@ class TestFormatSelection(unittest.TestCase):
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
-            {'ext': 'webm', 'height': 460, 'url': 'x'},
-            {'ext': 'mp4', 'height': 460, 'url': 'y'},
+            {'ext': 'webm', 'height': 460, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 460, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)
        yie = YoutubeIE(ydl)

@@ -60,8 +62,8 @@ class TestFormatSelection(unittest.TestCase):
        ydl = YDL()
        ydl.params['prefer_free_formats'] = True
        formats = [
-            {'ext': 'webm', 'height': 720, 'url': 'a'},
-            {'ext': 'mp4', 'height': 1080, 'url': 'b'},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 1080, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)

@@ -74,9 +76,9 @@ class TestFormatSelection(unittest.TestCase):
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
-            {'ext': 'webm', 'height': 720, 'url': '_'},
-            {'ext': 'mp4', 'height': 720, 'url': '_'},
-            {'ext': 'flv', 'height': 720, 'url': '_'},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
+            {'ext': 'mp4', 'height': 720, 'url': TEST_URL},
+            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)

@@ -88,8 +90,8 @@ class TestFormatSelection(unittest.TestCase):
        ydl = YDL()
        ydl.params['prefer_free_formats'] = False
        formats = [
-            {'ext': 'flv', 'height': 720, 'url': '_'},
-            {'ext': 'webm', 'height': 720, 'url': '_'},
+            {'ext': 'flv', 'height': 720, 'url': TEST_URL},
+            {'ext': 'webm', 'height': 720, 'url': TEST_URL},
        ]
        info_dict['formats'] = formats
        yie = YoutubeIE(ydl)

@@ -133,10 +135,10 @@ class TestFormatSelection(unittest.TestCase):

    def test_format_selection(self):
        formats = [
-            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
-            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
-            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
-            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
+            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': TEST_URL},
+            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': TEST_URL},
+            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

@@ -167,10 +169,10 @@ class TestFormatSelection(unittest.TestCase):

    def test_format_selection_audio(self):
        formats = [
-            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
-            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
+            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': TEST_URL},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

@@ -185,8 +187,8 @@ class TestFormatSelection(unittest.TestCase):
        self.assertEqual(downloaded['format_id'], 'audio-low')

        formats = [
-            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
-            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
+            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': TEST_URL},
+            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': TEST_URL},
        ]
        info_dict = _make_result(formats)

@@ -228,9 +230,9 @@ class TestFormatSelection(unittest.TestCase):

    def test_format_selection_video(self):
        formats = [
-            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
-            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
-            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
+            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': TEST_URL},
+            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': TEST_URL},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': TEST_URL},
|
||||
]
|
||||
info_dict = _make_result(formats)
|
||||
|
||||
@@ -337,6 +339,65 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'G')
 
+    def test_subtitles(self):
+        def s_formats(lang, autocaption=False):
+            return [{
+                'ext': ext,
+                'url': 'http://localhost/video.%s.%s' % (lang, ext),
+                '_auto': autocaption,
+            } for ext in ['vtt', 'srt', 'ass']]
+        subtitles = dict((l, s_formats(l)) for l in ['en', 'fr', 'es'])
+        auto_captions = dict((l, s_formats(l, True)) for l in ['it', 'pt', 'es'])
+        info_dict = {
+            'id': 'test',
+            'title': 'Test',
+            'url': 'http://localhost/video.mp4',
+            'subtitles': subtitles,
+            'automatic_captions': auto_captions,
+            'extractor': 'TEST',
+        }
+
+        def get_info(params={}):
+            params.setdefault('simulate', True)
+            ydl = YDL(params)
+            ydl.report_warning = lambda *args, **kargs: None
+            return ydl.process_video_result(info_dict, download=False)
+
+        result = get_info()
+        self.assertFalse(result.get('requested_subtitles'))
+        self.assertEqual(result['subtitles'], subtitles)
+        self.assertEqual(result['automatic_captions'], auto_captions)
+
+        result = get_info({'writesubtitles': True})
+        subs = result['requested_subtitles']
+        self.assertTrue(subs)
+        self.assertEqual(set(subs.keys()), set(['en']))
+        self.assertTrue(subs['en'].get('data') is None)
+        self.assertEqual(subs['en']['ext'], 'ass')
+
+        result = get_info({'writesubtitles': True, 'subtitlesformat': 'foo/srt'})
+        subs = result['requested_subtitles']
+        self.assertEqual(subs['en']['ext'], 'srt')
+
+        result = get_info({'writesubtitles': True, 'subtitleslangs': ['es', 'fr', 'it']})
+        subs = result['requested_subtitles']
+        self.assertTrue(subs)
+        self.assertEqual(set(subs.keys()), set(['es', 'fr']))
+
+        result = get_info({'writesubtitles': True, 'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
+        subs = result['requested_subtitles']
+        self.assertTrue(subs)
+        self.assertEqual(set(subs.keys()), set(['es', 'pt']))
+        self.assertFalse(subs['es']['_auto'])
+        self.assertTrue(subs['pt']['_auto'])
+
+        result = get_info({'writeautomaticsub': True, 'subtitleslangs': ['es', 'pt']})
+        subs = result['requested_subtitles']
+        self.assertTrue(subs)
+        self.assertEqual(set(subs.keys()), set(['es', 'pt']))
+        self.assertTrue(subs['es']['_auto'])
+        self.assertTrue(subs['pt']['_auto'])
+
     def test_add_extra_info(self):
         test_dict = {
             'extractor': 'Foo',

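The `'foo/srt'` case above exercises the new preference syntax for `subtitlesformat`: a `/`-separated list tried left to right, with `best` meaning the last format the extractor listed. A minimal sketch of that resolution, under the assumption that `formats` is a list of subtitle format dicts like the ones built by `s_formats`:

```python
# Minimal sketch of the preference resolution exercised by the
# 'foo/srt' test above; 'formats' is a hypothetical list of dicts
# such as [{'ext': 'vtt', ...}, {'ext': 'srt', ...}, {'ext': 'ass', ...}].
def pick_subtitle_format(formats, formats_query='best'):
    for ext in formats_query.split('/'):
        if ext == 'best':
            return formats[-1]  # extractors list formats worst-to-best
        matches = [f for f in formats if f['ext'] == ext]
        if matches:
            return matches[-1]
    return formats[-1]  # nothing matched: fall back to the best one
```

With `'foo/srt'`, `foo` matches nothing and `srt` wins, which is exactly what the assertion on `subs['en']['ext']` checks.
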
@@ -104,11 +104,11 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch(':tds', ['ComedyCentralShows'])
 
     def test_vimeo_matching(self):
-        self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
-        self.assertMatch('http://vimeo.com/channels/31259', ['vimeo:channel'])
-        self.assertMatch('http://vimeo.com/channels/31259/53576664', ['vimeo'])
-        self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
-        self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/channels/tributes', ['vimeo:channel'])
+        self.assertMatch('https://vimeo.com/channels/31259', ['vimeo:channel'])
+        self.assertMatch('https://vimeo.com/channels/31259/53576664', ['vimeo'])
+        self.assertMatch('https://vimeo.com/user7108434', ['vimeo:user'])
+        self.assertMatch('https://vimeo.com/user7108434/videos', ['vimeo:user'])
         self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])
 
     # https://github.com/rg3/youtube-dl/issues/1930

@@ -70,6 +70,8 @@ class TestJSInterpreter(unittest.TestCase):
         self.assertEqual(jsi.call_function('f'), -11)
 
     def test_comments(self):
+        'Skipping: Not yet fully implemented'
+        return
         jsi = JSInterpreter('''
         function x() {
             var x = /* 1 + */ 2;
@@ -80,6 +82,15 @@ class TestJSInterpreter(unittest.TestCase):
         ''')
         self.assertEqual(jsi.call_function('x'), 52)
 
+        jsi = JSInterpreter('''
+        function f() {
+            var x = "/*";
+            var y = 1 /* comment */ + 2;
+            return y;
+        }
+        ''')
+        self.assertEqual(jsi.call_function('f'), 3)
+
     def test_precedence(self):
         jsi = JSInterpreter('''
         function x() {

test/test_netrc.py (new file, 26 lines)
@@ -0,0 +1,26 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.extractor import (
+    gen_extractors,
+)
+
+
+class TestNetRc(unittest.TestCase):
+    def test_netrc_present(self):
+        for ie in gen_extractors():
+            if not hasattr(ie, '_login'):
+                continue
+            self.assertTrue(
+                hasattr(ie, '_NETRC_MACHINE'),
+                'Extractor %s supports login, but is missing a _NETRC_MACHINE property' % ie.IE_NAME)
+
+
+if __name__ == '__main__':
+    unittest.main()

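The check above guarantees that any extractor with a `_login` method declares `_NETRC_MACHINE`, the machine name looked up in `~/.netrc` when `--netrc` is used. A hedged sketch of that lookup via the stdlib, assuming a machine name of `youtube`:

```python
import netrc

# Roughly what happens with --netrc: the extractor's _NETRC_MACHINE
# (e.g. 'youtube') is looked up in ~/.netrc via the stdlib module.
auth = netrc.netrc().authenticators('youtube')
if auth is not None:
    username, _, password = auth  # (login, account, password) triple
```
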
test/test_postprocessors.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from youtube_dl.postprocessor import MetadataFromTitlePP
+
+
+class TestMetadataFromTitle(unittest.TestCase):
+    def test_format_to_regex(self):
+        pp = MetadataFromTitlePP(None, '%(title)s - %(artist)s')
+        self.assertEqual(pp._titleregex, '(?P<title>.+)\ \-\ (?P<artist>.+)')

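The assertion shows how the title format is compiled: each `%(field)s` becomes a named group and the literal text between fields is escaped. Applying the resulting regex by hand, with a sample title that is illustrative only:

```python
import re

# The regex asserted above, applied to a sample title.
titleregex = r'(?P<title>.+)\ \-\ (?P<artist>.+)'
match = re.match(titleregex, 'Some Song - Some Artist')
assert match.group('title') == 'Some Song'
assert match.group('artist') == 'Some Artist'
```
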
@@ -18,6 +18,15 @@ from youtube_dl.extractor import (
     VimeoIE,
     WallaIE,
     CeskaTelevizeIE,
+    LyndaIE,
+    NPOIE,
+    ComedyCentralIE,
+    NRKTVIE,
+    RaiIE,
+    VikiIE,
+    ThePlatformIE,
+    RTVEALaCartaIE,
+    FunnyOrDieIE,
 )
 
 
@@ -27,42 +36,38 @@ class BaseTestSubtitles(unittest.TestCase):
 
     def setUp(self):
         self.DL = FakeYDL()
-        self.ie = self.IE(self.DL)
+        self.ie = self.IE()
         self.DL.add_info_extractor(self.ie)
 
     def getInfoDict(self):
-        info_dict = self.ie.extract(self.url)
+        info_dict = self.DL.extract_info(self.url, download=False)
         return info_dict
 
     def getSubtitles(self):
         info_dict = self.getInfoDict()
-        return info_dict['subtitles']
+        subtitles = info_dict['requested_subtitles']
+        if not subtitles:
+            return subtitles
+        for sub_info in subtitles.values():
+            if sub_info.get('data') is None:
+                uf = self.DL.urlopen(sub_info['url'])
+                sub_info['data'] = uf.read().decode('utf-8')
+        return dict((l, sub_info['data']) for l, sub_info in subtitles.items())
 
 
 class TestYoutubeSubtitles(BaseTestSubtitles):
     url = 'QRS8MkLhQmM'
     IE = YoutubeIE
 
-    def test_youtube_no_writesubtitles(self):
-        self.DL.params['writesubtitles'] = False
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_youtube_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
-
-    def test_youtube_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['it']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
-
     def test_youtube_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(len(subtitles.keys()), 13)
+        self.assertEqual(md5(subtitles['en']), '4cd9278a35ba2305f47354ee13472260')
+        self.assertEqual(md5(subtitles['it']), '164a51f16f260476a05b50fe4c2f161d')
+        for lang in ['it', 'fr', 'de']:
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
     def test_youtube_subtitles_sbv_format(self):
         self.DL.params['writesubtitles'] = True
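The rewritten helpers reflect the new extraction flow: subtitles are selected during `extract_info` and surfaced as `requested_subtitles`, a dict mapping a language code to a format dict whose payload may still need to be fetched. A hedged sketch, with `ydl` and `url` standing in for the objects used in the tests:

```python
# 'requested_subtitles' maps a language code to a dict carrying 'ext'
# and either inline 'data' or a 'url' to download it from.
info = ydl.extract_info(url, download=False)
for lang, sub in (info.get('requested_subtitles') or {}).items():
    data = sub.get('data')
    if data is None:
        data = ydl.urlopen(sub['url']).read().decode('utf-8')
```
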
@@ -76,12 +81,6 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         subtitles = self.getSubtitles()
         self.assertEqual(md5(subtitles['en']), '3cb210999d3e021bd6c7f0ea751eab06')
 
-    def test_youtube_list_subtitles(self):
-        self.DL.expect_warning('Video doesn\'t have automatic captions')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_youtube_automatic_captions(self):
         self.url = '8YoUxe5ncPo'
         self.DL.params['writeautomaticsub'] = True
@@ -103,55 +102,22 @@ class TestYoutubeSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_youtube_multiple_langs(self):
-        self.url = 'QRS8MkLhQmM'
-        self.DL.params['writesubtitles'] = True
-        langs = ['it', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)
 
 
 class TestDailymotionSubtitles(BaseTestSubtitles):
     url = 'http://www.dailymotion.com/video/xczg00'
     IE = DailymotionIE
 
-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) >= 6)
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)
+        self.assertEqual(md5(subtitles['en']), '976553874490cba125086bbfea3ff76f')
+        self.assertEqual(md5(subtitles['fr']), '594564ec7d588942e384e920e5341792')
+        for lang in ['es', 'fr', 'de']:
+            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
     def test_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
@@ -159,61 +125,21 @@ class TestDailymotionSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)
 
 
 class TestTedSubtitles(BaseTestSubtitles):
     url = 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html'
     IE = TEDIE
 
-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertTrue(len(subtitles.keys()) >= 28)
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
+        self.assertEqual(md5(subtitles['en']), '4262c1665ff928a2dada178f62cb8d14')
+        self.assertEqual(md5(subtitles['fr']), '66a63f7f42c97a50f8c0e90bc7797bb5')
+        for lang in ['es', 'fr', 'de']:
             self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
 
 
@@ -221,14 +147,7 @@ class TestBlipTVSubtitles(BaseTestSubtitles):
     url = 'http://blip.tv/a/a-6603250'
     IE = BlipTVIE
 
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_allsubtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
@@ -240,39 +159,13 @@ class TestVimeoSubtitles(BaseTestSubtitles):
     url = 'http://vimeo.com/76979871'
     IE = VimeoIE
 
-    def test_no_writesubtitles(self):
-        subtitles = self.getSubtitles()
-        self.assertEqual(subtitles, None)
-
-    def test_subtitles(self):
-        self.DL.params['writesubtitles'] = True
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
-
-    def test_subtitles_lang(self):
-        self.DL.params['writesubtitles'] = True
-        self.DL.params['subtitleslangs'] = ['fr']
-        subtitles = self.getSubtitles()
-        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
-
     def test_allsubtitles(self):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
         self.assertEqual(set(subtitles.keys()), set(['de', 'en', 'es', 'fr']))
-
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
-    def test_automatic_captions(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['writeautomaticsub'] = True
-        self.DL.params['subtitleslang'] = ['en']
-        subtitles = self.getSubtitles()
-        self.assertTrue(len(subtitles.keys()) == 0)
+        self.assertEqual(md5(subtitles['en']), '8062383cf4dec168fc40a088aa6d5888')
+        self.assertEqual(md5(subtitles['fr']), 'b6191146a6c5d3a452244d853fde6dc8')
 
     def test_nosubtitles(self):
         self.DL.expect_warning('video doesn\'t have subtitles')
@@ -280,27 +173,13 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
-
-    def test_multiple_langs(self):
-        self.DL.params['writesubtitles'] = True
-        langs = ['es', 'fr', 'de']
-        self.DL.params['subtitleslangs'] = langs
-        subtitles = self.getSubtitles()
-        for lang in langs:
-            self.assertTrue(subtitles.get(lang) is not None, 'Subtitles for \'%s\' not extracted' % lang)
+        self.assertFalse(subtitles)
 
 
 class TestWallaSubtitles(BaseTestSubtitles):
     url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
     IE = WallaIE
 
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_allsubtitles(self):
         self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
@@ -315,19 +194,13 @@ class TestWallaSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
+        self.assertFalse(subtitles)
 
 
 class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
     url = 'http://www.ceskatelevize.cz/ivysilani/10600540290-u6-uzasny-svet-techniky'
     IE = CeskaTelevizeIE
 
-    def test_list_subtitles(self):
-        self.DL.expect_warning('Automatic Captions not supported by this server')
-        self.DL.params['listsubtitles'] = True
-        info_dict = self.getInfoDict()
-        self.assertEqual(info_dict, None)
-
     def test_allsubtitles(self):
         self.DL.expect_warning('Automatic Captions not supported by this server')
         self.DL.params['writesubtitles'] = True
@@ -342,7 +215,122 @@ class TestCeskaTelevizeSubtitles(BaseTestSubtitles):
         self.DL.params['writesubtitles'] = True
         self.DL.params['allsubtitles'] = True
         subtitles = self.getSubtitles()
-        self.assertEqual(len(subtitles), 0)
+        self.assertFalse(subtitles)
+
+
+class TestLyndaSubtitles(BaseTestSubtitles):
+    url = 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html'
+    IE = LyndaIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '09bbe67222259bed60deaa26997d73a7')
+
+
+class TestNPOSubtitles(BaseTestSubtitles):
+    url = 'http://www.npo.nl/nos-journaal/28-08-2014/POW_00722860'
+    IE = NPOIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['nl']))
+        self.assertEqual(md5(subtitles['nl']), 'fc6435027572b63fb4ab143abd5ad3f4')
+
+
+class TestMTVSubtitles(BaseTestSubtitles):
+    url = 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother'
+    IE = ComedyCentralIE
+
+    def getInfoDict(self):
+        return super(TestMTVSubtitles, self).getInfoDict()['entries'][0]
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'b9f6ca22a6acf597ec76f61749765e65')
+
+
+class TestNRKSubtitles(BaseTestSubtitles):
+    url = 'http://tv.nrk.no/serie/ikke-gjoer-dette-hjemme/DMPV73000411/sesong-2/episode-1'
+    IE = NRKTVIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['no']))
+        self.assertEqual(md5(subtitles['no']), '1d221e6458c95c5494dcd38e6a1f129a')
+
+
+class TestRaiSubtitles(BaseTestSubtitles):
+    url = 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html'
+    IE = RaiIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['it']))
+        self.assertEqual(md5(subtitles['it']), 'b1d90a98755126b61e667567a1f6680a')
+
+
+class TestVikiSubtitles(BaseTestSubtitles):
+    url = 'http://www.viki.com/videos/1060846v-punch-episode-18'
+    IE = VikiIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '53cb083a5914b2d84ef1ab67b880d18a')
+
+
+class TestThePlatformSubtitles(BaseTestSubtitles):
+    # from http://www.3playmedia.com/services-features/tools/integrations/theplatform/
+    # (see http://theplatform.com/about/partners/type/subtitles-closed-captioning/)
+    url = 'theplatform:JFUjUE1_ehvq'
+    IE = ThePlatformIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), '97e7670cbae3c4d26ae8bcc7fdd78d4b')
+
+
+class TestRtveSubtitles(BaseTestSubtitles):
+    url = 'http://www.rtve.es/alacarta/videos/los-misterios-de-laura/misterios-laura-capitulo-32-misterio-del-numero-17-2-parte/2428621/'
+    IE = RTVEALaCartaIE
+
+    def test_allsubtitles(self):
+        print('Skipping, only available from Spain')
+        return
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['es']))
+        self.assertEqual(md5(subtitles['es']), '69e70cae2d40574fb7316f31d6eb7fca')
+
+
+class TestFunnyOrDieSubtitles(BaseTestSubtitles):
+    url = 'http://www.funnyordie.com/videos/224829ff6d/judd-apatow-will-direct-your-vine'
+    IE = FunnyOrDieIE
+
+    def test_allsubtitles(self):
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['en']))
+        self.assertEqual(md5(subtitles['en']), 'c5593c193eacd353596c11c2d4f9ecc4')
 
 
 if __name__ == '__main__':

@@ -34,8 +34,8 @@ def _make_testfunc(testfile):
     def test_func(self):
         as_file = os.path.join(TEST_DIR, testfile)
         swf_file = os.path.join(TEST_DIR, test_id + '.swf')
-        if ((not os.path.exists(swf_file))
-                or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
+        if ((not os.path.exists(swf_file)) or
+                os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
             # Recompile
             try:
                 subprocess.check_call([

@@ -17,13 +17,22 @@ IGNORED_FILES = [
     'buildserver.py',
 ]
 
+IGNORED_DIRS = [
+    '.git',
+    '.tox',
+]
+
 from test.helper import assertRegexpMatches
 
 
 class TestUnicodeLiterals(unittest.TestCase):
     def test_all_files(self):
-        for dirpath, _, filenames in os.walk(rootDir):
+        for dirpath, dirnames, filenames in os.walk(rootDir):
+            for ignore_dir in IGNORED_DIRS:
+                if ignore_dir in dirnames:
+                    # If we remove the directory from dirnames os.walk won't
+                    # recurse into it
+                    dirnames.remove(ignore_dir)
             for basename in filenames:
                 if not basename.endswith('.py'):
                     continue

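The in-place `dirnames.remove(...)` is what makes this work: `os.walk` consults the mutable `dirnames` list when deciding where to descend, so pruning must mutate it rather than rebind it. A small standalone illustration:

```python
import os

# Mutating dirnames in place prunes the walk; rebinding it would not.
for dirpath, dirnames, filenames in os.walk('.'):
    if '.git' in dirnames:
        dirnames.remove('.git')  # os.walk will now skip .git entirely
```
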
@@ -38,6 +38,7 @@ from youtube_dl.utils import (
     parse_iso8601,
     read_batch_urls,
     sanitize_filename,
+    sanitize_path,
     shell_quote,
     smuggle_url,
     str_to_int,
@@ -86,6 +87,11 @@ class TestUtil(unittest.TestCase):
             sanitize_filename('New World record at 0:12:34'),
             'New World record at 0_12_34')
 
+        self.assertEqual(sanitize_filename('--gasdgf'), '_-gasdgf')
+        self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
+        self.assertEqual(sanitize_filename('.gasdgf'), 'gasdgf')
+        self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
+
         forbidden = '"\0\\/'
         for fc in forbidden:
             for fbc in forbidden:
@@ -126,6 +132,42 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
         self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
 
+    def test_sanitize_path(self):
+        if sys.platform != 'win32':
+            return
+
+        self.assertEqual(sanitize_path('abc'), 'abc')
+        self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
+        self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
+        self.assertEqual(sanitize_path('abc|def'), 'abc#def')
+        self.assertEqual(sanitize_path('<>:"|?*'), '#######')
+        self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
+        self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
+
+        self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
+
+        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
+        self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
+        self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
+
+        self.assertEqual(
+            sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
+            'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
+
+        self.assertEqual(
+            sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
+            'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
+        self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
+        self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
+        self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
+
+        self.assertEqual(sanitize_path('../abc'), '..\\abc')
+        self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
+        self.assertEqual(sanitize_path('./abc'), 'abc')
+        self.assertEqual(sanitize_path('./../abc'), '..\\abc')
+
     def test_ordered_set(self):
         self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
         self.assertEqual(orderedSet([]), [])
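Per the early return, these assertions only run on Windows, where `sanitize_path` rewrites characters that are invalid in a path component to `#`, normalizes `/` to `\`, and preserves `\\?\` long-path prefixes. Two of the mappings pinned down above, restated as a runnable check:

```python
# Windows-only behaviour, matching the test's early return elsewhere;
# the expected values are taken directly from the assertions above.
from youtube_dl.utils import sanitize_path

assert sanitize_path('abc|def') == 'abc#def'          # '|' is forbidden
assert sanitize_path('C:/abc/def') == 'C:\\abc\\def'  # '/' -> '\\'
```
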
@@ -244,6 +286,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_duration('2.5 hours'), 9000)
         self.assertEqual(parse_duration('02:03:04'), 7384)
         self.assertEqual(parse_duration('01:02:03:04'), 93784)
+        self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
 
     def test_fix_xml_ampersands(self):
         self.assertEqual(
@@ -370,6 +413,10 @@ class TestUtil(unittest.TestCase):
             "playlist":[{"controls":{"all":null}}]
         }''')
 
+        inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
+        json_code = js_to_json(inp)
+        self.assertEqual(json.loads(json_code), json.loads(inp))
+
     def test_js_to_json_edgecases(self):
         on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
         self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})

@@ -64,6 +64,12 @@ _TESTS = [
         'js',
         '4646B5181C6C3020DF1D9C7FCFEA.AD80ABF70C39BD369CCCAE780AFBB98FA6B6CB42766249D9488C288',
         '82C8849D94266724DC6B6AF89BBFA087EACCD963.B93C07FBA084ACAEFCF7C9D1FD0203C6C1815B6B'
+    ),
+    (
+        'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflKjOTVq/html5player.js',
+        'js',
+        '312AA52209E3623129A412D56A40F11CB0AF14AE.3EE09501CB14E3BCDC3B2AE808BF3F1D14E7FBF12',
+        '112AA5220913623229A412D56A40F11CB0AF14AE.3EE0950FCB14EEBCDC3B2AE808BF331D14E7FBF3',
     )
 ]

tox.ini
@@ -1,8 +1,11 @@
 [tox]
-envlist = py26,py27,py33
+envlist = py26,py27,py33,py34
 [testenv]
 deps =
     nose
     coverage
-commands = nosetests --verbose {posargs:test}  # --with-coverage --cover-package=youtube_dl --cover-html
+defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
+    --exclude test_subtitles.py --exclude test_write_annotations.py
+    --exclude test_youtube_lists.py
+commands = nosetests --verbose {posargs:{[testenv]defaultargs}}  # --with-coverage --cover-package=youtube_dl --cover-html
 # test.test_download:TestDownload.test_NowVideo

@@ -4,8 +4,10 @@
 from __future__ import absolute_import, unicode_literals
 
 import collections
+import contextlib
 import datetime
 import errno
+import fileinput
 import io
 import itertools
 import json
@@ -28,6 +30,7 @@ from .compat import (
     compat_basestring,
     compat_cookiejar,
     compat_expanduser,
+    compat_get_terminal_size,
     compat_http_client,
     compat_kwargs,
     compat_str,
@@ -46,18 +49,19 @@ from .utils import (
     ExtractorError,
     format_bytes,
     formatSeconds,
-    get_term_width,
     locked_file,
     make_HTTPS_handler,
     MaxDownloadsReached,
     PagedList,
     parse_filesize,
+    PerRequestProxyHandler,
     PostProcessingError,
     platform_name,
     preferredencoding,
     render_table,
     SameFileError,
     sanitize_filename,
+    sanitize_path,
     std_headers,
     subtitles_filename,
     takewhile_inclusive,
@@ -154,7 +158,7 @@ class YoutubeDL(object):
     allsubtitles:      Downloads all the subtitles of the video
                        (requires writesubtitles or writeautomaticsub)
     listsubtitles:     Lists all available subtitles for the video
-    subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt)
+    subtitlesformat:   The format code for subtitles
     subtitleslangs:    List of languages of the subtitles to download
     keepvideo:         Keep the video file after post-processing
     daterange:         A DateRange object, download only if the upload_date is in the range.
@@ -181,6 +185,8 @@ class YoutubeDL(object):
     prefer_insecure:   Use HTTP instead of HTTPS to retrieve information.
                        At the moment, this is only supported by YouTube.
     proxy:             URL of the proxy server to use
+    cn_verification_proxy:  URL of the proxy to use for IP address verification
+                       on Chinese sites. (Experimental)
     socket_timeout:    Time to wait for unresponsive hosts, in seconds
     bidi_workaround:   Work around buggy terminals without bidirectional text
                        support, using fridibi
@@ -199,18 +205,25 @@ class YoutubeDL(object):
                        postprocessor.
     progress_hooks:    A list of functions that get called on download
                        progress, with a dictionary with the entries
-                       * status: One of "downloading" and "finished".
+                       * status: One of "downloading", "error", or "finished".
+                                 Check this first and ignore unknown values.
 
-                       If status is one of "downloading" or "finished", the
+                       If status is one of "downloading", or "finished", the
                        following properties may also be present:
                        * filename: The final filename (always present)
+                       * tmpfilename: The filename we're currently writing to
                        * downloaded_bytes: Bytes on disk
                        * total_bytes: Size of the whole file, None if unknown
-                       * tmpfilename: The filename we're currently writing to
+                       * total_bytes_estimate: Guess of the eventual file size,
+                                               None if unavailable.
+                       * elapsed: The number of seconds since download started.
                        * eta: The estimated time in seconds, None if unknown
                        * speed: The download speed in bytes/second, None if
                                 unknown
+                       * fragment_index: The counter of the currently
+                                         downloaded video fragment.
+                       * fragment_count: The number of fragments (= individual
+                                         files that will be merged)
 
                        Progress hooks are guaranteed to be called at least once
                        (with status "finished") if the download is successful.
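A minimal hook matching the documented contract would check `status` first and read the optional keys defensively. This sketch is not part of the diff, just an illustration of the API described above:

```python
# A minimal progress hook along the lines documented above.
def progress_hook(d):
    if d['status'] == 'finished':
        print('Done downloading %s' % d['filename'])
    elif d['status'] == 'downloading':
        print('%s bytes so far, ETA %s s' % (
            d.get('downloaded_bytes'), d.get('eta')))

ydl_opts = {'progress_hooks': [progress_hook]}
```
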
@@ -240,10 +253,10 @@ class YoutubeDL(object):
     hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv.
 
     The following parameters are not used by YoutubeDL itself, they are used by
-    the FileDownloader:
+    the downloader (see youtube_dl/downloader/common.py):
     nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
     noresizebuffer, retries, continuedl, noprogress, consoletitle,
-    xattr_set_filesize.
+    xattr_set_filesize, external_downloader_args.
 
     The following options are used by the post processors:
     prefer_ffmpeg:     If True, use ffmpeg instead of avconv if both are available,
@@ -277,7 +290,7 @@ class YoutubeDL(object):
             try:
                 import pty
                 master, slave = pty.openpty()
-                width = get_term_width()
+                width = compat_get_terminal_size().columns
                 if width is None:
                     width_args = []
                 else:
@@ -301,8 +314,8 @@ class YoutubeDL(object):
                 raise
 
         if (sys.version_info >= (3,) and sys.platform != 'win32' and
-                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
-                and not params.get('restrictfilenames', False)):
+                sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
+                not params.get('restrictfilenames', False)):
             # On Python 3, the Unicode filesystem API will throw errors (#1474)
             self.report_warning(
                 'Assuming --restrict-filenames since file system encoding '
@@ -310,6 +323,11 @@ class YoutubeDL(object):
                 'Set the LC_ALL environment variable to fix this.')
             self.params['restrictfilenames'] = True
 
+        if isinstance(params.get('outtmpl'), bytes):
+            self.report_warning(
+                'Parameter outtmpl is bytes, but should be a unicode string. '
+                'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
+
         if '%(stitle)s' in self.params.get('outtmpl', ''):
             self.report_warning('%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag(which also secures %(uploader)s et al) instead.')
@@ -550,7 +568,7 @@ class YoutubeDL(object):
                 if v is not None)
         template_dict = collections.defaultdict(lambda: 'NA', template_dict)
 
-        outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
+        outtmpl = sanitize_path(self.params.get('outtmpl', DEFAULT_OUTTMPL))
         tmpl = compat_expanduser(outtmpl)
         filename = tmpl % template_dict
         # Temporary fix for #4787
@@ -617,7 +635,7 @@ class YoutubeDL(object):
         Returns a list with a dictionary for each video we find.
         If 'download', also downloads the videos.
         extra_info is a dict containing the extra values to add to each result
-         '''
+        '''
 
         if ie_key:
             ies = [self.get_info_extractor(ie_key)]
@@ -1001,6 +1019,15 @@ class YoutubeDL(object):
                     info_dict['timestamp'])
             info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
 
+        if self.params.get('listsubtitles', False):
+            if 'automatic_captions' in info_dict:
+                self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
+            self.list_subtitles(info_dict['id'], info_dict.get('subtitles'), 'subtitles')
+            return
+        info_dict['requested_subtitles'] = self.process_subtitles(
+            info_dict['id'], info_dict.get('subtitles'),
+            info_dict.get('automatic_captions'))
+
         # This extractors handle format selection themselves
         if info_dict['extractor'] in ['Youku']:
             if download:
@@ -1064,8 +1091,7 @@ class YoutubeDL(object):
         if req_format is None:
             req_format = 'best'
         formats_to_download = []
-        # The -1 is for supporting YoutubeIE
-        if req_format in ('-1', 'all'):
+        if req_format == 'all':
             formats_to_download = formats
         else:
             for rfstr in req_format.split(','):
@@ -1129,6 +1155,55 @@ class YoutubeDL(object):
             info_dict.update(formats_to_download[-1])
         return info_dict
 
+    def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
+        """Select the requested subtitles and their format"""
+        available_subs = {}
+        if normal_subtitles and self.params.get('writesubtitles'):
+            available_subs.update(normal_subtitles)
+        if automatic_captions and self.params.get('writeautomaticsub'):
+            for lang, cap_info in automatic_captions.items():
+                if lang not in available_subs:
+                    available_subs[lang] = cap_info
+
+        if (not self.params.get('writesubtitles') and not
+                self.params.get('writeautomaticsub') or not
+                available_subs):
+            return None
+
+        if self.params.get('allsubtitles', False):
+            requested_langs = available_subs.keys()
+        else:
+            if self.params.get('subtitleslangs', False):
+                requested_langs = self.params.get('subtitleslangs')
+            elif 'en' in available_subs:
+                requested_langs = ['en']
+            else:
+                requested_langs = [list(available_subs.keys())[0]]
+
+        formats_query = self.params.get('subtitlesformat', 'best')
+        formats_preference = formats_query.split('/') if formats_query else []
+        subs = {}
+        for lang in requested_langs:
+            formats = available_subs.get(lang)
+            if formats is None:
+                self.report_warning('%s subtitles not available for %s' % (lang, video_id))
+                continue
+            for ext in formats_preference:
+                if ext == 'best':
+                    f = formats[-1]
+                    break
+                matches = list(filter(lambda f: f['ext'] == ext, formats))
+                if matches:
+                    f = matches[-1]
+                    break
+            else:
+                f = formats[-1]
+                self.report_warning(
+                    'No subtitle format found matching "%s" for language %s, '
+                    'using %s' % (formats_query, lang, f['ext']))
+            subs[lang] = f
+        return subs
+
     def process_info(self, info_dict):
         """Process a single resolved IE result."""
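The selection order implemented here: explicit `subtitleslangs` wins, then `'en'` if available, then the first available language; `allsubtitles` takes everything. A hedged usage sketch with hypothetical inputs:

```python
from youtube_dl import YoutubeDL

# Hypothetical inputs; with no 'subtitleslangs' and no 'en' available,
# the first available language would be picked instead.
ydl = YoutubeDL({'writesubtitles': True, 'subtitleslangs': ['fr']})
subs = ydl.process_subtitles(
    'example_id',
    {'fr': [{'ext': 'vtt', 'url': 'http://localhost/fr.vtt'}]},
    None)
assert list(subs) == ['fr'] and subs['fr']['ext'] == 'vtt'
```
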
@@ -1191,7 +1266,7 @@ class YoutubeDL(object):
             return
 
         try:
-            dn = os.path.dirname(encodeFilename(filename))
+            dn = os.path.dirname(sanitize_path(encodeFilename(filename)))
             if dn and not os.path.exists(dn):
                 os.makedirs(dn)
         except (OSError, IOError) as err:
@@ -1231,15 +1306,23 @@ class YoutubeDL(object):
         subtitles_are_requested = any([self.params.get('writesubtitles', False),
                                        self.params.get('writeautomaticsub')])
 
-        if subtitles_are_requested and 'subtitles' in info_dict and info_dict['subtitles']:
+        if subtitles_are_requested and info_dict.get('requested_subtitles'):
             # subtitles download errors are already managed as troubles in relevant IE
             # that way it will silently go on when used with unsupporting IE
-            subtitles = info_dict['subtitles']
-            sub_format = self.params.get('subtitlesformat', 'srt')
-            for sub_lang in subtitles.keys():
-                sub = subtitles[sub_lang]
-                if sub is None:
-                    continue
+            subtitles = info_dict['requested_subtitles']
+            ie = self.get_info_extractor(info_dict['extractor_key'])
+            for sub_lang, sub_info in subtitles.items():
+                sub_format = sub_info['ext']
+                if sub_info.get('data') is not None:
+                    sub_data = sub_info['data']
+                else:
+                    try:
+                        sub_data = ie._download_webpage(
+                            sub_info['url'], info_dict['id'], note=False)
+                    except ExtractorError as err:
+                        self.report_warning('Unable to download subtitle for "%s": %s' %
+                                            (sub_lang, compat_str(err.cause)))
+                        continue
                 try:
                     sub_filename = subtitles_filename(filename, sub_lang, sub_format)
                     if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
@@ -1247,7 +1330,7 @@ class YoutubeDL(object):
                     else:
                         self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
                         with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
-                            subfile.write(sub)
+                            subfile.write(sub_data)
                 except (OSError, IOError):
                     self.report_error('Cannot write subtitles file ' + sub_filename)
                     return
@@ -1359,8 +1442,8 @@ class YoutubeDL(object):
         """Download a given list of URLs."""
         outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
         if (len(url_list) > 1 and
-                '%' not in outtmpl
-                and self.params.get('max_downloads') != 1):
+                '%' not in outtmpl and
+                self.params.get('max_downloads') != 1):
             raise SameFileError(outtmpl)
 
         for url in url_list:
@@ -1379,8 +1462,11 @@ class YoutubeDL(object):
         return self._download_retcode
 
     def download_with_info_file(self, info_filename):
-        with io.open(info_filename, 'r', encoding='utf-8') as f:
-            info = json.load(f)
+        with contextlib.closing(fileinput.FileInput(
+                [info_filename], mode='r',
+                openhook=fileinput.hook_encoded('utf-8'))) as f:
+            # FileInput doesn't have a read method, we can't call json.load
+            info = json.loads('\n'.join(f))
         try:
             self.process_ie_result(info, download=True)
         except DownloadError:
@@ -1527,29 +1613,18 @@ class YoutubeDL(object):
         return res
 
     def list_formats(self, info_dict):
-        def line(format, idlen=20):
-            return (('%-' + compat_str(idlen + 1) + 's%-10s%-12s%s') % (
-                format['format_id'],
-                format['ext'],
-                self.format_resolution(format),
-                self._format_note(format),
-            ))
-
         formats = info_dict.get('formats', [info_dict])
-        idlen = max(len('format code'),
-                    max(len(f['format_id']) for f in formats))
-        formats_s = [
-            line(f, idlen) for f in formats
+        table = [
+            [f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
+            for f in formats
             if f.get('preference') is None or f['preference'] >= -1000]
         if len(formats) > 1:
-            formats_s[-1] += (' ' if self._format_note(formats[-1]) else '') + '(best)'
+            table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
 
-        header_line = line({
-            'format_id': 'format code', 'ext': 'extension',
-            'resolution': 'resolution', 'format_note': 'note'}, idlen=idlen)
+        header_line = ['format code', 'extension', 'resolution', 'note']
         self.to_screen(
-            '[info] Available formats for %s:\n%s\n%s' %
-            (info_dict['id'], header_line, '\n'.join(formats_s)))
+            '[info] Available formats for %s:\n%s' %
+            (info_dict['id'], render_table(header_line, table)))
 
     def list_thumbnails(self, info_dict):
         thumbnails = info_dict.get('thumbnails')
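`render_table` from `youtube_dl.utils` pads each column to its widest cell, which replaces the hand-rolled width computation above. A quick illustration with sample rows (real rows come from the info_dict's formats):

```python
from youtube_dl.utils import render_table

print(render_table(
    ['format code', 'extension', 'resolution', 'note'],
    [['35', 'flv', '480p', ''],
     ['22', 'mp4', '720p', '(best)']]))
```
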
@@ -1568,6 +1643,17 @@ class YoutubeDL(object):
                 ['ID', 'width', 'height', 'URL'],
                 [[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
 
+    def list_subtitles(self, video_id, subtitles, name='subtitles'):
+        if not subtitles:
+            self.to_screen('%s has no %s' % (video_id, name))
+            return
+        self.to_screen(
+            'Available %s for %s:' % (name, video_id))
+        self.to_screen(render_table(
+            ['Language', 'formats'],
+            [[lang, ', '.join(f['ext'] for f in reversed(formats))]
+                for lang, formats in subtitles.items()]))
+
     def urlopen(self, req):
         """ Start an HTTP download """
@@ -1684,13 +1770,14 @@ class YoutubeDL(object):
         # Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
         if 'http' in proxies and 'https' not in proxies:
             proxies['https'] = proxies['http']
-        proxy_handler = compat_urllib_request.ProxyHandler(proxies)
+        proxy_handler = PerRequestProxyHandler(proxies)
 
         debuglevel = 1 if self.params.get('debug_printtraffic') else 0
         https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
         ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
         opener = compat_urllib_request.build_opener(
-            https_handler, proxy_handler, cookie_processor, ydlh)
+            proxy_handler, https_handler, cookie_processor, ydlh)
 
         # Delete the default user-agent header, which would otherwise apply in
         # cases where our custom HTTP handler doesn't come into play
         # (See https://github.com/rg3/youtube-dl/issues/1309 for details)

@@ -9,6 +9,7 @@ import codecs
 import io
 import os
+import random
 import shlex
 import sys
 
 
@@ -170,6 +171,9 @@ def _real_main(argv=None):
     if opts.recodevideo is not None:
         if opts.recodevideo not in ['mp4', 'flv', 'webm', 'ogg', 'mkv']:
             parser.error('invalid video recode format specified')
+    if opts.convertsubtitles is not None:
+        if opts.convertsubtitles not in ['srt', 'vtt', 'ass']:
+            parser.error('invalid subtitle format specified')
 
     if opts.date is not None:
         date = DateRange.day(opts.date)
@@ -189,14 +193,14 @@ def _real_main(argv=None):
     # In Python 2, sys.argv is a bytestring (also note http://bugs.python.org/issue2128 for Windows systems)
     if opts.outtmpl is not None:
         opts.outtmpl = opts.outtmpl.decode(preferredencoding())
-    outtmpl = ((opts.outtmpl is not None and opts.outtmpl)
-               or (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s')
-               or (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s')
-               or (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s')
-               or (opts.usetitle and '%(title)s-%(id)s.%(ext)s')
-               or (opts.useid and '%(id)s.%(ext)s')
-               or (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s')
-               or DEFAULT_OUTTMPL)
+    outtmpl = ((opts.outtmpl is not None and opts.outtmpl) or
+               (opts.format == '-1' and opts.usetitle and '%(title)s-%(id)s-%(format)s.%(ext)s') or
+               (opts.format == '-1' and '%(id)s-%(format)s.%(ext)s') or
+               (opts.usetitle and opts.autonumber and '%(autonumber)s-%(title)s-%(id)s.%(ext)s') or
+               (opts.usetitle and '%(title)s-%(id)s.%(ext)s') or
+               (opts.useid and '%(id)s.%(ext)s') or
+               (opts.autonumber and '%(autonumber)s-%(id)s.%(ext)s') or
+               DEFAULT_OUTTMPL)
     if not os.path.splitext(outtmpl)[1] and opts.extractaudio:
         parser.error('Cannot download a video and extract audio into the same'
                      ' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
@@ -209,6 +213,11 @@ def _real_main(argv=None):
     # PostProcessors
     postprocessors = []
     # Add the metadata pp first, the other pps will copy it
+    if opts.metafromtitle:
+        postprocessors.append({
+            'key': 'MetadataFromTitle',
+            'titleformat': opts.metafromtitle
+        })
     if opts.addmetadata:
         postprocessors.append({'key': 'FFmpegMetadata'})
     if opts.extractaudio:
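The ordering matters: the metadata-from-title entry is appended first so later postprocessors see the fields it extracts. The same configuration, passed programmatically (the title format is an example value only):

```python
# Equivalent programmatic configuration for YoutubeDL.
ydl_opts = {
    'postprocessors': [{
        'key': 'MetadataFromTitle',
        'titleformat': '%(artist)s - %(title)s',
    }],
}
```
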
@@ -223,10 +232,14 @@ def _real_main(argv=None):
             'key': 'FFmpegVideoConvertor',
             'preferedformat': opts.recodevideo,
         })
+    if opts.convertsubtitles:
+        postprocessors.append({
+            'key': 'FFmpegSubtitlesConvertor',
+            'format': opts.convertsubtitles,
+        })
     if opts.embedsubtitles:
         postprocessors.append({
             'key': 'FFmpegEmbedSubtitle',
-            'subtitlesformat': opts.subtitlesformat,
         })
     if opts.xattrs:
         postprocessors.append({'key': 'XAttrMetadata'})
@@ -248,6 +261,9 @@ def _real_main(argv=None):
         xattr  # Confuse flake8
     except ImportError:
         parser.error('setting filesize xattr requested but python-xattr is not available')
+    external_downloader_args = None
+    if opts.external_downloader_args:
+        external_downloader_args = shlex.split(opts.external_downloader_args)
     match_filter = (
         None if opts.match_filter is None
         else match_filter_func(opts.match_filter))
@@ -352,6 +368,8 @@ def _real_main(argv=None):
         'no_color': opts.no_color,
         'ffmpeg_location': opts.ffmpeg_location,
         'hls_prefer_native': opts.hls_prefer_native,
+        'external_downloader_args': external_downloader_args,
+        'cn_verification_proxy': opts.cn_verification_proxy,
     }
 
     with YoutubeDL(ydl_opts) as ydl:

@@ -1,9 +1,11 @@
 from __future__ import unicode_literals
 
+import collections
 import getpass
 import optparse
 import os
 import re
+import shutil
 import socket
 import subprocess
 import sys
@@ -364,6 +366,33 @@ def workaround_optparse_bug9161():
             return real_add_option(self, *bargs, **bkwargs)
         optparse.OptionGroup.add_option = _compat_add_option
 
+if hasattr(shutil, 'get_terminal_size'):  # Python >= 3.3
+    compat_get_terminal_size = shutil.get_terminal_size
+else:
+    _terminal_size = collections.namedtuple('terminal_size', ['columns', 'lines'])
+
+    def compat_get_terminal_size():
+        columns = compat_getenv('COLUMNS', None)
+        if columns:
+            columns = int(columns)
+        else:
+            columns = None
+        lines = compat_getenv('LINES', None)
+        if lines:
+            lines = int(lines)
+        else:
+            lines = None
+
+        try:
+            sp = subprocess.Popen(
+                ['stty', 'size'],
+                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            out, err = sp.communicate()
+            lines, columns = map(int, out.split())
+        except:
+            pass
+        return _terminal_size(columns, lines)
+
 
 __all__ = [
     'compat_HTTPError',
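On Python 3.3+ the shim is just `shutil.get_terminal_size`; elsewhere it falls back to the `COLUMNS`/`LINES` environment variables and finally `stty size`. Either way callers get a namedtuple:

```python
from youtube_dl.compat import compat_get_terminal_size

size = compat_get_terminal_size()  # terminal_size(columns=..., lines=...)
print(size.columns, size.lines)    # either may be None on the fallback path
```
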
@@ -371,6 +400,7 @@ __all__ = [
     'compat_chr',
     'compat_cookiejar',
     'compat_expanduser',
+    'compat_get_terminal_size',
     'compat_getenv',
     'compat_getpass',
     'compat_html_entities',

@@ -1,4 +1,4 @@
-from __future__ import unicode_literals
+from __future__ import division, unicode_literals
 
 import os
 import re
@@ -42,6 +42,8 @@ class FileDownloader(object):
     max_filesize:       Skip files larger than this size
     xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                         (experimenatal)
+    external_downloader_args:  A list of additional command-line arguments for the
+                        external downloader.
 
     Subclasses of this one must re-define the real_download method.
     """
@@ -54,6 +56,7 @@ class FileDownloader(object):
         self.ydl = ydl
         self._progress_hooks = []
         self.params = params
+        self.add_progress_hook(self.report_progress)
 
     @staticmethod
     def format_seconds(seconds):
@@ -226,42 +229,64 @@ class FileDownloader(object):
         self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
         self.to_console_title('youtube-dl ' + msg)
 
-    def report_progress(self, percent, data_len_str, speed, eta):
-        """Report download progress."""
-        if self.params.get('noprogress', False):
+    def report_progress(self, s):
+        if s['status'] == 'finished':
+            if self.params.get('noprogress', False):
+                self.to_screen('[download] Download completed')
+            else:
+                s['_total_bytes_str'] = format_bytes(s['total_bytes'])
+                if s.get('elapsed') is not None:
+                    s['_elapsed_str'] = self.format_seconds(s['elapsed'])
+                    msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
+                else:
+                    msg_template = '100%% of %(_total_bytes_str)s'
+                self._report_progress_status(
+                    msg_template % s, is_last_line=True)
+
+        if self.params.get('noprogress'):
             return
-        if eta is not None:
-            eta_str = self.format_eta(eta)
-        else:
-            eta_str = 'Unknown ETA'
-        if percent is not None:
-            percent_str = self.format_percent(percent)
-        else:
-            percent_str = 'Unknown %'
-        speed_str = self.format_speed(speed)
-
-        msg = ('%s of %s at %s ETA %s' %
-               (percent_str, data_len_str, speed_str, eta_str))
-        self._report_progress_status(msg)
-
-    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
-        if self.params.get('noprogress', False):
+
+        if s['status'] != 'downloading':
             return
-        downloaded_str = format_bytes(downloaded_data_len)
-        speed_str = self.format_speed(speed)
-        elapsed_str = FileDownloader.format_seconds(elapsed)
-        msg = '%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
-        self._report_progress_status(msg)
-
-    def report_finish(self, data_len_str, tot_time):
-        """Report download finished."""
-        if self.params.get('noprogress', False):
-            self.to_screen('[download] Download completed')
+
+        if s.get('eta') is not None:
+            s['_eta_str'] = self.format_eta(s['eta'])
         else:
-            self._report_progress_status(
-                ('100%% of %s in %s' %
-                 (data_len_str, self.format_seconds(tot_time))),
-                is_last_line=True)
+            s['_eta_str'] = 'Unknown ETA'
+
+        if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
+            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
+        elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
+            s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
+        else:
+            if s.get('downloaded_bytes') == 0:
+                s['_percent_str'] = self.format_percent(0)
+            else:
+                s['_percent_str'] = 'Unknown %'
+
+        if s.get('speed') is not None:
+            s['_speed_str'] = self.format_speed(s['speed'])
+        else:
+            s['_speed_str'] = 'Unknown speed'
+
+        if s.get('total_bytes') is not None:
+            s['_total_bytes_str'] = format_bytes(s['total_bytes'])
+            msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
+        elif s.get('total_bytes_estimate') is not None:
+            s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
+            msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
+        else:
+            if s.get('downloaded_bytes') is not None:
+                s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
|
||||
if s.get('elapsed'):
|
||||
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
|
||||
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
|
||||
else:
|
||||
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
|
||||
else:
|
||||
msg_template = '%(_percent_str)s % at %(_speed_str)s ETA %(_eta_str)s'
|
||||
|
||||
self._report_progress_status(msg_template % s)
|
||||
|
||||
def report_resuming_byte(self, resume_len):
|
||||
"""Report attempt to resume at given byte."""
|
||||
@@ -288,14 +313,14 @@ class FileDownloader(object):
|
||||
"""
|
||||
|
||||
nooverwrites_and_exists = (
|
||||
self.params.get('nooverwrites', False)
|
||||
and os.path.exists(encodeFilename(filename))
|
||||
self.params.get('nooverwrites', False) and
|
||||
os.path.exists(encodeFilename(filename))
|
||||
)
|
||||
|
||||
continuedl_and_exists = (
|
||||
self.params.get('continuedl', False)
|
||||
and os.path.isfile(encodeFilename(filename))
|
||||
and not self.params.get('nopart', False)
|
||||
self.params.get('continuedl', False) and
|
||||
os.path.isfile(encodeFilename(filename)) and
|
||||
not self.params.get('nopart', False)
|
||||
)
|
||||
|
||||
# Check file already present
|
||||
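
For reference, a sketch of the status dict a progress hook (including the new report_progress above) receives; the keys mirror the ones assembled in this diff, the values are purely illustrative:

    # Illustrative 'downloading' payload; 'eta', 'speed' and 'total_bytes'
    # may be None or absent depending on the downloader.
    status = {
        'status': 'downloading',        # or 'finished'
        'downloaded_bytes': 524288,
        'total_bytes': 1048576,         # or 'total_bytes_estimate' when guessed
        'eta': 3,                       # seconds remaining
        'speed': 174762.7,              # bytes per second
        'elapsed': 3.0,                 # seconds since the download started
        'filename': 'video.mp4',
        'tmpfilename': 'video.mp4.part',
    }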

youtube_dl/downloader/external.py
@@ -51,6 +51,13 @@ class ExternalFD(FileDownloader):
            return []
        return [command_option, source_address]

    def _configuration_args(self, default=[]):
        ex_args = self.params.get('external_downloader_args')
        if ex_args is None:
            return default
        assert isinstance(ex_args, list)
        return ex_args

    def _call_downloader(self, tmpfilename, info_dict):
        """ Either overwrite this or implement _make_cmd """
        cmd = self._make_cmd(tmpfilename, info_dict)
@@ -79,6 +86,7 @@ class CurlFD(ExternalFD):
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--interface')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd

@@ -89,15 +97,16 @@ class WgetFD(ExternalFD):
        for key, val in info_dict['http_headers'].items():
            cmd += ['--header', '%s: %s' % (key, val)]
        cmd += self._source_address('--bind-address')
        cmd += self._configuration_args()
        cmd += ['--', info_dict['url']]
        return cmd


class Aria2cFD(ExternalFD):
    def _make_cmd(self, tmpfilename, info_dict):
        cmd = [
            self.exe, '-c',
            '--min-split-size', '1M', '--max-connection-per-server', '4']
        cmd = [self.exe, '-c']
        cmd += self._configuration_args([
            '--min-split-size', '1M', '--max-connection-per-server', '4'])
        dn = os.path.dirname(tmpfilename)
        if dn:
            cmd += ['--dir', dn]
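
A sketch of how the new plumbing fits together (the argument string is hypothetical): the --external-downloader-args value is split with shlex.split() in __init__.py, travels in the 'external_downloader_args' param, and _configuration_args() substitutes it for the per-downloader defaults, e.g. aria2c's:

    import shlex

    # What __init__.py does with a (hypothetical) user-supplied string:
    external_downloader_args = shlex.split('-x 8 -k 2M')

    # What Aria2cFD._make_cmd() then effectively builds:
    default = ['--min-split-size', '1M', '--max-connection-per-server', '4']
    cmd = ['aria2c', '-c'] + (external_downloader_args
                              if external_downloader_args is not None else default)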

youtube_dl/downloader/f4m.py
@@ -1,4 +1,4 @@
from __future__ import unicode_literals
from __future__ import division, unicode_literals

import base64
import io
@@ -11,11 +11,11 @@ from .common import FileDownloader
from .http import HttpFD
from ..compat import (
    compat_urlparse,
    compat_urllib_error,
)
from ..utils import (
    struct_pack,
    struct_unpack,
    format_bytes,
    encodeFilename,
    sanitize_open,
    xpath_text,
@@ -122,7 +122,8 @@ class FlvReader(io.BytesIO):

        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        self.read(1)
        flags = self.read_unsigned_char()
        live = flags & 0x20 != 0
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
@@ -161,6 +162,7 @@ class FlvReader(io.BytesIO):
        return {
            'segments': segments,
            'fragments': fragments,
            'live': live,
        }

    def read_bootstrap_info(self):
@@ -183,6 +185,10 @@ def build_fragments_list(boot_info):
    for segment, fragments_count in segment_run_table['segment_run']:
        for _ in range(fragments_count):
            res.append((segment, next(fragments_counter)))

    if boot_info['live']:
        res = res[-2:]

    return res


@@ -247,22 +253,43 @@ class F4mFD(FileDownloader):
            self.report_error('Unsupported DRM')
        return media

    def _get_bootstrap_from_url(self, bootstrap_url):
        bootstrap = self.ydl.urlopen(bootstrap_url).read()
        return read_bootstrap_info(bootstrap)

    def _update_live_fragments(self, bootstrap_url, latest_fragment):
        fragments_list = []
        retries = 30
        while (not fragments_list) and (retries > 0):
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
            fragments_list = build_fragments_list(boot_info)
            fragments_list = [f for f in fragments_list if f[1] > latest_fragment]
            if not fragments_list:
                # Retry after a while
                time.sleep(5.0)
                retries -= 1

        if not fragments_list:
            self.report_error('Failed to update fragments')

        return fragments_list

    def _parse_bootstrap_node(self, node, base_url):
        if node.text is None:
            bootstrap_url = compat_urlparse.urljoin(
                base_url, node.attrib['url'])
            boot_info = self._get_bootstrap_from_url(bootstrap_url)
        else:
            bootstrap_url = None
            bootstrap = base64.b64decode(node.text.encode('ascii'))
            boot_info = read_bootstrap_info(bootstrap)
        return (boot_info, bootstrap_url)

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        requested_bitrate = info_dict.get('tbr')
        self.to_screen('[download] Downloading f4m manifest')
        manifest = self.ydl.urlopen(man_url).read()
        self.report_destination(filename)
        http_dl = HttpQuietDownloader(
            self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'ratelimit': self.params.get('ratelimit', None),
                'test': self.params.get('test', False),
            }
        )

        doc = etree.fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f)
@@ -277,18 +304,13 @@ class F4mFD(FileDownloader):

        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
        if bootstrap_node.text is None:
            bootstrap_url = compat_urlparse.urljoin(
                base_url, bootstrap_node.attrib['url'])
            bootstrap = self.ydl.urlopen(bootstrap_url).read()
        else:
            bootstrap = base64.b64decode(bootstrap_node.text)
        boot_info, bootstrap_url = self._parse_bootstrap_node(bootstrap_node, base_url)
        live = boot_info['live']
        metadata_node = media.find(_add_ns('metadata'))
        if metadata_node is not None:
            metadata = base64.b64decode(metadata_node.text)
            metadata = base64.b64decode(metadata_node.text.encode('ascii'))
        else:
            metadata = None
        boot_info = read_bootstrap_info(bootstrap)

        fragments_list = build_fragments_list(boot_info)
        if self.params.get('test', False):
@@ -298,64 +320,112 @@ class F4mFD(FileDownloader):
        # For some akamai manifests we'll need to add a query to the fragment url
        akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

        self.report_destination(filename)
        http_dl = HttpQuietDownloader(
            self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'ratelimit': self.params.get('ratelimit', None),
                'test': self.params.get('test', False),
            }
        )
        tmpfilename = self.temp_name(filename)
        (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')

        write_flv_header(dest_stream)
        write_metadata_tag(dest_stream, metadata)
        if not live:
            write_metadata_tag(dest_stream, metadata)

        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'status': 'downloading',
            'downloaded_bytes': 0,
            'frag_counter': 0,
            'frag_index': 0,
            'frag_count': total_frags,
            'filename': filename,
            'tmpfilename': tmpfilename,
        }
        start = time.time()

        def frag_progress_hook(status):
            frag_total_bytes = status.get('total_bytes', 0)
            estimated_size = (state['downloaded_bytes'] +
                              (total_frags - state['frag_counter']) * frag_total_bytes)
            if status['status'] == 'finished':
        def frag_progress_hook(s):
            if s['status'] not in ('downloading', 'finished'):
                return

            frag_total_bytes = s.get('total_bytes', 0)
            if s['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_counter'] += 1
                progress = self.calc_percent(state['frag_counter'], total_frags)
                byte_counter = state['downloaded_bytes']
                state['frag_index'] += 1

            estimated_size = (
                (state['downloaded_bytes'] + frag_total_bytes) /
                (state['frag_index'] + 1) * total_frags)
            time_now = time.time()
            state['total_bytes_estimate'] = estimated_size
            state['elapsed'] = time_now - start

            if s['status'] == 'finished':
                progress = self.calc_percent(state['frag_index'], total_frags)
            else:
                frag_downloaded_bytes = status['downloaded_bytes']
                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                frag_downloaded_bytes = s['downloaded_bytes']
                frag_progress = self.calc_percent(frag_downloaded_bytes,
                                                  frag_total_bytes)
                progress = self.calc_percent(state['frag_counter'], total_frags)
                progress = self.calc_percent(state['frag_index'], total_frags)
                progress += frag_progress / float(total_frags)

                eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
                self.report_progress(progress, format_bytes(estimated_size),
                                     status.get('speed'), eta)
                state['eta'] = self.calc_eta(
                    start, time_now, estimated_size, state['downloaded_bytes'] + frag_downloaded_bytes)
                state['speed'] = s.get('speed')
            self._hook_progress(state)

        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
        for (seg_i, frag_i) in fragments_list:
        while fragments_list:
            seg_i, frag_i = fragments_list.pop(0)
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            url = base_url + name
            if akamai_pv:
                url += '?' + akamai_pv.strip(';')
            frag_filename = '%s-%s' % (tmpfilename, name)
            success = http_dl.download(frag_filename, {'url': url})
            if not success:
                return False
            with open(frag_filename, 'rb') as down:
                down_data = down.read()
                reader = FlvReader(down_data)
                while True:
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
            frags_filenames.append(frag_filename)
            try:
                success = http_dl.download(frag_filename, {'url': url})
                if not success:
                    return False
                with open(frag_filename, 'rb') as down:
                    down_data = down.read()
                    reader = FlvReader(down_data)
                    while True:
                        _, box_type, box_data = reader.read_box_info()
                        if box_type == b'mdat':
                            dest_stream.write(box_data)
                            break
                if live:
                    os.remove(frag_filename)
                else:
                    frags_filenames.append(frag_filename)
            except (compat_urllib_error.HTTPError, ) as err:
                if live and (err.code == 404 or err.code == 410):
                    # We didn't keep up with the live window. Continue
                    # with the next available fragment.
                    msg = 'Fragment %d unavailable' % frag_i
                    self.report_warning(msg)
                    fragments_list = []
                else:
                    raise

            if not fragments_list and live and bootstrap_url:
                fragments_list = self._update_live_fragments(bootstrap_url, frag_i)
                total_frags += len(fragments_list)
                if fragments_list and (fragments_list[0][1] > frag_i + 1):
                    msg = 'Missed %d fragments' % (fragments_list[0][1] - (frag_i + 1))
                    self.report_warning(msg)

        dest_stream.close()
        self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)

        elapsed = time.time() - start
        self.try_rename(tmpfilename, filename)
        for frag_file in frags_filenames:
            os.remove(frag_file)
@@ -366,6 +436,7 @@ class F4mFD(FileDownloader):
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
            'elapsed': elapsed,
        })

        return True
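
In outline, the live-window handling added above reduces to the following loop (a simplified sketch; fetch_boot_info stands in for _get_bootstrap_from_url, and build_fragments_list is the helper from this file):

    import time

    def refresh_live_fragments(fetch_boot_info, latest_fragment, retries=30):
        # Poll the bootstrap URL until fragments newer than the last one
        # downloaded appear, waiting five seconds between attempts.
        while retries > 0:
            fragments = [f for f in build_fragments_list(fetch_boot_info())
                         if f[1] > latest_fragment]
            if fragments:
                return fragments
            time.sleep(5.0)
            retries -= 1
        return []  # the caller reports 'Failed to update fragments'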

youtube_dl/downloader/http.py
@@ -1,10 +1,9 @@
from __future__ import unicode_literals

import os
import time

from socket import error as SocketError
import errno
import os
import socket
import time

from .common import FileDownloader
from ..compat import (
@@ -15,7 +14,6 @@ from ..utils import (
    ContentTooShortError,
    encodeFilename,
    sanitize_open,
    format_bytes,
)


@@ -94,6 +92,8 @@ class HttpFD(FileDownloader):
                    self._hook_progress({
                        'filename': filename,
                        'status': 'finished',
                        'downloaded_bytes': resume_len,
                        'total_bytes': resume_len,
                    })
                    return True
                else:
@@ -102,7 +102,7 @@ class HttpFD(FileDownloader):
                    resume_len = 0
                    open_mode = 'wb'
                    break
            except SocketError as e:
            except socket.error as e:
                if e.errno != errno.ECONNRESET:
                    # Connection reset is no problem, just retry
                    raise
@@ -137,7 +137,6 @@ class HttpFD(FileDownloader):
                self.to_screen('\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
@@ -196,20 +195,19 @@ class HttpFD(FileDownloader):
            # Progress message
            speed = self.calc_speed(start, now, byte_counter - resume_len)
            if data_len is None:
                eta = percent = None
                eta = None
            else:
                percent = self.calc_percent(byte_counter, data_len)
                eta = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
            self.report_progress(percent, data_len_str, speed, eta)

            self._hook_progress({
                'status': 'downloading',
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
                'eta': eta,
                'speed': speed,
                'elapsed': now - start,
            })

            if is_test and byte_counter == data_len:
@@ -221,7 +219,7 @@ class HttpFD(FileDownloader):
            return False
        if tmpfilename != '-':
            stream.close()
        self.report_finish(data_len_str, (time.time() - start))

        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)
@@ -235,6 +233,7 @@ class HttpFD(FileDownloader):
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
            'elapsed': time.time() - start,
        })

        return True

youtube_dl/downloader/rtmp.py
@@ -11,7 +11,6 @@ from ..compat import compat_str
from ..utils import (
    check_executable,
    encodeFilename,
    format_bytes,
    get_exe_version,
)

@@ -51,23 +50,23 @@ class RtmpFD(FileDownloader):
                    if not resume_percent:
                        resume_percent = percent
                        resume_downloaded_data_len = downloaded_data_len
                    eta = self.calc_eta(start, time.time(), 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time.time(), downloaded_data_len - resume_downloaded_data_len)
                    time_now = time.time()
                    eta = self.calc_eta(start, time_now, 100 - resume_percent, percent - resume_percent)
                    speed = self.calc_speed(start, time_now, downloaded_data_len - resume_downloaded_data_len)
                    data_len = None
                    if percent > 0:
                        data_len = int(downloaded_data_len * 100 / percent)
                    data_len_str = '~' + format_bytes(data_len)
                    self.report_progress(percent, data_len_str, speed, eta)
                    cursor_in_new_line = False
                    self._hook_progress({
                        'status': 'downloading',
                        'downloaded_bytes': downloaded_data_len,
                        'total_bytes': data_len,
                        'total_bytes_estimate': data_len,
                        'tmpfilename': tmpfilename,
                        'filename': filename,
                        'status': 'downloading',
                        'eta': eta,
                        'elapsed': time_now - start,
                        'speed': speed,
                    })
                    cursor_in_new_line = False
                else:
                    # no percent for live streams
                    mobj = re.search(r'([0-9]+\.[0-9]{3}) kB / [0-9]+\.[0-9]{2} sec', line)
@@ -75,15 +74,15 @@ class RtmpFD(FileDownloader):
                        downloaded_data_len = int(float(mobj.group(1)) * 1024)
                        time_now = time.time()
                        speed = self.calc_speed(start, time_now, downloaded_data_len)
                        self.report_progress_live_stream(downloaded_data_len, speed, time_now - start)
                        cursor_in_new_line = False
                        self._hook_progress({
                            'downloaded_bytes': downloaded_data_len,
                            'tmpfilename': tmpfilename,
                            'filename': filename,
                            'status': 'downloading',
                            'elapsed': time_now - start,
                            'speed': speed,
                        })
                        cursor_in_new_line = False
                elif self.params.get('verbose', False):
                    if not cursor_in_new_line:
                        self.to_screen('')
@@ -120,7 +119,9 @@ class RtmpFD(FileDownloader):
        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', '--verbose', '-r', url, '-o', tmpfilename]
        basic_args = [
            'rtmpdump', '--verbose', '-r', url,
            '-o', encodeFilename(tmpfilename, True)]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:

youtube_dl/extractor/__init__.py
@@ -8,6 +8,7 @@ from .adobetv import AdobeTVIE
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
@@ -36,6 +37,7 @@ from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
@@ -58,10 +60,15 @@ from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
    ChirbitIE,
    ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
@@ -110,6 +117,7 @@ from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
@@ -121,6 +129,7 @@ from .ellentv import (
    EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
@@ -143,6 +152,7 @@ from .fktv import (
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
@@ -167,6 +177,7 @@ from .gameone import (
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .giantbomb import GiantBombIE
@@ -204,6 +215,7 @@ from .imdb import (
    ImdbIE,
    ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
@@ -219,6 +231,8 @@ from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .keezmovies import KeezMoviesIE
@@ -230,6 +244,11 @@ from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .letv import (
    LetvIE,
    LetvTvIE,
    LetvPlaylistIE
)
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
@@ -282,6 +301,7 @@ from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
@@ -331,6 +351,7 @@ from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .openfilm import OpenFilmIE
@@ -338,6 +359,7 @@ from .orf import (
    ORFTVthekIE,
    ORFOE1IE,
    ORFFM4IE,
    ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
@@ -345,18 +367,25 @@ from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import PornHubIE
from .pornhub import (
    PornHubIE,
    PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
@@ -386,6 +415,7 @@ from .rutube import (
    RutubePersonIE,
)
from .rutv import RUTVIE
from .sandia import SandiaIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
@@ -416,7 +446,10 @@ from .soundcloud import (
    SoundcloudUserIE,
    SoundcloudPlaylistIE
)
from .soundgasm import SoundgasmIE
from .soundgasm import (
    SoundgasmIE,
    SoundgasmProfileIE
)
from .southpark import (
    SouthParkIE,
    SouthparkDeIE,
@@ -430,6 +463,7 @@ from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
@@ -482,6 +516,7 @@ from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv4 import TV4IE
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
@@ -524,6 +559,7 @@ from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
    VimeoIE,
    VimeoAlbumIE,
@@ -580,6 +616,11 @@ from .yahoo import (
    YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
    YandexMusicTrackIE,
    YandexMusicAlbumIE,
    YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
@@ -603,6 +644,7 @@ from .youtube import (
    YoutubeUserIE,
    YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
    ZingMp3SongIE,

youtube_dl/extractor/adobetv.py
@@ -28,7 +28,6 @@ class AdobeTVIE(InfoExtractor):

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        player = self._parse_json(
@@ -44,8 +43,10 @@ class AdobeTVIE(InfoExtractor):
            self._html_search_meta('datepublished', webpage, 'upload date'))

        duration = parse_duration(
            self._html_search_meta('duration', webpage, 'duration')
            or self._search_regex(r'Runtime:\s*(\d{2}:\d{2}:\d{2})', webpage, 'duration'))
            self._html_search_meta('duration', webpage, 'duration') or
            self._search_regex(
                r'Runtime:\s*(\d{2}:\d{2}:\d{2})',
                webpage, 'duration', fatal=False))

        view_count = str_to_int(self._search_regex(
            r'<div class="views">\s*Views?:\s*([\d,.]+)\s*</div>',

youtube_dl/extractor/adultswim.py
@@ -2,13 +2,12 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    xpath_text,
    float_or_none,
    xpath_text,
)


@@ -38,6 +37,7 @@ class AdultSwimIE(InfoExtractor):
            },
        ],
        'info_dict': {
            'id': 'rQxZvXQ4ROaSOqq-or2Mow',
            'title': 'Rick and Morty - Pilot',
            'description': "Rick moves in with his daughter's family and establishes himself as a bad influence on his grandson, Morty. "
        }
@@ -55,9 +55,28 @@ class AdultSwimIE(InfoExtractor):
            }
        ],
        'info_dict': {
            'id': '-t8CamQlQ2aYZ49ItZCFog',
            'title': 'American Dad - Putting Francine Out of Business',
            'description': 'Stan hatches a plan to get Francine out of the real estate business.Watch more American Dad on [adult swim].'
        },
    }, {
        'url': 'http://www.adultswim.com/videos/tim-and-eric-awesome-show-great-job/dr-steve-brule-for-your-wine/',
        'playlist': [
            {
                'md5': '3e346a2ab0087d687a05e1e7f3b3e529',
                'info_dict': {
                    'id': 'sY3cMUR_TbuE4YmdjzbIcQ-0',
                    'ext': 'flv',
                    'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
                    'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
                },
            }
        ],
        'info_dict': {
            'id': 'sY3cMUR_TbuE4YmdjzbIcQ',
            'title': 'Tim and Eric Awesome Show Great Job! - Dr. Steve Brule, For Your Wine',
            'description': 'Dr. Brule reports live from Wine Country with a special report on wines. \r\nWatch Tim and Eric Awesome Show Great Job! episode #20, "Embarrassed" on Adult Swim.\r\n\r\n',
        },
    }]

    @staticmethod
@@ -78,6 +97,7 @@ class AdultSwimIE(InfoExtractor):
            for video in collection.get('videos'):
                if video.get('slug') == slug:
                    return collection, video
        return None, None

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
@@ -88,28 +108,30 @@ class AdultSwimIE(InfoExtractor):
        webpage = self._download_webpage(url, episode_path)

        # Extract the value of `bootstrappedData` from the Javascript in the page.
        bootstrappedDataJS = self._search_regex(r'var bootstrappedData = ({.*});', webpage, episode_path)

        try:
            bootstrappedData = json.loads(bootstrappedDataJS)
        except ValueError as ve:
            errmsg = '%s: Failed to parse JSON ' % episode_path
            raise ExtractorError(errmsg, cause=ve)
        bootstrapped_data = self._parse_json(self._search_regex(
            r'var bootstrappedData = ({.*});', webpage, 'bootstrapped data'), episode_path)

        # Downloading videos from a /videos/playlist/ URL needs to be handled differently.
        # NOTE: We are only downloading one video (the current one) not the playlist
        if is_playlist:
            collections = bootstrappedData['playlists']['collections']
            collections = bootstrapped_data['playlists']['collections']
            collection = self.find_collection_by_linkURL(collections, show_path)
            video_info = self.find_video_info(collection, episode_path)

            show_title = video_info['showTitle']
            segment_ids = [video_info['videoPlaybackID']]
        else:
            collections = bootstrappedData['show']['collections']
            collections = bootstrapped_data['show']['collections']
            collection, video_info = self.find_collection_containing_video(collections, episode_path)

            show = bootstrappedData['show']
            # Video wasn't found in the collections, let's try `slugged_video`.
            if video_info is None:
                if bootstrapped_data.get('slugged_video', {}).get('slug') == episode_path:
                    video_info = bootstrapped_data['slugged_video']
                else:
                    raise ExtractorError('Unable to find video info')

            show = bootstrapped_data['show']
            show_title = show['title']
            segment_ids = [clip['videoPlaybackID'] for clip in video_info['clips']]


youtube_dl/extractor/aftenposten.py
@@ -14,10 +14,10 @@ from ..utils import (


class AftenpostenIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/([^/]+/)*(?P<id>[^/]+)-\d+\.html'
    _VALID_URL = r'https?://(?:www\.)?aftenposten\.no/webtv/(?:#!/)?video/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.aftenposten.no/webtv/serier-og-programmer/sweatshopenglish/TRAILER-SWEATSHOP---I-cant-take-any-more-7800835.html?paging=&section=webtv_serierogprogrammer_sweatshop_sweatshopenglish',
        'url': 'http://www.aftenposten.no/webtv/#!/video/21039/trailer-sweatshop-i-can-t-take-any-more',
        'md5': 'fd828cd29774a729bf4d4425fe192972',
        'info_dict': {
            'id': '21039',
@@ -30,12 +30,7 @@ class AftenpostenIE(InfoExtractor):
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        video_id = self._html_search_regex(
            r'data-xs-id="(\d+)"', webpage, 'video id')
        video_id = self._match_id(url)

        data = self._download_xml(
            'http://frontend.xstream.dk/ap/feed/video/?platform=web&id=%s' % video_id, video_id)

youtube_dl/extractor/airmozilla.py (new file, 74 lines)
@@ -0,0 +1,74 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
)


class AirMozillaIE(InfoExtractor):
    _VALID_URL = r'https?://air\.mozilla\.org/(?P<id>[0-9a-z-]+)/?'
    _TEST = {
        'url': 'https://air.mozilla.org/privacy-lab-a-meetup-for-privacy-minded-people-in-san-francisco/',
        'md5': '2e3e7486ba5d180e829d453875b9b8bf',
        'info_dict': {
            'id': '6x4q2w',
            'ext': 'mp4',
            'title': 'Privacy Lab - a meetup for privacy minded people in San Francisco',
            'thumbnail': 're:https://\w+\.cloudfront\.net/6x4q2w/poster\.jpg\?t=\d+',
            'description': 'Brings together privacy professionals and others interested in privacy at for-profits, non-profits, and NGOs in an effort to contribute to the state of the ecosystem...',
            'timestamp': 1422487800,
            'upload_date': '20150128',
            'location': 'SFO Commons',
            'duration': 3780,
            'view_count': int,
            'categories': ['Main'],
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._html_search_regex(r'//vid.ly/(.*?)/embed', webpage, 'id')

        embed_script = self._download_webpage('https://vid.ly/{0}/embed'.format(video_id), video_id)
        jwconfig = self._search_regex(r'\svar jwconfig = (\{.*?\});\s', embed_script, 'metadata')
        metadata = self._parse_json(jwconfig, video_id)

        formats = [{
            'url': source['file'],
            'ext': source['type'],
            'format_id': self._search_regex(r'&format=(.*)$', source['file'], 'video format'),
            'format': source['label'],
            'height': int(source['label'].rstrip('p')),
        } for source in metadata['playlist'][0]['sources']]
        self._sort_formats(formats)

        view_count = int_or_none(self._html_search_regex(
            r'Views since archived: ([0-9]+)',
            webpage, 'view count', fatal=False))
        timestamp = parse_iso8601(self._html_search_regex(
            r'<time datetime="(.*?)"', webpage, 'timestamp', fatal=False))
        duration = parse_duration(self._search_regex(
            r'Duration:\s*(\d+\s*hours?\s*\d+\s*minutes?)',
            webpage, 'duration', fatal=False))

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'formats': formats,
            'url': self._og_search_url(webpage),
            'display_id': display_id,
            'thumbnail': metadata['playlist'][0].get('image'),
            'description': self._og_search_description(webpage),
            'timestamp': timestamp,
            'location': self._html_search_regex(r'Location: (.*)', webpage, 'location', default=None),
            'duration': duration,
            'view_count': view_count,
            'categories': re.findall(r'<a href=".*?" class="channel">(.*?)</a>', webpage),
        }

youtube_dl/extractor/appletrailers.py
@@ -11,9 +11,12 @@ from ..utils import (


class AppleTrailersIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
    _TEST = {
    _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
    _TESTS = [{
        "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
        'info_dict': {
            'id': 'manofsteel',
        },
        "playlist": [
            {
                "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
@@ -60,7 +63,10 @@ class AppleTrailersIE(InfoExtractor):
                },
            },
        ]
    }
    }, {
        'url': 'http://trailers.apple.com/ca/metropole/autrui/',
        'only_matching': True,
    }]

    _JSON_RE = r'iTunes.playURL\((.*?)\);'


youtube_dl/extractor/ard.py
@@ -50,6 +50,9 @@ class ARDMediathekIE(InfoExtractor):
        if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
            raise ExtractorError('Video %s is no longer available' % video_id, expected=True)

        if 'Diese Sendung ist für Jugendliche unter 12 Jahren nicht geeignet. Der Clip ist deshalb nur von 20 bis 6 Uhr verfügbar.' in webpage:
            raise ExtractorError('This program is only suitable for those aged 12 and older. Video %s is therefore only available between 20 pm and 6 am.' % video_id, expected=True)

        if re.search(r'[\?&]rss($|[=&])', url):
            doc = parse_xml(webpage)
            if doc.tag == 'rss':

youtube_dl/extractor/atresplayer.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import time
import hmac

from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse,
@@ -17,8 +17,9 @@ from ..utils import (
)


class AtresPlayerIE(SubtitlesInfoExtractor):
class AtresPlayerIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html'
    _NETRC_MACHINE = 'atresplayer'
    _TESTS = [
        {
            'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html',
@@ -144,13 +145,12 @@ class AtresPlayerIE(SubtitlesInfoExtractor):
        thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail')

        subtitles = {}
        subtitle = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
        if subtitle:
            subtitles['es'] = subtitle

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return
        subtitle_url = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
        if subtitle_url:
            subtitles['es'] = [{
                'ext': 'srt',
                'url': subtitle_url,
            }]

        return {
            'id': video_id,
@@ -159,5 +159,5 @@ class AtresPlayerIE(SubtitlesInfoExtractor):
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': self.extract_subtitles(video_id, subtitles),
            'subtitles': subtitles,
        }

youtube_dl/extractor/bandcamp.py
@@ -109,7 +109,7 @@ class BandcampIE(InfoExtractor):

class BandcampAlbumIE(InfoExtractor):
    IE_NAME = 'Bandcamp:album'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<title>[^?#]+)|/?(?:$|[?#]))'
    _VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^?#]+)|/?(?:$|[?#]))'

    _TESTS = [{
        'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
@@ -133,31 +133,37 @@ class BandcampAlbumIE(InfoExtractor):
        ],
        'info_dict': {
            'title': 'Jazz Format Mixtape vol.1',
            'id': 'jazz-format-mixtape-vol-1',
            'uploader_id': 'blazo',
        },
        'params': {
            'playlistend': 2
        },
        'skip': 'Bandcamp imposes download limits. See test_playlists:test_bandcamp_album for the playlist test'
        'skip': 'Bandcamp imposes download limits.'
    }, {
        'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
        'info_dict': {
            'title': 'Hierophany of the Open Grave',
            'uploader_id': 'nightbringer',
            'id': 'hierophany-of-the-open-grave',
        },
        'playlist_mincount': 9,
    }, {
        'url': 'http://dotscale.bandcamp.com',
        'info_dict': {
            'title': 'Loom',
            'id': 'dotscale',
            'uploader_id': 'dotscale',
        },
        'playlist_mincount': 7,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('subdomain')
        title = mobj.group('title')
        display_id = title or playlist_id
        webpage = self._download_webpage(url, display_id)
        uploader_id = mobj.group('subdomain')
        album_id = mobj.group('album_id')
        playlist_id = album_id or uploader_id
        webpage = self._download_webpage(url, playlist_id)
        tracks_paths = re.findall(r'<a href="(.*?)" itemprop="url">', webpage)
        if not tracks_paths:
            raise ExtractorError('The page doesn\'t contain any tracks')
@@ -168,8 +174,8 @@ class BandcampAlbumIE(InfoExtractor):
            r'album_title\s*:\s*"(.*?)"', webpage, 'title', fatal=False)
        return {
            '_type': 'playlist',
            'uploader_id': uploader_id,
            'id': playlist_id,
            'display_id': display_id,
            'title': title,
            'entries': entries,
        }

youtube_dl/extractor/bbccouk.py
@@ -2,12 +2,12 @@ from __future__ import unicode_literals

import xml.etree.ElementTree

from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import ExtractorError
from ..compat import compat_HTTPError


class BBCCoUkIE(SubtitlesInfoExtractor):
class BBCCoUkIE(InfoExtractor):
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'
@@ -215,17 +215,32 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
            formats.extend(conn_formats)
        return formats

    def _extract_captions(self, media, programme_id):
    def _get_subtitles(self, media, programme_id):
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
            srt = ''

            def _extract_text(p):
                if p.text is not None:
                    stripped_text = p.text.strip()
                    if stripped_text:
                        return stripped_text
                return ' '.join(span.text.strip() for span in p.findall('{http://www.w3.org/2006/10/ttaf1}span'))
            for pos, p in enumerate(ps):
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'),
                                                          p.text.strip() if p.text is not None else '')
            subtitles[lang] = srt
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'), _extract_text(p))
            subtitles[lang] = [
                {
                    'url': connection.get('href'),
                    'ext': 'ttml',
                },
                {
                    'data': srt,
                    'ext': 'srt',
                },
            ]
        return subtitles

    def _download_media_selector(self, programme_id):
@@ -249,7 +264,7 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
            elif kind == 'video':
                formats.extend(self._extract_video(media, programme_id))
            elif kind == 'captions':
                subtitles = self._extract_captions(media, programme_id)
                subtitles = self.extract_subtitles(media, programme_id)

        return formats, subtitles

@@ -324,10 +339,6 @@ class BBCCoUkIE(SubtitlesInfoExtractor):
        else:
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(programme_id, subtitles)
            return

        self._sort_formats(formats)

        return {
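
The subtitle-related extractor changes in this range (AtresPlayer and BBC above, blip.tv below) all converge on the same layout: per language code, a list of candidate entries carrying either a fetchable 'url' or inline 'data', plus the format in 'ext'. An illustrative value (URL and cue text made up):

    subtitles = {
        'en': [
            {'url': 'http://example.com/captions.ttml', 'ext': 'ttml'},
            {'data': '0\r\n00:00:01 --> 00:00:03\r\nHello\r\n\r\n', 'ext': 'srt'},
        ],
    }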

youtube_dl/extractor/beatportpro.py (new file, 103 lines)
@@ -0,0 +1,103 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none


class BeatportProIE(InfoExtractor):
    _VALID_URL = r'https?://pro\.beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://pro.beatport.com/track/synesthesia-original-mix/5379371',
        'md5': 'b3c34d8639a2f6a7f734382358478887',
        'info_dict': {
            'id': '5379371',
            'display_id': 'synesthesia-original-mix',
            'ext': 'mp4',
            'title': 'Froxic - Synesthesia (Original Mix)',
        },
    }, {
        'url': 'https://pro.beatport.com/track/love-and-war-original-mix/3756896',
        'md5': 'e44c3025dfa38c6577fbaeb43da43514',
        'info_dict': {
            'id': '3756896',
            'display_id': 'love-and-war-original-mix',
            'ext': 'mp3',
            'title': 'Wolfgang Gartner - Love & War (Original Mix)',
        },
    }, {
        'url': 'https://pro.beatport.com/track/birds-original-mix/4991738',
        'md5': 'a1fd8e8046de3950fd039304c186c05f',
        'info_dict': {
            'id': '4991738',
            'display_id': 'birds-original-mix',
            'ext': 'mp4',
            'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        track_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        playables = self._parse_json(
            self._search_regex(
                r'window\.Playables\s*=\s*({.+?});', webpage,
                'playables info', flags=re.DOTALL),
            track_id)

        track = next(t for t in playables['tracks'] if t['id'] == int(track_id))

        title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name']
        if track['mix']:
            title += ' (' + track['mix'] + ')'

        formats = []
        for ext, info in track['preview'].items():
            if not info['url']:
                continue
            fmt = {
                'url': info['url'],
                'ext': ext,
                'format_id': ext,
                'vcodec': 'none',
            }
            if ext == 'mp3':
                fmt['preference'] = 0
                fmt['acodec'] = 'mp3'
                fmt['abr'] = 96
                fmt['asr'] = 44100
            elif ext == 'mp4':
                fmt['preference'] = 1
                fmt['acodec'] = 'aac'
                fmt['abr'] = 96
                fmt['asr'] = 44100
            formats.append(fmt)
        self._sort_formats(formats)

        images = []
        for name, info in track['images'].items():
            image_url = info.get('url')
            if name == 'dynamic' or not image_url:
                continue
            image = {
                'id': name,
                'url': image_url,
                'height': int_or_none(info.get('height')),
                'width': int_or_none(info.get('width')),
            }
            images.append(image)

        return {
            'id': compat_str(track.get('id')) or track_id,
            'display_id': track.get('slug') or display_id,
            'title': title,
            'formats': formats,
            'thumbnails': images,
        }
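
A note on the 'preference' values above: youtube-dl sorts formats worst-to-best, so the mp4/aac preview (preference 1) ends up ranked above the mp3 one (preference 0) and is picked by default. A stripped-down illustration:

    # After _sort_formats(), the last entry is treated as the best format.
    formats = [
        {'format_id': 'mp3', 'ext': 'mp3', 'acodec': 'mp3', 'abr': 96, 'preference': 0},
        {'format_id': 'mp4', 'ext': 'mp4', 'acodec': 'aac', 'abr': 96, 'preference': 1},
    ]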
youtube_dl/extractor/blinkx.py
@@ -1,40 +1,35 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import remove_start
from ..utils import (
    remove_start,
    int_or_none,
)


class BlinkxIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    _VALID_URL = r'(?:https?://(?:www\.)blinkx\.com/#?ce/|blinkx:)(?P<id>[^?]+)'
    IE_NAME = 'blinkx'

    _TEST = {
        'url': 'http://www.blinkx.com/ce/8aQUy7GVFYgFzpKhT0oqsilwOGFRVXk3R1ZGWWdGenBLaFQwb3FzaWx3OGFRVXk3R1ZGWWdGenB',
        'md5': '2e9a07364af40163a908edbf10bb2492',
        'url': 'http://www.blinkx.com/ce/Da0Gw3xc5ucpNduzLuDDlv4WC9PuI4fDi1-t6Y3LyfdY2SZS5Urbvn-UPJvrvbo8LTKTc67Wu2rPKSQDJyZeeORCR8bYkhs8lI7eqddznH2ofh5WEEdjYXnoRtj7ByQwt7atMErmXIeYKPsSDuMAAqJDlQZ-3Ff4HJVeH_s3Gh8oQ',
        'md5': '337cf7a344663ec79bf93a526a2e06c7',
        'info_dict': {
            'id': '8aQUy7GV',
            'id': 'Da0Gw3xc',
            'ext': 'mp4',
            'title': 'Police Car Rolls Away',
            'uploader': 'stupidvideos.com',
            'upload_date': '20131215',
            'timestamp': 1387068000,
            'description': 'A police car gently rolls away from a fight. Maybe it felt weird being around a confrontation and just had to get out of there!',
            'duration': 14.886,
            'thumbnails': [{
                'width': 100,
                'height': 76,
                'resolution': '100x76',
                'url': 'http://cdn.blinkx.com/stream/b/41/StupidVideos/20131215/1873969261/1873969261_tn_0.jpg',
            }],
            'title': 'No Daily Show for John Oliver; HBO Show Renewed - IGN News',
            'uploader': 'IGN News',
            'upload_date': '20150217',
            'timestamp': 1424215740,
            'description': 'HBO has renewed Last Week Tonight With John Oliver for two more seasons.',
            'duration': 47.743333,
        },
    }

    def _real_extract(self, rl):
        m = re.match(self._VALID_URL, rl)
        video_id = m.group('id')
    def _real_extract(self, url):
        video_id = self._match_id(url)
        display_id = video_id[:8]

        api_url = ('https://apib4.blinkx.com/api.php?action=play_video&' +
@@ -60,18 +55,20 @@ class BlinkxIE(InfoExtractor):
            elif m['type'] in ('flv', 'mp4'):
                vcodec = remove_start(m['vcodec'], 'ff')
                acodec = remove_start(m['acodec'], 'ff')
                tbr = (int(m['vbr']) + int(m['abr'])) // 1000
                vbr = int_or_none(m.get('vbr') or m.get('vbitrate'), 1000)
                abr = int_or_none(m.get('abr') or m.get('abitrate'), 1000)
                tbr = vbr + abr if vbr and abr else None
                format_id = '%s-%sk-%s' % (vcodec, tbr, m['w'])
                formats.append({
                    'format_id': format_id,
                    'url': m['link'],
                    'vcodec': vcodec,
                    'acodec': acodec,
                    'abr': int(m['abr']) // 1000,
                    'vbr': int(m['vbr']) // 1000,
                    'abr': abr,
                    'vbr': vbr,
                    'tbr': tbr,
                    'width': int(m['w']),
                    'height': int(m['h']),
                    'width': int_or_none(m.get('w')),
                    'height': int_or_none(m.get('h')),
                })

        self._sort_formats(formats)

youtube_dl/extractor/bliptv.py
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor

from ..compat import (
    compat_str,
@@ -18,7 +17,7 @@ from ..utils import (
)


class BlipTVIE(SubtitlesInfoExtractor):
class BlipTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:\w+\.)?blip\.tv/(?:(?:.+-|rss/flash/)(?P<id>\d+)|((?:play/|api\.swf#)(?P<lookup_id>[\da-zA-Z+_]+)))'

    _TESTS = [
@@ -143,7 +142,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
        categories = [category.text for category in item.findall('category')]

        formats = []
        subtitles = {}
        subtitles_urls = {}

        media_group = item.find(media('group'))
        for media_content in media_group.findall(media('content')):
@@ -161,7 +160,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
                }
                lang = role.rpartition('-')[-1].strip().lower()
                langcode = LANGS.get(lang, lang)
                subtitles[langcode] = url
                subtitles_urls[langcode] = url
            elif media_type.startswith('video/'):
                formats.append({
                    'url': real_url,
@@ -175,11 +174,7 @@ class BlipTVIE(SubtitlesInfoExtractor):
                })
        self._sort_formats(formats)

        # subtitles
        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return
        subtitles = self.extract_subtitles(video_id, subtitles_urls)

        return {
            'id': video_id,
@@ -192,15 +187,22 @@ class BlipTVIE(SubtitlesInfoExtractor):
            'thumbnail': thumbnail,
            'categories': categories,
            'formats': formats,
            'subtitles': video_subtitles,
            'subtitles': subtitles,
        }

    def _download_subtitle_url(self, sub_lang, url):
        # For some weird reason, blip.tv serves a video instead of subtitles
        # when we request with a common UA
        req = compat_urllib_request.Request(url)
        req.add_header('User-Agent', 'youtube-dl')
        return self._download_webpage(req, None, note=False)
    def _get_subtitles(self, video_id, subtitles_urls):
        subtitles = {}
        for lang, url in subtitles_urls.items():
            # For some weird reason, blip.tv serves a video instead of subtitles
            # when we request with a common UA
            req = compat_urllib_request.Request(url)
            req.add_header('User-Agent', 'youtube-dl')
            subtitles[lang] = [{
                # The extension is 'srt' but it's actually an 'ass' file
                'ext': 'ass',
                'data': self._download_webpage(req, None, note=False),
            }]
        return subtitles


class BlipTVUserIE(InfoExtractor):
|
@@ -6,7 +6,7 @@ from .common import InfoExtractor
|
||||
|
||||
|
||||
class BloombergIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?)\.html'
|
||||
_VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<id>.+?)\.html'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
|
||||
@@ -20,9 +20,9 @@ class BloombergIE(InfoExtractor):
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
name = mobj.group('name')
|
||||
name = self._match_id(url)
|
||||
webpage = self._download_webpage(url, name)
|
||||
|
||||
f4m_url = self._search_regex(
|
||||
r'<source src="(https?://[^"]+\.f4m.*?)"', webpage,
|
||||
'f4m url')
|
||||
|
@@ -41,7 +41,7 @@ class BreakIE(InfoExtractor):
|
||||
'tbr': media['bitRate'],
|
||||
'width': media['width'],
|
||||
'height': media['height'],
|
||||
} for media in info['media']]
|
||||
} for media in info['media'] if media.get('mediaPurpose') == 'play']
|
||||
|
||||
if not formats:
|
||||
formats.append({
|
||||
|
@@ -95,6 +95,7 @@ class BrightcoveIE(InfoExtractor):
|
||||
'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
|
||||
'info_dict': {
|
||||
'title': 'Sealife',
|
||||
'id': '3550319591001',
|
||||
},
|
||||
'playlist_mincount': 7,
|
||||
},
|
||||
@@ -247,7 +248,7 @@ class BrightcoveIE(InfoExtractor):
|
||||
playlist_info = json_data['videoList']
|
||||
videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]
|
||||
|
||||
return self.playlist_result(videos, playlist_id=playlist_info['id'],
|
||||
return self.playlist_result(videos, playlist_id='%s' % playlist_info['id'],
|
||||
playlist_title=playlist_info['mediaCollectionDTO']['displayName'])
|
||||
|
||||
def _extract_video_info(self, video_info):
|
||||
|
@@ -33,6 +33,7 @@ class BuzzFeedIE(InfoExtractor):
|
||||
'skip_download': True, # Got enough YouTube download tests
|
||||
},
|
||||
'info_dict': {
|
||||
'id': 'look-at-this-cute-dog-omg',
|
||||
'description': 're:Munchkin the Teddy Bear is back ?!',
|
||||
'title': 'You Need To Stop What You\'re Doing And Watching This Dog Walk On A Treadmill',
|
||||
},
|
||||
@@ -42,8 +43,8 @@ class BuzzFeedIE(InfoExtractor):
|
||||
'ext': 'mp4',
|
||||
'upload_date': '20141124',
|
||||
'uploader_id': 'CindysMunchkin',
|
||||
'description': 're:© 2014 Munchkin the Shih Tzu',
|
||||
'uploader': 'Munchkin the Shih Tzu',
|
||||
'description': 're:© 2014 Munchkin the',
|
||||
'uploader': 're:^Munchkin the',
|
||||
'title': 're:Munchkin the Teddy Bear gets her exercise',
|
||||
},
|
||||
}]
|
||||
|
@@ -1,7 +1,5 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
@@ -39,8 +37,7 @@ class CBSIE(InfoExtractor):
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
real_id = self._search_regex(
|
||||
r"video\.settings\.pid\s*=\s*'([^']+)';",
|
||||
|
30
youtube_dl/extractor/cbssports.py
Normal file
30
youtube_dl/extractor/cbssports.py
Normal file
@@ -0,0 +1,30 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class CBSSportsIE(InfoExtractor):
|
||||
_VALID_URL = r'http://www\.cbssports\.com/video/player/(?P<section>[^/]+)/(?P<id>[^/]+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.cbssports.com/video/player/tennis/318462531970/0/us-open-flashbacks-1990s',
|
||||
'info_dict': {
|
||||
'id': '_d5_GbO8p1sT',
|
||||
'ext': 'flv',
|
||||
'title': 'US Open flashbacks: 1990s',
|
||||
'description': 'Bill Macatee relives the best moments in US Open history from the 1990s.',
|
||||
},
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
section = mobj.group('section')
|
||||
video_id = mobj.group('id')
|
||||
all_videos = self._download_json(
|
||||
'http://www.cbssports.com/data/video/player/getVideos/%s?as=json' % section,
|
||||
video_id)
|
||||
# The json file contains the info of all the videos in the section
|
||||
video_info = next(v for v in all_videos if v['pcid'] == video_id)
|
||||
return self.url_result('theplatform:%s' % video_info['pid'], 'ThePlatform')
|
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .subtitles import SubtitlesInfoExtractor
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_request,
|
||||
compat_urllib_parse,
|
||||
@@ -15,7 +15,7 @@ from ..utils import (
|
||||
)
|
||||
|
||||
|
||||
class CeskaTelevizeIE(SubtitlesInfoExtractor):
|
||||
class CeskaTelevizeIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'
|
||||
|
||||
_TESTS = [
|
||||
@@ -107,13 +107,7 @@ class CeskaTelevizeIE(SubtitlesInfoExtractor):
|
||||
subtitles = {}
|
||||
subs = item.get('subtitles')
|
||||
if subs:
|
||||
subtitles['cs'] = subs[0]['url']
|
||||
|
||||
if self._downloader.params.get('listsubtitles', False):
|
||||
self._list_available_subtitles(video_id, subtitles)
|
||||
return
|
||||
|
||||
subtitles = self._fix_subtitles(self.extract_subtitles(video_id, subtitles))
|
||||
subtitles = self.extract_subtitles(episode_id, subs)
|
||||
|
||||
return {
|
||||
'id': episode_id,
|
||||
@@ -125,11 +119,20 @@ class CeskaTelevizeIE(SubtitlesInfoExtractor):
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
def _get_subtitles(self, episode_id, subs):
|
||||
original_subtitles = self._download_webpage(
|
||||
subs[0]['url'], episode_id, 'Downloading subtitles')
|
||||
srt_subs = self._fix_subtitles(original_subtitles)
|
||||
return {
|
||||
'cs': [{
|
||||
'ext': 'srt',
|
||||
'data': srt_subs,
|
||||
}]
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def _fix_subtitles(subtitles):
|
||||
""" Convert millisecond-based subtitles to SRT """
|
||||
if subtitles is None:
|
||||
return subtitles # subtitles not requested
|
||||
|
||||
def _msectotimecode(msec):
|
||||
""" Helper utility to convert milliseconds to timecode """
|
||||
@@ -149,7 +152,4 @@ class CeskaTelevizeIE(SubtitlesInfoExtractor):
|
||||
else:
|
||||
yield line
|
||||
|
||||
fixed_subtitles = {}
|
||||
for k, v in subtitles.items():
|
||||
fixed_subtitles[k] = "\r\n".join(_fix_subtitle(v))
|
||||
return fixed_subtitles
|
||||
return "\r\n".join(_fix_subtitle(subtitles))
|
||||
|
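Note: the CeskaTelevize hunk above leans on _msectotimecode to build SRT timecodes, but its body is not shown in this diff. The following is an assumed implementation, consistent only with its docstring, plus a worked example:

    def _msectotimecode(msec):
        # Assumption: SRT-style 'HH:MM:SS,mmm' output (not confirmed by the diff).
        secs, msec = divmod(msec, 1000)
        mins, secs = divmod(secs, 60)
        hours, mins = divmod(mins, 60)
        return '%02d:%02d:%02d,%03d' % (hours, mins, secs, msec)

    print(_msectotimecode(8765432))  # -> '02:26:05,432'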
youtube_dl/extractor/chirbit.py (new file, 84 lines)
@@ -0,0 +1,84 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)


class ChirbitIE(InfoExtractor):
IE_NAME = 'chirbit'
_VALID_URL = r'https?://(?:www\.)?chirb\.it/(?:(?:wp|pl)/|fb_chirbit_player\.swf\?key=)?(?P<id>[\da-zA-Z]+)'
_TESTS = [{
'url': 'http://chirb.it/PrIPv5',
'md5': '9847b0dad6ac3e074568bf2cfb197de8',
'info_dict': {
'id': 'PrIPv5',
'ext': 'mp3',
'title': 'Фасадстрой',
'duration': 52,
'view_count': int,
'comment_count': int,
}
}, {
'url': 'https://chirb.it/fb_chirbit_player.swf?key=PrIPv5',
'only_matching': True,
}]

def _real_extract(self, url):
audio_id = self._match_id(url)

webpage = self._download_webpage(
'http://chirb.it/%s' % audio_id, audio_id)

audio_url = self._search_regex(
r'"setFile"\s*,\s*"([^"]+)"', webpage, 'audio url')

title = self._search_regex(
r'itemprop="name">([^<]+)', webpage, 'title')
duration = parse_duration(self._html_search_meta(
'duration', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'itemprop="playCount"\s*>(\d+)', webpage,
'listen count', fatal=False))
comment_count = int_or_none(self._search_regex(
r'>(\d+) Comments?:', webpage,
'comment count', fatal=False))

return {
'id': audio_id,
'url': audio_url,
'title': title,
'duration': duration,
'view_count': view_count,
'comment_count': comment_count,
}


class ChirbitProfileIE(InfoExtractor):
IE_NAME = 'chirbit:profile'
_VALID_URL = r'https?://(?:www\.)?chirbit.com/(?:rss/)?(?P<id>[^/]+)'
_TEST = {
'url': 'http://chirbit.com/ScarletBeauty',
'info_dict': {
'id': 'ScarletBeauty',
'title': 'Chirbits by ScarletBeauty',
},
'playlist_mincount': 3,
}

def _real_extract(self, url):
profile_id = self._match_id(url)

rss = self._download_xml(
'http://chirbit.com/rss/%s' % profile_id, profile_id)

entries = [
self.url_result(audio_url.text, 'Chirbit')
for audio_url in rss.findall('./channel/item/link')]

title = rss.find('./channel/title').text

return self.playlist_result(entries, profile_id, title)

@@ -105,6 +105,7 @@ class CloudyIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)

file_key = self._search_regex(
r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
[r'key\s*:\s*"([^"]+)"', r'filekey\s*=\s*"([^"]+)"'],
webpage, 'file_key')

return self._extract_video(video_host, video_id, file_key)

@@ -250,6 +250,8 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
})
self._sort_formats(formats)

subtitles = self._extract_subtitles(cdoc, guid)

virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
entries.append({
'id': guid,
@@ -260,6 +262,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
'duration': duration,
'thumbnail': thumbnail,
'description': description,
'subtitles': subtitles,
})

return {

@@ -27,7 +27,6 @@ from ..utils import (
compiled_regex_type,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
RegexNotFoundError,
sanitize_filename,
@@ -151,8 +150,14 @@ class InfoExtractor(object):
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location where the video was filmed.
subtitles: The subtitle file contents as a dictionary in the format
{language: subtitles}.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A url pointing to the subtitles file
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
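For reference, the new 'subtitles' shape documented above looks like this in practice (all values below are illustrative, not taken from a real extractor):

    subtitles = {
        'en': [
            # lowest preference first; each entry carries 'ext' plus 'data' or 'url'
            {'ext': 'srt', 'data': '1\n00:00:00,000 --> 00:00:01,000\nHi\n'},
            {'ext': 'ass', 'url': 'http://example.com/subs.ass'},
        ],
    }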
@@ -392,6 +397,16 @@ class InfoExtractor(object):
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)

return content

@@ -752,6 +767,10 @@ class InfoExtractor(object):
formats)

def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
@@ -799,8 +818,8 @@ class InfoExtractor(object):
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
for i, media_el in enumerate(media_nodes):
if manifest_version == '2.0':
manifest_url = ('/'.join(manifest_url.split('/')[:-1]) + '/'
+ (media_el.attrib.get('href') or media_el.attrib.get('url')))
manifest_url = ('/'.join(manifest_url.split('/')[:-1]) + '/' +
(media_el.attrib.get('href') or media_el.attrib.get('url')))
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
'format_id': '-'.join(filter(None, [f4m_id, 'f4m-%d' % (i if tbr is None else tbr)])),
@@ -820,11 +839,11 @@ class InfoExtractor(object):
m3u8_id=None):

formats = [{
'format_id': '-'.join(filter(None, [m3u8_id, 'm3u8-meta'])),
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': -1,
'preference': preference - 1 if preference else -1,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}]
@@ -839,6 +858,7 @@ class InfoExtractor(object):
note='Downloading m3u8 information',
errnote='Failed to download m3u8 information')
last_info = None
last_media = None
kv_rex = re.compile(
r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
for line in m3u8_doc.splitlines():
@@ -849,6 +869,13 @@ class InfoExtractor(object):
if v.startswith('"'):
v = v[1:-1]
last_info[m.group('key')] = v
elif line.startswith('#EXT-X-MEDIA:'):
last_media = {}
for m in kv_rex.finditer(line):
v = m.group('val')
if v.startswith('"'):
v = v[1:-1]
last_media[m.group('key')] = v
elif line.startswith('#') or not line.strip():
continue
else:
@@ -856,8 +883,13 @@ class InfoExtractor(object):
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
last_media_name = last_media.get('NAME') if last_media else None
format_id.append(last_media_name if last_media_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(filter(None, [m3u8_id, 'm3u8-%d' % (tbr if tbr else len(formats))])),
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
@@ -877,6 +909,9 @@ class InfoExtractor(object):
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
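To illustrate the new #EXT-X-MEDIA handling, feeding the parser a playlist like this (made-up input) with m3u8_id='hls' would label the variant with the media NAME rather than the old bandwidth-derived id:

    m3u8_doc = '\n'.join([
        '#EXTM3U',
        '#EXT-X-MEDIA:TYPE=VIDEO,GROUP-ID="med",NAME="720p"',
        '#EXT-X-STREAM-INF:BANDWIDTH=1280000,RESOLUTION=1280x720',
        'http://example.com/720p.m3u8',
    ])
    # Expected result per the code above: format_id == 'hls-720p', tbr == 1280,
    # width/height == 1280/720, and f['m3u8_media'] carrying the parsed
    # {'TYPE': 'VIDEO', 'GROUP-ID': 'med', 'NAME': '720p'} attributes.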
@@ -895,39 +930,57 @@ class InfoExtractor(object):

formats = []
rtmp_count = 0
for video in smil.findall('./body/switch/video'):
src = video.get('src')
if not src:
continue
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
formats.extend(self._extract_m3u8_formats(src, video_id, ext))
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
if smil.findall('./body/seq/video'):
video = smil.findall('./body/seq/video')[0]
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)
else:
for video in smil.findall('./body/switch/video'):
fmts, rtmp_count = self._parse_smil_video(video, video_id, base, rtmp_count)
formats.extend(fmts)

self._sort_formats(formats)

return formats

def _parse_smil_video(self, video, video_id, base, rtmp_count):
src = video.get('src')
if not src:
return ([], rtmp_count)
bitrate = int_or_none(video.get('system-bitrate') or video.get('systemBitrate'), 1000)
width = int_or_none(video.get('width'))
height = int_or_none(video.get('height'))
proto = video.get('proto')
if not proto:
if base:
if base.startswith('rtmp'):
proto = 'rtmp'
elif base.startswith('http'):
proto = 'http'
ext = video.get('ext')
if proto == 'm3u8':
return (self._extract_m3u8_formats(src, video_id, ext), rtmp_count)
elif proto == 'rtmp':
rtmp_count += 1
streamer = video.get('streamer') or base
return ([{
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)
elif proto.startswith('http'):
return ([{
'url': base + src,
'ext': ext or 'flv',
'tbr': bitrate,
'width': width,
'height': height,
}], rtmp_count)

def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
@@ -991,6 +1044,27 @@ class InfoExtractor(object):
any_restricted = any_restricted or is_restricted
return not any_restricted

def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}

def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")

def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}

def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")

def _subtitles_timecode(self, seconds):
return '%02d:%02d:%02d.%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)

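The extract_*/_get_* pairs added here are a template-method split: the public method checks the user's options and returns {} unless subtitles were actually requested, while subclasses only implement _get_subtitles or _get_automatic_captions. A minimal hypothetical subclass (ExampleIE and its URLs are invented for illustration):

    class ExampleIE(InfoExtractor):
        def _get_subtitles(self, video_id):
            # Only reached when --write-sub or --list-subs is set;
            # extract_subtitles() short-circuits to {} otherwise.
            return {'en': [{'ext': 'vtt', 'url': 'http://example.com/%s.vtt' % video_id}]}

        def _real_extract(self, url):
            video_id = self._match_id(url)
            return {
                'id': video_id,
                'title': 'Example',
                'url': 'http://example.com/%s.mp4' % video_id,
                'subtitles': self.extract_subtitles(video_id),
            }

For the helper just above, _subtitles_timecode(3661.5) yields '01:01:01.500'.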
class SearchInfoExtractor(InfoExtractor):
"""

@@ -9,7 +9,7 @@ import xml.etree.ElementTree

from hashlib import sha1
from math import pow, sqrt, floor
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
@@ -25,11 +25,11 @@ from ..aes import (
aes_cbc_decrypt,
inc,
)
from .common import InfoExtractor


class CrunchyrollIE(SubtitlesInfoExtractor):
class CrunchyrollIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.(?:com|fr)/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_NETRC_MACHINE = 'crunchyroll'
_TESTS = [{
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'info_dict': {
@@ -187,6 +187,38 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

return output

def _get_subtitles(self, video_id, webpage):
subtitles = {}
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
sub_page = self._download_webpage(
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
video_id, note='Downloading subtitles for ' + sub_name)
id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
if not id or not iv or not data:
continue
id = int(id)
iv = base64.b64decode(iv)
data = base64.b64decode(data)

subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
sub_root = xml.etree.ElementTree.fromstring(subtitle)
subtitles[lang_code] = [
{
'ext': 'srt',
'data': self._convert_subtitles_to_srt(sub_root),
},
{
'ext': 'ass',
'data': self._convert_subtitles_to_ass(sub_root),
},
]
return subtitles

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('video_id')
@@ -249,34 +281,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
'format_id': video_format,
})

subtitles = {}
sub_format = self._downloader.params.get('subtitlesformat', 'srt')
for sub_id, sub_name in re.findall(r'\?ssid=([0-9]+)" title="([^"]+)', webpage):
sub_page = self._download_webpage(
'http://www.crunchyroll.com/xml/?req=RpcApiSubtitle_GetXml&subtitle_script_id=' + sub_id,
video_id, note='Downloading subtitles for ' + sub_name)
id = self._search_regex(r'id=\'([0-9]+)', sub_page, 'subtitle_id', fatal=False)
iv = self._search_regex(r'<iv>([^<]+)', sub_page, 'subtitle_iv', fatal=False)
data = self._search_regex(r'<data>([^<]+)', sub_page, 'subtitle_data', fatal=False)
if not id or not iv or not data:
continue
id = int(id)
iv = base64.b64decode(iv)
data = base64.b64decode(data)

subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
sub_root = xml.etree.ElementTree.fromstring(subtitle)
if sub_format == 'ass':
subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
else:
subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)

if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return
subtitles = self.extract_subtitles(video_id, webpage)

return {
'id': video_id,

@@ -6,7 +6,6 @@ import json
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor

from ..compat import (
compat_str,
@@ -31,7 +30,7 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
return request


class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
class DailymotionIE(DailymotionBaseInfoExtractor):
"""Information Extractor for Dailymotion"""

_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
@@ -47,13 +46,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):

_TESTS = [
{
'url': 'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
'md5': '392c4b85a60a90dc4792da41ce3144eb',
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
'md5': '2137c41a8e78554bb09225b8eb322406',
'info_dict': {
'id': 'x33vw9',
'id': 'x2iuewm',
'ext': 'mp4',
'uploader': 'Amphora Alex and Van .',
'title': 'Tutoriel de Youtubeur"DL DES VIDEO DE YOUTUBE"',
'uploader': 'IGN',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
}
},
# Vevo video
@@ -143,9 +142,6 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):

# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, webpage)
return

view_count = str_to_int(self._search_regex(
r'video_views_count[^>]+>\s+([\d\.,]+)',
@@ -169,7 +165,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
'view_count': view_count,
}

def _get_available_subtitles(self, video_id, webpage):
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
@@ -179,7 +175,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], l['url']) for l in info['list'])
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
@@ -194,6 +190,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]

@@ -25,8 +25,9 @@ class DefenseGouvFrIE(InfoExtractor):
r"flashvars.pvg_id=\"(\d+)\";",
webpage, 'ID')

json_url = ('http://static.videos.gouv.fr/brightcovehub/export/json/'
+ video_id)
json_url = (
'http://static.videos.gouv.fr/brightcovehub/export/json/%s' %
video_id)
info = self._download_json(json_url, title, 'Downloading JSON config')
video_url = info['renditions'][0]['url']

@@ -1,11 +1,10 @@
from __future__ import unicode_literals

from .subtitles import SubtitlesInfoExtractor
from .common import ExtractorError
from .common import InfoExtractor, ExtractorError
from ..utils import parse_iso8601


class DRTVIE(SubtitlesInfoExtractor):
class DRTVIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dr\.dk/tv/se/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)'

_TEST = {
@@ -76,7 +75,7 @@ class DRTVIE(SubtitlesInfoExtractor):
}
for subs in subtitles_list:
lang = subs['Language']
subtitles[LANGS.get(lang, lang)] = subs['Uri']
subtitles[LANGS.get(lang, lang)] = [{'url': subs['Uri'], 'ext': 'vtt'}]

if not formats and restricted_to_denmark:
raise ExtractorError(
@@ -84,10 +83,6 @@ class DRTVIE(SubtitlesInfoExtractor):

self._sort_formats(formats)

if self._downloader.params.get('listsubtitles', False):
self._list_available_subtitles(video_id, subtitles)
return

return {
'id': video_id,
'title': title,
@@ -96,5 +91,5 @@ class DRTVIE(SubtitlesInfoExtractor):
'timestamp': timestamp,
'duration': duration,
'formats': formats,
'subtitles': self.extract_subtitles(video_id, subtitles),
'subtitles': subtitles,
}

youtube_dl/extractor/eagleplatform.py (new file, 98 lines)
@@ -0,0 +1,98 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)


class EaglePlatformIE(InfoExtractor):
_VALID_URL = r'''(?x)
(?:
eagleplatform:(?P<custom_host>[^/]+):|
https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
)
(?P<id>\d+)
'''
_TESTS = [{
# http://lenta.ru/news/2015/03/06/navalny/
'url': 'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304&player_template_id=5201',
'md5': '0b7994faa2bd5c0f69a3db6db28d078d',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
}, {
# http://muz-tv.ru/play/7129/
# http://media.clipyou.ru/index/player?record_id=12820&width=730&height=415&autoplay=true
'url': 'eagleplatform:media.clipyou.ru:12820',
'md5': '6c2ebeab03b739597ce8d86339d5a905',
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
}]

def _handle_error(self, response):
status = int_or_none(response.get('status', 200))
if status != 200:
raise ExtractorError(' '.join(response['errors']), expected=True)

def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata'):
response = super(EaglePlatformIE, self)._download_json(url_or_request, video_id, note)
self._handle_error(response)
return response

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host, video_id = mobj.group('custom_host') or mobj.group('host'), mobj.group('id')

player_data = self._download_json(
'http://%s/api/player_data?id=%s' % (host, video_id), video_id)

media = player_data['data']['playlist']['viewports'][0]['medialist'][0]

title = media['title']
description = media.get('description')
thumbnail = media.get('snapshot')
duration = int_or_none(media.get('duration'))
view_count = int_or_none(media.get('views'))

age_restriction = media.get('age_restriction')
age_limit = None
if age_restriction:
age_limit = 0 if age_restriction == 'allow_all' else 18

m3u8_data = self._download_json(
media['sources']['secure_m3u8']['auto'],
video_id, 'Downloading m3u8 JSON')

formats = self._extract_m3u8_formats(
m3u8_data['data'][0], video_id,
'mp4', entry_protocol='m3u8_native')
self._sort_formats(formats)

return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'view_count': view_count,
'age_limit': age_limit,
'formats': formats,
}
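Besides the embed URL, the _VALID_URL above also accepts an internal eagleplatform:<host>:<id> form, which the ClipYou and Gazeta changes later in this diff use to hand custom-host embeds to this extractor. A quick sketch of how both forms resolve (URLs taken from the tests above):

    import re

    _VALID_URL = r'''(?x)
        (?:
            eagleplatform:(?P<custom_host>[^/]+):|
            https?://(?P<host>.+?\.media\.eagleplatform\.com)/index/player\?.*\brecord_id=
        )
        (?P<id>\d+)
        '''

    for url in ('eagleplatform:media.clipyou.ru:12820',
                'http://lentaru.media.eagleplatform.com/index/player?player=new&record_id=227304'):
        m = re.match(_VALID_URL, url)
        print(m.group('custom_host') or m.group('host'), m.group('id'))
    # -> media.clipyou.ru 12820
    # -> lentaru.media.eagleplatform.com 227304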
@@ -3,7 +3,6 @@ from __future__ import unicode_literals

import json
import random
import re

from .common import InfoExtractor
from ..compat import (
@@ -103,20 +102,23 @@ class EightTracksIE(InfoExtractor):
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
playlist_id = self._match_id(url)

webpage = self._download_webpage(url, playlist_id)

json_like = self._search_regex(
r"(?s)PAGE.mix = (.*?);\n", webpage, 'trax information')
data = json.loads(json_like)
data = self._parse_json(
self._search_regex(
r"(?s)PAGE\.mix\s*=\s*({.+?});\n", webpage, 'trax information'),
playlist_id)

session = str(random.randint(0, 1000000000))
mix_id = data['id']
track_count = data['tracks_count']
duration = data['duration']
avg_song_duration = float(duration) / track_count
# duration is sometimes negative, use predefined avg duration
if avg_song_duration <= 0:
avg_song_duration = 300
first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
next_url = first_url
entries = []

youtube_dl/extractor/embedly.py (new file, 16 lines)
@@ -0,0 +1,16 @@
# encoding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote


class EmbedlyIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?url=(?P<id>[^#&]+)'
_TESTS = [{
'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1',
'only_matching': True,
}]

def _real_extract(self, url):
return self.url_result(compat_urllib_parse_unquote(self._match_id(url)))

@@ -35,10 +35,7 @@ class EpornerIE(InfoExtractor):
title = self._html_search_regex(
r'<title>(.*?) - EPORNER', webpage, 'title')

redirect_code = self._html_search_regex(
r'<script type="text/javascript" src="/config5/%s/([a-f\d]+)/">' % video_id,
webpage, 'redirect_code')
redirect_url = 'http://www.eporner.com/config5/%s/%s' % (video_id, redirect_code)
redirect_url = 'http://www.eporner.com/config5/%s' % video_id
player_code = self._download_webpage(
redirect_url, display_id, note='Downloading player config')

@@ -69,5 +66,5 @@ class EpornerIE(InfoExtractor):
'duration': duration,
'view_count': view_count,
'formats': formats,
'age_limit': self._rta_search(webpage),
'age_limit': 18,
}

@@ -3,15 +3,18 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
js_to_json,
parse_duration,
)


class EscapistIE(InfoExtractor):
_VALID_URL = r'https?://?(www\.)?escapistmagazine\.com/videos/view/[^/?#]+/(?P<id>[0-9]+)-[^/?#]*(?:$|[?#])'
_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
_TEST = {
'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
@@ -22,46 +25,68 @@ class EscapistIE(InfoExtractor):
'uploader_id': 'the-escapist-presents',
'uploader': 'The Escapist Presents',
'title': "Breaking Down Baldur's Gate",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 264,
}
}

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
webpage_req = compat_urllib_request.Request(url)
webpage_req.add_header('User-Agent', self._USER_AGENT)
webpage = self._download_webpage(webpage_req, video_id)

uploader_id = self._html_search_regex(
r"<h1 class='headline'><a href='/videos/view/(.*?)'",
r"<h1\s+class='headline'>\s*<a\s+href='/videos/view/(.*?)'",
webpage, 'uploader ID', fatal=False)
uploader = self._html_search_regex(
r"<h1 class='headline'>(.*?)</a>",
r"<h1\s+class='headline'>(.*?)</a>",
webpage, 'uploader', fatal=False)
description = self._html_search_meta('description', webpage)
duration = parse_duration(self._html_search_meta('duration', webpage))

raw_title = self._html_search_meta('title', webpage, fatal=True)
title = raw_title.partition(' : ')[2]

player_url = self._og_search_video_url(webpage, name='player URL')
config_url = compat_urllib_parse.unquote(self._search_regex(
r'config=(.*)$', player_url, 'config URL'))
config_url = compat_urllib_parse.unquote(self._html_search_regex(
r'''(?x)
(?:
<param\s+name="flashvars".*?\s+value="config=|
flashvars="config=
)
(https?://[^"&]+)
''',
webpage, 'config URL'))

formats = []
ad_formats = []

def _add_format(name, cfgurl, quality):
def _add_format(name, cfg_url, quality):
cfg_req = compat_urllib_request.Request(cfg_url)
cfg_req.add_header('User-Agent', self._USER_AGENT)
config = self._download_json(
cfgurl, video_id,
cfg_req, video_id,
'Downloading ' + name + ' configuration',
'Unable to download ' + name + ' configuration',
transform_source=js_to_json)

playlist = config['playlist']
video_url = next(
p['url'] for p in playlist
if p.get('eventCategory') == 'Video')
formats.append({
'url': video_url,
'format_id': name,
'quality': quality,
})
for p in playlist:
if p.get('eventCategory') == 'Video':
ar = formats
elif p.get('eventCategory') == 'Video Postroll':
ar = ad_formats
else:
continue

ar.append({
'url': p['url'],
'format_id': name,
'quality': quality,
'http_headers': {
'User-Agent': self._USER_AGENT,
},
})

_add_format('normal', config_url, quality=0)
hq_url = (config_url +
@@ -70,10 +95,12 @@ class EscapistIE(InfoExtractor):
_add_format('hq', hq_url, quality=1)
except ExtractorError:
pass  # That's fine, we'll just use normal quality

self._sort_formats(formats)

return {
if '/escapist/sales-marketing/' in formats[-1]['url']:
raise ExtractorError('This IP address has been blocked by The Escapist', expected=True)

res = {
'id': video_id,
'formats': formats,
'uploader': uploader,
@@ -81,5 +108,21 @@ class EscapistIE(InfoExtractor):
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': description,
'player_url': player_url,
'duration': duration,
}

if self._downloader.params.get('include_ads') and ad_formats:
self._sort_formats(ad_formats)
ad_res = {
'id': '%s-ad' % video_id,
'title': '%s (Postroll)' % title,
'formats': ad_formats,
}
return {
'_type': 'playlist',
'entries': [res, ad_res],
'title': title,
'id': video_id,
}

return res

@@ -4,11 +4,11 @@ import re

from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_parse_qs,
compat_urllib_request,
compat_urllib_parse,
)
from ..utils import (
qualities,
str_to_int,
)

@@ -17,7 +17,7 @@ class ExtremeTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '1fb9228f5e3332ec8c057d6ac36f33e0',
'md5': '344d0c6d50e2f16b06e49ca011d8ac69',
'info_dict': {
'id': '652431',
'ext': 'mp4',
@@ -49,19 +49,27 @@ class ExtremeTubeIE(InfoExtractor):
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))

video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)
flash_vars = compat_parse_qs(self._search_regex(
r'<param[^>]+?name="flashvars"[^>]+?value="([^"]+)"', webpage, 'flash vars'))

formats = []
quality = qualities(['180p', '240p', '360p', '480p', '720p', '1080p'])
for k, vals in flash_vars.items():
m = re.match(r'quality_(?P<quality>[0-9]+p)$', k)
if m is not None:
formats.append({
'format_id': m.group('quality'),
'quality': quality(m.group('quality')),
'url': vals[0],
})

self._sort_formats(formats)

return {
'id': video_id,
'title': video_title,
'formats': formats,
'uploader': uploader,
'view_count': view_count,
'url': video_url,
'format': format,
'format_id': format,
'age_limit': 18,
}
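The ExtremeTube rewrite above stops deriving a single URL from the video path and instead reads every quality_* entry from the player's flashvars. A sketch of that parsing with a made-up flashvars value (the diff itself uses compat_parse_qs):

    import re
    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:  # Python 2
        from urlparse import parse_qs

    flash_vars = parse_qs(
        'quality_480p=http%3A%2F%2Fcdn.example.com%2Fv_480p.mp4'
        '&quality_720p=http%3A%2F%2Fcdn.example.com%2Fv_720p.mp4&title=demo')
    for k, vals in sorted(flash_vars.items()):
        m = re.match(r'quality_(?P<quality>[0-9]+p)$', k)
        if m is not None:
            print(m.group('quality'), vals[0])
    # -> 480p http://cdn.example.com/v_480p.mp4
    # -> 720p http://cdn.example.com/v_720p.mp4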
@@ -126,11 +126,17 @@ class FacebookIE(InfoExtractor):
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
video_data = params['video_data'][0]
video_url = video_data.get('hd_src')
if not video_url:
video_url = video_data['sd_src']
if not video_url:
raise ExtractorError('Cannot find video URL')

formats = []
for quality in ['sd', 'hd']:
src = video_data.get('%s_src' % quality)
if src is not None:
formats.append({
'format_id': quality,
'url': src,
})
if not formats:
raise ExtractorError('Cannot find video formats')

video_title = self._html_search_regex(
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title',
@@ -146,7 +152,7 @@ class FacebookIE(InfoExtractor):
return {
'id': video_id,
'title': video_title,
'url': video_url,
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
}

@@ -14,6 +14,7 @@ class FiveMinIE(InfoExtractor):
IE_NAME = '5min'
_VALID_URL = r'''(?x)
(?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?:.*?&)?playList=|
https?://(?:(?:massively|www)\.)?joystiq\.com/video/|
5min:)
(?P<id>\d+)
'''

youtube_dl/extractor/footyroom.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class FootyRoomIE(InfoExtractor):
_VALID_URL = r'http://footyroom\.com/(?P<id>[^/]+)'
_TEST = {
'url': 'http://footyroom.com/schalke-04-0-2-real-madrid-2015-02/',
'info_dict': {
'id': 'schalke-04-0-2-real-madrid-2015-02',
'title': 'Schalke 04 0 – 2 Real Madrid',
},
'playlist_count': 3,
}

def _real_extract(self, url):
playlist_id = self._match_id(url)

webpage = self._download_webpage(url, playlist_id)

playlist = self._parse_json(
self._search_regex(
r'VideoSelector\.load\((\[.+?\])\);', webpage, 'video selector'),
playlist_id)

playlist_title = self._og_search_title(webpage)

entries = []
for video in playlist:
payload = video.get('payload')
if not payload:
continue
playwire_url = self._search_regex(
r'data-config="([^"]+)"', payload,
'playwire url', default=None)
if playwire_url:
entries.append(self.url_result(playwire_url, 'Playwire'))

return self.playlist_result(entries, playlist_id, playlist_title)

@@ -50,7 +50,6 @@ class FunnyOrDieIE(InfoExtractor):
bitrates.sort()

formats = []

for bitrate in bitrates:
for link in links:
formats.append({
@@ -59,6 +58,13 @@ class FunnyOrDieIE(InfoExtractor):
'vbr': bitrate,
})

subtitles = {}
for src, src_lang in re.findall(r'<track kind="captions" src="([^"]+)" srclang="([^"]+)"', webpage):
subtitles[src_lang] = [{
'ext': src.split('/')[-1],
'url': 'http://www.funnyordie.com%s' % src,
}]

post_json = self._search_regex(
r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
post = json.loads(post_json)
@@ -69,4 +75,5 @@ class FunnyOrDieIE(InfoExtractor):
'description': post.get('description'),
'thumbnail': post.get('picture'),
'formats': formats,
'subtitles': subtitles,
}

@@ -1,6 +1,8 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
int_or_none,
@@ -31,7 +33,7 @@ class GameStarIE(InfoExtractor):
webpage = self._download_webpage(url, video_id)

og_title = self._og_search_title(webpage)
title = og_title.replace(' - Video bei GameStar.de', '').strip()
title = re.sub(r'\s*- Video (bei|-) GameStar\.de$', '', og_title)

url = 'http://gamestar.de/_misc/videos/portal/getVideoUrl.cfm?premium=0&videoId=' + video_id

youtube_dl/extractor/gazeta.py (new file, 38 lines)
@@ -0,0 +1,38 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class GazetaIE(InfoExtractor):
_VALID_URL = r'(?P<url>https?://(?:www\.)?gazeta\.ru/(?:[^/]+/)?video/(?:(?:main|\d{4}/\d{2}/\d{2})/)?(?P<id>[A-Za-z0-9-_.]+)\.s?html)'
_TESTS = [{
'url': 'http://www.gazeta.ru/video/main/zadaite_vopros_vladislavu_yurevichu.shtml',
'md5': 'd49c9bdc6e5a7888f27475dc215ee789',
'info_dict': {
'id': '205566',
'ext': 'mp4',
'title': '«70–80 процентов гражданских в Донецке на грани голода»',
'description': 'md5:38617526050bd17b234728e7f9620a71',
'thumbnail': 're:^https?://.*\.jpg',
},
}, {
'url': 'http://www.gazeta.ru/lifestyle/video/2015/03/08/master-klass_krasivoi_byt._delaem_vesennii_makiyazh.shtml',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)

display_id = mobj.group('id')
embed_url = '%s?p=embed' % mobj.group('url')
embed_page = self._download_webpage(
embed_url, display_id, 'Downloading embed page')

video_id = self._search_regex(
r'<div[^>]*?class="eagleplayer"[^>]*?data-id="([^"]+)"', embed_page, 'video id')

return self.url_result(
'eagleplatform:gazeta.media.eagleplatform.com:%s' % video_id, 'EaglePlatform')

@@ -7,10 +7,12 @@ from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import remove_end


class GDCVaultIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
_NETRC_MACHINE = 'gdcvault'
_TESTS = [
{
'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
@@ -65,10 +67,12 @@ class GDCVaultIE(InfoExtractor):

def _parse_flv(self, xml_description):
video_formats = []
akami_url = xml_description.find('./metadata/akamaiHost').text
akamai_url = xml_description.find('./metadata/akamaiHost').text
slide_video_path = xml_description.find('./metadata/slideVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + slide_video_path,
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(slide_video_path, '.flv'),
'ext': 'flv',
'format_note': 'slide deck video',
'quality': -2,
'preference': -2,
@@ -76,7 +80,9 @@ class GDCVaultIE(InfoExtractor):
})
speaker_video_path = xml_description.find('./metadata/speakerVideo').text
video_formats.append({
'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
'url': 'rtmp://%s/ondemand?ovpfv=1.1' % akamai_url,
'play_path': remove_end(speaker_video_path, '.flv'),
'ext': 'flv',
'format_note': 'speaker video',
'quality': -1,
'preference': -1,

@@ -26,6 +26,7 @@ from ..utils import (
unsmuggle_url,
UnsupportedError,
url_basename,
xpath_text,
)
from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
@@ -473,6 +474,7 @@ class GenericIE(InfoExtractor):
{
'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
'info_dict': {
'id': '1986',
'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
},
'playlist_mincount': 2,
@@ -531,7 +533,7 @@ class GenericIE(InfoExtractor):
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150204',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'uploader_id': 'NationalArchives08',
@@ -546,6 +548,76 @@ class GenericIE(InfoExtractor):
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen | RTL Nieuws',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed
{
'url': 'http://www.monumentalnetwork.com/videos/john-carlson-postgame-2-25-15',
'info_dict': {
'id': '1_eergr3h1',
'ext': 'mp4',
'upload_date': '20150226',
'uploader_id': 'MonumentalSports-Kaltura@perfectsensedigital.com',
'timestamp': int,
'title': 'John Carlson Postgame 2/25/15',
},
},
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
},
# ClipYou (Eagle.Platform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • Тайна перевала Дятлова 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
}
]

@@ -558,11 +630,24 @@ class GenericIE(InfoExtractor):
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text

entries = [{
'_type': 'url',
'url': e.find('link').text,
'title': e.find('title').text,
} for e in doc.findall('./channel/item')]
entries = []
for it in doc.findall('./channel/item'):
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break

if not next_url:
continue

entries.append({
'_type': 'url',
'url': next_url,
'title': it.find('title').text,
})

return {
'_type': 'playlist',
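The RSS change above makes the generic extractor fall back to an item's <enclosure> URL when <link> is missing, which is what the new podcast-feed test exercises. A minimal illustration (the feed content is invented):

    import xml.etree.ElementTree as ET

    doc = ET.fromstring(
        '<rss><channel><item>'
        '<title>Episode 1</title>'
        '<enclosure url="http://example.com/ep1.m4v" type="video/x-m4v"/>'
        '</item></channel></rss>')

    for it in doc.findall('./channel/item'):
        link = it.find('link')
        next_url = link.text if link is not None else None
        if not next_url:
            # same fallback as the diff: take the first enclosure with a url
            for e in it.findall('./enclosure'):
                next_url = e.attrib.get('url')
                if next_url:
                    break
        print(next_url)  # -> http://example.com/ep1.m4v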
@@ -1097,6 +1182,36 @@ class GenericIE(InfoExtractor):
|
||||
if mobj is not None:
|
||||
return self.url_result(mobj.group('url'), 'Livestream')
|
||||
|
||||
# Look for Zapiks embed
|
||||
mobj = re.search(
|
||||
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result(mobj.group('url'), 'Zapiks')
|
||||
|
||||
# Look for Kaltura embeds
|
||||
mobj = re.search(
|
||||
r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?'wid'\s*:\s*'_?(?P<partner_id>[^']+)',.*?'entry_id'\s*:\s*'(?P<id>[^']+)',", webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result('kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(), 'Kaltura')
|
||||
|
||||
# Look for Eagle.Platform embeds
|
||||
mobj = re.search(
|
||||
r'<iframe[^>]+src="(?P<url>https?://.+?\.media\.eagleplatform\.com/index/player\?.+?)"', webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result(mobj.group('url'), 'EaglePlatform')
|
||||
|
||||
# Look for ClipYou (uses Eagle.Platform) embeds
|
||||
mobj = re.search(
|
||||
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
|
||||
|
||||
# Look for Pladform embeds
|
||||
mobj = re.search(
|
||||
r'<iframe[^>]+src="(?P<url>https?://out\.pladform\.ru/player\?.+?)"', webpage)
|
||||
if mobj is not None:
|
||||
return self.url_result(mobj.group('url'), 'Pladform')
|
||||
|
||||
def check_video(vurl):
|
||||
if YoutubeIE.suitable(vurl):
|
||||
return True
|
||||
@@ -1192,7 +1307,9 @@ class GenericIE(InfoExtractor):
|
||||
return entries[0]
|
||||
else:
|
||||
for num, e in enumerate(entries, start=1):
|
||||
e['title'] = '%s (%d)' % (e['title'], num)
|
||||
# 'url' results don't have a title
|
||||
if e.get('title') is not None:
|
||||
e['title'] = '%s (%d)' % (e['title'], num)
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'entries': entries,
|
||||
|
@@ -20,7 +20,7 @@ class GloboIE(InfoExtractor):
    _VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'

    _API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
    _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
    _SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=17.0.0.132&resource_id=%s'

    _VIDEOID_REGEXES = [
        r'\bdata-video-id="(\d+)"',
@@ -34,6 +34,9 @@ class IGNIE(InfoExtractor):
        },
        {
            'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
            'info_dict': {
                'id': '100-little-things-in-gta-5-that-will-blow-your-mind',
            },
            'playlist': [
                {
                    'info_dict': {
97  youtube_dl/extractor/imgur.py  Normal file
@@ -0,0 +1,97 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    js_to_json,
    mimetype2ext,
    ExtractorError,
)


class ImgurIE(InfoExtractor):
    _VALID_URL = r'https?://(?:i\.)?imgur\.com/(?P<id>[a-zA-Z0-9]+)(?:\.mp4|\.gifv)?'

    _TESTS = [{
        'url': 'https://i.imgur.com/A61SaA1.gifv',
        'info_dict': {
            'id': 'A61SaA1',
            'ext': 'mp4',
            'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
            'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
        },
    }, {
        'url': 'https://imgur.com/A61SaA1',
        'info_dict': {
            'id': 'A61SaA1',
            'ext': 'mp4',
            'title': 're:Imgur GIF$|MRW gifv is up and running without any bugs$',
            'description': 're:The origin of the Internet\'s most viral images$|The Internet\'s visual storytelling community\. Explore, share, and discuss the best visual stories the Internet has to offer\.$',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        width = int_or_none(self._search_regex(
            r'<param name="width" value="([0-9]+)"',
            webpage, 'width', fatal=False))
        height = int_or_none(self._search_regex(
            r'<param name="height" value="([0-9]+)"',
            webpage, 'height', fatal=False))

        video_elements = self._search_regex(
            r'(?s)<div class="video-elements">(.*?)</div>',
            webpage, 'video elements', default=None)
        if not video_elements:
            raise ExtractorError(
                'No sources found for video %s. Maybe an image?' % video_id,
                expected=True)

        formats = []
        for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', video_elements):
            formats.append({
                'format_id': m.group('type').partition('/')[2],
                'url': self._proto_relative_url(m.group('src')),
                'ext': mimetype2ext(m.group('type')),
                'acodec': 'none',
                'width': width,
                'height': height,
                'http_headers': {
                    'User-Agent': 'youtube-dl (like wget)',
                },
            })

        gif_json = self._search_regex(
            r'(?s)var\s+videoItem\s*=\s*(\{.*?\})',
            webpage, 'GIF code', fatal=False)
        if gif_json:
            gifd = self._parse_json(
                gif_json, video_id, transform_source=js_to_json)
            formats.append({
                'format_id': 'gif',
                'preference': -10,
                'width': width,
                'height': height,
                'ext': 'gif',
                'acodec': 'none',
                'vcodec': 'gif',
                'container': 'gif',
                'url': self._proto_relative_url(gifd['gifUrl']),
                'filesize': gifd.get('size'),
                'http_headers': {
                    'User-Agent': 'youtube-dl (like wget)',
                },
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'description': self._og_search_description(webpage),
            'title': self._og_search_title(webpage),
        }
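The new ImgurIE assembles its format list by scanning the <source> tags inside the page's video-elements block. A small self-contained sketch of that scan (the sample HTML snippet is invented):

import re

html = ('<source src="//i.imgur.com/A61SaA1.webm" type="video/webm">'
        '<source src="//i.imgur.com/A61SaA1.mp4" type="video/mp4">')

formats = []
for m in re.finditer(r'<source\s+src="(?P<src>[^"]+)"\s+type="(?P<type>[^"]+)"', html):
    mime = m.group('type')
    formats.append({
        'format_id': mime.partition('/')[2],  # 'webm', 'mp4'
        'url': 'http:' + m.group('src'),      # resolve the protocol-relative URL
    })
print(formats)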
@@ -2,7 +2,6 @@

from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
@@ -15,10 +14,10 @@ class JeuxVideoIE(InfoExtractor):
        'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
        'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
        'info_dict': {
            'id': '5182',
            'id': '114765',
            'ext': 'mp4',
            'title': 'GC 2013 : Tearaway nous présente ses papiers d\'identité',
            'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
            'title': 'Tearaway : GC 2013 : Tearaway nous présente ses papiers d\'identité',
            'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.',
        },
    }

@@ -26,26 +25,29 @@ class JeuxVideoIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group(1)
        webpage = self._download_webpage(url, title)
        xml_link = self._html_search_regex(
            r'<param name="flashvars" value="config=(.*?)" />',
        title = self._html_search_meta('name', webpage)
        config_url = self._html_search_regex(
            r'data-src="(/contenu/medias/video.php.*?)"',
            webpage, 'config URL')
        config_url = 'http://www.jeuxvideo.com' + config_url

        video_id = self._search_regex(
            r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
            xml_link, 'video ID')
            r'id=(\d+)',
            config_url, 'video ID')

        config = self._download_xml(
            xml_link, title, 'Downloading XML config')
        info_json = config.find('format.json').text
        info = json.loads(info_json)['versions'][0]
        config = self._download_json(
            config_url, title, 'Downloading JSON config')

        video_url = 'http://video720.jeuxvideo.com/' + info['file']
        formats = [{
            'url': source['file'],
            'format_id': source['label'],
            'resolution': source['label'],
        } for source in reversed(config['sources'])]

        return {
            'id': video_id,
            'title': config.find('titre_video').text,
            'ext': 'mp4',
            'url': video_url,
            'title': title,
            'formats': formats,
            'description': self._og_search_description(webpage),
            'thumbnail': config.find('image').text,
            'thumbnail': config.get('image'),
        }
138  youtube_dl/extractor/kaltura.py  Normal file
@@ -0,0 +1,138 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse
from ..utils import (
    ExtractorError,
    int_or_none,
)


class KalturaIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        (?:kaltura:|
           https?://(:?(?:www|cdnapisec)\.)?kaltura\.com/index\.php/kwidget/(?:[^/]+/)*?wid/_
        )(?P<partner_id>\d+)
        (?::|
           /(?:[^/]+/)*?entry_id/
        )(?P<id>[0-9a-z_]+)'''
    _API_BASE = 'http://cdnapi.kaltura.com/api_v3/index.php?'
    _TESTS = [
        {
            'url': 'kaltura:269692:1_1jc2y3e4',
            'md5': '3adcbdb3dcc02d647539e53f284ba171',
            'info_dict': {
                'id': '1_1jc2y3e4',
                'ext': 'mp4',
                'title': 'Track 4',
                'upload_date': '20131219',
                'uploader_id': 'mlundberg@wolfgangsvault.com',
                'description': 'The Allman Brothers Band, 12/16/1981',
                'thumbnail': 're:^https?://.*/thumbnail/.*',
                'timestamp': int,
            },
        },
        {
            'url': 'http://www.kaltura.com/index.php/kwidget/cache_st/1300318621/wid/_269692/uiconf_id/3873291/entry_id/1_1jc2y3e4',
            'only_matching': True,
        },
        {
            'url': 'https://cdnapisec.kaltura.com/index.php/kwidget/wid/_557781/uiconf_id/22845202/entry_id/1_plr1syf3',
            'only_matching': True,
        },
    ]

    def _kaltura_api_call(self, video_id, actions, *args, **kwargs):
        params = actions[0]
        if len(actions) > 1:
            for i, a in enumerate(actions[1:], start=1):
                for k, v in a.items():
                    params['%d:%s' % (i, k)] = v

        query = compat_urllib_parse.urlencode(params)
        url = self._API_BASE + query
        data = self._download_json(url, video_id, *args, **kwargs)

        status = data if len(actions) == 1 else data[0]
        if status.get('objectType') == 'KalturaAPIException':
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, status['message']))

        return data

    def _get_kaltura_signature(self, video_id, partner_id):
        actions = [{
            'apiVersion': '3.1',
            'expiry': 86400,
            'format': 1,
            'service': 'session',
            'action': 'startWidgetSession',
            'widgetId': '_%s' % partner_id,
        }]
        return self._kaltura_api_call(
            video_id, actions, note='Downloading Kaltura signature')['ks']

    def _get_video_info(self, video_id, partner_id):
        signature = self._get_kaltura_signature(video_id, partner_id)
        actions = [
            {
                'action': 'null',
                'apiVersion': '3.1.5',
                'clientTag': 'kdp:v3.8.5',
                'format': 1,  # JSON, 2 = XML, 3 = PHP
                'service': 'multirequest',
                'ks': signature,
            },
            {
                'action': 'get',
                'entryId': video_id,
                'service': 'baseentry',
                'version': '-1',
            },
            {
                'action': 'getContextData',
                'contextDataParams:objectType': 'KalturaEntryContextDataParams',
                'contextDataParams:referrer': 'http://www.kaltura.com/',
                'contextDataParams:streamerType': 'http',
                'entryId': video_id,
                'service': 'baseentry',
            },
        ]
        return self._kaltura_api_call(
            video_id, actions, note='Downloading video info JSON')

    def _real_extract(self, url):
        video_id = self._match_id(url)
        mobj = re.match(self._VALID_URL, url)
        partner_id, entry_id = mobj.group('partner_id'), mobj.group('id')

        info, source_data = self._get_video_info(entry_id, partner_id)

        formats = [{
            'format_id': '%(fileExt)s-%(bitrate)s' % f,
            'ext': f['fileExt'],
            'tbr': f['bitrate'],
            'fps': f.get('frameRate'),
            'filesize_approx': int_or_none(f.get('size'), invscale=1024),
            'container': f.get('containerFormat'),
            'vcodec': f.get('videoCodecId'),
            'height': f.get('height'),
            'width': f.get('width'),
            'url': '%s/flavorId/%s' % (info['dataUrl'], f['id']),
        } for f in source_data['flavorAssets']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': info['name'],
            'formats': formats,
            'description': info.get('description'),
            'thumbnail': info.get('thumbnailUrl'),
            'duration': info.get('duration'),
            'timestamp': info.get('createdAt'),
            'uploader_id': info.get('userId'),
            'view_count': info.get('plays'),
        }
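The _kaltura_api_call helper above flattens a list of action dicts into one multirequest query: the first action keeps bare keys, and every following action gets an "<index>:" prefix on each of its keys. A standalone sketch of that flattening (parameter values borrowed from the code above, signature invented):

try:
    from urllib.parse import urlencode  # Python 3
except ImportError:
    from urllib import urlencode  # Python 2

actions = [
    {'service': 'multirequest', 'format': 1, 'ks': 'SIGNATURE'},
    {'service': 'baseentry', 'action': 'get', 'entryId': '1_1jc2y3e4'},
]

params = dict(actions[0])  # copy instead of mutating, unlike the extractor
for i, a in enumerate(actions[1:], start=1):
    for k, v in a.items():
        params['%d:%s' % (i, k)] = v  # e.g. '1:entryId'

print(urlencode(params))
# ...&1%3Aservice=baseentry&1%3Aaction=get&1%3AentryId=1_1jc2y3e4...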
96  youtube_dl/extractor/kanalplay.py  Normal file
@@ -0,0 +1,96 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
)


class KanalPlayIE(InfoExtractor):
    IE_DESC = 'Kanal 5/9/11 Play'
    _VALID_URL = r'https?://(?:www\.)?kanal(?P<channel_id>5|9|11)play\.se/(?:#!/)?(?:play/)?program/\d+/video/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.kanal5play.se/#!/play/program/3060212363/video/3270012277',
        'info_dict': {
            'id': '3270012277',
            'ext': 'flv',
            'title': 'Saknar både dusch och avlopp',
            'description': 'md5:6023a95832a06059832ae93bc3c7efb7',
            'duration': 2636.36,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.kanal9play.se/#!/play/program/335032/video/246042',
        'only_matching': True,
    }, {
        'url': 'http://www.kanal11play.se/#!/play/program/232835958/video/367135199',
        'only_matching': True,
    }]

    def _fix_subtitles(self, subs):
        return '\r\n\r\n'.join(
            '%s\r\n%s --> %s\r\n%s'
            % (
                num,
                self._subtitles_timecode(item['startMillis'] / 1000.0),
                self._subtitles_timecode(item['endMillis'] / 1000.0),
                item['text'],
            ) for num, item in enumerate(subs, 1))

    def _get_subtitles(self, channel_id, video_id):
        subs = self._download_json(
            'http://www.kanal%splay.se/api/subtitles/%s' % (channel_id, video_id),
            video_id, 'Downloading subtitles JSON', fatal=False)
        return {'se': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]} if subs else {}

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        channel_id = mobj.group('channel_id')

        video = self._download_json(
            'http://www.kanal%splay.se/api/getVideo?format=FLASH&videoId=%s' % (channel_id, video_id),
            video_id)

        reasons_for_no_streams = video.get('reasonsForNoStreams')
        if reasons_for_no_streams:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, '\n'.join(reasons_for_no_streams)),
                expected=True)

        title = video['title']
        description = video.get('description')
        duration = float_or_none(video.get('length'), 1000)
        thumbnail = video.get('posterUrl')

        stream_base_url = video['streamBaseUrl']

        formats = [{
            'url': stream_base_url,
            'play_path': stream['source'],
            'ext': 'flv',
            'tbr': float_or_none(stream.get('bitrate'), 1000),
            'rtmp_real_time': True,
        } for stream in video['streams']]
        self._sort_formats(formats)

        subtitles = {}
        if video.get('hasSubtitle'):
            subtitles = self.extract_subtitles(channel_id, video_id)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
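_fix_subtitles above turns the JSON cue list into SRT text: a sequence number, a "start --> end" timecode line, then the caption, with a blank line between cues. A minimal sketch of the same construction with the timecode formatter written out (the cue values are invented; the format string mirrors the hh:mm:ss.mmm shape used elsewhere in this changeset):

def timecode(s):
    # hh:mm:ss.mmm, the same shape as NRK's _seconds2str further below
    return '%02d:%02d:%02d.%03d' % (s // 3600, (s % 3600) // 60, s % 60, (s % 1) * 1000)

subs = [
    {'startMillis': 0, 'endMillis': 1500, 'text': 'Hej!'},
    {'startMillis': 1500, 'endMillis': 4000, 'text': 'Saknar avlopp.'},
]

srt = '\r\n\r\n'.join(
    '%s\r\n%s --> %s\r\n%s' % (
        num,
        timecode(item['startMillis'] / 1000.0),
        timecode(item['endMillis'] / 1000.0),
        item['text'],
    ) for num, item in enumerate(subs, 1))
print(srt)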
@@ -1,31 +1,32 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import random
import re

from .common import InfoExtractor
from ..utils import ExtractorError
from ..utils import (
    ExtractorError,
    xpath_text,
)


class Laola1TvIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
        'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
        'info_dict': {
            'id': '250019',
            'id': '227883',
            'ext': 'mp4',
            'title': 'Bitburger Open Grand Prix Gold - Court 1',
            'categories': ['Badminton'],
            'uploader': 'BWF - Badminton World Federation',
            'is_live': True,
            'title': 'Straubing Tigers - Kölner Haie',
            'categories': ['Eishockey'],
            'is_live': False,
        },
        'params': {
            'skip_download': True,
        }
    }

    _BROKEN = True  # Not really - extractor works fine, but f4m downloader does not support live streams yet.

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
@@ -43,15 +44,22 @@ class Laola1TvIE(InfoExtractor):
            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
        flashvars = dict((m[0], m[1]) for m in flashvars_m)

        partner_id = self._search_regex(
            r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner id')

        xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
                   'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
                       video_id, portal, lang))
                   'play=%s&partner=%s&portal=%s&v5ident=&lang=%s' % (
                       video_id, partner_id, portal, lang))
        hd_doc = self._download_xml(xml_url, video_id)

        title = hd_doc.find('.//video/title').text
        flash_url = hd_doc.find('.//video/url').text
        categories = hd_doc.find('.//video/meta_sports').text.split(',')
        uploader = hd_doc.find('.//video/meta_organistation').text
        title = xpath_text(hd_doc, './/video/title', fatal=True)
        flash_url = xpath_text(hd_doc, './/video/url', fatal=True)
        uploader = xpath_text(hd_doc, './/video/meta_organistation')
        is_live = xpath_text(hd_doc, './/video/islive') == 'true'

        categories = xpath_text(hd_doc, './/video/meta_sports')
        if categories:
            categories = categories.split(',')

        ident = random.randint(10000000, 99999999)
        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
@@ -60,15 +68,16 @@ class Laola1TvIE(InfoExtractor):
        token_doc = self._download_xml(
            token_url, video_id, note='Downloading token')
        token_attrib = token_doc.find('.//token').attrib
        if token_attrib.get('auth') == 'blocked':
            raise ExtractorError('Token error: ' % token_attrib.get('comment'))
        if token_attrib.get('auth') in ('blocked', 'restricted'):
            raise ExtractorError(
                'Token error: %s' % token_attrib.get('comment'), expected=True)

        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
            token_attrib['url'], token_attrib['auth'])

        return {
            'id': video_id,
            'is_live': True,
            'is_live': is_live,
            'title': title,
            'url': video_url,
            'uploader': uploader,
206  youtube_dl/extractor/letv.py  Normal file
@@ -0,0 +1,206 @@
# coding: utf-8
from __future__ import unicode_literals

import datetime
import re
import time

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
)
from ..utils import (
    determine_ext,
    ExtractorError,
    parse_iso8601,
)


class LetvIE(InfoExtractor):
    _VALID_URL = r'http://www\.letv\.com/ptv/vplay/(?P<id>\d+).html'

    _TESTS = [{
        'url': 'http://www.letv.com/ptv/vplay/22005890.html',
        'md5': 'cab23bd68d5a8db9be31c9a222c1e8df',
        'info_dict': {
            'id': '22005890',
            'ext': 'mp4',
            'title': '第87届奥斯卡颁奖礼完美落幕 《鸟人》成最大赢家',
            'timestamp': 1424747397,
            'upload_date': '20150224',
            'description': 'md5:a9cb175fd753e2962176b7beca21a47c',
        }
    }, {
        'url': 'http://www.letv.com/ptv/vplay/1415246.html',
        'info_dict': {
            'id': '1415246',
            'ext': 'mp4',
            'title': '美人天下01',
            'description': 'md5:f88573d9d7225ada1359eaf0dbf8bcda',
        },
    }, {
        'note': 'This video is available only in Mainland China, thus a proxy is needed',
        'url': 'http://www.letv.com/ptv/vplay/1118082.html',
        'md5': 'f80936fbe20fb2f58648e81386ff7927',
        'info_dict': {
            'id': '1118082',
            'ext': 'mp4',
            'title': '与龙共舞 完整版',
            'description': 'md5:7506a5eeb1722bb9d4068f85024e3986',
        },
        'params': {
            'cn_verification_proxy': 'http://proxy.uku.im:8888'
        },
    }]

    @staticmethod
    def urshift(val, n):
        return val >> n if val >= 0 else (val + 0x100000000) >> n

    # ror() and calc_time_key() are reversed from an embedded swf file in KLetvPlayer.swf
    def ror(self, param1, param2):
        _loc3_ = 0
        while _loc3_ < param2:
            param1 = self.urshift(param1, 1) + ((param1 & 1) << 31)
            _loc3_ += 1
        return param1

    def calc_time_key(self, param1):
        _loc2_ = 773625421
        _loc3_ = self.ror(param1, _loc2_ % 13)
        _loc3_ = _loc3_ ^ _loc2_
        _loc3_ = self.ror(_loc3_, _loc2_ % 17)
        return _loc3_

    def _real_extract(self, url):
        media_id = self._match_id(url)
        page = self._download_webpage(url, media_id)
        params = {
            'id': media_id,
            'platid': 1,
            'splatid': 101,
            'format': 1,
            'tkey': self.calc_time_key(int(time.time())),
            'domain': 'www.letv.com'
        }
        play_json_req = compat_urllib_request.Request(
            'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
        )
        play_json_req.add_header(
            'Ytdl-request-proxy',
            self._downloader.params.get('cn_verification_proxy'))
        play_json = self._download_json(
            play_json_req,
            media_id, 'playJson data')

        # Check for errors
        playstatus = play_json['playstatus']
        if playstatus['status'] == 0:
            flag = playstatus['flag']
            if flag == 1:
                msg = 'Country %s auth error' % playstatus['country']
            else:
                msg = 'Generic error. flag = %d' % flag
            raise ExtractorError(msg, expected=True)

        playurl = play_json['playurl']

        formats = ['350', '1000', '1300', '720p', '1080p']
        dispatch = playurl['dispatch']

        urls = []
        for format_id in formats:
            if format_id in dispatch:
                media_url = playurl['domain'][0] + dispatch[format_id][0]

                # Mimic what flvxz.com do
                url_parts = list(compat_urlparse.urlparse(media_url))
                qs = dict(compat_urlparse.parse_qs(url_parts[4]))
                qs.update({
                    'platid': '14',
                    'splatid': '1401',
                    'tss': 'no',
                    'retry': 1
                })
                url_parts[4] = compat_urllib_parse.urlencode(qs)
                media_url = compat_urlparse.urlunparse(url_parts)

                url_info_dict = {
                    'url': media_url,
                    'ext': determine_ext(dispatch[format_id][1]),
                    'format_id': format_id,
                }

                if format_id[-1:] == 'p':
                    url_info_dict['height'] = format_id[:-1]

                urls.append(url_info_dict)

        publish_time = parse_iso8601(self._html_search_regex(
            r'发布时间 ([^<>]+) ', page, 'publish time', default=None),
            delimiter=' ', timezone=datetime.timedelta(hours=8))
        description = self._html_search_meta('description', page, fatal=False)

        return {
            'id': media_id,
            'formats': urls,
            'title': playurl['title'],
            'thumbnail': playurl['pic'],
            'description': description,
            'timestamp': publish_time,
        }


class LetvTvIE(InfoExtractor):
    _VALID_URL = r'http://www.letv.com/tv/(?P<id>\d+).html'
    _TESTS = [{
        'url': 'http://www.letv.com/tv/46177.html',
        'info_dict': {
            'id': '46177',
            'title': '美人天下',
            'description': 'md5:395666ff41b44080396e59570dbac01c'
        },
        'playlist_count': 35
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        page = self._download_webpage(url, playlist_id)

        media_urls = list(set(re.findall(
            r'http://www.letv.com/ptv/vplay/\d+.html', page)))
        entries = [self.url_result(media_url, ie='Letv')
                   for media_url in media_urls]

        title = self._html_search_meta('keywords', page,
                                       fatal=False).split(',')[0]
        description = self._html_search_meta('description', page, fatal=False)

        return self.playlist_result(entries, playlist_id, playlist_title=title,
                                    playlist_description=description)


class LetvPlaylistIE(LetvTvIE):
    _VALID_URL = r'http://tv.letv.com/[a-z]+/(?P<id>[a-z]+)/index.s?html'
    _TESTS = [{
        'url': 'http://tv.letv.com/izt/wuzetian/index.html',
        'info_dict': {
            'id': 'wuzetian',
            'title': '武媚娘传奇',
            'description': 'md5:e12499475ab3d50219e5bba00b3cb248'
        },
        # This playlist contains some extra videos other than the drama itself
        'playlist_mincount': 96
    }, {
        'url': 'http://tv.letv.com/pzt/lswjzzjc/index.shtml',
        'info_dict': {
            'id': 'lswjzzjc',
            # The title should be "劲舞青春", but I can't find a simple way to
            # determine the playlist title
            'title': '乐视午间自制剧场',
            'description': 'md5:b1eef244f45589a7b5b1af9ff25a4489'
        },
        'playlist_mincount': 7
    }]
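calc_time_key above is a pure function of the current Unix time: two rotate-right steps (via ror/urshift, which emulate a 32-bit unsigned shift) around an XOR with the constant 773625421. It can be exercised standalone:

def urshift(val, n):
    # emulate ActionScript/JavaScript >>> on 32-bit values
    return val >> n if val >= 0 else (val + 0x100000000) >> n

def ror(v, n):
    # rotate a 32-bit value right by n bits, one bit at a time as the swf does
    for _ in range(n):
        v = urshift(v, 1) + ((v & 1) << 31)
    return v

def calc_time_key(t):
    k = 773625421
    return ror(ror(t, k % 13) ^ k, k % 17)

print(calc_time_key(1424747397))  # deterministic for a fixed timestamp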
@@ -2,6 +2,7 @@ from __future__ import unicode_literals

import re
import json
import itertools

from .common import InfoExtractor
from ..compat import (
@@ -37,8 +38,16 @@ class LivestreamIE(InfoExtractor):
        'url': 'http://new.livestream.com/tedx/cityenglish',
        'info_dict': {
            'title': 'TEDCity2.0 (English)',
            'id': '2245590',
        },
        'playlist_mincount': 4,
    }, {
        'url': 'http://new.livestream.com/chess24/tatasteelchess',
        'info_dict': {
            'title': 'Tata Steel Chess',
            'id': '3705884',
        },
        'playlist_mincount': 60,
    }, {
        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
        'only_matching': True,
@@ -116,6 +125,30 @@ class LivestreamIE(InfoExtractor):
            'view_count': video_data.get('views'),
        }

    def _extract_event(self, info):
        event_id = compat_str(info['id'])
        account = compat_str(info['owner_account_id'])
        root_url = (
            'https://new.livestream.com/api/accounts/{account}/events/{event}/'
            'feed.json'.format(account=account, event=event_id))

        def _extract_videos():
            last_video = None
            for i in itertools.count(1):
                if last_video is None:
                    info_url = root_url
                else:
                    info_url = '{root}?&id={id}&newer=-1&type=video'.format(
                        root=root_url, id=last_video)
                videos_info = self._download_json(info_url, event_id, 'Downloading page {0}'.format(i))['data']
                videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
                if not videos_info:
                    break
                for v in videos_info:
                    yield self._extract_video_info(v)
                last_video = videos_info[-1]['id']
        return self.playlist_result(_extract_videos(), event_id, info['full_name'])

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
@@ -143,13 +176,13 @@ class LivestreamIE(InfoExtractor):
            result = result and compat_str(vdata['data']['id']) == vid
            return result

        videos = [self._extract_video_info(video_data['data'])
                  for video_data in info['feed']['data']
                  if is_relevant(video_data, video_id)]
        if video_id is None:
            # This is an event page:
            return self.playlist_result(videos, info['id'], info['full_name'])
            return self._extract_event(info)
        else:
            videos = [self._extract_video_info(video_data['data'])
                      for video_data in info['feed']['data']
                      if is_relevant(video_data, video_id)]
            if not videos:
                raise ExtractorError('Cannot find video %s' % video_id)
            return videos[0]
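_extract_event pages through the feed by remembering the last video id and requesting newer entries until a page comes back empty. The same cursor loop, sketched against a stubbed fetcher (the stub data is invented):

import itertools

PAGES = {None: [101, 102], 102: [103], 103: []}  # fake feed: cursor -> ids

def fetch(last_id):
    return PAGES.get(last_id, [])

def iter_videos():
    last_video = None
    for page_number in itertools.count(1):
        ids = fetch(last_video)   # None means "first page", like root_url above
        if not ids:
            break                 # an empty page ends the iteration
        for vid in ids:
            yield vid
        last_video = ids[-1]      # cursor for the next request

print(list(iter_videos()))  # [101, 102, 103]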
@@ -52,6 +52,7 @@ class LRTIE(InfoExtractor):
                    'url': data['streamer'],
                    'play_path': 'mp4:%s' % data['file'],
                    'preference': -1,
                    'rtmp_real_time': True,
                })
            else:
                formats.extend(
@@ -3,7 +3,6 @@ from __future__ import unicode_literals
import re
import json

from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..compat import (
    compat_str,
@@ -16,19 +15,74 @@ from ..utils import (
)


class LyndaIE(SubtitlesInfoExtractor):
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/[^/]+/[^/]+/\d+/(\d+)-\d\.html'
class LyndaBaseIE(InfoExtractor):
    _LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
    _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true'
    _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _NETRC_MACHINE = 'lynda'

    def _real_initialize(self):
        self._login()

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username,
            'password': password,
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        login_page = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # Not (yet) logged in
        m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
                raise ExtractorError(
                    'Unable to login, incorrect username and/or password',
                    expected=True)

            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(
                    self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form))
                login_page = self._download_webpage(
                    request, None,
                    'Confirming log in and log out from another device')

        if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
            raise ExtractorError('Unable to log in')


class LyndaIE(LyndaBaseIE):
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
    _NETRC_MACHINE = 'lynda'

    _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true'
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'

    _TEST = {
    _TESTS = [{
        'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
        'info_dict': {
@@ -37,25 +91,27 @@ class LyndaIE(SubtitlesInfoExtractor):
            'title': 'Using the exercise files',
            'duration': 68
        }
    }

    def _real_initialize(self):
        self._login()
    }, {
        'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        video_id = self._match_id(url)

        page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id,
                                      'Downloading video JSON')
        page = self._download_webpage(
            'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
            video_id, 'Downloading video JSON')
        video_json = json.loads(page)

        if 'Status' in video_json:
            raise ExtractorError('lynda returned error: %s' % video_json['Message'], expected=True)
            raise ExtractorError(
                'lynda returned error: %s' % video_json['Message'], expected=True)

        if video_json['HasAccess'] is False:
            raise ExtractorError(
                'Video %s is only available for members. ' % video_id + self.ACCOUNT_CREDENTIALS_HINT, expected=True)
                'Video %s is only available for members. '
                % video_id + self._ACCOUNT_CREDENTIALS_HINT, expected=True)

        video_id = compat_str(video_json['ID'])
        duration = video_json['DurationInSeconds']
@@ -88,11 +144,7 @@ class LyndaIE(SubtitlesInfoExtractor):
        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, page)
            return

        subtitles = self._fix_subtitles(self.extract_subtitles(video_id, page))
        subtitles = self.extract_subtitles(video_id, page)

        return {
            'id': video_id,
@@ -102,83 +154,37 @@ class LyndaIE(SubtitlesInfoExtractor):
            'formats': formats
        }

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username,
            'password': password,
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        login_page = self._download_webpage(request, None, 'Logging in as %s' % username)

        # Not (yet) logged in
        m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
                raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)

            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form))
                login_page = self._download_webpage(request, None, 'Confirming log in and log out from another device')

        if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
            raise ExtractorError('Unable to log in')

    def _fix_subtitles(self, subtitles):
        if subtitles is None:
            return subtitles  # subtitles not requested

        fixed_subtitles = {}
        for k, v in subtitles.items():
            subs = json.loads(v)
            if len(subs) == 0:
    def _fix_subtitles(self, subs):
        srt = ''
        seq_counter = 0
        for pos in range(0, len(subs) - 1):
            seq_current = subs[pos]
            m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
            if m_current is None:
                continue
            srt = ''
            for pos in range(0, len(subs) - 1):
                seq_current = subs[pos]
                m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
                if m_current is None:
                    continue
                seq_next = subs[pos + 1]
                m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
                if m_next is None:
                    continue
                appear_time = m_current.group('timecode')
                disappear_time = m_next.group('timecode')
                text = seq_current['Caption']
                srt += '%s\r\n%s --> %s\r\n%s' % (str(pos), appear_time, disappear_time, text)
            if srt:
                fixed_subtitles[k] = srt
        return fixed_subtitles
            seq_next = subs[pos + 1]
            m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
            if m_next is None:
                continue
            appear_time = m_current.group('timecode')
            disappear_time = m_next.group('timecode')
            text = seq_current['Caption'].strip()
            if text:
                seq_counter += 1
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text)
        if srt:
            return srt

    def _get_available_subtitles(self, video_id, webpage):
    def _get_subtitles(self, video_id, webpage):
        url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        sub = self._download_webpage(url, None, False)
        sub_json = json.loads(sub)
        return {'en': url} if len(sub_json) > 0 else {}
        subs = self._download_json(url, None, False)
        if subs:
            return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
        else:
            return {}


class LyndaCourseIE(InfoExtractor):
class LyndaCourseIE(LyndaBaseIE):
    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

@@ -191,35 +197,37 @@ class LyndaCourseIE(InfoExtractor):
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')

        page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                      course_id, 'Downloading course JSON')
        page = self._download_webpage(
            'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
            course_id, 'Downloading course JSON')
        course_json = json.loads(page)

        if 'Status' in course_json and course_json['Status'] == 'NotFound':
            raise ExtractorError('Course %s does not exist' % course_id, expected=True)
            raise ExtractorError(
                'Course %s does not exist' % course_id, expected=True)

        unaccessible_videos = 0
        videos = []
        (username, _) = self._get_login_info()

        # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
        # by single video API anymore

        for chapter in course_json['Chapters']:
            for video in chapter['Videos']:
                if username is None and video['HasAccess'] is False:
                if video['HasAccess'] is False:
                    unaccessible_videos += 1
                    continue
                videos.append(video['ID'])

        if unaccessible_videos > 0:
            self._downloader.report_warning('%s videos are only available for members and will not be downloaded. '
                                            % unaccessible_videos + LyndaIE.ACCOUNT_CREDENTIALS_HINT)
            self._downloader.report_warning(
                '%s videos are only available for members (or paid members) and will not be downloaded. '
                % unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)

        entries = [
            self.url_result('http://www.lynda.com/%s/%s-4.html' %
                            (course_path, video_id),
                            'Lynda')
            self.url_result(
                'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
                'Lynda')
            for video_id in videos]

        course_title = course_json['Title']
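The login flow factored into LyndaBaseIE is two sequential form POSTs: first the credentials, then, if the response reports state 'conflicted', a confirmation form that logs the account out of another device. A rough standalone sketch with urllib (field names and URL as in the code above; do not treat this as the library's API, and no real credentials are included):

import json
import re
try:
    from urllib.parse import urlencode          # Python 3
    from urllib.request import Request, urlopen
except ImportError:
    from urllib import urlencode                # Python 2
    from urllib2 import Request, urlopen

LOGIN_URL = 'https://www.lynda.com/login/login.aspx'

def login(username, password):
    def post(form):
        data = urlencode(form).encode('ascii')
        return urlopen(Request(LOGIN_URL, data)).read().decode('utf-8')

    page = post({'username': username, 'password': password,
                 'remember': 'false', 'stayPut': 'false'})

    m = re.search(r"loginResultJson = '(?P<json>[^']+)';", page)
    if m is not None:
        state = json.loads(m.group('json'))['state']
        if state == 'notlogged':
            raise Exception('incorrect username and/or password')
        if state == 'conflicted':
            # already logged in on two devices; confirm logging one out
            page = post({'username': '', 'password': '', 'resolve': 'true',
                         'remember': 'false', 'stayPut': 'false'})

    if not re.search(r'isLoggedIn: true', page):
        raise Exception('unable to log in')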
@@ -5,9 +5,6 @@ import json

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
    compat_urlparse,
)
from ..utils import (
    clean_html,
    ExtractorError,
@@ -108,7 +105,6 @@ class OCWMITIE(InfoExtractor):
            'upload_date': '20121109',
            'uploader_id': 'MIT',
            'uploader': 'MIT OpenCourseWare',
            # 'subtitles': 'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/MIT6_041F11_lec07_300k.mp4.srt'
        }
    },
    {
@@ -121,7 +117,6 @@ class OCWMITIE(InfoExtractor):
            'uploader_id': 'MIT',
            'uploader': 'MIT OpenCourseWare',
            'description': 'This section contains lecture video excerpts, lecture notes, an interactive mathlet with supporting documents, and problem solving videos.',
            # 'subtitles': 'http://ocw.mit.edu//courses/mathematics/18-01sc-single-variable-calculus-fall-2010/ocw-18.01-f07-lec01_300k.SRT'
        }
    }
]
@@ -140,7 +135,6 @@ class OCWMITIE(InfoExtractor):
            metadata = re.sub(r'[\'"]', '', embed_chapter_media.group(1))
            metadata = re.split(r', ?', metadata)
            yt = metadata[1]
            subs = compat_urlparse.urljoin(self._BASE_URL, metadata[7])
        else:
            # search for call to ocw_embed_chapter_media(container_id, media_url, provider, page_url, image_url, captions_file)
            embed_media = re.search(r'ocw_embed_media\((.+?)\)', webpage)
@@ -148,7 +142,6 @@ class OCWMITIE(InfoExtractor):
                metadata = re.sub(r'[\'"]', '', embed_media.group(1))
                metadata = re.split(r', ?', metadata)
                yt = metadata[1]
                subs = compat_urlparse.urljoin(self._BASE_URL, metadata[5])
            else:
                raise ExtractorError('Unable to find embedded YouTube video.')
        video_id = YoutubeIE.extract_id(yt)
@@ -159,7 +152,5 @@ class OCWMITIE(InfoExtractor):
            'title': title,
            'description': description,
            'url': yt,
            'url_transparent'
            'subtitles': subs,
            'ie_key': 'Youtube',
        }
@@ -18,7 +18,7 @@ class MiTeleIE(InfoExtractor):
    IE_NAME = 'mitele.es'
    _VALID_URL = r'http://www\.mitele\.es/[^/]+/[^/]+/[^/]+/(?P<id>[^/]+)/'

    _TEST = {
    _TESTS = [{
        'url': 'http://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144/',
        'md5': '6a75fe9d0d3275bead0cb683c616fddb',
        'info_dict': {
@@ -29,7 +29,7 @@ class MiTeleIE(InfoExtractor):
            'display_id': 'programa-144',
            'duration': 2913,
        },
    }
    }]

    def _real_extract(self, url):
        episode = self._match_id(url)
@@ -5,7 +5,7 @@ from ..utils import int_or_none


class MporaIE(InfoExtractor):
    _VALID_URL = r'https?://(www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
    _VALID_URL = r'https?://(?:www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)'
    IE_NAME = 'MPORA'

    _TEST = {
@@ -25,7 +25,9 @@ class MporaIE(InfoExtractor):
        webpage = self._download_webpage(url, video_id)

        data_json = self._search_regex(
            r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", webpage, 'json')
            [r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;",
             r"new\s+FM\.Kaltura\.Player\('[^']+'\s*,\s*({.+?})\);"],
            webpage, 'json')
        data = self._parse_json(data_json, video_id)

        uploader = data['info_overlay'].get('username')
@@ -2,7 +2,7 @@ from __future__ import unicode_literals

import re

from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse,
    compat_urllib_request,
@@ -23,7 +23,7 @@ def _media_xml_tag(tag):
    return '{http://search.yahoo.com/mrss/}%s' % tag


class MTVServicesInfoExtractor(SubtitlesInfoExtractor):
class MTVServicesInfoExtractor(InfoExtractor):
    _MOBILE_TEMPLATE = None

    @staticmethod
@@ -95,25 +95,15 @@ class MTVServicesInfoExtractor(SubtitlesInfoExtractor):

    def _extract_subtitles(self, mdoc, mtvn_id):
        subtitles = {}
        FORMATS = {
            'scc': 'cea-608',
            'eia-608': 'cea-608',
            'xml': 'ttml',
        }
        subtitles_format = FORMATS.get(
            self._downloader.params.get('subtitlesformat'), 'ttml')
        for transcript in mdoc.findall('.//transcript'):
            if transcript.get('kind') != 'captions':
                continue
            lang = transcript.get('srclang')
            for typographic in transcript.findall('./typographic'):
                captions_format = typographic.get('format')
                if captions_format == subtitles_format:
                    subtitles[lang] = compat_str(typographic.get('src'))
                    break
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(mtvn_id, subtitles)
        return self.extract_subtitles(mtvn_id, subtitles)
            subtitles[lang] = [{
                'url': compat_str(typographic.get('src')),
                'ext': typographic.get('format')
            } for typographic in transcript.findall('./typographic')]
        return subtitles

    def _get_video_info(self, itemdoc):
        uri = itemdoc.find('guid').text
@@ -196,8 +186,6 @@ class MTVServicesInfoExtractor(SubtitlesInfoExtractor):
            webpage, 'mgid')

        videos_info = self._get_videos_info(mgid)
        if self._downloader.params.get('listsubtitles', False):
            return
        return videos_info

@@ -3,17 +3,13 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    unified_strdate,
)


class MusicVaultIE(InfoExtractor):
    _VALID_URL = r'https?://www\.musicvault\.com/(?P<uploader_id>[^/?#]*)/video/(?P<display_id>[^/?#]*)_(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.musicvault.com/the-allman-brothers-band/video/straight-from-the-heart_1010863.html',
        'md5': '2cdbb3ae75f7fb3519821507d2fb3c15',
        'md5': '3adcbdb3dcc02d647539e53f284ba171',
        'info_dict': {
            'id': '1010863',
            'ext': 'mp4',
@@ -22,9 +18,10 @@ class MusicVaultIE(InfoExtractor):
            'duration': 244,
            'uploader': 'The Allman Brothers Band',
            'thumbnail': 're:^https?://.*/thumbnail/.*',
            'upload_date': '19811216',
            'upload_date': '20131219',
            'location': 'Capitol Theatre (Passaic, NJ)',
            'description': 'Listen to The Allman Brothers Band perform Straight from the Heart at Capitol Theatre (Passaic, NJ) on Dec 16, 1981',
            'timestamp': int,
        }
    }

@@ -43,34 +40,24 @@ class MusicVaultIE(InfoExtractor):
            r'<h1.*?>(.*?)</h1>', data_div, 'uploader', fatal=False)
        title = self._html_search_regex(
            r'<h2.*?>(.*?)</h2>', data_div, 'title')
        upload_date = unified_strdate(self._html_search_regex(
            r'<h3.*?>(.*?)</h3>', data_div, 'uploader', fatal=False))
        location = self._html_search_regex(
            r'<h4.*?>(.*?)</h4>', data_div, 'location', fatal=False)

        duration = parse_duration(self._html_search_meta('duration', webpage))

        VIDEO_URL_TEMPLATE = 'http://cdnapi.kaltura.com/p/%(uid)s/sp/%(wid)s/playManifest/entryId/%(entry_id)s/format/url/protocol/http'
        kaltura_id = self._search_regex(
            r'<div id="video-detail-player" data-kaltura-id="([^"]+)"',
            webpage, 'kaltura ID')
        video_url = VIDEO_URL_TEMPLATE % {
            'entry_id': kaltura_id,
            'wid': self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid'),
            'uid': self._search_regex(r'uiconf_id/([0-9]+)/', webpage, 'uid'),
        }
        wid = self._search_regex(r'/wid/_([0-9]+)/', webpage, 'wid')

        return {
            'id': mobj.group('id'),
            'url': video_url,
            'ext': 'mp4',
            '_type': 'url_transparent',
            'url': 'kaltura:%s:%s' % (wid, kaltura_id),
            'ie_key': 'Kaltura',
            'display_id': display_id,
            'uploader_id': mobj.group('uploader_id'),
            'thumbnail': thumbnail,
            'description': self._html_search_meta('description', webpage),
            'upload_date': upload_date,
            'location': location,
            'title': title,
            'uploader': uploader,
            'duration': duration,
        }
38  youtube_dl/extractor/nationalgeographic.py  Normal file
@@ -0,0 +1,38 @@
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    smuggle_url,
    url_basename,
)


class NationalGeographicIE(InfoExtractor):
    _VALID_URL = r'http://video\.nationalgeographic\.com/video/.*?'

    _TEST = {
        'url': 'http://video.nationalgeographic.com/video/news/150210-news-crab-mating-vin?source=featuredvideo',
        'info_dict': {
            'id': '4DmDACA6Qtk_',
            'ext': 'flv',
            'title': 'Mating Crabs Busted by Sharks',
            'description': 'md5:16f25aeffdeba55aaa8ec37e093ad8b3',
        },
        'add_ie': ['ThePlatform'],
    }

    def _real_extract(self, url):
        name = url_basename(url)

        webpage = self._download_webpage(url, name)
        feed_url = self._search_regex(r'data-feed-url="([^"]+)"', webpage, 'feed url')
        guid = self._search_regex(r'data-video-guid="([^"]+)"', webpage, 'guid')

        feed = self._download_xml('%s?byGuid=%s' % (feed_url, guid), name)
        content = feed.find('.//{http://search.yahoo.com/mrss/}content')
        theplatform_id = url_basename(content.attrib.get('url'))

        return self.url_result(smuggle_url(
            'http://link.theplatform.com/s/ngs/%s?format=SMIL&formats=MPEG4&manifest=f4m' % theplatform_id,
            # For some reason, the normal links don't work and we must force the use of f4m
            {'force_smil_url': True}))
@@ -18,13 +18,13 @@ class NBCIE(InfoExtractor):

    _TESTS = [
        {
            'url': 'http://www.nbc.com/chicago-fire/video/i-am-a-firefighter/2734188',
            'url': 'http://www.nbc.com/the-tonight-show/segments/112966',
            # md5 checksum is not stable
            'info_dict': {
                'id': 'bTmnLCvIbaaH',
                'id': 'c9xnCo0YPOPH',
                'ext': 'flv',
                'title': 'I Am a Firefighter',
                'description': 'An emergency puts Dawson\'sf irefighter skills to the ultimate test in this four-part digital series.',
                'title': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
                'description': 'Jimmy gives out free scoops of his new "Tonight Dough" ice cream flavor by surprising customers at the Ben & Jerry\'s scoop shop.',
            },
        },
        {
@@ -29,6 +29,9 @@ class NetzkinoIE(InfoExtractor):
            'timestamp': 1344858571,
            'age_limit': 12,
        },
        'params': {
            'skip_download': 'Download only works from Germany',
        }
    }

    def _real_extract(self, url):
@@ -41,7 +41,7 @@ class NiconicoIE(InfoExtractor):
        },
    }

    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/((?:[a-z]{2})?[0-9]+)'
    _VALID_URL = r'https?://(?:www\.|secure\.)?nicovideo\.jp/watch/(?P<id>(?:[a-z]{2})?[0-9]+)'
    _NETRC_MACHINE = 'niconico'
    # Determine whether the downloader used authentication to download video
    _AUTHENTICATED = False
@@ -76,8 +76,7 @@ class NiconicoIE(InfoExtractor):
        return True

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        video_id = self._match_id(url)

        # Get video webpage. We are not actually interested in it, but need
        # the cookies in order to be able to download the info webpage
@@ -1,6 +1,5 @@
from __future__ import unicode_literals

from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
    fix_xml_ampersands,
@@ -12,7 +11,7 @@ from ..utils import (
)


class NPOBaseIE(SubtitlesInfoExtractor):
class NPOBaseIE(InfoExtractor):
    def _get_token(self, video_id):
        token_page = self._download_webpage(
            'http://ida.omroep.nl/npoplayer/i.js',
@@ -164,13 +163,10 @@ class NPOIE(NPOBaseIE):

        subtitles = {}
        if metadata.get('tt888') == 'ja':
            subtitles['nl'] = 'http://e.omroep.nl/tt888/%s' % video_id

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        subtitles = self.extract_subtitles(video_id, subtitles)
            subtitles['nl'] = [{
                'ext': 'vtt',
                'url': 'http://e.omroep.nl/tt888/%s' % video_id,
            }]

        return {
            'id': video_id,
@@ -223,7 +219,8 @@ class NPOLiveIE(NPOBaseIE):
        if streams:
            for stream in streams:
                stream_type = stream.get('type').lower()
                if stream_type == 'ss':
                # smooth streaming is not supported
                if stream_type in ['ss', 'ms']:
                    continue
                stream_info = self._download_json(
                    'http://ida.omroep.nl/aapi/?stream=%s&token=%s&type=jsonp'
@@ -246,6 +243,7 @@ class NPOLiveIE(NPOBaseIE):
                else:
                    formats.append({
                        'url': stream_url,
                        'preference': -10,
                    })

        self._sort_formats(formats)
@@ -4,13 +4,13 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    ExtractorError,
    float_or_none,
    parse_duration,
    unified_strdate,
)
from .subtitles import SubtitlesInfoExtractor


class NRKIE(InfoExtractor):
@@ -73,7 +73,7 @@ class NRKIE(InfoExtractor):
    }


class NRKTVIE(SubtitlesInfoExtractor):
class NRKTVIE(InfoExtractor):
    _VALID_URL = r'(?P<baseurl>http://tv\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'

    _TESTS = [
@@ -149,28 +149,29 @@ class NRKTVIE(SubtitlesInfoExtractor):
        }
    ]

    def _seconds2str(self, s):
        return '%02d:%02d:%02d.%03d' % (s / 3600, (s % 3600) / 60, s % 60, (s % 1) * 1000)

    def _debug_print(self, txt):
        if self._downloader.params.get('verbose', False):
            self.to_screen('[debug] %s' % txt)

    def _extract_captions(self, subtitlesurl, video_id, baseurl):
    def _get_subtitles(self, subtitlesurl, video_id, baseurl):
        url = "%s%s" % (baseurl, subtitlesurl)
        self._debug_print('%s: Subtitle url: %s' % (video_id, url))
        captions = self._download_xml(url, video_id, 'Downloading subtitles')
        captions = self._download_xml(
            url, video_id, 'Downloading subtitles',
            transform_source=lambda s: s.replace(r'<br />', '\r\n'))
        lang = captions.get('lang', 'no')
        ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/ns/ttml}'))
        srt = ''
        for pos, p in enumerate(ps):
            begin = parse_duration(p.get('begin'))
            duration = parse_duration(p.get('dur'))
            starttime = self._seconds2str(begin)
            endtime = self._seconds2str(begin + duration)
            text = '\n'.join(p.itertext())
            srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), starttime, endtime, text)
        return {lang: srt}
            starttime = self._subtitles_timecode(begin)
            endtime = self._subtitles_timecode(begin + duration)
            srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (compat_str(pos), starttime, endtime, p.text)
        return {lang: [
            {'ext': 'ttml', 'url': url},
            {'ext': 'srt', 'data': srt},
        ]}

    def _extract_f4m(self, manifest_url, video_id):
        return self._extract_f4m_formats(manifest_url + '?hdcore=3.1.1&plugin=aasp-3.1.1.69.124', video_id)
@@ -243,10 +244,7 @@ class NRKTVIE(SubtitlesInfoExtractor):
            webpage, 'subtitle URL', default=None)
        subtitles = None
        if subtitles_url:
            subtitles = self._extract_captions(subtitles_url, video_id, baseurl)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return
            subtitles = self.extract_subtitles(subtitles_url, video_id, baseurl)

        return {
            'id': video_id,
85  youtube_dl/extractor/odnoklassniki.py  Normal file
@@ -0,0 +1,85 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
unified_strdate,
|
||||
int_or_none,
|
||||
qualities,
|
||||
)
|
||||
|
||||
|
||||
class OdnoklassnikiIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:odnoklassniki|ok)\.ru/(?:video|web-api/video/moviePlayer)/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://ok.ru/video/20079905452',
|
||||
'md5': '8e24ad2da6f387948e7a7d44eb8668fe',
|
||||
'info_dict': {
|
||||
'id': '20079905452',
|
||||
'ext': 'mp4',
|
||||
'title': 'Культура меняет нас (прекрасный ролик!))',
|
||||
'duration': 100,
|
||||
'upload_date': '20141207',
|
||||
'uploader_id': '330537914540',
|
||||
'uploader': 'Виталий Добровольский',
|
||||
'like_count': int,
|
||||
'age_limit': 0,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
player = self._parse_json(
|
||||
self._search_regex(
|
||||
r"OKVideo\.start\(({.+?})\s*,\s*'VideoAutoplay_player'", webpage, 'player'),
|
||||
video_id)
|
||||
|
||||
metadata = self._parse_json(player['flashvars']['metadata'], video_id)
|
||||
|
||||
movie = metadata['movie']
|
||||
title = movie['title']
|
||||
thumbnail = movie.get('poster')
|
||||
duration = int_or_none(movie.get('duration'))
|
||||
|
||||
author = metadata.get('author', {})
|
||||
uploader_id = author.get('id')
|
||||
uploader = author.get('name')
|
||||
|
||||
upload_date = unified_strdate(self._html_search_meta(
|
||||
'ya:ovs:upload_date', webpage, 'upload date'))
|
||||
|
||||
age_limit = None
|
||||
adult = self._html_search_meta(
|
||||
'ya:ovs:adult', webpage, 'age limit')
|
||||
if adult:
|
||||
age_limit = 18 if adult == 'true' else 0
|
||||
|
||||
like_count = int_or_none(metadata.get('likeCount'))
|
||||
|
||||
quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
|
||||
|
||||
formats = [{
|
||||
'url': f['url'],
|
||||
'ext': 'mp4',
|
||||
'format_id': f['name'],
|
||||
'quality': quality(f['name']),
|
||||
} for f in metadata['videos']]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'duration': duration,
|
||||
'upload_date': upload_date,
|
||||
'uploader': uploader,
|
||||
'uploader_id': uploader_id,
|
||||
'like_count': like_count,
|
||||
'age_limit': age_limit,
|
||||
'formats': formats,
|
||||
}
|
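
Note: `qualities()` (from `..utils`) converts the preference-ordered tuple into a numeric sort key; roughly:

def qualities(quality_ids):
    # index in the preference order, -1 for unknown ids,
    # so 'hd' sorts above 'sd' and unrecognized names sort last
    def q(qid):
        try:
            return quality_ids.index(qid)
        except ValueError:
            return -1
    return q

quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))
assert quality('hd') == 4 and quality('4k') == -1
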
@@ -11,6 +11,11 @@ from ..utils import (
     HEADRequest,
     unified_strdate,
     ExtractorError,
+    strip_jsonp,
+    int_or_none,
+    float_or_none,
+    determine_ext,
+    remove_end,
 )


@@ -197,3 +202,92 @@ class ORFFM4IE(InfoExtractor):
         'description': data['subtitle'],
         'entries': entries
     }
+
+
+class ORFIPTVIE(InfoExtractor):
+    IE_NAME = 'orf:iptv'
+    IE_DESC = 'iptv.ORF.at'
+    _VALID_URL = r'http://iptv\.orf\.at/(?:#/)?stories/(?P<id>\d+)'
+
+    _TEST = {
+        'url': 'http://iptv.orf.at/stories/2267952',
+        'md5': '26ffa4bab6dbce1eee78bbc7021016cd',
+        'info_dict': {
+            'id': '339775',
+            'ext': 'flv',
+            'title': 'Kreml-Kritiker Nawalny wieder frei',
+            'description': 'md5:6f24e7f546d364dacd0e616a9e409236',
+            'duration': 84.729,
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'upload_date': '20150306',
+        },
+    }
+
+    def _real_extract(self, url):
+        story_id = self._match_id(url)
+
+        webpage = self._download_webpage(
+            'http://iptv.orf.at/stories/%s' % story_id, story_id)
+
+        video_id = self._search_regex(
+            r'data-video(?:id)?="(\d+)"', webpage, 'video id')
+
+        data = self._download_json(
+            'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id,
+            video_id)[0]
+
+        duration = float_or_none(data['duration'], 1000)
+
+        video = data['sources']['default']
+        load_balancer_url = video['loadBalancerUrl']
+        abr = int_or_none(video.get('audioBitrate'))
+        vbr = int_or_none(video.get('bitrate'))
+        fps = int_or_none(video.get('videoFps'))
+        width = int_or_none(video.get('videoWidth'))
+        height = int_or_none(video.get('videoHeight'))
+        thumbnail = video.get('preview')
+
+        rendition = self._download_json(
+            load_balancer_url, video_id, transform_source=strip_jsonp)
+
+        f = {
+            'abr': abr,
+            'vbr': vbr,
+            'fps': fps,
+            'width': width,
+            'height': height,
+        }
+
+        formats = []
+        for format_id, format_url in rendition['redirect'].items():
+            if format_id == 'rtmp':
+                ff = f.copy()
+                ff.update({
+                    'url': format_url,
+                    'format_id': format_id,
+                })
+                formats.append(ff)
+            elif determine_ext(format_url) == 'f4m':
+                formats.extend(self._extract_f4m_formats(
+                    format_url, video_id, f4m_id=format_id))
+            elif determine_ext(format_url) == 'm3u8':
+                formats.extend(self._extract_m3u8_formats(
+                    format_url, video_id, 'mp4', m3u8_id=format_id))
+            else:
+                continue
+        self._sort_formats(formats)
+
+        title = remove_end(self._og_search_title(webpage), ' - iptv.ORF.at')
+        description = self._og_search_description(webpage)
+        upload_date = unified_strdate(self._html_search_meta(
+            'dc.date', webpage, 'upload date'))
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'thumbnail': thumbnail,
+            'upload_date': upload_date,
+            'formats': formats,
+        }
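
Note: the load balancer URL answers with JSONP, which `transform_source=strip_jsonp` unwraps before JSON decoding. A simplified sketch of that utility (the callback name in the example is invented):

import re

def strip_jsonp(code):
    # reduce `someCallback({...});` to the bare JSON payload
    return re.sub(r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*$', r'\1', code)

print(strip_jsonp('redirectInfo({"redirect": {"rtmp": "rtmp://host/app"}});'))
# {"redirect": {"rtmp": "rtmp://host/app"}}
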
@@ -1,9 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals

-import json
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     js_to_json,
@@ -11,7 +8,7 @@ from ..utils import (


 class PatreonIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(.+)'
+    _VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(?P<id>[^&#]+)'
     _TESTS = [
         {
             'url': 'http://www.patreon.com/creation?hid=743933',
@@ -35,6 +32,23 @@ class PatreonIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*$',
             },
         },
+        {
+            'url': 'https://www.patreon.com/creation?hid=1682498',
+            'info_dict': {
+                'id': 'SU4fj_aEMVw',
+                'ext': 'mp4',
+                'title': 'I\'m on Patreon!',
+                'uploader': 'TraciJHines',
+                'thumbnail': 're:^https?://.*$',
+                'upload_date': '20150211',
+                'description': 'md5:c5a706b1f687817a3de09db1eb93acd4',
+                'uploader_id': 'TraciJHines',
+            },
+            'params': {
+                'noplaylist': True,
+                'skip_download': True,
+            }
+        },
     ]

     # Currently Patreon exposes download URL via hidden CSS, so login is not
@@ -65,26 +79,29 @@ class PatreonIE(InfoExtractor):
     '''

     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
         title = self._og_search_title(webpage).strip()

         attach_fn = self._html_search_regex(
             r'<div class="attach"><a target="_blank" href="([^"]+)">',
             webpage, 'attachment URL', default=None)
         embed = self._html_search_regex(
             r'<div id="watchCreation">\s*<iframe class="embedly-embed" src="([^"]+)"',
             webpage, 'embedded URL', default=None)

         if attach_fn is not None:
             video_url = 'http://www.patreon.com' + attach_fn
             thumbnail = self._og_search_thumbnail(webpage)
             uploader = self._html_search_regex(
                 r'<strong>(.*?)</strong> is creating', webpage, 'uploader')
         elif embed is not None:
             return self.url_result(embed)
         else:
-            playlist_js = self._search_regex(
+            playlist = self._parse_json(self._search_regex(
                 r'(?s)new\s+jPlayerPlaylist\(\s*\{\s*[^}]*},\s*(\[.*?,?\s*\])',
-                webpage, 'playlist JSON')
-            playlist_json = js_to_json(playlist_js)
-            playlist = json.loads(playlist_json)
+                webpage, 'playlist JSON'),
+                video_id, transform_source=js_to_json)
             data = playlist[0]
             video_url = self._proto_relative_url(data['mp3'])
             thumbnail = self._proto_relative_url(data.get('cover'))
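
Note: switching to `self._match_id(url)` is what forces the `_VALID_URL` change: the helper reads the named `id` group, and tightening `(.+)` to `(?P<id>[^&#]+)` also stops the id from swallowing extra query parameters. A minimal illustration:

import re

_VALID_URL = r'https?://(?:www\.)?patreon\.com/creation\?hid=(?P<id>[^&#]+)'

def match_id(url):
    # the core of what InfoExtractor._match_id does
    return re.match(_VALID_URL, url).group('id')

print(match_id('https://www.patreon.com/creation?hid=1682498&foo=bar'))
# 1682498 -- the old (.+) capture would have returned '1682498&foo=bar'
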
youtube_dl/extractor/pladform.py  (new file, 90 lines)
@@ -0,0 +1,90 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    xpath_text,
    qualities,
)


class PladformIE(InfoExtractor):
    _VALID_URL = r'''(?x)
            https?://
                (?:
                    (?:
                        out\.pladform\.ru/player|
                        static\.pladform\.ru/player\.swf
                    )
                    \?.*\bvideoid=|
                    video\.pladform\.ru/catalog/video/videoid/
                )
                (?P<id>\d+)
            '''
    _TESTS = [{
        # http://muz-tv.ru/kinozal/view/7400/
        'url': 'http://out.pladform.ru/player?pl=24822&videoid=100183293',
        'md5': '61f37b575dd27f1bb2e1854777fe31f4',
        'info_dict': {
            'id': '100183293',
            'ext': 'mp4',
            'title': 'Тайны перевала Дятлова • Тайна перевала Дятлова 1 серия 2 часть',
            'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 694,
            'age_limit': 0,
        },
    }, {
        'url': 'http://static.pladform.ru/player.swf?pl=21469&videoid=100183293&vkcid=0',
        'only_matching': True,
    }, {
        'url': 'http://video.pladform.ru/catalog/video/videoid/100183293/vkcid/0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_xml(
            'http://out.pladform.ru/getVideo?pl=1&videoid=%s' % video_id,
            video_id)

        if video.tag == 'error':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, video.text),
                expected=True)

        quality = qualities(('ld', 'sd', 'hd'))

        formats = [{
            'url': src.text,
            'format_id': src.get('quality'),
            'quality': quality(src.get('quality')),
        } for src in video.findall('./src')]
        self._sort_formats(formats)

        webpage = self._download_webpage(
            'http://video.pladform.ru/catalog/video/videoid/%s' % video_id,
            video_id)

        title = self._og_search_title(webpage, fatal=False) or xpath_text(
            video, './/title', 'title', fatal=True)
        description = self._search_regex(
            r'</h3>\s*<p>([^<]+)</p>', webpage, 'description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage) or xpath_text(
            video, './/cover', 'cover')

        duration = int_or_none(xpath_text(video, './/time', 'duration'))
        age_limit = int_or_none(xpath_text(video, './/age18', 'age limit'))

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
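
Note: `xpath_text` (from `..utils`) mirrors the regex search helpers for XML: the text of the first match, `None` when absent, and an error only with `fatal=True`. Roughly (the real helper raises ExtractorError; error type simplified here):

import xml.etree.ElementTree as etree

def xpath_text(node, xpath, name=None, fatal=False):
    # text of the first matching element, or None / an error when missing
    n = node.find(xpath)
    if n is None or n.text is None:
        if fatal:
            raise ValueError('Could not find XML element %s' % (name or xpath))
        return None
    return n.text

video = etree.fromstring('<video><time>694</time></video>')
print(xpath_text(video, './/time', 'duration'))  # 694
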
youtube_dl/extractor/playwire.py  (new file, 78 lines)
@@ -0,0 +1,78 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    xpath_text,
    float_or_none,
    int_or_none,
)


class PlaywireIE(InfoExtractor):
    _VALID_URL = r'https?://(?:config|cdn)\.playwire\.com(?:/v2)?/(?P<publisher_id>\d+)/(?:videos/v2|embed|config)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://config.playwire.com/14907/videos/v2/3353705/player.json',
        'md5': 'e6398701e3595888125729eaa2329ed9',
        'info_dict': {
            'id': '3353705',
            'ext': 'mp4',
            'title': 'S04_RM_UCL_Rus',
            'thumbnail': 're:^http://.*\.png$',
            'duration': 145.94,
        },
    }, {
        'url': 'http://cdn.playwire.com/11625/embed/85228.html',
        'only_matching': True,
    }, {
        'url': 'http://config.playwire.com/12421/videos/v2/3389892/zeus.json',
        'only_matching': True,
    }, {
        'url': 'http://cdn.playwire.com/v2/12342/config/1532636.json',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        publisher_id, video_id = mobj.group('publisher_id'), mobj.group('id')

        player = self._download_json(
            'http://config.playwire.com/%s/videos/v2/%s/zeus.json' % (publisher_id, video_id),
            video_id)

        title = player['settings']['title']
        duration = float_or_none(player.get('duration'), 1000)

        content = player['content']
        thumbnail = content.get('poster')
        src = content['media']['f4m']

        f4m = self._download_xml(src, video_id)
        base_url = xpath_text(f4m, './{http://ns.adobe.com/f4m/1.0}baseURL', 'base url', fatal=True)
        formats = []
        for media in f4m.findall('./{http://ns.adobe.com/f4m/1.0}media'):
            media_url = media.get('url')
            if not media_url:
                continue
            tbr = int_or_none(media.get('bitrate'))
            width = int_or_none(media.get('width'))
            height = int_or_none(media.get('height'))
            f = {
                'url': '%s/%s' % (base_url, media.attrib['url']),
                'tbr': tbr,
                'width': width,
                'height': height,
            }
            if not (tbr or width or height):
                f['quality'] = 1 if '-hd.' in media_url else 0
            formats.append(f)
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
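
Note: the zeus.json config points at an Adobe HDS (f4m) manifest, and the extractor needs only `<baseURL>` and the `<media>` attributes from it. A toy manifest showing the namespace-qualified ElementTree lookups (host and values invented):

import xml.etree.ElementTree as etree

F4M_NS = '{http://ns.adobe.com/f4m/1.0}'

f4m = etree.fromstring(
    '<manifest xmlns="http://ns.adobe.com/f4m/1.0">'
    '<baseURL>http://cdn.example.com/videos</baseURL>'
    '<media url="stream-hd.mp4" bitrate="2000" width="1280" height="720"/>'
    '</manifest>')

base_url = f4m.find('./%sbaseURL' % F4M_NS).text
for media in f4m.findall('./%smedia' % F4M_NS):
    # ElementTree requires the {namespace}tag form used above
    print('%s/%s' % (base_url, media.get('url')), media.get('bitrate'))
# http://cdn.example.com/videos/stream-hd.mp4 2000
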
@@ -56,7 +56,7 @@ class PornHubIE(InfoExtractor):

         video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
         video_uploader = self._html_search_regex(
-            r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|<span class="username)[^>]+>(.+?)<',
+            r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
             webpage, 'uploader', fatal=False)
         thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
         if thumbnail:
@@ -110,3 +110,33 @@ class PornHubIE(InfoExtractor):
             'formats': formats,
             'age_limit': 18,
         }
+
+
+class PornHubPlaylistIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'http://www.pornhub.com/playlist/6201671',
+        'info_dict': {
+            'id': '6201671',
+            'title': 'P0p4',
+        },
+        'playlist_mincount': 35,
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, playlist_id)
+
+        entries = [
+            self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
+            for video_url in set(re.findall('href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
+        ]
+
+        playlist = self._parse_json(
+            self._search_regex(
+                r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
+            playlist_id)
+
+        return self.playlist_result(
+            entries, playlist_id, playlist.get('title'), playlist.get('description'))
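
Note: in `PornHubPlaylistIE`, wrapping `re.findall` in `set()` keeps each video from being queued once per link occurrence (thumbnail and title typically link to the same target). With toy markup:

import re

webpage = (
    '<a href="/view_video.php?viewkey=111">thumb</a>'
    '<a href="/view_video.php?viewkey=111">title</a>'
    '<a href="view_video.php?viewkey=222">thumb</a>')

urls = set(re.findall(r'href="/?(view_video\.php\?viewkey=\d+[^"]*)"', webpage))
print(sorted(urls))
# ['view_video.php?viewkey=111', 'view_video.php?viewkey=222']
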
youtube_dl/extractor/puls4.py  (new file, 88 lines)
@@ -0,0 +1,88 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unified_strdate,
    int_or_none,
)


class Puls4IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?puls4\.com/video/[^/]+/play/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.puls4.com/video/pro-und-contra/play/2716816',
        'md5': '49f6a6629747eeec43cef6a46b5df81d',
        'info_dict': {
            'id': '2716816',
            'ext': 'mp4',
            'title': 'Pro und Contra vom 23.02.2015',
            'description': 'md5:293e44634d9477a67122489994675db6',
            'duration': 2989,
            'upload_date': '20150224',
            'uploader': 'PULS_4',
        },
        'skip': 'Only works from Germany',
    }, {
        'url': 'http://www.puls4.com/video/kult-spielfilme/play/1298106',
        'md5': '6a48316c8903ece8dab9b9a7bf7a59ec',
        'info_dict': {
            'id': '1298106',
            'ext': 'mp4',
            'title': 'Lucky Fritz',
        },
        'skip': 'Only works from Germany',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        error_message = self._html_search_regex(
            r'<div class="message-error">(.+?)</div>',
            webpage, 'error message', default=None)
        if error_message:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error_message), expected=True)

        real_url = self._html_search_regex(
            r'\"fsk-button\".+?href=\"([^"]+)',
            webpage, 'fsk_button', default=None)
        if real_url:
            webpage = self._download_webpage(real_url, video_id)

        player = self._search_regex(
            r'p4_video_player(?:_iframe)?\("video_\d+_container"\s*,(.+?)\);\s*\}',
            webpage, 'player')

        player_json = self._parse_json(
            '[%s]' % player, video_id,
            transform_source=lambda s: s.replace('undefined,', ''))

        formats = None
        result = None

        for v in player_json:
            if isinstance(v, list) and not formats:
                formats = [{
                    'url': f['url'],
                    'format': 'hd' if f.get('hd') else 'sd',
                    'width': int_or_none(f.get('size_x')),
                    'height': int_or_none(f.get('size_y')),
                    'tbr': int_or_none(f.get('bitrate')),
                } for f in v]
                self._sort_formats(formats)
            elif isinstance(v, dict) and not result:
                result = {
                    'id': video_id,
                    'title': v['videopartname'].strip(),
                    'description': v.get('videotitle'),
                    'duration': int_or_none(v.get('videoduration') or v.get('episodeduration')),
                    'upload_date': unified_strdate(v.get('clipreleasetime')),
                    'uploader': v.get('channel'),
                }

        result['formats'] = formats

        return result
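
Note: the captured `p4_video_player(...)` arguments are a JavaScript argument list, not one object, so the extractor wraps them in `[%s]` and strips literal `undefined,` tokens to obtain valid JSON; the loop then takes the first list as the format table and the first dict as the metadata. Sketch with invented arguments:

import json

captured = '{"videopartname": "Pro und Contra "}, undefined, [{"url": "http://example.invalid/v.mp4", "hd": true}]'
player_json = json.loads('[%s]' % captured.replace('undefined,', ''))
# [{'videopartname': 'Pro und Contra '}, [{'url': 'http://example.invalid/v.mp4', 'hd': True}]]
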
youtube_dl/extractor/r7.py  (new file, 88 lines)
@@ -0,0 +1,88 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    js_to_json,
    unescapeHTML,
    int_or_none,
)


class R7IE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://
                        (?:
                            (?:[a-zA-Z]+)\.r7\.com(?:/[^/]+)+/idmedia/|
                            noticias\.r7\.com(?:/[^/]+)+/[^/]+-|
                            player\.r7\.com/video/i/
                        )
                        (?P<id>[\da-f]{24})
                    '''
    _TESTS = [{
        'url': 'http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html',
        'md5': '403c4e393617e8e8ddc748978ee8efde',
        'info_dict': {
            'id': '54e7050b0cf2ff57e0279389',
            'ext': 'mp4',
            'title': 'Policiais humilham suspeito à beira da morte: "Morre com dignidade"',
            'thumbnail': 're:^https?://.*\.jpg$',
            'duration': 98,
            'like_count': int,
            'view_count': int,
        },
    }, {
        'url': 'http://esportes.r7.com/videos/cigano-manda-recado-aos-fas/idmedia/4e176727b51a048ee6646a1b.html',
        'only_matching': True,
    }, {
        'url': 'http://noticias.r7.com/record-news/video/representante-do-instituto-sou-da-paz-fala-sobre-fim-do-estatuto-do-desarmamento-5480fc580cf2285b117f438d/',
        'only_matching': True,
    }, {
        'url': 'http://player.r7.com/video/i/54e7050b0cf2ff57e0279389?play=true&video=http://vsh.r7.com/54e7050b0cf2ff57e0279389/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-ATOS_copy.mp4&linkCallback=http://videos.r7.com/policiais-humilham-suspeito-a-beira-da-morte-morre-com-dignidade-/idmedia/54e7050b0cf2ff57e0279389.html&thumbnail=http://vtb.r7.com/ER7_RE_BG_MORTE_JOVENS_570kbps_2015-02-2009f17818-cc82-4c8f-86dc-89a66934e633-thumb.jpg&idCategory=192&share=true&layout=full&full=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://player.r7.com/video/i/%s' % video_id, video_id)

        item = self._parse_json(js_to_json(self._search_regex(
            r'(?s)var\s+item\s*=\s*({.+?});', webpage, 'player')), video_id)

        title = unescapeHTML(item['title'])
        thumbnail = item.get('init', {}).get('thumbUri')
        duration = None

        statistics = item.get('statistics', {})
        like_count = int_or_none(statistics.get('likes'))
        view_count = int_or_none(statistics.get('views'))

        formats = []
        for format_key, format_dict in item['playlist'][0].items():
            src = format_dict.get('src')
            if not src:
                continue
            format_id = format_dict.get('format') or format_key
            if duration is None:
                duration = format_dict.get('duration')
            if '.f4m' in src:
                formats.extend(self._extract_f4m_formats(src, video_id, preference=-1))
            elif src.endswith('.m3u8'):
                formats.extend(self._extract_m3u8_formats(src, video_id, 'mp4', preference=-2))
            else:
                formats.append({
                    'url': src,
                    'format_id': format_id,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'like_count': like_count,
            'view_count': view_count,
            'formats': formats,
        }
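
Note: `js_to_json` carries this extractor: the page embeds `var item = {...}` as a JavaScript object literal (unquoted keys, single-quoted strings), which the utility rewrites into strict JSON before parsing. For example (input invented):

import json
from youtube_dl.utils import js_to_json

item_js = "{title: 'Morre com dignidade', views: 1234}"
print(json.loads(js_to_json(item_js)))
# {'title': 'Morre com dignidade', 'views': 1234}
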
Some files were not shown because too many files have changed in this diff.