Compare commits: 2012.02.27...2013.07.11 (1262 commits)
(Commit list: the page tabulated all 1262 commits in this range, from fd87ff26b9 through 0f6e296a8e, but only the abbreviated SHA column survived extraction; the Author, Date, and message columns are empty and are omitted here.)
.gitignore (vendored) | 17 additions

@@ -1,3 +1,20 @@
 *.pyc
 *.pyo
 *~
+*.DS_Store
+wine-py2exe/
+py2exe.log
+*.kate-swp
+build/
+dist/
+MANIFEST
+README.txt
+youtube-dl.1
+youtube-dl.bash-completion
+youtube-dl
+youtube-dl.exe
+youtube-dl.tar.gz
+.coverage
+cover/
+updates_key.pem
+*.egg-info
.travis.yml (new file) | 15 additions

@@ -0,0 +1,15 @@
+language: python
+python:
+  - "2.6"
+  - "2.7"
+  - "3.3"
+script: nosetests test --verbose
+notifications:
+  email:
+    - filippo.valsorda@gmail.com
+    - phihag@phihag.de
+    - jaime.marquinez.ferrandiz+travis@gmail.com
+# irc:
+#   channels:
+#     - "irc.freenode.org#youtube-dl"
+#   skip_join: true
CHANGELOG (new file) | 14 additions

@@ -0,0 +1,14 @@
+2013.01.02 Codename: GIULIA
+
+* Add support for ComedyCentral clips <nto>
+* Corrected Vimeo description fetching <Nick Daniels>
+* Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
+* --verbose offers more environment info
+* New info_dict field: uploader_id
+* New updates system, with signature checking
+* New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
+* Fixed IEs: BlipTv
+* Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
+* Simplified IEs and test code
+* Various (Python 3 and other) fixes
+* Revamped and expanded tests
LATEST_VERSION | 2 changes (the file header was lost in extraction; the filename is inferred from the Makefile's update-latest target, which writes this file)

@@ -1 +1 @@
-2012.02.27
+2012.12.99
LICENSE (new file) | 24 additions

@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to <http://unlicense.org/>
MANIFEST.in (new file) | 5 additions

@@ -0,0 +1,5 @@
+include README.md
+include test/*.py
+include test/*.json
+include youtube-dl.bash-completion
+include youtube-dl.1
Makefile | 91 changes (the +/− gutter was lost in extraction; markers below are reconstructed from the hunk header and the two versions of the file)

@@ -1,23 +1,78 @@
-default: update
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
 
-update: compile update-readme update-latest
+clean:
+	rm -rf youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
 
-update-latest:
-	./youtube-dl.dev --version > LATEST_VERSION
+cleanall: clean
+	rm -f youtube-dl youtube-dl.exe
 
-update-readme:
-	@options=$$(COLUMNS=80 ./youtube-dl.dev --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/### \1/') && \
-		header=$$(sed -e '/.*## OPTIONS/,$$ d' README.md) && \
-		footer=$$(sed -e '1,/.*## FAQ/ d' README.md) && \
-		echo "$${header}" > README.md && \
-		echo >> README.md && \
-		echo '## OPTIONS' >> README.md && \
-		echo "$${options}" >> README.md&& \
-		echo >> README.md && \
-		echo '## FAQ' >> README.md && \
-		echo "$${footer}" >> README.md
+PREFIX=/usr/local
+BINDIR=$(PREFIX)/bin
+MANDIR=$(PREFIX)/man
+PYTHON=/usr/bin/env python
 
-compile:
-	cp youtube_dl/__init__.py youtube-dl
+# set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
+ifeq ($(PREFIX),/usr)
+	SYSCONFDIR=/etc
+else
+	ifeq ($(PREFIX),/usr/local)
+		SYSCONFDIR=/etc
+	else
+		SYSCONFDIR=$(PREFIX)/etc
+	endif
+endif
 
-.PHONY: default compile update update-latest update-readme
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+	install -d $(DESTDIR)$(BINDIR)
+	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
+	install -d $(DESTDIR)$(MANDIR)/man1
+	install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
+	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
+	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+
+test:
+	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
+	nosetests --verbose test
+
+tar: youtube-dl.tar.gz
+
+.PHONY: all clean install test tar bash-completion pypi-files
+
+pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
+
+youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
+	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
+	zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
+	echo '#!$(PYTHON)' > youtube-dl
+	cat youtube-dl.zip >> youtube-dl
+	rm youtube-dl.zip
+	chmod a+x youtube-dl
+
+README.md: youtube_dl/*.py youtube_dl/*/*.py
+	COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py
+
+README.txt: README.md
+	pandoc -f markdown -t plain README.md -o README.txt
+
+youtube-dl.1: README.md
+	pandoc -s -f markdown -t man README.md -o youtube-dl.1
+
+youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
+	python devscripts/bash-completion.py
+
+bash-completion: youtube-dl.bash-completion
+
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
+		--exclude '*.DS_Store' \
+		--exclude '*.kate-swp' \
+		--exclude '*.pyc' \
+		--exclude '*.pyo' \
+		--exclude '*~' \
+		--exclude '__pycache' \
+		--exclude '.git' \
+		-- \
+		bin devscripts test youtube_dl \
+		CHANGELOG LICENSE README.md README.txt \
+		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
+		youtube-dl
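The new `youtube-dl` target above explains why the distributed `youtube-dl` is a single file: the whole `youtube_dl` package is zipped with `__main__.py` at the archive root, and a shebang line is prepended (the "What is this binary file?" FAQ entry in the README below describes the result). A rough Python sketch of that packing step, assuming the same file layout as the Makefile:

    # Sketch of what `make youtube-dl` does with zip(1): package + shebang prefix.
    import os
    import stat
    import zipfile

    def build_executable_zip(pkg_dir='youtube_dl', out='youtube-dl'):
        zip_path = out + '.zip'
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
            for root, _, files in os.walk(pkg_dir):
                for name in files:
                    if name.endswith('.py'):
                        path = os.path.join(root, name)
                        zf.write(path, path)  # keep the youtube_dl/... layout
            # like `zip --junk-paths`: __main__.py also sits at the archive root
            zf.write(os.path.join(pkg_dir, '__main__.py'), '__main__.py')
        with open(out, 'wb') as f:
            f.write(b'#!/usr/bin/env python\n')  # interpreter line before the zip data
            with open(zip_path, 'rb') as zdata:
                f.write(zdata.read())
        os.remove(zip_path)
        # chmod a+x
        os.chmod(out, os.stat(out).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

    build_executable_zip()

This works because zip archives are indexed from the end of the file, so the prepended interpreter line does not break Python's zipimport.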
README.md | 297 changes (the +/− gutter was lost in extraction; markers below are reconstructed from the hunk headers and the two versions of the file)

@@ -1,104 +1,207 @@
-# youtube-dl
+% YOUTUBE-DL(1)
 
-## USAGE
-youtube-dl [options] url [url...]
+# NAME
+youtube-dl - download videos from youtube.com or other video platforms
 
-## DESCRIPTION
+# SYNOPSIS
+**youtube-dl** [OPTIONS] URL [URL...]
+
+# DESCRIPTION
 **youtube-dl** is a small command-line program to download videos from
 YouTube.com and a few more sites. It requires the Python interpreter, version
-2.x (x being at least 5), and it is not platform specific. It should work in
-your Unix box, in Windows or in Mac OS X. It is released to the public domain,
+2.6, 2.7, or 3.3+, and it is not platform specific. It should work on
+your Unix box, on Windows or on Mac OS X. It is released to the public domain,
 which means you can modify it, redistribute it or use it however you like.
 
-## OPTIONS
-    -h, --help               print this help text and exit
-    --version                print program version and exit
-    -U, --update             update this program to latest version
-    -i, --ignore-errors      continue on download errors
-    -r, --rate-limit LIMIT   download rate limit (e.g. 50k or 44.6m)
-    -R, --retries RETRIES    number of retries (default is 10)
-    --dump-user-agent        display the current browser identification
-    --list-extractors        List all supported extractors and the URLs they
-                             would handle
+# OPTIONS
+    -h, --help                 print this help text and exit
+    --version                  print program version and exit
+    -U, --update               update this program to latest version
+    -i, --ignore-errors        continue on download errors
+    --dump-user-agent          display the current browser identification
+    --user-agent UA            specify a custom user agent
+    --referer REF              specify a custom referer, use if the video access
+                               is restricted to one domain
+    --list-extractors          List all supported extractors and the URLs they
+                               would handle
+    --extractor-descriptions   Output descriptions of all supported extractors
+    --proxy URL                Use the specified HTTP/HTTPS proxy
+    --no-check-certificate     Suppress HTTPS certificate validation.
 
-### Video Selection:
-    --playlist-start NUMBER  playlist video to start at (default is 1)
-    --playlist-end NUMBER    playlist video to end at (default is last)
-    --match-title REGEX      download only matching titles (regex or caseless
-                             sub-string)
-    --reject-title REGEX     skip download for matching titles (regex or
-                             caseless sub-string)
-    --max-downloads NUMBER   Abort after downloading NUMBER files
+## Video Selection:
+    --playlist-start NUMBER    playlist video to start at (default is 1)
+    --playlist-end NUMBER      playlist video to end at (default is last)
+    --match-title REGEX        download only matching titles (regex or caseless
+                               sub-string)
+    --reject-title REGEX       skip download for matching titles (regex or
+                               caseless sub-string)
+    --max-downloads NUMBER     Abort after downloading NUMBER files
+    --min-filesize SIZE        Do not download any videos smaller than SIZE
+                               (e.g. 50k or 44.6m)
+    --max-filesize SIZE        Do not download any videos larger than SIZE (e.g.
+                               50k or 44.6m)
+    --date DATE                download only videos uploaded in this date
+    --datebefore DATE          download only videos uploaded before this date
+    --dateafter DATE           download only videos uploaded after this date
 
-### Filesystem Options:
-    -t, --title              use title in file name
-    -l, --literal            use literal title in file name
-    -A, --auto-number        number downloaded files starting from 00000
-    -o, --output TEMPLATE    output filename template. Use %(stitle)s to get the
-                             title, %(uploader)s for the uploader name,
-                             %(autonumber)s to get an automatically incremented
-                             number, %(ext)s for the filename extension,
-                             %(upload_date)s for the upload date (YYYYMMDD), and
-                             %% for a literal percent. Use - to output to
-                             stdout.
-    -a, --batch-file FILE    file containing URLs to download ('-' for stdin)
-    -w, --no-overwrites      do not overwrite files
-    -c, --continue           resume partially downloaded files
-    --no-continue            do not resume partially downloaded files (restart
-                             from beginning)
-    --cookies FILE           file to read cookies from and dump cookie jar in
-    --no-part                do not use .part files
-    --no-mtime               do not use the Last-modified header to set the file
-                             modification time
-    --write-description      write video description to a .description file
-    --write-info-json        write video metadata to a .info.json file
+## Download Options:
+    -r, --rate-limit LIMIT     maximum download rate (e.g. 50k or 44.6m)
+    -R, --retries RETRIES      number of retries (default is 10)
+    --buffer-size SIZE         size of download buffer (e.g. 1024 or 16k)
+                               (default is 1024)
+    --no-resize-buffer         do not automatically adjust the buffer size. By
+                               default, the buffer size is automatically resized
+                               from an initial value of SIZE.
 
-### Verbosity / Simulation Options:
-    -q, --quiet              activates quiet mode
-    -s, --simulate           do not download the video and do not write anything
-                             to disk
-    --skip-download          do not download the video
-    -g, --get-url            simulate, quiet but print URL
-    -e, --get-title          simulate, quiet but print title
-    --get-thumbnail          simulate, quiet but print thumbnail URL
-    --get-description        simulate, quiet but print video description
-    --get-filename           simulate, quiet but print output filename
-    --get-format             simulate, quiet but print output format
-    --no-progress            do not print progress bar
-    --console-title          display progress in console titlebar
-    -v, --verbose            print various debugging information
+## Filesystem Options:
+    -t, --title                use title in file name (default)
+    --id                       use only video ID in file name
+    -l, --literal              [deprecated] alias of --title
+    -A, --auto-number          number downloaded files starting from 00000
+    -o, --output TEMPLATE      output filename template. Use %(title)s to get
+                               the title, %(uploader)s for the uploader name,
+                               %(uploader_id)s for the uploader nickname if
+                               different, %(autonumber)s to get an automatically
+                               incremented number, %(ext)s for the filename
+                               extension, %(upload_date)s for the upload date
+                               (YYYYMMDD), %(extractor)s for the provider
+                               (youtube, metacafe, etc), %(id)s for the video id
+                               , %(playlist)s for the playlist the video is in,
+                               %(playlist_index)s for the position in the
+                               playlist and %% for a literal percent. Use - to
+                               output to stdout. Can also be used to download to
+                               a different directory, for example with -o '/my/d
+                               ownloads/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+    --autonumber-size NUMBER   Specifies the number of digits in %(autonumber)s
+                               when it is present in output filename template or
+                               --autonumber option is given
+    --restrict-filenames       Restrict filenames to only ASCII characters, and
+                               avoid "&" and spaces in filenames
+    -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
+    -w, --no-overwrites        do not overwrite files
+    -c, --continue             resume partially downloaded files
+    --no-continue              do not resume partially downloaded files (restart
+                               from beginning)
+    --cookies FILE             file to read cookies from and dump cookie jar in
+    --no-part                  do not use .part files
+    --no-mtime                 do not use the Last-modified header to set the
+                               file modification time
+    --write-description        write video description to a .description file
+    --write-info-json          write video metadata to a .info.json file
+    --write-thumbnail          write thumbnail image to disk
 
-### Video Format Options:
-    -f, --format FORMAT      video format code
-    --all-formats            download all available video formats
-    --prefer-free-formats    prefer free video formats unless a specific one is
-                             requested
-    --max-quality FORMAT     highest quality format to download
-    -F, --list-formats       list all available formats (currently youtube only)
+## Verbosity / Simulation Options:
+    -q, --quiet                activates quiet mode
+    -s, --simulate             do not download the video and do not write
+                               anything to disk
+    --skip-download            do not download the video
+    -g, --get-url              simulate, quiet but print URL
+    -e, --get-title            simulate, quiet but print title
+    --get-id                   simulate, quiet but print id
+    --get-thumbnail            simulate, quiet but print thumbnail URL
+    --get-description          simulate, quiet but print video description
+    --get-filename             simulate, quiet but print output filename
+    --get-format               simulate, quiet but print output format
+    --newline                  output progress bar as new lines
+    --no-progress              do not print progress bar
+    --console-title            display progress in console titlebar
+    -v, --verbose              print various debugging information
+    --dump-intermediate-pages  print downloaded pages to debug problems(very
+                               verbose)
 
-### Authentication Options:
-    -u, --username USERNAME  account username
-    -p, --password PASSWORD  account password
-    -n, --netrc              use .netrc authentication data
+## Video Format Options:
+    -f, --format FORMAT        video format code, specifiy the order of
+                               preference using slashes: "-f 22/17/18"
+    --all-formats              download all available video formats
+    --prefer-free-formats      prefer free video formats unless a specific one
+                               is requested
+    --max-quality FORMAT       highest quality format to download
+    -F, --list-formats         list all available formats (currently youtube
+                               only)
+    --write-sub                write subtitle file (currently youtube only)
+    --write-auto-sub           write automatic subtitle file (currently youtube
+                               only)
+    --only-sub                 [deprecated] alias of --skip-download
+    --all-subs                 downloads all the available subtitles of the
+                               video (currently youtube only)
+    --list-subs                lists all available subtitles for the video
+                               (currently youtube only)
+    --sub-format FORMAT        subtitle format [srt/sbv/vtt] (default=srt)
+                               (currently youtube only)
+    --sub-lang LANG            language of the subtitles to download (optional)
+                               use IETF language tags like 'en'
 
-### Post-processing Options:
-    --extract-audio          convert video files to audio-only files (requires
-                             ffmpeg and ffprobe)
-    --audio-format FORMAT    "best", "aac", "vorbis", "mp3", "m4a", or "wav";
-                             best by default
-    --audio-quality QUALITY  ffmpeg audio bitrate specification, 128k by default
-    -k, --keep-video         keeps the video file on disk after the post-
-                             processing; the video is erased by default
+## Authentication Options:
+    -u, --username USERNAME    account username
+    -p, --password PASSWORD    account password
+    -n, --netrc                use .netrc authentication data
+    --video-password PASSWORD  video password (vimeo only)
 
-## FAQ
+## Post-processing Options:
+    -x, --extract-audio        convert video files to audio-only files (requires
+                               ffmpeg or avconv and ffprobe or avprobe)
+    --audio-format FORMAT      "best", "aac", "vorbis", "mp3", "m4a", "opus", or
+                               "wav"; best by default
+    --audio-quality QUALITY    ffmpeg/avconv audio quality specification, insert
+                               a value between 0 (better) and 9 (worse) for VBR
+                               or a specific bitrate like 128K (default 5)
+    --recode-video FORMAT      Encode the video to another format if necessary
+                               (currently supported: mp4|flv|ogg|webm)
+    -k, --keep-video           keeps the video file on disk after the post-
+                               processing; the video is erased by default
+    --no-post-overwrites       do not overwrite post-processed files; the post-
+                               processed files are overwritten by default
+
+# CONFIGURATION
+
+You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`.
+
+# OUTPUT TEMPLATE
+
+The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
+
+ - `id`: The sequence will be replaced by the video identifier.
+ - `url`: The sequence will be replaced by the video URL.
+ - `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
+ - `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
+ - `title`: The sequence will be replaced by the video title.
+ - `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
+ - `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
+ - `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
+ - `playlist`: The name or the id of the playlist that contains the video.
+ - `playlist_index`: The index of the video in the playlist, a five-digit number.
+
+The current default template is `%(title)s-%(id)s.%(ext)s`.
+
+In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
+
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
+    youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
+    $ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
+    youtube-dl_test_video_.mp4 # A simple file name
+
+# VIDEO SELECTION
+
+Videos can be filtered by their upload date using the options `--date`, `--datebefore` or `--dateafter`, they accept dates in two formats:
+
+ - Absolute dates: Dates in the format `YYYYMMDD`.
+ - Relative dates: Dates in the format `(now|today)[+-][0-9](day|week|month|year)(s)?`
+
+Examples:
+
+    $ youtube-dl --dateafter now-6months #will only download the videos uploaded in the last 6 months
+    $ youtube-dl --date 19700101 #will only download the videos uploaded in January 1, 1970
+    $ youtube-dl --dateafter 20000101 --datebefore 20100101 #will only download the videos uploaded between 2000 and 2010
+# FAQ
+
 ### Can you please put the -b option back?
 
-Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the -b option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you''re interested in. In that case, simply request it with the -f option and youtube-dl will try to download it.
+Most people asking this question are not aware that youtube-dl now defaults to downloading the highest available quality as reported by YouTube, which will be 1080p or 720p in some cases, so you no longer need the `-b` option. For some specific videos, maybe YouTube does not report them to be available in a specific high quality format you're interested in. In that case, simply request it with the `-f` option and youtube-dl will try to download it.
 
 ### I get HTTP error 402 when trying to download a video. What's this?
 
-Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We''re [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restart youtube-dl.
+Apparently YouTube requires you to pass a CAPTCHA test if you download too much. We're [considering to provide a way to let you solve the CAPTCHA](https://github.com/rg3/youtube-dl/issues/154), but at the moment, your best course of action is pointing a webbrowser to the youtube URL, solving the CAPTCHA, and restart youtube-dl.
 
 ### I have downloaded a video but how can I play it?
 
@@ -106,25 +209,49 @@ Once the video is fully downloaded, use any video player, such as [vlc](http://w
 
 ### The links provided by youtube-dl -g are not working anymore
 
-The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file.
+The URLs youtube-dl outputs require the downloader to have the correct cookies. Use the `--cookies` option to write the required cookies into a file, and advise your downloader to read cookies from that file. Some sites also require a common user agent to be used, use `--dump-user-agent` to see the one in use by youtube-dl.
 
 ### ERROR: no fmt_url_map or conn information found in video info
 
 youtube has switched to a new video info format in July 2011 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
 
-## COPYRIGHT
+### ERROR: unable to download video ###
+
+youtube requires an additional signature since September 2012 which is not supported by old versions of youtube-dl. You can update youtube-dl with `sudo youtube-dl --update`.
+
+### SyntaxError: Non-ASCII character ###
+
+The error
+
+    File "youtube-dl", line 2
+    SyntaxError: Non-ASCII character '\x93' ...
+
+means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.
+
+### What is this binary file? Where has the code gone?
+
+Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
+
+### The exe throws a *Runtime error from Visual C++*
+
+To run the exe you need to install first the [Microsoft Visual C++ 2008 Redistributable Package](http://www.microsoft.com/en-us/download/details.aspx?id=29).
+
+# COPYRIGHT
 
 youtube-dl is released into the public domain by the copyright holders.
 
 This README file was originally written by Daniel Bolton (<https://github.com/dbbolton>) and is likewise released into the public domain.
 
-## BUGS
+# BUGS
 
 Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>
 
+Please include:
+
+* Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
+* If possible re-run the command with `--verbose`, and include the full output, it is really helpful to us.
+* The output of `youtube-dl --version`
+* The output of `python --version`
+* The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).
+
+For discussions, join us in the irc channel #youtube-dl on freenode.
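The OUTPUT TEMPLATE section added above is plain Python %-style string interpolation against the metadata dictionary an extractor returns. A small illustration (the dictionary values here are made up for the example):

    # Each %(NAME)s sequence pulls NAME out of the info dict.
    template = '%(uploader)s/%(title)s-%(id)s.%(ext)s'
    info = {  # hypothetical extractor output
        'id': 'BaW_jenozKc',
        'title': 'youtube-dl test video',
        'uploader': 'some uploader',
        'ext': 'mp4',
    }
    print(template % info)
    # -> some uploader/youtube-dl test video-BaW_jenozKc.mp4

A name missing from the dictionary raises `KeyError`, which is why only the documented fields are safe to use with `-o`.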
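The relative-date grammar documented in VIDEO SELECTION, `(now|today)[+-][0-9](day|week|month|year)(s)?`, boils down to a regex plus a `timedelta`. A sketch of how such a spec can be resolved (the helper name is illustrative, not youtube-dl's internal API; months and years are approximated as 30 and 365 days):

    import datetime
    import re

    def resolve_date(spec):
        """Turn 'now-6months' or '19700101' into a datetime.date."""
        m = re.match(r'(now|today)([+-])(\d+)(day|week|month|year)s?$', spec)
        if m is None:
            # fall back to the absolute YYYYMMDD form
            return datetime.datetime.strptime(spec, '%Y%m%d').date()
        sign = -1 if m.group(2) == '-' else 1
        days_per_unit = {'day': 1, 'week': 7, 'month': 30, 'year': 365}
        delta = sign * int(m.group(3)) * days_per_unit[m.group(4)]
        return datetime.date.today() + datetime.timedelta(days=delta)

    print(resolve_date('now-6months'))  # roughly six months ago
    print(resolve_date('19700101'))     # 1970-01-01

`--dateafter` and `--datebefore` are then simple comparisons of each video's upload date against the resolved bounds.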
bin/youtube-dl (new executable file) | 6 additions

@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+
+import youtube_dl
+
+if __name__ == '__main__':
+    youtube_dl.main()
devscripts/SizeOfImage.patch (new binary file; contents not shown)
devscripts/SizeOfImage_w.patch (new binary file; contents not shown)
devscripts/bash-completion.in (new file) | 14 additions

@@ -0,0 +1,14 @@
+__youtube-dl()
+{
+    local cur prev opts
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    opts="{{flags}}"
+
+    if [[ ${cur} == * ]] ; then
+        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+        return 0
+    fi
+}
+
+complete -F __youtube-dl youtube-dl
devscripts/bash-completion.py (new executable file) | 26 additions

@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+
+BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
+BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
+
+def build_completion(opt_parser):
+    opts_flag = []
+    for group in opt_parser.option_groups:
+        for option in group.option_list:
+            #for every long flag
+            opts_flag.append(option.get_opt_string())
+    with open(BASH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+    with open(BASH_COMPLETION_FILE, "w") as f:
+        #just using the special char
+        filled_template = template.replace("{{flags}}", " ".join(opts_flag))
+        f.write(filled_template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
devscripts/gh-pages/add-version.py (new executable file) | 33 additions

@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+
+import json
+import sys
+import hashlib
+import urllib.request
+
+if len(sys.argv) <= 1:
+    print('Specify the version number as parameter')
+    sys.exit()
+version = sys.argv[1]
+
+with open('update/LATEST_VERSION', 'w') as f:
+    f.write(version)
+
+versions_info = json.load(open('update/versions.json'))
+if 'signature' in versions_info:
+    del versions_info['signature']
+
+new_version = {}
+
+filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
+for key, filename in filenames.items():
+    print('Downloading and checksumming %s...' % filename)
+    url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
+    data = urllib.request.urlopen(url).read()
+    sha256sum = hashlib.sha256(data).hexdigest()
+    new_version[key] = (url, sha256sum)
+
+versions_info['versions'][version] = new_version
+versions_info['latest'] = version
+
+json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
devscripts/gh-pages/generate-download.py (new executable file) | 32 additions

@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import hashlib
+import shutil
+import subprocess
+import tempfile
+import urllib.request
+import json
+
+versions_info = json.load(open('update/versions.json'))
+version = versions_info['latest']
+URL = versions_info['versions'][version]['bin'][0]
+
+data = urllib.request.urlopen(URL).read()
+
+# Read template page
+with open('download.html.in', 'r', encoding='utf-8') as tmplf:
+    template = tmplf.read()
+
+md5sum = hashlib.md5(data).hexdigest()
+sha1sum = hashlib.sha1(data).hexdigest()
+sha256sum = hashlib.sha256(data).hexdigest()
+template = template.replace('@PROGRAM_VERSION@', version)
+template = template.replace('@PROGRAM_URL@', URL)
+template = template.replace('@PROGRAM_MD5SUM@', md5sum)
+template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
+template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
+template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
+template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
+template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
+template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
+with open('download.html', 'w', encoding='utf-8') as dlf:
+    dlf.write(template)
devscripts/gh-pages/sign-versions.py (new executable file) | 32 additions

@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+
+import rsa
+import json
+from binascii import hexlify
+
+try:
+    input = raw_input
+except NameError:
+    pass
+
+versions_info = json.load(open('update/versions.json'))
+if 'signature' in versions_info:
+    del versions_info['signature']
+
+print('Enter the PKCS1 private key, followed by a blank line:')
+privkey = b''
+while True:
+    try:
+        line = input()
+    except EOFError:
+        break
+    if line == '':
+        break
+    privkey += line.encode('ascii') + b'\n'
+privkey = rsa.PrivateKey.load_pkcs1(privkey)
+
+signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
+print('signature: ' + signature)
+
+versions_info['signature'] = signature
+json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
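sign-versions.py produces the `signature` field that the client-side updater is expected to check before trusting `update/versions.json` (the "new updates system, with signature checking" from the CHANGELOG above). A sketch of the matching verification step with the same `rsa` module; the key loading here is illustrative, as the real updater ships its public key differently:

    import json
    import rsa
    from binascii import unhexlify

    def verify_versions(versions_path, pubkey_pem):
        with open(versions_path) as f:
            versions_info = json.load(f)
        signature = unhexlify(versions_info.pop('signature'))
        # Serialize exactly as sign-versions.py did: sorted keys, no signature field.
        payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')
        pubkey = rsa.PublicKey.load_pkcs1(pubkey_pem)
        rsa.verify(payload, signature, pubkey)  # raises rsa.VerificationError on mismatch
        return versions_info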
devscripts/gh-pages/update-copyright.py (new executable file) | 21 additions

@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import with_statement
+
+import datetime
+import glob
+import io  # For Python 2 compatibilty
+import os
+import re
+
+year = str(datetime.datetime.now().year)
+for fn in glob.glob('*.html*'):
+    with io.open(fn, encoding='utf-8') as f:
+        content = f.read()
+    newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
+    if content != newc:
+        tmpFn = fn + '.part'
+        with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
+            outf.write(newc)
+        os.rename(tmpFn, fn)
57
devscripts/gh-pages/update-feed.py
Executable file
57
devscripts/gh-pages/update-feed.py
Executable file
@@ -0,0 +1,57 @@
#!/usr/bin/env python3

import datetime
import textwrap
import json

atom_template = textwrap.dedent("""\
    <?xml version='1.0' encoding='utf-8'?>
    <atom:feed xmlns:atom="http://www.w3.org/2005/Atom">
        <atom:title>youtube-dl releases</atom:title>
        <atom:id>youtube-dl-updates-feed</atom:id>
        <atom:updated>@TIMESTAMP@</atom:updated>
        @ENTRIES@
    </atom:feed>""")

entry_template = textwrap.dedent("""
    <atom:entry>
        <atom:id>youtube-dl-@VERSION@</atom:id>
        <atom:title>New version @VERSION@</atom:title>
        <atom:link href="http://rg3.github.io/youtube-dl" />
        <atom:content type="xhtml">
            <div xmlns="http://www.w3.org/1999/xhtml">
                Downloads available at <a href="http://youtube-dl.org/downloads/@VERSION@/">http://youtube-dl.org/downloads/@VERSION@/</a>
            </div>
        </atom:content>
        <atom:author>
            <atom:name>The youtube-dl maintainers</atom:name>
        </atom:author>
        <atom:updated>@TIMESTAMP@</atom:updated>
    </atom:entry>
    """)

now = datetime.datetime.now()
now_iso = now.isoformat()

atom_template = atom_template.replace('@TIMESTAMP@', now_iso)

entries = []

versions_info = json.load(open('update/versions.json'))
versions = list(versions_info['versions'].keys())
versions.sort()

for v in versions:
    entry = entry_template.replace('@TIMESTAMP@', v.replace('.', '-'))
    entry = entry.replace('@VERSION@', v)
    entries.append(entry)

entries_str = textwrap.indent(''.join(entries), '\t')
atom_template = atom_template.replace('@ENTRIES@', entries_str)

with open('update/releases.atom', 'w', encoding='utf-8') as atom_file:
    atom_file.write(atom_template)
20
devscripts/make_readme.py
Executable file
@@ -0,0 +1,20 @@
import sys
import re

README_FILE = 'README.md'
helptext = sys.stdin.read()

with open(README_FILE) as f:
    oldreadme = f.read()

header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]

options = helptext[helptext.index('  General Options:') + 19:]
options = re.sub(r'^  (\w.+)$', r'## \1', options, flags=re.M)
options = '# OPTIONS\n' + options + '\n'

with open(README_FILE, 'w') as f:
    f.write(header)
    f.write(options)
    f.write(footer)
0
devscripts/posix-locale.sh
Normal file → Executable file
104
devscripts/release.sh
Executable file
@@ -0,0 +1,104 @@
#!/bin/bash

# IMPORTANT: the following assumptions are made
# * the GH repo is on the origin remote
# * the gh-pages branch is named gh-pages locally
# * the git config user.signingkey is properly set

# You will need
# pip install coverage nose rsa

# TODO
# release notes
# make hash on local files

set -e

skip_tests=false
if [ "$1" = '--skip-test' ]; then
    skip_tests=true
    shift
fi

if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1"
if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi

/bin/echo -e "\n### First of all, testing..."
make cleanall
if $skip_tests ; then
    echo 'SKIPPING TESTS'
else
    nosetests --verbose --with-coverage --cover-package=youtube_dl --cover-html test --stop || exit 1
fi

/bin/echo -e "\n### Changing version in version.py..."
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py

/bin/echo -e "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
make README.md
git add CHANGELOG README.md youtube_dl/version.py
git commit -m "release $version"

/bin/echo -e "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"

/bin/echo -e "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
    wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "build/$version"
mv youtube-dl youtube-dl.exe "build/$version"
mv youtube-dl.tar.gz "build/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
(cd build/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd build/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd build/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd build/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe

/bin/echo -e "\n### Signing and uploading the new binaries to youtube-dl.org..."
for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
scp -r "build/$version" ytdl@yt-dl.org:html/tmp/
ssh ytdl@yt-dl.org "mv html/tmp/$version html/downloads/"
ssh ytdl@yt-dl.org "sh html/update_latest.sh $version"

/bin/echo -e "\n### Now switching to gh-pages..."
git clone --branch gh-pages --single-branch . build/gh-pages
ROOT=$(pwd)
(
    set -e
    ORIGIN_URL=$(git config --get remote.origin.url)
    cd build/gh-pages
    "$ROOT/devscripts/gh-pages/add-version.py" $version
    "$ROOT/devscripts/gh-pages/update-feed.py"
    "$ROOT/devscripts/gh-pages/sign-versions.py" < "$ROOT/updates_key.pem"
    "$ROOT/devscripts/gh-pages/generate-download.py"
    "$ROOT/devscripts/gh-pages/update-copyright.py"
    git add *.html *.html.in update
    git commit -m "release $version"
    git show HEAD
    read -p "Is it good, can I push? (y/n) " -n 1
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
    echo
    git push "$ROOT" gh-pages
    git push "$ORIGIN_URL" gh-pages
)
rm -rf build

make pypi-files
echo "Uploading to PyPI ..."
python setup.py sdist upload
make clean

/bin/echo -e "\n### DONE!"
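release.sh invokes devscripts/gh-pages/add-version.py, which is not part of this hunk. Judging from what the other gh-pages scripts consume, it presumably records the new version's download URLs and SHA-256 digests in update/versions.json; a speculative sketch under that assumption (the URL layout mirrors the yt-dl.org paths used above):

#!/usr/bin/env python3
# Speculative reconstruction only -- the real add-version.py is not shown here.
import hashlib
import json
import sys
import urllib.request

version = sys.argv[1]
URL_BASE = 'http://youtube-dl.org/downloads/%s/' % version  # assumed layout

def url_and_sha256(name):
    url = URL_BASE + name
    digest = hashlib.sha256(urllib.request.urlopen(url).read()).hexdigest()
    return [url, digest]

with open('update/versions.json') as f:
    versions_info = json.load(f)

versions_info['versions'][version] = {
    'bin': url_and_sha256('youtube-dl'),
    'exe': url_and_sha256('youtube-dl.exe'),
    'tar': url_and_sha256('youtube-dl-%s.tar.gz' % version),
}
versions_info['latest'] = version

with open('update/versions.json', 'w') as f:
    json.dump(versions_info, f, indent=4, sort_keys=True)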
40
devscripts/transition_helper.py
Normal file
@@ -0,0 +1,40 @@
#!/usr/bin/env python

import sys, os

try:
    import urllib.request as compat_urllib_request
except ImportError:  # Python 2
    import urllib2 as compat_urllib_request

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')

try:
    raw_input()
except NameError:  # Python 3
    input()

filename = sys.argv[0]

API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

try:
    urlh = compat_urllib_request.urlopen(BIN_URL)
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

try:
    with open(filename, 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
12
devscripts/transition_helper_exe/setup.py
Normal file
@@ -0,0 +1,12 @@
from distutils.core import setup
import py2exe

py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe']
}

setup(console=['youtube-dl.py'], options={"py2exe": py2exe_options}, zipfile=None)
102
devscripts/transition_helper_exe/youtube-dl.py
Normal file
@@ -0,0 +1,102 @@
#!/usr/bin/env python

import sys, os
import urllib2
import json, hashlib

def rsa_verify(message, signature, key):
    from struct import pack
    from hashlib import sha256
    from sys import version_info
    def b(x):
        if version_info[0] == 2: return x
        else: return x.encode('latin1')
    assert(type(message) == type(b('')))
    block_size = 0
    n = key[0]
    while n:
        block_size += 1
        n >>= 8
    signature = pow(int(signature, 16), key[1], key[0])
    raw_bytes = []
    while signature:
        raw_bytes.insert(0, pack("B", signature & 0xFF))
        signature >>= 8
    signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
    if signature[0:2] != b('\x00\x01'): return False
    signature = signature[2:]
    if not b('\x00') in signature: return False
    signature = signature[signature.index(b('\x00'))+1:]
    if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
    signature = signature[19:]
    if signature != sha256(message).digest(): return False
    return True

sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')

raw_input()

filename = sys.argv[0]

UPDATE_URL = "http://rg3.github.io/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)

if not os.access(filename, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % filename)

exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
    sys.exit('ERROR: no write permissions on %s' % directory)

try:
    versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
    versions_info = json.loads(versions_info)
except:
    sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
    sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
    sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')

version = versions_info['versions'][versions_info['latest']]

try:
    urlh = urllib2.urlopen(version['exe'][0])
    newcontent = urlh.read()
    urlh.close()
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to download latest version')

newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
    sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')

try:
    with open(exe + '.new', 'wb') as outf:
        outf.write(newcontent)
except (IOError, OSError) as err:
    sys.exit(u'ERROR: unable to write the new version')

try:
    bat = os.path.join(directory, 'youtube-dl-updater.bat')
    b = open(bat, 'w')
    b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" % (exe, exe, bat))
    b.close()

    os.startfile(bat)
except (IOError, OSError) as err:
    sys.exit('ERROR: unable to overwrite current version')

sys.stderr.write(u'Done! Now you can run youtube-dl.\n')
56
devscripts/wine-py2exe.sh
Executable file
@@ -0,0 +1,56 @@
#!/bin/bash

# Run this with a setup.py that works from the current directory as its
# parameter (i.e. no os.chdir()).
# It will run twice; the first run will crash.

set -e

SCRIPT_DIR="$( cd "$( dirname "$0" )" && pwd )"

if [ ! -d wine-py2exe ]; then

    sudo apt-get install wine1.3 axel bsdiff

    mkdir wine-py2exe
    cd wine-py2exe
    export WINEPREFIX=`pwd`

    axel -a "http://www.python.org/ftp/python/2.7/python-2.7.msi"
    axel -a "http://downloads.sourceforge.net/project/py2exe/py2exe/0.6.9/py2exe-0.6.9.win32-py2.7.exe"
    #axel -a "http://winetricks.org/winetricks"

    # http://appdb.winehq.org/objectManager.php?sClass=version&iId=21957
    echo "Follow python setup on screen"
    wine msiexec /i python-2.7.msi

    echo "Follow py2exe setup on screen"
    wine py2exe-0.6.9.win32-py2.7.exe

    #echo "Follow Microsoft Visual C++ 2008 Redistributable Package setup on screen"
    #bash winetricks vcrun2008

    rm py2exe-0.6.9.win32-py2.7.exe
    rm python-2.7.msi
    #rm winetricks

    # http://bugs.winehq.org/show_bug.cgi?id=3591
    mv drive_c/Python27/Lib/site-packages/py2exe/run.exe drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run.exe "$SCRIPT_DIR/SizeOfImage.patch"
    mv drive_c/Python27/Lib/site-packages/py2exe/run_w.exe drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup
    bspatch drive_c/Python27/Lib/site-packages/py2exe/run_w.exe.backup drive_c/Python27/Lib/site-packages/py2exe/run_w.exe "$SCRIPT_DIR/SizeOfImage_w.patch"

    cd -

else

    export WINEPREFIX="$( cd wine-py2exe && pwd )"

fi

wine "C:\\Python27\\python.exe" "$1" py2exe > "py2exe.log" 2>&1 || true
echo '# Copying python27.dll' >> "py2exe.log"
cp "$WINEPREFIX/drive_c/windows/system32/python27.dll" build/bdist.win32/winexe/bundle-2.7/
wine "C:\\Python27\\python.exe" "$1" py2exe >> "py2exe.log" 2>&1
83
devscripts/youtube_genalgo.py
Normal file
@@ -0,0 +1,83 @@
#!/usr/bin/env python

# Generate youtube signature algorithm from test cases

import sys

tests = [
    # 88
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<",
     "J:|}][{=+-_)(*&;%$#@>MNBVCXZASDFGH^KLPOIUYTREWQ0987654321mnbvcxzasdfghrklpoiuytej"),
    # 87
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$^&*()_-+={[]}|:;?/>.<",
     "!?;:|}][{=+-_)(*&^$#@/MNBVCXZASqFGHJKLPOIUYTREWQ0987654321mnbvcxzasdfghjklpoiuytr"),
    # 86 - vfl_ymO4Z 2013/06/27
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[|};?/>.<",
     "ertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!/#$%^&*()_-+={[|};?@"),
    # 85
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?/>.<",
     "{>/?;}[.=+-_)(*&^%$#@!MqBVCXZASDFwHJKLPOIUYTREWQ0987654321mnbvcxzasdfghjklpoiuytr"),
    # 84
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?>.<",
     "<.>?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWe098765432rmnbvcxzasdfghjklpoiuyt1"),
    # 83 - vflcaqGO8 2013/07/11
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!#$%^&*()_+={[};?/>.<",
     "urty8ioplkjhgfdsazxcvbqm1234567S90QWERTYUIOPLKJHGFDnAZXCVBNM!#$%^&*()_+={[};?/>.<"),
    # 82
    ("qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKHGFDSAZXCVBNM!@#$%^&*(-+={[};?/>.<",
     "Q>/?;}[{=+-(*<^%$#@!MNBVCXZASDFGHKLPOIUY8REWT0q&7654321mnbvcxzasdfghjklpoiuytrew9"),
]

def find_matching(wrong, right):
    idxs = [wrong.index(c) for c in right]
    return compress(idxs)

def compress(idxs):
    def _genslice(start, end, step):
        starts = '' if start == 0 else str(start)
        ends = ':%d' % (end+step)
        steps = '' if step == 1 else (':%d' % step)
        return 's[%s%s%s]' % (starts, ends, steps)

    step = None
    for i, prev in zip(idxs[1:], idxs[:-1]):
        if step is not None:
            if i - prev == step:
                continue
            yield _genslice(start, prev, step)
            step = None
            continue
        if i - prev in [-1, 1]:
            step = i - prev
            start = prev
            continue
        else:
            yield 's[%d]' % prev
    if step is None:
        yield 's[%d]' % i
    else:
        yield _genslice(start, i, step)

def _assert_compress(inp, exp):
    res = list(compress(inp))
    if res != exp:
        print('Got %r, expected %r' % (res, exp))
        assert res == exp
_assert_compress([0,2,4,6], ['s[0]', 's[2]', 's[4]', 's[6]'])
_assert_compress([0,1,2,4,6,7], ['s[:3]', 's[4]', 's[6:8]'])
_assert_compress([8,0,1,2,4,7,6,9], ['s[8]', 's[:3]', 's[4]', 's[7:5:-1]', 's[9]'])

def gen(wrong, right, indent):
    code = ' + '.join(find_matching(wrong, right))
    return 'if len(s) == %d:\n%s    return %s\n' % (len(wrong), indent, code)

def genall(tests):
    indent = ' ' * 8
    return indent + (indent + 'el').join(gen(wrong, right, indent) for wrong, right in tests)

def main():
    print(genall(tests))

if __name__ == '__main__':
    main()
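For context, genall() prints an if/elif chain meant to be pasted into the YouTube extractor's _decrypt_signature, with the slice expressions coming from compress(). Roughly this shape (the indices below are made up for illustration, not a real algorithm):

# Illustrative output shape only; real bodies are derived from the test cases.
def _decrypt_signature(s):
    if len(s) == 88:
        return s[4] + s[81:8:-1] + s[1]
    elif len(s) == 87:
        return s[:15] + s[16] + s[15] + s[17:]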
86
setup.py
Normal file
@@ -0,0 +1,86 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function

import pkg_resources
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

try:
    # This will create an exe that needs Microsoft Visual C++ 2008
    # Redistributable Package
    import py2exe
except ImportError:
    if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
        print("Cannot import py2exe", file=sys.stderr)
        exit(1)

py2exe_options = {
    "bundle_files": 1,
    "compressed": 1,
    "optimize": 2,
    "dist_dir": '.',
    "dll_excludes": ['w9xpopen.exe'],
}

py2exe_console = [{
    "script": "./youtube_dl/__main__.py",
    "dest_base": "youtube-dl",
}]

py2exe_params = {
    'console': py2exe_console,
    'options': {"py2exe": py2exe_options},
    'zipfile': None
}

if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
    params = py2exe_params
else:
    params = {
        'scripts': ['bin/youtube-dl'],
        'data_files': [  # Installing system-wide would require sudo...
            ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
            ('share/doc/youtube_dl', ['README.txt']),
            ('share/man/man1/', ['youtube-dl.1'])
        ]
    }

# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
             'youtube_dl/version.py', 'exec'))

setup(
    name='youtube_dl',
    version=__version__,
    description='YouTube video downloader',
    long_description='Small command-line program to download videos from'
                     ' YouTube.com and other video sites.',
    url='https://github.com/rg3/youtube-dl',
    author='Ricardo Garcia',
    maintainer='Philipp Hagemeister',
    maintainer_email='phihag@phihag.de',
    packages=['youtube_dl', 'youtube_dl.extractor'],

    # Provokes warning on most systems (why?!)
    # test_suite = 'nose.collector',
    # test_requires = ['nosetest'],

    classifiers=[
        "Topic :: Multimedia :: Video",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Console",
        "License :: Public Domain",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3"
    ],

    **params
)
44
test/helper.py
Normal file
@@ -0,0 +1,44 @@
import io
import json
import os.path

import youtube_dl.extractor
from youtube_dl import YoutubeDL, YoutubeDLHandler
from youtube_dl.utils import (
    compat_cookiejar,
    compat_urllib_request,
)

# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)

PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
    parameters = json.load(pf)

class FakeYDL(YoutubeDL):
    def __init__(self):
        self.result = []
        # Different instances of the downloader can't share the same dictionary:
        # some tests set the "sublang" parameter, which would break the md5 checks.
        self.params = dict(parameters)
    def to_screen(self, s):
        print(s)
    def trouble(self, s, tb=None):
        raise Exception(s)
    def download(self, x):
        self.result.append(x)

def get_testcases():
    for ie in youtube_dl.extractor.gen_extractors():
        t = getattr(ie, '_TEST', None)
        if t:
            t['name'] = type(ie).__name__[:-len('IE')]
            yield t
        for t in getattr(ie, '_TESTS', []):
            t['name'] = type(ie).__name__[:-len('IE')]
            yield t
test/parameters.json
Normal file
44
test/parameters.json
Normal file
@@ -0,0 +1,44 @@
|
{
    "consoletitle": false,
    "continuedl": true,
    "forcedescription": false,
    "forcefilename": false,
    "forceformat": false,
    "forcethumbnail": false,
    "forcetitle": false,
    "forceurl": false,
    "format": null,
    "format_limit": null,
    "ignoreerrors": false,
    "listformats": null,
    "logtostderr": false,
    "matchtitle": null,
    "max_downloads": null,
    "nooverwrites": false,
    "nopart": false,
    "noprogress": false,
    "outtmpl": "%(id)s.%(ext)s",
    "password": null,
    "playlistend": -1,
    "playliststart": 1,
    "prefer_free_formats": false,
    "quiet": false,
    "ratelimit": null,
    "rejecttitle": null,
    "retries": 10,
    "simulate": false,
    "skip_download": false,
    "subtitleslang": null,
    "subtitlesformat": "srt",
    "test": true,
    "updatetime": true,
    "usenetrc": false,
    "username": null,
    "verbose": true,
    "writedescription": false,
    "writeinfojson": true,
    "writesubtitles": false,
    "onlysubtitles": false,
    "allsubtitles": false,
    "listsubtitles": false
}
77
test/test_all_urls.py
Normal file
@@ -0,0 +1,77 @@
#!/usr/bin/env python

import sys
import unittest

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.extractor import YoutubeIE, YoutubePlaylistIE, YoutubeChannelIE, JustinTVIE, gen_extractors
from helper import get_testcases

class TestAllURLsMatching(unittest.TestCase):
    def test_youtube_playlist_matching(self):
        self.assertTrue(YoutubePlaylistIE.suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'UUBABnxM4Ar9ten8Mdjj1j0Q'))  # 585
        self.assertTrue(YoutubePlaylistIE.suitable(u'PL63F0C78739B09958'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC'))
        self.assertTrue(YoutubePlaylistIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668
        self.assertFalse(YoutubePlaylistIE.suitable(u'PLtS2H6bU1M'))

    def test_youtube_matching(self):
        self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
        self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012'))  # 668

    def test_youtube_channel_matching(self):
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM'))
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM?feature=gb_ch_rec'))
        self.assertTrue(YoutubeChannelIE.suitable('https://www.youtube.com/channel/HCtnHdj3df7iM/videos'))

    def test_justin_tv_channelid_matching(self):
        self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"www.justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"www.twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv/"))
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/"))

    def test_justintv_videoid_matching(self):
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/b/328087483"))

    def test_justin_tv_chapterid_matching(self):
        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))

    def test_youtube_extract(self):
        self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
        self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')

    def test_no_duplicates(self):
        ies = gen_extractors()
        for tc in get_testcases():
            url = tc['url']
            for ie in ies:
                if type(ie).__name__ in ['GenericIE', tc['name'] + 'IE']:
                    self.assertTrue(ie.suitable(url), '%s should match URL %r' % (type(ie).__name__, url))
                else:
                    self.assertFalse(ie.suitable(url), '%s should not match URL %r' % (type(ie).__name__, url))

    def test_keywords(self):
        ies = gen_extractors()
        matching_ies = lambda url: [ie.IE_NAME for ie in ies
                                    if ie.suitable(url) and ie.IE_NAME != 'generic']
        self.assertEqual(matching_ies(':ytsubs'), ['youtube:subscriptions'])
        self.assertEqual(matching_ies(':ytsubscriptions'), ['youtube:subscriptions'])
        self.assertEqual(matching_ies(':thedailyshow'), ['ComedyCentral'])
        self.assertEqual(matching_ies(':tds'), ['ComedyCentral'])
        self.assertEqual(matching_ies(':colbertreport'), ['ComedyCentral'])
        self.assertEqual(matching_ies(':cr'), ['ComedyCentral'])


if __name__ == '__main__':
    unittest.main()
@@ -1,29 +0,0 @@
# -*- coding: utf-8 -*-

# Various small unit tests

import os,sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))

import youtube_dl

def test_simplify_title():
    assert youtube_dl._simplify_title(u'abc') == u'abc'
    assert youtube_dl._simplify_title(u'abc_d-e') == u'abc_d-e'

    assert youtube_dl._simplify_title(u'123') == u'123'

    assert u'/' not in youtube_dl._simplify_title(u'abc/de')
    assert u'abc' in youtube_dl._simplify_title(u'abc/de')
    assert u'de' in youtube_dl._simplify_title(u'abc/de')
    assert u'/' not in youtube_dl._simplify_title(u'abc/de///')

    assert u'\\' not in youtube_dl._simplify_title(u'abc\\de')
    assert u'abc' in youtube_dl._simplify_title(u'abc\\de')
    assert u'de' in youtube_dl._simplify_title(u'abc\\de')

    assert youtube_dl._simplify_title(u'ä') == u'ä'
    assert youtube_dl._simplify_title(u'кириллица') == u'кириллица'

    # Strip underlines
    assert youtube_dl._simplify_title(u'\'a_') == u'a'
169
test/test_download.py
Normal file
@@ -0,0 +1,169 @@
#!/usr/bin/env python

import errno
import hashlib
import io
import os
import json
import unittest
import sys
import socket
import binascii

# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl.YoutubeDL
from youtube_dl.utils import *

PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")

RETRIES = 3

# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
socket.setdefaulttimeout(10)

def _try_rm(filename):
    """ Remove a file if it exists """
    try:
        os.remove(filename)
    except OSError as ose:
        if ose.errno != errno.ENOENT:
            raise

md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()

class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        super(YoutubeDL, self).__init__(*args, **kwargs)
    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)
    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)

def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

from helper import get_testcases
defs = get_testcases()

with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
    parameters = json.load(pf)


class TestDownload(unittest.TestCase):
    maxDiff = None
    def setUp(self):
        self.parameters = parameters
        self.defs = defs

### Dynamically generate tests
def generator(test_case):

    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])
        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
        if not ie._WORKING:
            print_skipping('IE marked as not _WORKING')
            return
        if 'playlist' not in test_case and not test_case['file']:
            print_skipping('No output file specified')
            return
        if 'skip' in test_case:
            print_skipping(test_case['skip'])
            return

        params = self.parameters.copy()
        params.update(test_case.get('params', {}))

        ydl = YoutubeDL(params)
        ydl.add_default_info_extractors()
        finished_hook_called = set()
        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.fd.add_progress_hook(_hook)

        test_cases = test_case.get('playlist', [test_case])
        for tc in test_cases:
            _try_rm(tc['file'])
            _try_rm(tc['file'] + '.part')
            _try_rm(tc['file'] + '.info.json')
        try:
            for retry in range(1, RETRIES + 1):
                try:
                    ydl.download([test_case['url']])
                except (DownloadError, ExtractorError) as err:
                    if retry == RETRIES: raise

                    # Check that the exception is a network-related one;
                    # anything else is re-raised immediately
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
                        raise

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(retry))
                else:
                    break

            for tc in test_cases:
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc['file']), msg='Missing file ' + tc['file'])
                    self.assertTrue(tc['file'] in finished_hook_called)
                self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
                if 'md5' in tc:
                    md5_for_file = _file_md5(tc['file'])
                    self.assertEqual(md5_for_file, tc['md5'])
                with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                for (info_field, expected) in tc.get('info_dict', {}).items():
                    if isinstance(expected, compat_str) and expected.startswith('md5:'):
                        self.assertEqual(expected, 'md5:' + md5(info_dict.get(info_field)))
                    else:
                        got = info_dict.get(info_field)
                        self.assertEqual(
                            expected, got,
                            u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))

                # If checkable fields are missing from the test case, print the info_dict
                test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
                    for key, value in info_dict.items()
                    if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
                if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
                    sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=2) + u'\n')

                # Check for the presence of mandatory fields
                for key in ('id', 'url', 'title', 'ext'):
                    self.assertTrue(key in info_dict.keys() and info_dict[key])
        finally:
            for tc in test_cases:
                _try_rm(tc['file'])
                _try_rm(tc['file'] + '.part')
                _try_rm(tc['file'] + '.info.json')

    return test_template

### And add them to TestDownload
for n, test_case in enumerate(defs):
    test_method = generator(test_case)
    tname = 'test_' + str(test_case['name'])
    i = 1
    while hasattr(TestDownload, tname):
        tname = 'test_' + str(test_case['name']) + '_' + str(i)
        i += 1
    test_method.__name__ = tname
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


if __name__ == '__main__':
    unittest.main()
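The generator()/setattr loop above is the stock pattern for parameterizing unittest cases at import time; a stripped-down sketch of just that mechanism (all names here are illustrative):

import unittest

class TestGenerated(unittest.TestCase):
    pass

def make_test(case):
    # The factory freezes `case` in a closure; defining test_template directly
    # inside the loop would leave every method bound to the last case.
    def test_template(self):
        self.assertEqual(case, case.strip())
    return test_template

for n, case in enumerate(['first', 'second']):
    method = make_test(case)
    method.__name__ = 'test_generated_%d' % n
    setattr(TestGenerated, method.__name__, method)
    del method  # as in the diff: drop the stray module-level name

if __name__ == '__main__':
    unittest.main()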
26
test/test_execution.py
Normal file
@@ -0,0 +1,26 @@
import unittest

import sys
import os
import subprocess

rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

try:
    _DEV_NULL = subprocess.DEVNULL
except AttributeError:
    _DEV_NULL = open(os.devnull, 'wb')

class TestExecution(unittest.TestCase):
    def test_import(self):
        subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)

    def test_module_exec(self):
        if sys.version_info >= (2,7):  # Python 2.6 doesn't support package execution
            subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)

    def test_main_exec(self):
        subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)

if __name__ == '__main__':
    unittest.main()
131
test/test_utils.py
Normal file
@@ -0,0 +1,131 @@
#!/usr/bin/env python

# Various small unit tests

import sys
import unittest
import xml.etree.ElementTree

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

#from youtube_dl.utils import htmlentity_transform
from youtube_dl.utils import timeconvert
from youtube_dl.utils import sanitize_filename
from youtube_dl.utils import unescapeHTML
from youtube_dl.utils import orderedSet
from youtube_dl.utils import DateRange
from youtube_dl.utils import unified_strdate
from youtube_dl.utils import find_xpath_attr

if sys.version_info < (3, 0):
    _compat_str = lambda b: b.decode('unicode-escape')
else:
    _compat_str = lambda s: s


class TestUtil(unittest.TestCase):
    def test_timeconvert(self):
        self.assertTrue(timeconvert('') is None)
        self.assertTrue(timeconvert('bougrg') is None)

    def test_sanitize_filename(self):
        self.assertEqual(sanitize_filename('abc'), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')

        self.assertEqual(sanitize_filename('123'), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de'))
        self.assertFalse('/' in sanitize_filename('abc/de///'))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
        self.assertEqual('yes no', sanitize_filename('yes? no'))
        self.assertEqual('this - that', sanitize_filename('this: that'))

        self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
        aumlaut = _compat_str('\xe4')
        self.assertEqual(sanitize_filename(aumlaut), aumlaut)
        tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
        self.assertEqual(sanitize_filename(tests), tests)

        forbidden = '"\0\\/'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc))

    def test_sanitize_filename_restricted(self):
        self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
        self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')

        self.assertEqual(sanitize_filename('123', restricted=True), '123')

        self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
        self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))

        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '')  # No empty filename

        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

        # Handle a common case more neatly
        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
        # .. but make sure the file name is never empty
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
        self.assertTrue(sanitize_filename(':', restricted=True) != '')

    def test_sanitize_ids(self):
        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')

    def test_ordered_set(self):
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        # keep the list ordered
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

    def test_unescape_html(self):
        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))

    def test_daterange(self):
        _20century = DateRange("19000101", "20000101")
        self.assertFalse("17890714" in _20century)
        _ac = DateRange("00010101")
        self.assertTrue("19690721" in _ac)
        _firstmilenium = DateRange(end="10000101")
        self.assertTrue("07110427" in _firstmilenium)

    def test_unified_dates(self):
        self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')

    def test_find_xpath_attr(self):
        testxml = u'''<root>
            <node/>
            <node x="a"/>
            <node x="a" y="c" />
            <node x="b" y="d" />
        </root>'''
        doc = xml.etree.ElementTree.fromstring(testxml)

        self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
        self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
        self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])

if __name__ == '__main__':
    unittest.main()
77
test/test_write_info_json.py
Normal file
@@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8

import json
import os
import sys
import unittest

# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl.YoutubeDL
import youtube_dl.extractor
from youtube_dl.utils import *

PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")

# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)

class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        super(YoutubeDL, self).__init__(*args, **kwargs)
        self.to_stderr = self.to_screen

with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
    params = json.load(pf)
params['writeinfojson'] = True
params['skip_download'] = True
params['writedescription'] = True

TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐

This is a test video for youtube-dl.

For more information, contact phihag@phihag.de .'''

class TestInfoJSON(unittest.TestCase):
    def setUp(self):
        # Clear old files
        self.tearDown()

    def test_info_json(self):
        ie = youtube_dl.extractor.YoutubeIE()
        ydl = YoutubeDL(params)
        ydl.add_info_extractor(ie)
        ydl.download([TEST_ID])
        self.assertTrue(os.path.exists(INFO_JSON_FILE))
        with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
            jd = json.load(jsonf)
        self.assertEqual(jd['upload_date'], u'20121002')
        self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
        self.assertEqual(jd['id'], TEST_ID)
        self.assertEqual(jd['extractor'], 'youtube')
        self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
        self.assertEqual(jd['uploader'], 'Philipp Hagemeister')

        self.assertTrue(os.path.exists(DESCRIPTION_FILE))
        with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
            descr = descf.read()
        self.assertEqual(descr, EXPECTED_DESCRIPTION)

    def tearDown(self):
        if os.path.exists(INFO_JSON_FILE):
            os.remove(INFO_JSON_FILE)
        if os.path.exists(DESCRIPTION_FILE):
            os.remove(DESCRIPTION_FILE)

if __name__ == '__main__':
    unittest.main()
98
test/test_youtube_lists.py
Normal file
@@ -0,0 +1,98 @@
#!/usr/bin/env python

import sys
import unittest
import json

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.extractor import YoutubeUserIE, YoutubePlaylistIE, YoutubeIE, YoutubeChannelIE, YoutubeShowIE
from youtube_dl.utils import *

from helper import FakeYDL

class TestYoutubeLists(unittest.TestCase):
    def assertIsPlaylist(self, info):
        """Make sure the info has '_type' set to 'playlist'"""
        self.assertEqual(info['_type'], 'playlist')

    def test_youtube_playlist(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')[0]
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'ytdl test PL')
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        self.assertEqual(ytie_results, ['bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])

    def test_issue_673(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('PLBB231211A4F62143')[0]
        self.assertTrue(len(result['entries']) > 25)

    def test_youtube_playlist_long(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')[0]
        self.assertIsPlaylist(result)
        self.assertTrue(len(result['entries']) >= 799)

    def test_youtube_playlist_with_deleted(self):
        # 651
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')[0]
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        self.assertFalse('pElCt5oNDuI' in ytie_results)
        self.assertFalse('KdPEApIVdWM' in ytie_results)

    def test_youtube_playlist_empty(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLtPgu7CB4gbZDA7i_euNxn75ISqxwZPYx')[0]
        self.assertIsPlaylist(result)
        self.assertEqual(len(result['entries']), 0)

    def test_youtube_course(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')[0]
        entries = result['entries']
        self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
        self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')

    def test_youtube_channel(self):
        dl = FakeYDL()
        ie = YoutubeChannelIE(dl)
        # test paginated channel
        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')[0]
        self.assertTrue(len(result['entries']) > 90)
        # test autogenerated channel
        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')[0]
        self.assertTrue(len(result['entries']) >= 18)

    def test_youtube_user(self):
        dl = FakeYDL()
        ie = YoutubeUserIE(dl)
        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')[0]
        self.assertTrue(len(result['entries']) >= 320)

    def test_youtube_safe_search(self):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('PLtPgu7CB4gbY9oDN3drwC3cMbJggS7dKl')[0]
        self.assertEqual(len(result['entries']), 2)

    def test_youtube_show(self):
        dl = FakeYDL()
        ie = YoutubeShowIE(dl)
        result = ie.extract('http://www.youtube.com/show/airdisasters')
        self.assertTrue(len(result) >= 4)

if __name__ == '__main__':
    unittest.main()
57
test/test_youtube_sig.py
Executable file
@@ -0,0 +1,57 @@
#!/usr/bin/env python

import unittest
import sys

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.extractor.youtube import YoutubeIE
from helper import FakeYDL

sig = YoutubeIE(FakeYDL())._decrypt_signature

class TestYoutubeSig(unittest.TestCase):
    def test_43_43(self):
        wrong = '5AEEAE0EC39677BC65FD9021CCD115F1F2DBD5A59E4.C0B243A3E2DED6769199AF3461781E75122AE135135'
        right = '931EA22157E1871643FA9519676DED253A342B0C.4E95A5DBD2F1F511DCC1209DF56CB77693CE0EAE'
        self.assertEqual(sig(wrong), right)

    def test_88(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[]}|:;?/>.<"
        right = "J:|}][{=+-_)(*&;%$#@>MNBVCXZASDFGH^KLPOIUYTREWQ0987654321mnbvcxzasdfghrklpoiuytej"
        self.assertEqual(sig(wrong), right)

    def test_87(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$^&*()_-+={[]}|:;?/>.<"
        right = "!?;:|}][{=+-_)(*&^$#@/MNBVCXZASqFGHJKLPOIUYTREWQ0987654321mnbvcxzasdfghjklpoiuytr"
        self.assertEqual(sig(wrong), right)

    def test_86(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[|};?/>.<"
        right = "ertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!/#$%^&*()_-+={[|};?@"
        self.assertEqual(sig(wrong), right)

    def test_85(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?/>.<"
        right = "{>/?;}[.=+-_)(*&^%$#@!MqBVCXZASDFwHJKLPOIUYTREWQ0987654321mnbvcxzasdfghjklpoiuytr"
        self.assertEqual(sig(wrong), right)

    def test_84(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!@#$%^&*()_-+={[};?>.<"
        right = "<.>?;}[{=+-_)(*&^%$#@!MNBVCXZASDFGHJKLPOIUYTREWe098765432rmnbvcxzasdfghjklpoiuyt1"
        self.assertEqual(sig(wrong), right)

    def test_83(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKJHGFDSAZXCVBNM!#$%^&*()_+={[};?/>.<"
        right = "urty8ioplkjhgfdsazxcvbqm1234567S90QWERTYUIOPLKJHGFDnAZXCVBNM!#$%^&*()_+={[};?/>.<"
        self.assertEqual(sig(wrong), right)

    def test_82(self):
        wrong = "qwertyuioplkjhgfdsazxcvbnm1234567890QWERTYUIOPLKHGFDSAZXCVBNM!@#$%^&*(-+={[};?/>.<"
        right = "Q>/?;}[{=+-(*<^%$#@!MNBVCXZASDFGHKLPOIUY8REWT0q&7654321mnbvcxzasdfghjklpoiuytrew9"
        self.assertEqual(sig(wrong), right)

if __name__ == '__main__':
    unittest.main()
95
test/test_youtube_subtitles.py
Normal file
@@ -0,0 +1,95 @@
#!/usr/bin/env python

import sys
import unittest
import json
import io
import hashlib

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from youtube_dl.extractor import YoutubeIE
from youtube_dl.utils import *
from helper import FakeYDL

md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()


class TestYoutubeSubtitles(unittest.TestCase):
    def setUp(self):
        DL = FakeYDL()
        DL.params['allsubtitles'] = False
        DL.params['writesubtitles'] = False
        DL.params['subtitlesformat'] = 'srt'
        DL.params['listsubtitles'] = False

    def test_youtube_no_subtitles(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = False
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        subtitles = info_dict[0]['subtitles']
        self.assertEqual(subtitles, None)

    def test_youtube_subtitles(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = True
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        sub = info_dict[0]['subtitles'][0]
        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')

    def test_youtube_subtitles_it(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = True
        DL.params['subtitleslang'] = 'it'
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        sub = info_dict[0]['subtitles'][0]
        self.assertEqual(md5(sub[2]), '164a51f16f260476a05b50fe4c2f161d')

    def test_youtube_onlysubtitles(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = True
        DL.params['onlysubtitles'] = True
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        sub = info_dict[0]['subtitles'][0]
        self.assertEqual(md5(sub[2]), '4cd9278a35ba2305f47354ee13472260')

    def test_youtube_allsubtitles(self):
        DL = FakeYDL()
        DL.params['allsubtitles'] = True
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        subtitles = info_dict[0]['subtitles']
        self.assertEqual(len(subtitles), 13)

    def test_youtube_subtitles_sbv_format(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = True
        DL.params['subtitlesformat'] = 'sbv'
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        sub = info_dict[0]['subtitles'][0]
        self.assertEqual(md5(sub[2]), '13aeaa0c245a8bed9a451cb643e3ad8b')

    def test_youtube_subtitles_vtt_format(self):
        DL = FakeYDL()
        DL.params['writesubtitles'] = True
        DL.params['subtitlesformat'] = 'vtt'
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        sub = info_dict[0]['subtitles'][0]
        self.assertEqual(md5(sub[2]), '356cdc577fde0c6783b9b822e7206ff7')

    def test_youtube_list_subtitles(self):
        DL = FakeYDL()
        DL.params['listsubtitles'] = True
        IE = YoutubeIE(DL)
        info_dict = IE.extract('QRS8MkLhQmM')
        self.assertEqual(info_dict, None)

    def test_youtube_automatic_captions(self):
        DL = FakeYDL()
        DL.params['writeautomaticsub'] = True
        DL.params['subtitleslang'] = 'it'
        IE = YoutubeIE(DL)
        info_dict = IE.extract('8YoUxe5ncPo')
        sub = info_dict[0]['subtitles'][0]
        self.assertTrue(sub[2] is not None)


if __name__ == '__main__':
    unittest.main()
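The tests above drive YoutubeIE entirely through FakeYDL's params dict. A minimal sketch of running just this test case from the repository root, assuming the test/ layout shown in this diff:

# Sketch: load and run only the subtitle tests; the module path
# 'test.test_youtube_subtitles' is assumed from the file listing above.
import unittest
suite = unittest.defaultTestLoader.loadTestsFromName('test.test_youtube_subtitles')
unittest.TextTestRunner(verbosity=2).run(suite)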
4712
youtube-dl
File diff suppressed because it is too large
@@ -1,6 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import youtube_dl

youtube_dl.main()
BIN
youtube-dl.exe
Normal file
Binary file not shown.
544
youtube_dl/FileDownloader.py
Normal file
@@ -0,0 +1,544 @@
|
||||
import math
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
if os.name == 'nt':
|
||||
import ctypes
|
||||
|
||||
from .utils import *
|
||||
|
||||
|
||||
class FileDownloader(object):
|
||||
"""File Downloader class.
|
||||
|
||||
File downloader objects are the ones responsible of downloading the
|
||||
actual video file and writing it to disk.
|
||||
|
||||
File downloaders accept a lot of parameters. In order not to saturate
|
||||
the object constructor with arguments, it receives a dictionary of
|
||||
options instead.
|
||||
|
||||
Available options:
|
||||
|
||||
verbose: Print additional info to stdout.
|
||||
quiet: Do not print messages to stdout.
|
||||
ratelimit: Download speed limit, in bytes/sec.
|
||||
retries: Number of times to retry for HTTP error 5xx
|
||||
buffersize: Size of download buffer in bytes.
|
||||
noresizebuffer: Do not automatically resize the download buffer.
|
||||
continuedl: Try to continue downloads if possible.
|
||||
noprogress: Do not print the progress bar.
|
||||
logtostderr: Log messages to stderr instead of stdout.
|
||||
consoletitle: Display progress in console window's titlebar.
|
||||
nopart: Do not use temporary .part files.
|
||||
updatetime: Use the Last-modified header to set output file timestamps.
|
||||
test: Download only first bytes to test the downloader.
|
||||
min_filesize: Skip files smaller than this size
|
||||
max_filesize: Skip files larger than this size
|
||||
"""
|
||||
|
||||
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params

    @staticmethod
    def format_bytes(bytes):
        if bytes is None:
            return 'N/A'
        if type(bytes) is str:
            bytes = float(bytes)
        if bytes == 0.0:
            exponent = 0
        else:
            exponent = int(math.log(bytes, 1024.0))
        suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
        converted = float(bytes) / float(1024 ** exponent)
        return '%.2f%s' % (converted, suffix)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        if data_len is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % (float(byte_counter) / float(data_len) * 100.0))

    @staticmethod
    def calc_eta(start, now, total, current):
        if total is None:
            return '--:--'
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return '--:--'
        rate = float(current) / dif
        eta = int((float(total) - float(current)) / rate)
        (eta_mins, eta_secs) = divmod(eta, 60)
        if eta_mins > 99:
            return '--:--'
        return '%02d:%02d' % (eta_mins, eta_secs)

    @staticmethod
    def calc_speed(start, now, bytes):
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % FileDownloader.format_bytes(float(bytes) / dif))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer."""
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))
    def to_screen(self, *args, **kargs):
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        self.ydl.to_screen(message)

    def to_cons_title(self, message):
        """Set console/terminal window title to message."""
        if not self.params.get('consoletitle', False):
            return
        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            self.to_screen('\033]0;%s\007' % message, skip_eol=True)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error(u'unable to rename file')

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        timestr = last_modified_hdr
        if timestr is None:
            return
        filetime = timeconvert(timestr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            os.utime(filename, (time.time(), filetime))
        except:
            pass
        return filetime
    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def report_progress(self, percent_str, data_len_str, speed_str, eta_str):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        clear_line = (u'\x1b[K' if sys.stderr.isatty() and os.name != 'nt' else u'')
        if self.params.get('progress_with_newline', False):
            self.to_screen(u'[download] %s of %s at %s ETA %s' %
                (percent_str, data_len_str, speed_str, eta_str))
        else:
            self.to_screen(u'\r%s[download] %s of %s at %s ETA %s' %
                (clear_line, percent_str, data_len_str, speed_str, eta_str), skip_eol=True)
        self.to_cons_title(u'youtube-dl - %s of %s at %s ETA %s' %
                (percent_str.strip(), data_len_str.strip(), speed_str.strip(), eta_str.strip()))

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except (UnicodeEncodeError) as err:
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def report_finish(self):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self.to_screen(u'')

    def _download_with_rtmpdump(self, filename, url, player_url, page_url, play_path, tc_url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        # Check for rtmpdump first
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
            return False
        verbosity_option = '--verbose' if self.params.get('verbose', False) else '--quiet'

        # Download using rtmpdump. rtmpdump returns exit code 2 when
        # the connection was interrupted and resuming appears to be
        # possible. This is part of rtmpdump's normal usage, AFAIK.
        basic_args = ['rtmpdump', verbosity_option, '-r', url, '-o', tmpfilename]
        if player_url is not None:
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', url]
        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
        if self.params.get('verbose', False):
            try:
                import pipes
                shell_quote = lambda args: ' '.join(map(pipes.quote, args))
            except ImportError:
                shell_quote = repr
            self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(args))
        retval = subprocess.call(args)
        while retval == 2 or retval == 1:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[rtmpdump] %s bytes' % prevsize, skip_eol=True)
            time.sleep(5.0)  # This seems to be needed
            retval = subprocess.call(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
            if prevsize == cursize and retval == 1:
                break
            # Some rtmp streams seem to abort after ~ 99.8%. Don't complain for those
            if prevsize == cursize and retval == 2 and cursize > 1024:
                self.to_screen(u'\r[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
                retval = 0
                break
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'rtmpdump exited with code %d' % retval)
            return False
    def _download_with_mplayer(self, filename, url):
        self.report_destination(filename)
        tmpfilename = self.temp_name(filename)

        args = ['mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy', '-dumpstream', '-dumpfile', tmpfilename, url]
        # Check for mplayer first
        try:
            subprocess.call(['mplayer', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
            self.report_error(u'MMS or RTSP download detected but "%s" could not be run' % args[0])
            return False

        # Download using mplayer.
        retval = subprocess.call(args)
        if retval == 0:
            fsize = os.path.getsize(encodeFilename(tmpfilename))
            self.to_screen(u'\r[%s] %s bytes' % (args[0], fsize))
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
                'total_bytes': fsize,
                'filename': filename,
                'status': 'finished',
            })
            return True
        else:
            self.to_stderr(u"\n")
            self.report_error(u'mplayer exited with code %d' % retval)
            return False

    def _do_download(self, filename, info_dict):
        url = info_dict['url']

        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
            })
            return True

        # Attempt to download using rtmpdump
        if url.startswith('rtmp'):
            return self._download_with_rtmpdump(filename, url,
                                                info_dict.get('player_url', None),
                                                info_dict.get('page_url', None),
                                                info_dict.get('play_path', None),
                                                info_dict.get('tc_url', None))

        # Attempt to download using mplayer
        if url.startswith('mms') or url.startswith('rtsp'):
            return self._download_with_mplayer(filename, url)

        tmpfilename = self.temp_name(filename)
        stream = None

        # Do not include the Accept-Encoding header
        headers = {'Youtubedl-no-compression': 'True'}
        if 'user_agent' in info_dict:
            headers['Youtubedl-user-agent'] = info_dict['user_agent']
        basic_request = compat_urllib_request.Request(url, None, headers)
        request = compat_urllib_request.Request(url, None, headers)

        if self.params.get('test', False):
            request.add_header('Range', 'bytes=0-10240')

        # Establish possible resume length
        if os.path.isfile(encodeFilename(tmpfilename)):
            resume_len = os.path.getsize(encodeFilename(tmpfilename))
        else:
            resume_len = 0

        open_mode = 'wb'
        if resume_len != 0:
            if self.params.get('continuedl', False):
                self.report_resuming_byte(resume_len)
                request.add_header('Range', 'bytes=%d-' % resume_len)
                open_mode = 'ab'
            else:
                resume_len = 0

        count = 0
        retries = self.params.get('retries', 0)
        while count <= retries:
            # Establish connection
            try:
                if count == 0 and 'urlhandle' in info_dict:
                    data = info_dict['urlhandle']
                data = compat_urllib_request.urlopen(request)
                break
            except (compat_urllib_error.HTTPError, ) as err:
                if (err.code < 500 or err.code >= 600) and err.code != 416:
                    # Unexpected HTTP error
                    raise
                elif err.code == 416:
                    # Unable to resume (requested range not satisfiable)
                    try:
                        # Open the connection again without the range header
                        data = compat_urllib_request.urlopen(basic_request)
                        content_length = data.info()['Content-Length']
                    except (compat_urllib_error.HTTPError, ) as err:
                        if err.code < 500 or err.code >= 600:
                            raise
                    else:
                        # Examine the reported length
                        if (content_length is not None and
                                (resume_len - 100 < int(content_length) < resume_len + 100)):
                            # The file had already been fully downloaded.
                            # Explanation of the above condition: in issue #175 it was revealed that
                            # YouTube sometimes adds or removes a few bytes from the end of the file,
                            # changing the file size slightly and causing problems for some users. So
                            # I decided to implement a suggested change and consider the file
                            # completely downloaded if the file size differs less than 100 bytes from
                            # the one in the hard drive.
                            self.report_file_already_downloaded(filename)
                            self.try_rename(tmpfilename, filename)
                            self._hook_progress({
                                'filename': filename,
                                'status': 'finished',
                            })
                            return True
                        else:
                            # The length does not match, we start the download over
                            self.report_unable_to_resume()
                            open_mode = 'wb'
                            break
            # Retry
            count += 1
            if count <= retries:
                self.report_retry(count, retries)

        if count > retries:
            self.report_error(u'giving up after %s retries' % retries)
            return False

        data_len = data.info().get('Content-length', None)
        if data_len is not None:
            data_len = int(data_len) + resume_len
            min_data_len = self.params.get("min_filesize", None)
            max_data_len = self.params.get("max_filesize", None)
            if min_data_len is not None and data_len < min_data_len:
                self.to_screen(u'\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                return False
            if max_data_len is not None and data_len > max_data_len:
                self.to_screen(u'\r[download] File is larger than max-filesize (%s bytes > %s bytes). Aborting.' % (data_len, max_data_len))
                return False

        data_len_str = self.format_bytes(data_len)
        byte_counter = 0 + resume_len
        block_size = self.params.get('buffersize', 1024)
        start = time.time()
        while True:
            # Download and write
            before = time.time()
            data_block = data.read(block_size)
            after = time.time()
            if len(data_block) == 0:
                break
            byte_counter += len(data_block)

            # Open file just in time
            if stream is None:
                try:
                    (stream, tmpfilename) = sanitize_open(tmpfilename, open_mode)
                    assert stream is not None
                    filename = self.undo_temp_name(tmpfilename)
                    self.report_destination(filename)
                except (OSError, IOError) as err:
                    self.report_error(u'unable to open for writing: %s' % str(err))
                    return False
            try:
                stream.write(data_block)
            except (IOError, OSError) as err:
                self.to_stderr(u"\n")
                self.report_error(u'unable to write data: %s' % str(err))
                return False
            if not self.params.get('noresizebuffer', False):
                block_size = self.best_block_size(after - before, len(data_block))

            # Progress message
            speed_str = self.calc_speed(start, time.time(), byte_counter - resume_len)
            if data_len is None:
                self.report_progress('Unknown %', data_len_str, speed_str, 'Unknown ETA')
            else:
                percent_str = self.calc_percent(byte_counter, data_len)
                eta_str = self.calc_eta(start, time.time(), data_len - resume_len, byte_counter - resume_len)
                self.report_progress(percent_str, data_len_str, speed_str, eta_str)

            self._hook_progress({
                'downloaded_bytes': byte_counter,
                'total_bytes': data_len,
                'tmpfilename': tmpfilename,
                'filename': filename,
                'status': 'downloading',
            })

            # Apply rate limit
            self.slow_down(start, byte_counter - resume_len)

        if stream is None:
            self.to_stderr(u"\n")
            self.report_error(u'Did not get any data blocks')
            return False
        stream.close()
        self.report_finish()
        if data_len is not None and byte_counter != data_len:
            raise ContentTooShortError(byte_counter, int(data_len))
        self.try_rename(tmpfilename, filename)

        # Update file modification time
        if self.params.get('updatetime', True):
            info_dict['filetime'] = self.try_utime(filename, data.info().get('last-modified', None))

        self._hook_progress({
            'downloaded_bytes': byte_counter,
            'total_bytes': byte_counter,
            'filename': filename,
            'status': 'finished',
        })

        return True

    def _hook_progress(self, status):
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disk
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)
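The add_progress_hook() docstring above defines the full status-dictionary contract. A minimal sketch of a hook written against that contract; "fd" is an assumed FileDownloader instance (for example the one YoutubeDL creates in its constructor), not something defined in this diff:

# Sketch of a progress hook; only 'filename' and 'status' are guaranteed,
# so the optional byte counters are accessed defensively.
def print_progress(status):
    if status['status'] == 'downloading' and status.get('total_bytes'):
        done = 100.0 * status['downloaded_bytes'] / status['total_bytes']
        print('%5.1f%% of %s' % (done, status['filename']))
    elif status['status'] == 'finished':
        print('finished: %s' % status['filename'])

fd.add_progress_hook(print_progress)  # "fd" assumed to exist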
4
youtube_dl/InfoExtractors.py
Executable file
@@ -0,0 +1,4 @@
# Legacy file for backwards compatibility, use youtube_dl.extractor instead!

from .extractor.common import InfoExtractor, SearchInfoExtractor
from .extractor import gen_extractors, get_info_extractor
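This four-line shim keeps the pre-split import path alive. A sketch of code written against the old layout that keeps working unchanged (assuming youtube_dl is on the path):

# Sketch: the legacy import path still resolves through the shim above.
from youtube_dl.InfoExtractors import gen_extractors
print(len(gen_extractors()))  # number of registered extractors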
233
youtube_dl/PostProcessor.py
Normal file
@@ -0,0 +1,233 @@
import os
import subprocess
import sys
import time

from .utils import *


class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        This method returns a tuple, the first element of which describes
        whether the original file should be kept (i.e. not deleted - None for
        no preference), and the second of which is the updated information.

        In addition, this method may raise a PostProcessingError
        exception if post processing fails.
        """
        return None, information  # by default, keep file and do nothing


class FFmpegPostProcessorError(PostProcessingError):
    pass


class AudioConversionError(PostProcessingError):
    pass


class FFmpegPostProcessor(PostProcessor):
    def __init__(self, downloader=None):
        PostProcessor.__init__(self, downloader)
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        def executable(exe):
            try:
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            except OSError:
                return False
            return exe
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def run_ffmpeg(self, path, out_path, opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise FFmpegPostProcessorError(u'ffmpeg or avconv not found. Please install one.')
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path)]
               + opts +
               [encodeFilename(self._ffmpeg_filename_argument(out_path))])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            stderr = stderr.decode('utf-8', 'replace')
            msg = stderr.strip().split('\n')[-1]
            raise FFmpegPostProcessorError(msg)

    def _ffmpeg_filename_argument(self, fn):
        # ffmpeg broke --, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details
        if fn.startswith(u'-'):
            return u'./' + fn
        return fn

class FFmpegExtractAudioPP(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
        FFmpegPostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._nopostoverwrites = nopostoverwrites

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']:
            return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', encodeFilename(self._ffmpeg_filename_argument(path))]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        opts = ['-vn'] + acodec_opts + more_opts
        try:
            FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
        except FFmpegPostProcessorError as err:
            raise AudioConversionError(err.message)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            raise PostProcessingError(u'WARNING: unable to obtain file audio codec with ffprobe')

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if filecodec == 'aac' and self._preferredcodec in ['m4a', 'best']:
                # Lossless, but in another container
                acodec = 'copy'
                extension = 'm4a'
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis', 'opus']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'opus': 'opus', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                if int(self._preferredquality) < 10:
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.')  # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension

        # If we download foo.mp3 and convert it to... foo.mp3, then don't delete foo.mp3, silly.
        if new_path == path:
            self._nopostoverwrites = True

        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except:
            etype, e, tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                msg = u'audio conversion failed: ' + e.message
            else:
                msg = u'error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg')
            raise PostProcessingError(msg)

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        information['filepath'] = new_path
        return self._nopostoverwrites, information


class FFmpegVideoConvertor(FFmpegPostProcessor):
    def __init__(self, downloader=None, preferedformat=None):
        super(FFmpegVideoConvertor, self).__init__(downloader)
        self._preferedformat = preferedformat

    def run(self, information):
        path = information['filepath']
        prefix, sep, ext = path.rpartition(u'.')
        outpath = prefix + sep + self._preferedformat
        if information['ext'] == self._preferedformat:
            self._downloader.to_screen(u'[ffmpeg] Not converting video file %s - already is in target format %s' % (path, self._preferedformat))
            return True, information
        self._downloader.to_screen(u'[ffmpeg] Converting video from %s to %s, Destination: %s' % (information['ext'], self._preferedformat, outpath))
        self.run_ffmpeg(path, outpath, [])
        information['filepath'] = outpath
        information['format'] = self._preferedformat
        information['ext'] = self._preferedformat
        return False, information
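The run() contract above - a keep-file flag plus the updated info dict - is exactly what the chain in YoutubeDL.post_process() further down consumes. A minimal sketch of a custom PP honoring that contract; the class name is hypothetical, not part of this diff:

# Sketch of a minimal custom PostProcessor; "TouchFilePP" is an invented
# name, and the return shape follows the run() docstring above.
from youtube_dl.PostProcessor import PostProcessor

class TouchFilePP(PostProcessor):
    def run(self, information):
        self._downloader.to_screen(u'[touch] saw ' + information['filepath'])
        return None, information  # None = no preference about keeping the file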
605
youtube_dl/YoutubeDL.py
Normal file
@@ -0,0 +1,605 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import io
import os
import re
import shutil
import socket
import sys
import time
import traceback

from .utils import *
from .extractor import get_info_extractor, gen_extractors
from .FileDownloader import FileDownloader


class YoutubeDL(object):
    """YoutubeDL class.

    YoutubeDL objects are the ones responsible for downloading the
    actual video file and writing it to disk if the user has requested
    it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do),
    it has to pass the URL to one of them.

    For this, YoutubeDL objects have a method that allows
    InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
    finds that reports being able to handle it. The InfoExtractor extracts
    all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
    Downloader to download the video.

    YoutubeDL objects accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead. These options are available through the params
    attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge for the InfoExtractors
    that are added to it, so this is a "mutual registration".

    Available options:

    username:          Username for authentication purposes.
    password:          Password for authentication purposes.
    videopassword:     Password for accessing a video.
    usenetrc:          Use netrc for authentication instead.
    verbose:           Print additional info to stdout.
    quiet:             Do not print messages to stdout.
    forceurl:          Force printing final URL.
    forcetitle:        Force printing title.
    forceid:           Force printing ID.
    forcethumbnail:    Force printing thumbnail URL.
    forcedescription:  Force printing description.
    forcefilename:     Force printing final filename.
    simulate:          Do not download the video files.
    format:            Video format code.
    format_limit:      Highest quality format to try.
    outtmpl:           Template for output names.
    restrictfilenames: Do not allow "&" and spaces in file names.
    ignoreerrors:      Do not stop on download errors.
    nooverwrites:      Prevent overwriting files.
    playliststart:     Playlist item to start at.
    playlistend:       Playlist item to end at.
    matchtitle:        Download only matching titles.
    rejecttitle:       Reject downloads for matching titles.
    logtostderr:       Log messages to stderr instead of stdout.
    writedescription:  Write the video description to a .description file.
    writeinfojson:     Write the video metadata to a .info.json file.
    writethumbnail:    Write the thumbnail image to a file.
    writesubtitles:    Write the video subtitles to a file.
    writeautomaticsub: Write the automatic subtitles to a file.
    allsubtitles:      Download all the subtitles of the video.
    listsubtitles:     List all available subtitles for the video.
    subtitlesformat:   Subtitle format [srt/sbv/vtt] (default=srt).
    subtitleslang:     Language of the subtitles to download.
    keepvideo:         Keep the video file after post-processing.
    daterange:         A DateRange object; download only if the upload_date is in the range.
    skip_download:     Skip the actual download of the video file.

    The following parameters are not used by YoutubeDL itself, they are used by
    the FileDownloader:
    nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
    noresizebuffer, retries, continuedl, noprogress, consoletitle
    """

    params = None
    _ies = []
    _pps = []
    _download_retcode = None
    _num_downloads = None
    _screen_file = None

    def __init__(self, params):
        """Create a FileDownloader object with the given options."""
        self._ies = []
        self._pps = []
        self._progress_hooks = []
        self._download_retcode = 0
        self._num_downloads = 0
        self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
        self.params = params
        self.fd = FileDownloader(self, self.params)

        if '%(stitle)s' in self.params['outtmpl']:
            self.report_warning(u'%(stitle)s is deprecated. Use the %(title)s and the --restrict-filenames flag (which also secures %(uploader)s et al) instead.')

    def add_info_extractor(self, ie):
        """Add an InfoExtractor object to the end of the list."""
        self._ies.append(ie)
        ie.set_downloader(self)

    def add_default_info_extractors(self):
        """
        Add the InfoExtractors returned by gen_extractors to the end of the list
        """
        for ie in gen_extractors():
            self.add_info_extractor(ie)

    def add_post_processor(self, pp):
        """Add a PostProcessor object to the end of the chain."""
        self._pps.append(pp)
        pp.set_downloader(self)

    def to_screen(self, message, skip_eol=False):
        """Print message to stdout if not in quiet mode."""
        assert type(message) == type(u'')
        if not self.params.get('quiet', False):
            terminator = [u'\n', u''][skip_eol]
            output = message + terminator
            if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3:  # Python 2 lies about the mode of sys.stdout/sys.stderr
                output = output.encode(preferredencoding(), 'ignore')
            self._screen_file.write(output)
            self._screen_file.flush()

    def to_stderr(self, message):
        """Print message to stderr."""
        assert type(message) == type(u'')
        output = message + u'\n'
        if 'b' in getattr(self._screen_file, 'mode', '') or sys.version_info[0] < 3:  # Python 2 lies about the mode of sys.stdout/sys.stderr
            output = output.encode(preferredencoding())
        sys.stderr.write(output)

    def fixed_template(self):
        """Checks if the output template is fixed."""
        return (re.search(u'(?u)%\\(.+?\\)s', self.params['outtmpl']) is None)
    def trouble(self, message=None, tb=None):
        """Determine action to take when a download problem appears.

        Depending on if the downloader has been configured to ignore
        download errors or not, this method may throw an exception or
        not when errors are found, after printing the message.

        tb, if given, is additional traceback information.
        """
        if message is not None:
            self.to_stderr(message)
        if self.params.get('verbose'):
            if tb is None:
                if sys.exc_info()[0]:  # if .trouble has been called from an except block
                    tb = u''
                    if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                        tb += u''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                    tb += compat_str(traceback.format_exc())
                else:
                    tb_data = traceback.format_list(traceback.extract_stack())
                    tb = u''.join(tb_data)
            self.to_stderr(tb)
        if not self.params.get('ignoreerrors', False):
            if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                exc_info = sys.exc_info()[1].exc_info
            else:
                exc_info = sys.exc_info()
            raise DownloadError(message, exc_info)
        self._download_retcode = 1

    def report_warning(self, message):
        '''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file the 'WARNING:' will be colored.
        '''
        if sys.stderr.isatty() and os.name != 'nt':
            _msg_header = u'\033[0;33mWARNING:\033[0m'
        else:
            _msg_header = u'WARNING:'
        warning_message = u'%s %s' % (_msg_header, message)
        self.to_stderr(warning_message)

    def report_error(self, message, tb=None):
        '''
        Do the same as trouble, but prefixes the message with 'ERROR:', colored
        in red if stderr is a tty file.
        '''
        if sys.stderr.isatty() and os.name != 'nt':
            _msg_header = u'\033[0;31mERROR:\033[0m'
        else:
            _msg_header = u'ERROR:'
        error_message = u'%s %s' % (_msg_header, message)
        self.trouble(error_message, tb)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)
    def report_writedescription(self, descfn):
        """ Report that the description file is being written """
        self.to_screen(u'[info] Writing video description to: ' + descfn)

    def report_writesubtitles(self, sub_filename):
        """ Report that the subtitles file is being written """
        self.to_screen(u'[info] Writing video subtitles to: ' + sub_filename)

    def report_writeinfojson(self, infofn):
        """ Report that the metadata file has been written """
        self.to_screen(u'[info] Writing video description metadata as JSON to: ' + infofn)

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except (UnicodeEncodeError) as err:
            self.to_screen(u'[download] The file has already been downloaded')

    def increment_downloads(self):
        """Increment the ordinal that assigns a number to each file."""
        self._num_downloads += 1

    def prepare_filename(self, info_dict):
        """Generate the output filename."""
        try:
            template_dict = dict(info_dict)

            template_dict['epoch'] = int(time.time())
            autonumber_size = self.params.get('autonumber_size')
            if autonumber_size is None:
                autonumber_size = 5
            autonumber_templ = u'%0' + str(autonumber_size) + u'd'
            template_dict['autonumber'] = autonumber_templ % self._num_downloads
            if template_dict['playlist_index'] is not None:
                template_dict['playlist_index'] = u'%05d' % template_dict['playlist_index']

            sanitize = lambda k, v: sanitize_filename(
                u'NA' if v is None else compat_str(v),
                restricted=self.params.get('restrictfilenames'),
                is_id=(k == u'id'))
            template_dict = dict((k, sanitize(k, v)) for k, v in template_dict.items())

            filename = self.params['outtmpl'] % template_dict
            return filename
        except KeyError as err:
            self.report_error(u'Erroneous output template')
            return None
        except ValueError as err:
            self.report_error(u'Insufficient system charset ' + repr(preferredencoding()))
            return None

    def _match_entry(self, info_dict):
        """ Returns None iff the file should be downloaded """

        title = info_dict['title']
        matchtitle = self.params.get('matchtitle', False)
        if matchtitle:
            if not re.search(matchtitle, title, re.IGNORECASE):
                return u'[download] "' + title + '" title did not match pattern "' + matchtitle + '"'
        rejecttitle = self.params.get('rejecttitle', False)
        if rejecttitle:
            if re.search(rejecttitle, title, re.IGNORECASE):
                return u'"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date', None)
        if date is not None:
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return u'[download] %s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        return None
    def extract_info(self, url, download=True, ie_key=None, extra_info={}):
        '''
        Returns a list with a dictionary for each video we find.
        If 'download', also downloads the videos.
        extra_info is a dict containing the extra values to add to each result
        '''

        if ie_key:
            ie = get_info_extractor(ie_key)()
            ie.set_downloader(self)
            ies = [ie]
        else:
            ies = self._ies

        for ie in ies:
            if not ie.suitable(url):
                continue

            if not ie.working():
                self.report_warning(u'The program functionality for this site has been marked as broken, '
                                    u'and will probably not work.')

            try:
                ie_result = ie.extract(url)
                if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
                    break
                if isinstance(ie_result, list):
                    # Backwards compatibility: old IE result format
                    for result in ie_result:
                        result.update(extra_info)
                    ie_result = {
                        '_type': 'compat_list',
                        'entries': ie_result,
                    }
                else:
                    ie_result.update(extra_info)
                if 'extractor' not in ie_result:
                    ie_result['extractor'] = ie.IE_NAME
                return self.process_ie_result(ie_result, download=download)
            except ExtractorError as de:  # An error we somewhat expected
                self.report_error(compat_str(de), de.format_traceback())
                break
            except Exception as e:
                if self.params.get('ignoreerrors', False):
                    self.report_error(compat_str(e), tb=compat_str(traceback.format_exc()))
                    break
                else:
                    raise
        else:
            self.report_error(u'no suitable InfoExtractor: %s' % url)
    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
        Take the result of the ie (may be modified) and resolve all unresolved
        references (URLs, playlist items).

        It will also download the videos if 'download'.
        Returns the resolved ie_result.
        """

        result_type = ie_result.get('_type', 'video')  # If not given we suppose it's a video, support the default old system
        if result_type == 'video':
            ie_result.update(extra_info)
            if 'playlist' not in ie_result:
                # It isn't part of a playlist
                ie_result['playlist'] = None
                ie_result['playlist_index'] = None
            if download:
                self.process_info(ie_result)
            return ie_result
        elif result_type == 'url':
            # We have to add extra_info to the results because it may be
            # contained in a playlist
            return self.extract_info(ie_result['url'],
                                     download,
                                     ie_key=ie_result.get('ie_key'),
                                     extra_info=extra_info)
        elif result_type == 'playlist':
            # We process each entry in the playlist
            playlist = ie_result.get('title', None) or ie_result.get('id', None)
            self.to_screen(u'[download] Downloading playlist: %s' % playlist)

            playlist_results = []

            n_all_entries = len(ie_result['entries'])
            playliststart = self.params.get('playliststart', 1) - 1
            playlistend = self.params.get('playlistend', -1)

            if playlistend == -1:
                entries = ie_result['entries'][playliststart:]
            else:
                entries = ie_result['entries'][playliststart:playlistend]

            n_entries = len(entries)

            self.to_screen(u"[%s] playlist '%s': Collected %d video ids (downloading %d of them)" %
                           (ie_result['extractor'], playlist, n_all_entries, n_entries))

            for i, entry in enumerate(entries, 1):
                self.to_screen(u'[download] Downloading video #%s of %s' % (i, n_entries))
                extra = {
                    'playlist': playlist,
                    'playlist_index': i + playliststart,
                }
                if not 'extractor' in entry:
                    # We set the extractor; if it's a URL it will be set then to
                    # the new extractor, but if it's already a video we must make
                    # sure it's present: see issue #877
                    entry['extractor'] = ie_result['extractor']
                entry_result = self.process_ie_result(entry,
                                                      download=download,
                                                      extra_info=extra)
                playlist_results.append(entry_result)
            ie_result['entries'] = playlist_results
            return ie_result
        elif result_type == 'compat_list':
            def _fixup(r):
                r.setdefault('extractor', ie_result['extractor'])
                return r
            ie_result['entries'] = [
                self.process_ie_result(_fixup(r), download=download)
                for r in ie_result['entries']
            ]
            return ie_result
        else:
            raise Exception('Invalid result type: %s' % result_type)
    def process_info(self, info_dict):
        """Process a single resolved IE result."""

        assert info_dict.get('_type', 'video') == 'video'
        # We increment the download count here to match the previous behaviour.
        self.increment_downloads()

        info_dict['fulltitle'] = info_dict['title']
        if len(info_dict['title']) > 200:
            info_dict['title'] = info_dict['title'][:197] + u'...'

        # Keep for backwards compatibility
        info_dict['stitle'] = info_dict['title']

        if not 'format' in info_dict:
            info_dict['format'] = info_dict['ext']

        reason = self._match_entry(info_dict)
        if reason is not None:
            self.to_screen(u'[download] ' + reason)
            return

        max_downloads = self.params.get('max_downloads')
        if max_downloads is not None:
            if self._num_downloads > int(max_downloads):
                raise MaxDownloadsReached()

        filename = self.prepare_filename(info_dict)

        # Forced printings
        if self.params.get('forcetitle', False):
            compat_print(info_dict['title'])
        if self.params.get('forceid', False):
            compat_print(info_dict['id'])
        if self.params.get('forceurl', False):
            compat_print(info_dict['url'])
        if self.params.get('forcethumbnail', False) and 'thumbnail' in info_dict:
            compat_print(info_dict['thumbnail'])
        if self.params.get('forcedescription', False) and 'description' in info_dict:
            compat_print(info_dict['description'])
        if self.params.get('forcefilename', False) and filename is not None:
            compat_print(filename)
        if self.params.get('forceformat', False):
            compat_print(info_dict['format'])

        # Do nothing else if in simulate mode
        if self.params.get('simulate', False):
            return

        if filename is None:
            return

        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn != '' and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error(u'unable to create directory ' + compat_str(err))
            return

        if self.params.get('writedescription', False):
            try:
                descfn = filename + u'.description'
                self.report_writedescription(descfn)
                with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
                    descfile.write(info_dict['description'])
            except (OSError, IOError):
                self.report_error(u'Cannot write description file ' + descfn)
                return

        if (self.params.get('writesubtitles', False) or self.params.get('writeautomaticsub')) and 'subtitles' in info_dict and info_dict['subtitles']:
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            subtitle = info_dict['subtitles'][0]
            (sub_error, sub_lang, sub) = subtitle
            sub_format = self.params.get('subtitlesformat')
            if sub_error:
                self.report_warning("Some error while getting the subtitles")
            else:
                try:
                    sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
                    self.report_writesubtitles(sub_filename)
                    with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                        subfile.write(sub)
                except (OSError, IOError):
                    self.report_error(u'Cannot write subtitles file ' + descfn)
                    return

        if self.params.get('allsubtitles', False) and 'subtitles' in info_dict and info_dict['subtitles']:
            subtitles = info_dict['subtitles']
            sub_format = self.params.get('subtitlesformat')
            for subtitle in subtitles:
                (sub_error, sub_lang, sub) = subtitle
                if sub_error:
                    self.report_warning("Some error while getting the subtitles")
                else:
                    try:
                        sub_filename = filename.rsplit('.', 1)[0] + u'.' + sub_lang + u'.' + sub_format
                        self.report_writesubtitles(sub_filename)
                        with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                            subfile.write(sub)
                    except (OSError, IOError):
                        self.report_error(u'Cannot write subtitles file ' + descfn)
                        return

        if self.params.get('writeinfojson', False):
            infofn = filename + u'.info.json'
            self.report_writeinfojson(infofn)
            try:
                json_info_dict = dict((k, v) for k, v in info_dict.items() if not k in ['urlhandle'])
                write_json_file(json_info_dict, encodeFilename(infofn))
            except (OSError, IOError):
                self.report_error(u'Cannot write metadata to JSON file ' + infofn)
                return

        if self.params.get('writethumbnail', False):
            if 'thumbnail' in info_dict:
                thumb_format = info_dict['thumbnail'].rpartition(u'/')[2].rpartition(u'.')[2]
                if not thumb_format:
                    thumb_format = 'jpg'
                thumb_filename = filename.rpartition('.')[0] + u'.' + thumb_format
                self.to_screen(u'[%s] %s: Downloading thumbnail ...' %
                               (info_dict['extractor'], info_dict['id']))
                uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                with open(thumb_filename, 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                self.to_screen(u'[%s] %s: Writing thumbnail to: %s' %
                               (info_dict['extractor'], info_dict['id'], thumb_filename))

        if not self.params.get('skip_download', False):
            if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(filename)):
                success = True
            else:
                try:
                    success = self.fd._do_download(filename, info_dict)
                except (OSError, IOError) as err:
                    raise UnavailableVideoError()
                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                    self.report_error(u'unable to download video data: %s' % str(err))
                    return
                except (ContentTooShortError, ) as err:
                    self.report_error(u'content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                    return

            if success:
                try:
                    self.post_process(filename, info_dict)
                except (PostProcessingError) as err:
                    self.report_error(u'postprocessing: %s' % str(err))
                    return
def download(self, url_list):
|
||||
"""Download a given list of URLs."""
|
||||
if len(url_list) > 1 and self.fixed_template():
|
||||
raise SameFileError(self.params['outtmpl'])
|
||||
|
||||
for url in url_list:
|
||||
try:
|
||||
#It also downloads the videos
|
||||
videos = self.extract_info(url)
|
||||
except UnavailableVideoError:
|
||||
self.report_error(u'unable to download video')
|
||||
except MaxDownloadsReached:
|
||||
self.to_screen(u'[info] Maximum number of downloaded files reached.')
|
||||
raise
|
||||
|
||||
return self._download_retcode
|
||||
|
||||
def post_process(self, filename, ie_info):
|
||||
"""Run all the postprocessors on the given file."""
|
||||
info = dict(ie_info)
|
||||
info['filepath'] = filename
|
||||
keep_video = None
|
||||
for pp in self._pps:
|
||||
try:
|
||||
keep_video_wish,new_info = pp.run(info)
|
||||
if keep_video_wish is not None:
|
||||
if keep_video_wish:
|
||||
keep_video = keep_video_wish
|
||||
elif keep_video is None:
|
||||
# No clear decision yet, let IE decide
|
||||
keep_video = keep_video_wish
|
||||
except PostProcessingError as e:
|
||||
self.to_stderr(u'ERROR: ' + e.msg)
|
||||
if keep_video is False and not self.params.get('keepvideo', False):
|
||||
try:
|
||||
self.to_screen(u'Deleting original file %s (pass -k to keep)' % filename)
|
||||
os.remove(encodeFilename(filename))
|
||||
except (IOError, OSError):
|
||||
self.report_warning(u'Unable to remove downloaded video file')
|
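The post_process() loop above only assumes that each postprocessor exposes run(info) returning a (keep_video_wish, info) pair, and raises PostProcessingError (with a .msg attribute) on failure. A minimal sketch of a conforming postprocessor, not part of this changeset; the EchoPP name and its behaviour are invented for illustration:

class EchoPP(object):
    """Hypothetical postprocessor honoring the pp.run(info) contract above."""
    def run(self, information):
        # post_process() guarantees 'filepath' is set before calling us
        print('postprocessing %s' % information['filepath'])
        # First element: True = keep the original file, False = delete it,
        # None = no opinion (defer the decision, as handled above)
        return None, information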
5255  youtube_dl/__init__.py  (Executable file → Normal file)
File diff suppressed because it is too large
18  youtube_dl/__main__.py  (Executable file)
@@ -0,0 +1,18 @@
#!/usr/bin/env python

# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl (2.7+)

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(os.path.dirname(path)))

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()
103  youtube_dl/extractor/__init__.py  (Normal file)
@@ -0,0 +1,103 @@
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE
from .arte import ArteTvIE
from .auengine import AUEngineIE
from .bandcamp import BandcampIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .collegehumor import CollegeHumorIE
from .comedycentral import ComedyCentralIE
from .cspan import CSpanIE
from .dailymotion import DailymotionIE
from .depositfiles import DepositFilesIE
from .dotsub import DotsubIE
from .dreisat import DreiSatIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .escapist import EscapistIE
from .facebook import FacebookIE
from .flickr import FlickrIE
from .funnyordie import FunnyOrDieIE
from .gamespot import GameSpotIE
from .gametrailers import GametrailersIE
from .generic import GenericIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .hypem import HypemIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .keek import KeekIE
from .liveleak import LiveLeakIE
from .metacafe import MetacafeIE
from .mixcloud import MixcloudIE
from .mtv import MTVIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .nba import NBAIE
from .photobucket import PhotobucketIE
from .pornotube import PornotubeIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .ringtv import RingTVIE
from .soundcloud import SoundcloudIE, SoundcloudSetIE
from .spiegel import SpiegelIE
from .stanfordoc import StanfordOpenClassroomIE
from .statigram import StatigramIE
from .steam import SteamIE
from .teamcoco import TeamcocoIE
from .ted import TEDIE
from .tf1 import TF1IE
from .traileraddict import TrailerAddictIE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tutv import TutvIE
from .ustream import UstreamIE
from .vbox7 import Vbox7IE
from .veoh import VeohIE
from .vevo import VevoIE
from .vimeo import VimeoIE
from .vine import VineIE
from .wat import WatIE
from .wimp import WimpIE
from .worldstarhiphop import WorldStarHipHopIE
from .xhamster import XHamsterIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .yahoo import YahooIE, YahooSearchIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .youtube import (
    YoutubeIE,
    YoutubePlaylistIE,
    YoutubeSearchIE,
    YoutubeUserIE,
    YoutubeChannelIE,
    YoutubeShowIE,
    YoutubeSubscriptionsIE,
)
from .zdf import ZDFIE


_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
_ALL_CLASSES.append(GenericIE)

def gen_extractors():
    """ Return a list with an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [klass() for klass in _ALL_CLASSES]

def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    return globals()[ie_name + 'IE']
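As the gen_extractors() docstring notes, order matters: the first extractor whose suitable() accepts the URL handles it, which is why GenericIE is deliberately appended last. A minimal sketch of how a caller might route a URL with this module (the find_suitable_extractor helper is illustrative, not part of the file):

from youtube_dl.extractor import gen_extractors

def find_suitable_extractor(url):
    # Instances come back in definition order, GenericIE last, so the
    # catch-all only fires when no specialized extractor claims the URL.
    for ie in gen_extractors():
        if ie.suitable(url):
            return ie
    return None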
67  youtube_dl/extractor/archiveorg.py  (Normal file)
@@ -0,0 +1,67 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    unified_strdate,
)


class ArchiveOrgIE(InfoExtractor):
    IE_NAME = 'archive.org'
    IE_DESC = 'archive.org videos'
    _VALID_URL = r'(?:https?://)?(?:www\.)?archive.org/details/(?P<id>[^?/]+)(?:[?].*)?$'
    _TEST = {
        u"url": u"http://archive.org/details/XD300-23_68HighlightsAResearchCntAugHumanIntellect",
        u'file': u'XD300-23_68HighlightsAResearchCntAugHumanIntellect.ogv',
        u'md5': u'8af1d4cf447933ed3c7f4871162602db',
        u'info_dict': {
            u"title": u"1968 Demo - FJCC Conference Presentation Reel #1",
            u"description": u"Reel 1 of 3: Also known as the \"Mother of All Demos\", Doug Engelbart's presentation at the Fall Joint Computer Conference in San Francisco, December 9, 1968 titled \"A Research Center for Augmenting Human Intellect.\" For this presentation, Doug and his team astonished the audience by not only relating their research, but demonstrating it live. This was the debut of the mouse, interactive computing, hypermedia, computer supported software engineering, video teleconferencing, etc. See also <a href=\"http://dougengelbart.org/firsts/dougs-1968-demo.html\" rel=\"nofollow\">Doug's 1968 Demo page</a> for more background, highlights, links, and the detailed paper published in this conference proceedings. Filmed on 3 reels: Reel 1 | <a href=\"http://www.archive.org/details/XD300-24_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 2</a> | <a href=\"http://www.archive.org/details/XD300-25_68HighlightsAResearchCntAugHumanIntellect\" rel=\"nofollow\">Reel 3</a>",
            u"upload_date": u"19681210",
            u"uploader": u"SRI International"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # append the query with the right separator: '&' if the URL already
        # carries a query string, '?' otherwise
        json_url = url + (u'&' if u'?' in url else u'?') + u'output=json'
        json_data = self._download_webpage(json_url, video_id)
        data = json.loads(json_data)

        title = data['metadata']['title'][0]
        description = data['metadata']['description'][0]
        uploader = data['metadata']['creator'][0]
        upload_date = unified_strdate(data['metadata']['date'][0])

        formats = [{
                'format': fdata['format'],
                'url': 'http://' + data['server'] + data['dir'] + fn,
                'file_size': int(fdata['size']),
            }
            for fn, fdata in data['files'].items()
            if 'Video' in fdata['format']]
        formats.sort(key=lambda fdata: fdata['file_size'])

        info = {
            '_type': 'video',
            'id': video_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
            'upload_date': upload_date,
        }
        thumbnail = data.get('misc', {}).get('image')
        if thumbnail:
            info['thumbnail'] = thumbnail

        # TODO: Remove when #980 has been merged
        info['url'] = formats[-1]['url']
        info['ext'] = determine_ext(formats[-1]['url'])

        return info
54  youtube_dl/extractor/ard.py  (Normal file)
@@ -0,0 +1,54 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)

class ARDIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:(?:www\.)?ardmediathek\.de|mediathek\.daserste\.de)/(?:.*/)(?P<video_id>[^/\?]+)(?:\?.*)?'
    _TITLE = r'<h1(?: class="boxTopHeadline")?>(?P<title>.*)</h1>'
    _MEDIA_STREAM = r'mediaCollection\.addMediaStream\((?P<media_type>\d+), (?P<quality>\d+), "(?P<rtmp_url>[^"]*)", "(?P<video_url>[^"]*)", "[^"]*"\)'
    _TEST = {
        u'url': u'http://www.ardmediathek.de/das-erste/tagesschau-in-100-sek?documentId=14077640',
        u'file': u'14077640.mp4',
        u'md5': u'6ca8824255460c787376353f9e20bbd8',
        u'info_dict': {
            u"title": u"11.04.2013 09:23 Uhr - Tagesschau in 100 Sekunden"
        },
        u'skip': u'Requires rtmpdump'
    }

    def _real_extract(self, url):
        # determine video id from url
        m = re.match(self._VALID_URL, url)

        numid = re.search(r'documentId=([0-9]+)', url)
        if numid:
            video_id = numid.group(1)
        else:
            video_id = m.group('video_id')

        # determine title and media streams from webpage
        html = self._download_webpage(url, video_id)
        title = re.search(self._TITLE, html).group('title')
        streams = [mo.groupdict() for mo in re.finditer(self._MEDIA_STREAM, html)]
        if not streams:
            assert '"fsk"' in html
            raise ExtractorError(u'This video is only available after 8:00 pm')

        # choose default media type and highest quality for now
        stream = max([s for s in streams if int(s["media_type"]) == 0],
                     key=lambda s: int(s["quality"]))

        # there are two possibilities: RTMP stream or HTTP download
        info = {'id': video_id, 'title': title, 'ext': 'mp4'}
        if stream['rtmp_url']:
            self.to_screen(u'RTMP download detected')
            assert stream['video_url'].startswith('mp4:')
            info["url"] = stream["rtmp_url"]
            info["play_path"] = stream['video_url']
        else:
            assert stream["video_url"].endswith('.mp4')
            info["url"] = stream["video_url"]
        return [info]
146  youtube_dl/extractor/arte.py  (Normal file)
@@ -0,0 +1,146 @@
import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    find_xpath_attr,
    unified_strdate,
)

class ArteTvIE(InfoExtractor):
    """
    There are two sources of video in arte.tv: videos.arte.tv and
    www.arte.tv/guide, the extraction process is different for each one.
    The videos expire in 7 days, so we can't add tests.
    """
    _EMISSION_URL = r'(?:http://)?www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    _VIDEOS_URL = r'(?:http://)?videos.arte.tv/(?P<lang>fr|de)/.*-(?P<id>.*?).html'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path', u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url', u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        mobj = re.match(self._EMISSION_URL, url)
        if mobj is not None:
            lang = mobj.group('lang')
            # This is not a real id, it can be for example AJT for the news
            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
            video_id = mobj.group('id')
            return self._extract_emission(url, video_id, lang)

        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, id, lang)

        # check the url, not a video id (none has been extracted at this point)
        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_emission(self, url, video_id, lang):
        """Extract from www.arte.tv/guide"""
        webpage = self._download_webpage(url, video_id)
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

        info_dict = {'id': player_info['VID'],
                     'title': player_info['VTI'],
                     'description': player_info['VDE'],
                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
                     'thumbnail': player_info['programImage'],
                     'ext': 'flv',
                     }

        formats = player_info['VSR'].values()
        def _match_lang(f):
            # Return true if that format is in the language of the url
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'V%s-ST.' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, formats)
        # We order the formats by quality
        formats = sorted(formats, key=lambda f: int(f['height']))
        # Pick the best quality
        format_info = formats[-1]
        if format_info['mediaType'] == u'rtmp':
            info_dict['url'] = format_info['streamer']
            info_dict['play_path'] = 'mp4:' + format_info['url']
        else:
            info_dict['url'] = format_info['url']

        return info_dict

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = list(video_urls)[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }
46  youtube_dl/extractor/auengine.py  (Normal file)
@@ -0,0 +1,46 @@
import os.path
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
)

class AUEngineIE(InfoExtractor):
    _TEST = {
        u'url': u'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
        u'file': u'lfvlytY6.mp4',
        u'md5': u'48972bdbcf1a3a2f5533e62425b41d4f',
        u'info_dict': {
            u"title": u"[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]"
        }
    }
    _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed.php\?.*?file=([^&]+).*?'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
                                        webpage, u'title')
        title = title.strip()
        links = re.findall(r'[^A-Za-z0-9]?(?:file|url):\s*["\'](http[^\'"&]*)', webpage)
        links = [compat_urllib_parse.unquote(l) for l in links]
        thumbnail = None  # the page may not advertise a .png thumbnail
        for link in links:
            root, pathext = os.path.splitext(compat_urllib_parse_urlparse(link).path)
            if pathext == '.png':
                thumbnail = link
            elif pathext == '.mp4':
                url = link
                ext = pathext
        if ext == title[-len(ext):]:
            title = title[:-len(ext)]
        ext = ext[1:]
        return [{
            'id': video_id,
            'url': url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail,
        }]
63  youtube_dl/extractor/bandcamp.py  (Normal file)
@@ -0,0 +1,63 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class BandcampIE(InfoExtractor):
    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'
    _TEST = {
        u'url': u'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        u'file': u'1812978515.mp3',
        u'md5': u'cdeb30cdae1921719a3cbcab696ef53c',
        u'info_dict': {
            u"title": u"youtube-dl test song \"'/\\\u00e4\u21ad"
        },
        u'skip': u'There is a limit of 200 free downloads / month for the test song'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        # We get the link to the free download page
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if m_download is None:
            raise ExtractorError(u'No free songs found')

        download_link = m_download.group(1)
        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
                       webpage, re.MULTILINE | re.DOTALL).group('id')

        download_webpage = self._download_webpage(download_link, id,
                                                  'Downloading free downloads page')
        # We get the dictionary of the track from some javascript code
        info = re.search(r'items: (.*?),$',
                         download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info[u'downloads'][u'mp3-320']
        # If we try to use this url it says the link has expired
        initial_url = mp3_info[u'url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        # We build the url we will use to get the final track url
        # This url is built by Bandcamp in the script download_bunde_*.js
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        # in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

        track_info = {'id': id,
                      'title': info[u'title'],
                      'ext': 'mp3',
                      'url': final_url,
                      'thumbnail': info[u'thumb_url'],
                      'uploader': info[u'artist']
                      }

        return [track_info]
193  youtube_dl/extractor/bliptv.py  (Normal file)
@@ -0,0 +1,193 @@
import datetime
import json
import os
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_urllib_request,

    ExtractorError,
    unescapeHTML,
)


class BlipTVIE(InfoExtractor):
    """Information extractor for blip.tv"""

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?blip\.tv/((.+/)|(play/)|(api\.swf#))(.+)$'
    _URL_EXT = r'^.*\.([a-z0-9]+)$'
    IE_NAME = u'blip.tv'
    _TEST = {
        u'url': u'http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352',
        u'file': u'5779306.m4v',
        u'md5': u'80baf1ec5c3d2019037c1c707d676b9f',
        u'info_dict': {
            u"upload_date": u"20111205",
            u"description": u"md5:9bc31f227219cde65e47eeec8d2dc596",
            u"uploader": u"Comic Book Resources - CBR TV",
            u"title": u"CBR EXCLUSIVE: \"Gotham City Imposters\" Bats VS Jokerz Short 3"
        }
    }

    def report_direct_download(self, title):
        """Report information extraction."""
        self.to_screen(u'%s: Direct download detected' % title)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # See https://github.com/rg3/youtube-dl/issues/857
        api_mobj = re.match(r'http://a\.blip\.tv/api\.swf#(?P<video_id>[\d\w]+)', url)
        if api_mobj is not None:
            url = 'http://blip.tv/play/g_%s' % api_mobj.group('video_id')
        urlp = compat_urllib_parse_urlparse(url)
        if urlp.path.startswith('/play/'):
            request = compat_urllib_request.Request(url)
            response = compat_urllib_request.urlopen(request)
            redirecturl = response.geturl()
            rurlp = compat_urllib_parse_urlparse(redirecturl)
            file_id = compat_parse_qs(rurlp.fragment)['file'][0].rpartition('/')[2]
            url = 'http://blip.tv/a/a-' + file_id
            return self._real_extract(url)

        if '?' in url:
            cchar = '&'
        else:
            cchar = '?'
        json_url = url + cchar + 'skin=json&version=2&no_wrap=1'
        request = compat_urllib_request.Request(json_url)
        request.add_header('User-Agent', 'iTunes/10.6.1')
        self.report_extraction(mobj.group(1))
        info = None
        try:
            urlh = compat_urllib_request.urlopen(request)
            if urlh.headers.get('Content-Type', '').startswith('video/'):  # Direct download
                basename = url.split('/')[-1]
                title, ext = os.path.splitext(basename)
                title = title.decode('UTF-8')
                ext = ext.replace('.', '')
                self.report_direct_download(title)
                info = {
                    'id': title,
                    'url': url,
                    'uploader': None,
                    'upload_date': None,
                    'title': title,
                    'ext': ext,
                    'urlhandle': urlh
                }
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
        if info is None:  # Regular URL
            try:
                json_code_bytes = urlh.read()
                json_code = json_code_bytes.decode('utf-8')
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to read video info webpage: %s' % compat_str(err))

            try:
                json_data = json.loads(json_code)
                if 'Post' in json_data:
                    data = json_data['Post']
                else:
                    data = json_data

                upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
                if 'additionalMedia' in data:
                    formats = sorted(data['additionalMedia'], key=lambda f: int(f['media_height']))
                    best_format = formats[-1]
                    video_url = best_format['url']
                else:
                    video_url = data['media']['url']
                umobj = re.match(self._URL_EXT, video_url)
                if umobj is None:
                    raise ValueError('Can not determine filename extension')
                ext = umobj.group(1)

                info = {
                    'id': data['item_id'],
                    'url': video_url,
                    'uploader': data['display_name'],
                    'upload_date': upload_date,
                    'title': data['title'],
                    'ext': ext,
                    'format': data['media']['mimeType'],
                    'thumbnail': data['thumbnailUrl'],
                    'description': data['description'],
                    'player_url': data['embedUrl'],
                    'user_agent': 'iTunes/10.6.1',
                }
            except (ValueError, KeyError) as err:
                raise ExtractorError(u'Unable to parse video information: %s' % repr(err))

        return [info]


class BlipTVUserIE(InfoExtractor):
    """Information Extractor for blip.tv users."""

    _VALID_URL = r'(?:(?:(?:https?://)?(?:\w+\.)?blip\.tv/)|bliptvuser:)([^/]+)/*$'
    _PAGE_SIZE = 12
    IE_NAME = u'blip.tv:user'

    def _real_extract(self, url):
        # Extract username
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        username = mobj.group(1)

        page_base = 'http://m.blip.tv/pr/show_get_full_episode_list?users_id=%s&lite=0&esi=1'

        page = self._download_webpage(url, username, u'Downloading user page')
        mobj = re.search(r'data-users-id="([^"]+)"', page)
        page_base = page_base % mobj.group(1)

        # Download video ids using BlipTV Ajax calls. Result size per
        # query is limited (currently to 12 videos) so we need to query
        # page by page until there are no video ids - it means we got
        # all of them.

        video_ids = []
        pagenum = 1

        while True:
            url = page_base + "&page=" + str(pagenum)
            page = self._download_webpage(url, username,
                                          u'Downloading video ids from page %d' % pagenum)

            # Extract video identifiers
            ids_in_page = []

            for mobj in re.finditer(r'href="/([^"]+)"', page):
                if mobj.group(1) not in ids_in_page:
                    ids_in_page.append(unescapeHTML(mobj.group(1)))

            video_ids.extend(ids_in_page)

            # A little optimization - if the current page is not "full",
            # i.e. does not contain PAGE_SIZE video ids, then we can assume
            # that this page is the last one - there are no more ids on
            # further pages - no need to query again.

            if len(ids_in_page) < self._PAGE_SIZE:
                break

            pagenum += 1

        urls = [u'http://blip.tv/%s' % video_id for video_id in video_ids]
        url_entries = [self.url_result(vurl, 'BlipTV') for vurl in urls]
        return [self.playlist_result(url_entries, playlist_title=username)]
33  youtube_dl/extractor/breakcom.py  (Normal file)
@@ -0,0 +1,33 @@
import re

from .common import InfoExtractor


class BreakIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
    _TEST = {
        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
        u'file': u'2468056.mp4',
        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
        u'info_dict': {
            u"title": u"When Girls Act Like D-Bags"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1).split("-")[-1]
        webpage = self._download_webpage(url, video_id)
        video_url = re.search(r"videoPath: '(.+?)',", webpage).group(1)
        key = re.search(r"icon: '(.+?)',", webpage).group(1)
        final_url = str(video_url) + "?" + str(key)
        thumbnail_url = re.search(r"thumbnailURL: '(.+?)'", webpage).group(1)
        title = re.search(r"sVidTitle: '(.+)',", webpage).group(1)
        ext = video_url.split('.')[-1]
        return [{
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
        }]
85  youtube_dl/extractor/brightcove.py  (Normal file)
@@ -0,0 +1,85 @@
import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    find_xpath_attr,
)

class BrightcoveIE(InfoExtractor):
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
    _PLAYLIST_URL_TEMPLATE = 'http://c.brightcove.com/services/json/experience/runtime/?command=get_programming_for_experience&playerKey=%s'

    # There is a test for Brightcove in GenericIE; that way we test both the download
    # and the detection of videos, and we don't have to find a URL that is always valid

    @classmethod
    def _build_brighcove_url(cls, object_str):
        """
        Build a Brightcove url from a xml string containing
        <object class="BrightcoveExperience">{params}</object>
        """
        object_doc = xml.etree.ElementTree.fromstring(object_str)
        assert u'BrightcoveExperience' in object_doc.attrib['class']
        params = {'flashID': object_doc.attrib['id'],
                  'playerID': find_xpath_attr(object_doc, './param', 'name', 'playerID').attrib['value'],
                  }
        playerKey = find_xpath_attr(object_doc, './param', 'name', 'playerKey')
        # Not all pages define this value
        if playerKey is not None:
            params['playerKey'] = playerKey.attrib['value']
        videoPlayer = find_xpath_attr(object_doc, './param', 'name', '@videoPlayer')
        if videoPlayer is not None:
            params['@videoPlayer'] = videoPlayer.attrib['value']
        data = compat_urllib_parse.urlencode(params)
        return cls._FEDERATED_URL_TEMPLATE % data

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        query = mobj.group('query')

        m_video_id = re.search(r'videoPlayer=(\d+)', query)
        if m_video_id is not None:
            video_id = m_video_id.group(1)
            return self._get_video_info(video_id, query)
        else:
            player_key = self._search_regex(r'playerKey=(.+?)(&|$)', query, 'playlist_id')
            return self._get_playlist_info(player_key)

    def _get_video_info(self, video_id, query):
        request_url = self._FEDERATED_URL_TEMPLATE % query
        webpage = self._download_webpage(request_url, video_id)

        self.report_extraction(video_id)
        info = self._search_regex(r'var experienceJSON = ({.*?});', webpage, 'json')
        info = json.loads(info)['data']
        video_info = info['programmedContent']['videoPlayer']['mediaDTO']

        return self._extract_video_info(video_info)

    def _get_playlist_info(self, player_key):
        playlist_info = self._download_webpage(self._PLAYLIST_URL_TEMPLATE % player_key,
                                               player_key, u'Downloading playlist information')

        playlist_info = json.loads(playlist_info)['videoList']
        videos = [self._extract_video_info(video_info) for video_info in playlist_info['mediaCollectionDTO']['videoDTOs']]

        return self.playlist_result(videos, playlist_id=playlist_info['id'],
                                    playlist_title=playlist_info['mediaCollectionDTO']['displayName'])

    def _extract_video_info(self, video_info):
        renditions = video_info['renditions']
        renditions = sorted(renditions, key=lambda r: r['size'])
        best_format = renditions[-1]

        return {'id': video_info['id'],
                'title': video_info['displayName'],
                'url': best_format['defaultURL'],
                'ext': 'mp4',
                'description': video_info.get('shortDescription'),
                'thumbnail': video_info.get('videoStillURL') or video_info.get('thumbnailURL'),
                'uploader': video_info.get('publisherName'),
                }
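A hedged usage sketch for the _build_brighcove_url classmethod above. Only the param names (playerID, playerKey, @videoPlayer) come from the code; the id and value strings below are invented, and the printed URL is abbreviated:

from youtube_dl.extractor.brightcove import BrightcoveIE

# Invented sample of the <object class="BrightcoveExperience"> markup the
# method parses; real pages carry site-specific values for the same params.
object_str = (
    '<object id="myExperience" class="BrightcoveExperience">'
    '<param name="playerID" value="1234567890123" />'
    '<param name="@videoPlayer" value="3210987654321" />'
    '</object>'
)
print(BrightcoveIE._build_brighcove_url(object_str))
# e.g. http://c.brightcove.com/services/viewer/htmlFederated?flashID=myExperience&playerID=...&%40videoPlayer=...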
74  youtube_dl/extractor/collegehumor.py  (Normal file)
@@ -0,0 +1,74 @@
import re
import socket
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_urllib_request,

    ExtractorError,
)


class CollegeHumorIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'^(?:https?://)?(?:www\.)?collegehumor\.com/video/(?P<videoid>[0-9]+)/(?P<shorttitle>.*)$'

    def report_manifest(self, video_id):
        """Report information extraction."""
        self.to_screen(u'%s: Downloading XML manifest' % video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('videoid')

        info = {
            'id': video_id,
            'uploader': None,
            'upload_date': None,
        }

        self.report_extraction(video_id)
        xmlUrl = 'http://www.collegehumor.com/moogaloop/video/' + video_id
        try:
            metaXml = compat_urllib_request.urlopen(xmlUrl).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))

        mdoc = xml.etree.ElementTree.fromstring(metaXml)
        try:
            videoNode = mdoc.findall('./video')[0]
            info['description'] = videoNode.findall('./description')[0].text
            info['title'] = videoNode.findall('./caption')[0].text
            info['thumbnail'] = videoNode.findall('./thumbnail')[0].text
            manifest_url = videoNode.findall('./file')[0].text
        except IndexError:
            raise ExtractorError(u'Invalid metadata XML file')

        manifest_url += '?hdcore=2.10.3'
        self.report_manifest(video_id)
        try:
            manifestXml = compat_urllib_request.urlopen(manifest_url).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))

        adoc = xml.etree.ElementTree.fromstring(manifestXml)
        try:
            media_node = adoc.findall('./{http://ns.adobe.com/f4m/1.0}media')[0]
            node_id = media_node.attrib['url']
            video_id = adoc.findall('./{http://ns.adobe.com/f4m/1.0}id')[0].text
        except IndexError as err:
            raise ExtractorError(u'Invalid manifest file')

        url_pr = compat_urllib_parse_urlparse(manifest_url)
        url = url_pr.scheme + '://' + url_pr.netloc + '/z' + video_id[:-2] + '/' + node_id + 'Seg1-Frag1'

        info['url'] = url
        info['ext'] = 'f4f'
        return [info]
189  youtube_dl/extractor/comedycentral.py  (Normal file)
@@ -0,0 +1,189 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,

    ExtractorError,
    unified_strdate,
)


class ComedyCentralIE(InfoExtractor):
    IE_DESC = u'The Daily Show / Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
                      |(https?://)?(www\.)?
                          (?P<showname>thedailyshow|colbertnation)\.com/
                         (full-episodes/(?P<episode>.*)|
                          (?P<clip>
                              (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
                              |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))))
                     $"""
    _TEST = {
        u'url': u'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
        u'file': u'422212.mp4',
        u'md5': u'4e2f5cb088a83cd8cdb7756132f9739d',
        u'info_dict': {
            u"upload_date": u"20121214",
            u"description": u"Kristen Stewart",
            u"uploader": u"thedailyshow",
            u"title": u"thedailyshow-kristen-stewart part 1"
        }
    }

    _available_formats = ['3500', '2200', '1700', '1200', '750', '400']

    _video_extensions = {
        '3500': 'mp4',
        '2200': 'mp4',
        '1700': 'mp4',
        '1200': 'mp4',
        '750': 'mp4',
        '400': 'mp4',
    }
    _video_dimensions = {
        '3500': '1280x720',
        '2200': '960x540',
        '1700': '768x432',
        '1200': '640x360',
        '750': '512x288',
        '400': '384x216',
    }

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def _print_formats(self, formats):
        print('Available formats:')
        for x in formats:
            print('%s\t:\t%s\t[%s]' % (x, self._video_extensions.get(x, 'mp4'), self._video_dimensions.get(x, '???')))

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, re.VERBOSE)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        if mobj.group('shortname'):
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = u'http://www.thedailyshow.com/full-episodes/'
            else:
                url = u'http://www.colbertnation.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            assert mobj is not None

        if mobj.group('clip'):
            if mobj.group('showname') == 'thedailyshow':
                epTitle = mobj.group('tdstitle')
            else:
                epTitle = mobj.group('cntitle')
            dlNewest = False
        else:
            dlNewest = not mobj.group('episode')
            if dlNewest:
                epTitle = mobj.group('showname')
            else:
                epTitle = mobj.group('episode')

        self.report_extraction(epTitle)
        webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            if mobj is None:
                raise ExtractorError(u'Invalid redirected URL: ' + url)
            if mobj.group('episode') == '':
                raise ExtractorError(u'Redirected URL is still not specific: ' + url)
            epTitle = mobj.group('episode')

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)

        if len(mMovieParams) == 0:
            # The Colbert Report embeds the information without a URL prefix;
            # so extract the alternate reference and then add the URL prefix
            # manually.
            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
            if len(altMovieParams) == 0:
                raise ExtractorError(u'unable to find Flash URL in webpage ' + url)
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        uri = mMovieParams[0][1]
        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
        indexXml = self._download_webpage(indexUrl, epTitle,
                                          u'Downloading show index',
                                          u'unable to download episode index')

        results = []

        idoc = xml.etree.ElementTree.fromstring(indexXml)
        itemEls = idoc.findall('.//item')
        for partNum, itemEl in enumerate(itemEls):
            mediaId = itemEl.findall('./guid')[0].text
            shortMediaId = mediaId.split(':')[-1]
            showId = mediaId.split(':')[-2].replace('.com', '')
            officialTitle = itemEl.findall('./title')[0].text
            officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)

            configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
                         compat_urllib_parse.urlencode({'uri': mediaId}))
            configXml = self._download_webpage(configUrl, epTitle,
                                               u'Downloading configuration for %s' % shortMediaId)

            cdoc = xml.etree.ElementTree.fromstring(configXml)
            turls = []
            for rendition in cdoc.findall('.//rendition'):
                finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
                turls.append(finfo)

            if len(turls) == 0:
                self._downloader.report_error(u'unable to download ' + mediaId + ': No videos found')
                continue

            if self._downloader.params.get('listformats', None):
                self._print_formats([i[0] for i in turls])
                return

            # For now, just pick the highest bitrate
            format, rtmp_video_url = turls[-1]

            # Get the format arg from the arg stream
            req_format = self._downloader.params.get('format', None)

            # Select format if we can find one
            for f, v in turls:
                if f == req_format:
                    format, rtmp_video_url = f, v
                    break

            m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp.comedystor/.*)$', rtmp_video_url)
            if not m:
                raise ExtractorError(u'Cannot transform RTMP url')
            base = 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=1+_pxI0=Ripod-h264+_pxL0=undefined+_pxM0=+_pxK=18639+_pxE=mp4/44620/mtvnorigin/'
            video_url = base + m.group('finalid')

            effTitle = showId + u'-' + epTitle + u' part ' + compat_str(partNum + 1)
            info = {
                'id': shortMediaId,
                'url': video_url,
                'uploader': showId,
                'upload_date': officialDate,
                'title': effTitle,
                'ext': 'mp4',
                'format': format,
                'thumbnail': None,
                'description': compat_str(officialTitle),
            }
            results.append(info)

        return results
301  youtube_dl/extractor/common.py  (Normal file)
@@ -0,0 +1,301 @@
import base64
import os
import re
import socket
import sys
import netrc

from ..utils import (
    compat_http_client,
    compat_urllib_error,
    compat_urllib_request,
    compat_str,

    clean_html,
    compiled_regex_type,
    ExtractorError,
)

class InfoExtractor(object):
    """Information Extractor class.

    Information extractors are the classes that, given a URL, extract
    information about the video (or videos) the URL refers to. This
    information includes the real video URL, the video title, author and
    others. The information is stored in a dictionary which is then
    passed to the FileDownloader. The FileDownloader processes this
    information possibly downloading the video to the file system, among
    other possible outcomes.

    The dictionaries must include the following fields:

    id:             Video identifier.
    url:            Final video URL.
    title:          Video title, unescaped.
    ext:            Video filename extension.

    The following fields are optional:

    format:         The video format, defaults to ext (used for --get-format)
    thumbnails:     A list of dictionaries (with the entries "resolution" and
                    "url") for the varying thumbnails
    thumbnail:      Full URL to a video thumbnail image.
    description:    One-line video description.
    uploader:       Full name of the video uploader.
    upload_date:    Video upload date (YYYYMMDD).
    uploader_id:    Nickname or id of the video uploader.
    location:       Physical location of the video.
    player_url:     SWF Player URL (used for rtmpdump).
    subtitles:      The subtitle file contents.
    view_count:     How many users have watched the video on the platform.
    urlhandle:      [internal] The urlHandle to be used to download the file,
                    like returned by urllib.request.urlopen

    The fields should all be Unicode strings.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.

    _real_extract() must return a *list* of information dictionaries as
    described above.

    Finally, the _WORKING attribute should be set to False for broken IEs
    in order to warn the users and skip the tests.
    """

    _ready = False
    _downloader = None
    _WORKING = True

    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self.set_downloader(downloader)

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url) is not None

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
        return cls._WORKING

    def initialize(self):
        """Initializes an instance (authentication, etc)."""
        if not self._ready:
            self._real_initialize()
            self._ready = True

    def extract(self, url):
        """Extracts URL information and returns it in list of dicts."""
        self.initialize()
        return self._real_extract(url)

    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader

    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        pass

    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        pass

    @property
    def IE_NAME(self):
        return type(self).__name__[:-2]

    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the response handle """
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            self.to_screen(u'%s: %s' % (video_id, note))
        try:
            return compat_urllib_request.urlopen(url_or_request)
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            if errnote is None:
                errnote = u'Unable to download webpage'
            raise ExtractorError(u'%s: %s' % (errnote, compat_str(err)), sys.exc_info()[2])

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns a tuple (page content as string, URL handle) """
        urlh = self._request_webpage(url_or_request, video_id, note, errnote)
        content_type = urlh.headers.get('Content-Type', '')
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
        if m:
            encoding = m.group(1)
        else:
            encoding = 'utf-8'
        webpage_bytes = urlh.read()
        if self._downloader.params.get('dump_intermediate_pages', False):
            try:
                url = url_or_request.get_full_url()
            except AttributeError:
                url = url_or_request
            self.to_screen(u'Dumping request to ' + url)
            dump = base64.b64encode(webpage_bytes).decode('ascii')
            self._downloader.to_screen(dump)
        content = webpage_bytes.decode(encoding, 'replace')
        return (content, urlh)

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None):
        """ Returns the data of the page as a string """
        return self._download_webpage_handle(url_or_request, video_id, note, errnote)[0]

    def to_screen(self, msg):
        """Print msg to screen, prefixing it with '[ie_name]'"""
        self._downloader.to_screen(u'[%s] %s' % (self.IE_NAME, msg))

    def report_extraction(self, id_or_name):
        """Report information extraction."""
        self.to_screen(u'%s: Extracting information' % id_or_name)

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        self.to_screen(u'%s: Downloading webpage' % video_id)

    def report_age_confirmation(self):
        """Report attempt to confirm age."""
        self.to_screen(u'Confirming age')

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    # Methods for following #608
    def url_result(self, url, ie=None):
        """Returns a url that points to a page that should be processed"""
        # TODO: ie should be the class used for getting the info
        video_info = {'_type': 'url',
                      'url': url,
                      'ie_key': ie}
        return video_info

    def playlist_result(self, entries, playlist_id=None, playlist_title=None):
        """Returns a playlist"""
        video_info = {'_type': 'playlist',
                      'entries': entries}
        if playlist_id:
            video_info['id'] = playlist_id
        if playlist_title:
            video_info['title'] = playlist_title
        return video_info

    def _search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
        In case of failure return a default value or raise a WARNING or an
        ExtractorError, depending on fatal, specifying the field name.
        """
        if isinstance(pattern, (str, compat_str, compiled_regex_type)):
            mobj = re.search(pattern, string, flags)
        else:
            for p in pattern:
                mobj = re.search(p, string, flags)
                if mobj:
                    break

        if sys.stderr.isatty() and os.name != 'nt':
            _name = u'\033[0;34m%s\033[0m' % name
        else:
            _name = name

        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        elif default is not None:
            return default
        elif fatal:
            raise ExtractorError(u'Unable to extract %s' % _name)
        else:
            self._downloader.report_warning(u'unable to extract %s; '
                u'please report this issue on http://yt-dl.org/bug' % _name)
            return None

    def _html_search_regex(self, pattern, string, name, default=None, fatal=True, flags=0):
        """
        Like _search_regex, but strips HTML tags and unescapes entities.
        """
        res = self._search_regex(pattern, string, name, default, fatal, flags)
        if res:
            return clean_html(res).strip()
        else:
            return res

    def _get_login_info(self):
        """
        Get the login info as (username, password)
        It will look in the netrc file using the _NETRC_MACHINE value
        If there's no info available, return (None, None)
        """
        if self._downloader is None:
            return (None, None)

        username = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            username = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    username = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))

        return (username, password)


class SearchInfoExtractor(InfoExtractor):
    """
    Base class for paged search queries extractors.
    They accept urls in the format _SEARCH_KEY(|all|[0-9]):{query}
    Instances should define _SEARCH_KEY and _MAX_RESULTS.
    """

    @classmethod
    def _make_valid_url(cls):
        return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY

    @classmethod
    def suitable(cls, url):
        return re.match(cls._make_valid_url(), url) is not None

    def _real_extract(self, query):
        mobj = re.match(self._make_valid_url(), query)
        if mobj is None:
            raise ExtractorError(u'Invalid search query "%s"' % query)

        prefix = mobj.group('prefix')
        query = mobj.group('query')
        if prefix == '':
            return self._get_n_results(query, 1)
        elif prefix == 'all':
            return self._get_n_results(query, self._MAX_RESULTS)
        else:
            n = int(prefix)
            if n <= 0:
                raise ExtractorError(u'invalid download number %s for query "%s"' % (n, query))
            elif n > self._MAX_RESULTS:
                self._downloader.report_warning(u'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
                n = self._MAX_RESULTS
            return self._get_n_results(query, n)

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""
        raise NotImplementedError("This method must be implemented by subclasses")

    @property
    def SEARCH_KEY(self):
        return self._SEARCH_KEY
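The InfoExtractor docstring above pins down the contract: define a _VALID_URL regexp, implement _real_extract(), and return a list of dicts carrying at least id, url, title and ext. A minimal sketch of a conforming extractor; ExampleIE, example.com and the regex are invented for illustration. (For SearchInfoExtractor subclasses the analogous entry points are query strings like "ytsearch:foo", "ytsearch5:foo" or "ytsearchall:foo", per _make_valid_url().)

import re

from .common import InfoExtractor

class ExampleIE(InfoExtractor):
    # Invented site/pattern; a real extractor is also registered in
    # youtube_dl/extractor/__init__.py so gen_extractors() can find it.
    _VALID_URL = r'(?:https?://)?(?:www\.)?example\.com/video/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<title>(.+?)</title>',
                                        webpage, u'title')
        # At this point in the code base _real_extract() returns a *list*
        # of info dicts (see the class docstring).
        return [{
            'id': video_id,
            'url': u'http://example.com/videos/%s.mp4' % video_id,
            'ext': u'mp4',
            'title': title,
        }]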
53  youtube_dl/extractor/cspan.py  (Normal file)
@@ -0,0 +1,53 @@
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
)

class CSpanIE(InfoExtractor):
    _VALID_URL = r'http://www.c-spanvideo.org/program/(.*)'
    _TEST = {
        u'url': u'http://www.c-spanvideo.org/program/HolderonV',
        u'file': u'315139.flv',
        u'md5': u'74a623266956f69e4df0068ab6c80fe4',
        u'info_dict': {
            u"title": u"Attorney General Eric Holder on Voting Rights Act Decision"
        },
        u'skip': u'Requires rtmpdump'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        prog_name = mobj.group(1)
        webpage = self._download_webpage(url, prog_name)
        video_id = self._search_regex(r'programid=(.*?)&', webpage, 'video id')
        data = compat_urllib_parse.urlencode({'programid': video_id,
                                              'dynamic': '1'})
        info_url = 'http://www.c-spanvideo.org/common/services/flashXml.php?' + data
        video_info = self._download_webpage(info_url, video_id, u'Downloading video info')

        self.report_extraction(video_id)

        title = self._html_search_regex(r'<string name="title">(.*?)</string>',
                                        video_info, 'title')
        description = self._html_search_regex(r'<meta (?:property="og:|name=")description" content="(.*?)"',
                                              webpage, 'description',
                                              flags=re.MULTILINE | re.DOTALL)
        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.*?)"',
                                            webpage, 'thumbnail')

        url = self._search_regex(r'<string name="URL">(.*?)</string>',
                                 video_info, 'video url')
        url = url.replace('$(protocol)', 'rtmp').replace('$(port)', '443')
        path = self._search_regex(r'<string name="path">(.*?)</string>',
                                  video_info, 'rtmp play path')

        return {'id': video_id,
                'title': title,
                'ext': 'flv',
                'url': url,
                'play_path': path,
                'description': description,
                'thumbnail': thumbnail,
                }
youtube_dl/extractor/dailymotion.py (new file, 82 lines)
@@ -0,0 +1,82 @@
import re
import json

from .common import InfoExtractor
from ..utils import (
    compat_urllib_request,

    ExtractorError,
)


class DailymotionIE(InfoExtractor):
    """Information Extractor for Dailymotion"""

    _VALID_URL = r'(?i)(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/video/([^/]+)'
    IE_NAME = u'dailymotion'
    _TEST = {
        u'url': u'http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech',
        u'file': u'x33vw9.mp4',
        u'md5': u'392c4b85a60a90dc4792da41ce3144eb',
        u'info_dict': {
            u"uploader": u"Alex and Van .",
            u"title": u"Tutoriel de Youtubeur\"DL DES VIDEO DE YOUTUBE\""
        }
    }

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group(1).split('_')[0].split('?')[0]

        video_extension = 'mp4'

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url)
        request.add_header('Cookie', 'family_filter=off')
        webpage = self._download_webpage(request, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)

        video_title = self._html_search_regex(r'<meta property="og:title" content="(.*?)" />',
                                              webpage, 'title')

        video_uploader = self._search_regex([r'(?im)<span class="owner[^\"]+?">[^<]+?<a [^>]+?>([^<]+?)</a>',
                                             # Looking for official user
                                             r'<(?:span|a) .*?rel="author".*?>([^<]+?)</'],
                                            webpage, 'video uploader')

        video_upload_date = None
        mobj = re.search(r'<div class="[^"]*uploaded_cont[^"]*" title="[^"]*">([0-9]{2})-([0-9]{2})-([0-9]{4})</div>', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(3) + mobj.group(2) + mobj.group(1)

        embed_url = 'http://www.dailymotion.com/embed/video/%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
                                            u'Downloading embed page')
        info = self._search_regex(r'var info = ({.*?}),', embed_page, 'video info')
        info = json.loads(info)

        # TODO: support choosing qualities

        for key in ['stream_h264_hd1080_url', 'stream_h264_hd_url',
                    'stream_h264_hq_url', 'stream_h264_url',
                    'stream_h264_ld_url']:
            if info.get(key):
                max_quality = key
                self.to_screen(u'Using %s' % key)
                break
        else:
            raise ExtractorError(u'Unable to extract video URL')
        video_url = info[max_quality]

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': video_upload_date,
            'title': video_title,
            'ext': video_extension,
            'thumbnail': info['thumbnail_url']
        }]
youtube_dl/extractor/depositfiles.py (new file, 60 lines)
@@ -0,0 +1,60 @@
import re
import os
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)


class DepositFilesIE(InfoExtractor):
    """Information extractor for depositfiles.com"""

    _VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'

    def _real_extract(self, url):
        file_id = url.split('/')[-1]
        # Rebuild url in english locale
        url = 'http://depositfiles.com/en/files/' + file_id

        # Retrieve file webpage with 'Free download' button pressed
        free_download_indication = {'gateway_result': '1'}
        request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
        try:
            self.report_download_webpage(file_id)
            webpage = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))

        # Search for the real file URL
        mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
        if (mobj is None) or (mobj.group(1) is None):
            # Try to figure out the reason for the error.
            mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
            if (mobj is not None) and (mobj.group(1) is not None):
                restriction_message = re.sub(r'\s+', ' ', mobj.group(1)).strip()
                raise ExtractorError(u'%s' % restriction_message)
            else:
                raise ExtractorError(u'Unable to extract download URL from: %s' % url)

        file_url = mobj.group(1)
        file_extension = os.path.splitext(file_url)[1][1:]

        # Search for file title
        file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': None,
            'upload_date': None,
            'title': file_title,
            'ext': file_extension.decode('utf-8'),
        }]
youtube_dl/extractor/dotsub.py (new file, 41 lines)
@@ -0,0 +1,41 @@
import re
import json
import time

from .common import InfoExtractor


class DotsubIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
    _TEST = {
        u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
        u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
        u'md5': u'0914d4d69605090f623b7ac329fea66e',
        u'info_dict': {
            u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
            u"uploader": u"4v4l0n42",
            u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
            u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
            u'upload_date': u'20101213',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
        webpage = self._download_webpage(info_url, video_id)
        info = json.loads(webpage)
        date = time.gmtime(info['dateCreated']/1000)  # The timestamp is in milliseconds

        return [{
            'id': video_id,
            'url': info['mediaURI'],
            'ext': 'flv',
            'title': info['title'],
            'thumbnail': info['screenshotURI'],
            'description': info['description'],
            'uploader': info['user'],
            'view_count': info['numberOfViews'],
            'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
        }]
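A quick check of the millisecond-timestamp conversion used above. The input value is chosen to match the test's upload_date, not captured from the API:

    import time
    date = time.gmtime(1292198400000 / 1000)  # milliseconds -> seconds
    print(u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday))  # 20101213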
youtube_dl/extractor/dreisat.py (new file, 85 lines)
@@ -0,0 +1,85 @@
# coding: utf-8

import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    unified_strdate,
)


class DreiSatIE(InfoExtractor):
    IE_NAME = '3sat'
    _VALID_URL = r'(?:http://)?(?:www\.)?3sat.de/mediathek/index.php\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
    _TEST = {
        u"url": u"http://www.3sat.de/mediathek/index.php?obj=36983",
        u'file': u'36983.webm',
        u'md5': u'57c97d0469d71cf874f6815aa2b7c944',
        u'info_dict': {
            u"title": u"Kaffeeland Schweiz",
            u"description": u"Über 80 Kaffeeröstereien liefern in der Schweiz das Getränk, in das das Land so vernarrt ist: Mehr als 1000 Tassen trinkt ein Schweizer pro Jahr. SCHWEIZWEIT nimmt die Kaffeekultur unter die...",
            u"uploader": u"3sat",
            u"upload_date": u"20130622"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        details_url = 'http://www.3sat.de/mediathek/xmlservice/web/beitragsDetails?ak=web&id=%s' % video_id
        details_xml = self._download_webpage(details_url, video_id, note=u'Downloading video details')
        details_doc = xml.etree.ElementTree.fromstring(details_xml.encode('utf-8'))

        thumbnail_els = details_doc.findall('.//teaserimage')
        thumbnails = [{
            'width': te.attrib['key'].partition('x')[0],
            'height': te.attrib['key'].partition('x')[2],
            'url': te.text,
        } for te in thumbnail_els]

        information_el = details_doc.find('.//information')
        video_title = information_el.find('./title').text
        video_description = information_el.find('./detail').text

        details_el = details_doc.find('.//details')
        video_uploader = details_el.find('./channel').text
        upload_date = unified_strdate(details_el.find('./airtime').text)

        format_els = details_doc.findall('.//formitaet')
        formats = [{
            'format_id': fe.attrib['basetype'],
            'width': int(fe.find('./width').text),
            'height': int(fe.find('./height').text),
            'url': fe.find('./url').text,
            'filesize': int(fe.find('./filesize').text),
            'video_bitrate': int(fe.find('./videoBitrate').text),
            '3sat_qualityname': fe.find('./quality').text,
        } for fe in format_els
            if not fe.find('./url').text.startswith('http://www.metafilegenerator.de/')]

        def _sortkey(format):
            qidx = ['low', 'med', 'high', 'veryhigh'].index(format['3sat_qualityname'])
            prefer_http = 1 if 'rtmp' in format['url'] else 0
            return (qidx, prefer_http, format['video_bitrate'])
        formats.sort(key=_sortkey)

        info = {
            '_type': 'video',
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'description': video_description,
            'thumbnails': thumbnails,
            'thumbnail': thumbnails[-1]['url'],
            'uploader': video_uploader,
            'upload_date': upload_date,
        }

        # TODO: Remove when #980 has been merged
        info['url'] = formats[-1]['url']
        info['ext'] = determine_ext(formats[-1]['url'])

        return info
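The tuple returned by _sortkey sorts lexicographically: quality name dominates, then URL scheme, then bitrate, and the last element after the ascending sort is the one picked for download. A small worked example with made-up key values:

    keys = [(0, 0, 300),    # 'low', http URL
            (3, 0, 1500),   # 'veryhigh', http URL
            (3, 1, 1500)]   # 'veryhigh', rtmp URL
    print(sorted(keys)[-1])  # (3, 1, 1500): at equal quality and bitrate,
                             # the rtmp variant sorts last and wins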
youtube_dl/extractor/ehow.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import re

from ..utils import (
    compat_urllib_parse,
    determine_ext
)
from .common import InfoExtractor


class EHowIE(InfoExtractor):
    IE_NAME = u'eHow'
    _VALID_URL = r'(?:https?://)?(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
    _TEST = {
        u'url': u'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
        u'file': u'12245069.flv',
        u'md5': u'9809b4e3f115ae2088440bcb4efbf371',
        u'info_dict': {
            u"title": u"Hardwood Flooring Basics",
            u"description": u"Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...",
            u"uploader": u"Erick Nathan"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
                                       webpage, u'video URL')
        final_url = compat_urllib_parse.unquote(video_url)
        thumbnail_url = self._search_regex(r'<meta property="og:image" content="(.+?)" />',
                                           webpage, u'thumbnail URL')
        uploader = self._search_regex(r'<meta name="uploader" content="(.+?)" />',
                                      webpage, u'uploader')
        title = self._search_regex(r'<meta property="og:title" content="(.+?)" />',
                                   webpage, u'Video title').replace(' | eHow', '')
        description = self._search_regex(r'<meta property="og:description" content="(.+?)" />',
                                         webpage, u'video description')
        ext = determine_ext(final_url)

        return {
            '_type': 'video',
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'description': description,
            'uploader': uploader,
        }
youtube_dl/extractor/eighttracks.py (new file, 122 lines)
@@ -0,0 +1,122 @@
import itertools
import json
import random
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class EightTracksIE(InfoExtractor):
    IE_NAME = '8tracks'
    _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
    _TEST = {
        u"name": u"EightTracks",
        u"url": u"http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
        u"playlist": [
            {
                u"file": u"11885610.m4a",
                u"md5": u"96ce57f24389fc8734ce47f4c1abcc55",
                u"info_dict": {
                    u"title": u"youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885608.m4a",
                u"md5": u"4ab26f05c1f7291ea460a3920be8021f",
                u"info_dict": {
                    u"title": u"youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885679.m4a",
                u"md5": u"d30b5b5f74217410f4689605c35d1fd7",
                u"info_dict": {
                    u"title": u"youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885680.m4a",
                u"md5": u"4eb0a669317cd725f6bbd336a29f923a",
                u"info_dict": {
                    u"title": u"youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885682.m4a",
                u"md5": u"1893e872e263a2705558d1d319ad19e8",
                u"info_dict": {
                    u"title": u"PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885683.m4a",
                u"md5": u"b673c46f47a216ab1741ae8836af5899",
                u"info_dict": {
                    u"title": u"PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885684.m4a",
                u"md5": u"1d74534e95df54986da7f5abf7d842b7",
                u"info_dict": {
                    u"title": u"phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            },
            {
                u"file": u"11885685.m4a",
                u"md5": u"f081f47af8f6ae782ed131d38b9cd1c0",
                u"info_dict": {
                    u"title": u"phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
                    u"uploader_id": u"ytdl"
                }
            }
        ]
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        playlist_id = mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)

        json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
        data = json.loads(json_like)

        session = str(random.randint(0, 1000000000))
        mix_id = data['id']
        track_count = data['tracks_count']
        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
        next_url = first_url
        res = []
        for i in itertools.count():
            api_json = self._download_webpage(next_url, playlist_id,
                                              note=u'Downloading song information %s/%s' % (str(i+1), track_count),
                                              errnote=u'Failed to download song information')
            api_data = json.loads(api_json)
            track_data = api_data[u'set']['track']
            info = {
                'id': track_data['id'],
                'url': track_data['track_file_stream_url'],
                'title': track_data['performer'] + u' - ' + track_data['name'],
                'raw_title': track_data['name'],
                'uploader_id': data['user']['login'],
                'ext': 'm4a',
            }
            res.append(info)
            if api_data['set']['at_last_track']:
                break
            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
        return res
youtube_dl/extractor/escapist.py (new file, 78 lines)
@@ -0,0 +1,78 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,

    ExtractorError,
)


class EscapistIE(InfoExtractor):
    _VALID_URL = r'^(https?://)?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
    _TEST = {
        u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
        u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4',
        u'md5': u'c6793dbda81388f4264c1ba18684a74d',
        u'info_dict': {
            u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
            u"uploader": u"the-escapist-presents",
            u"title": u"Breaking Down Baldur's Gate"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        showName = mobj.group('showname')
        videoId = mobj.group('episode')

        self.report_extraction(videoId)
        webpage = self._download_webpage(url, videoId)

        videoDesc = self._html_search_regex('<meta name="description" content="([^"]*)"',
                                            webpage, u'description', fatal=False)

        imgUrl = self._html_search_regex('<meta property="og:image" content="([^"]*)"',
                                         webpage, u'thumbnail', fatal=False)

        playerUrl = self._html_search_regex('<meta property="og:video" content="([^"]*)"',
                                            webpage, u'player url')

        title = self._html_search_regex('<meta name="title" content="([^"]*)"',
                                        webpage, u'title').split(' : ')[-1]

        configUrl = self._search_regex('config=(.*)$', playerUrl, u'config url')
        configUrl = compat_urllib_parse.unquote(configUrl)

        configJSON = self._download_webpage(configUrl, videoId,
                                            u'Downloading configuration',
                                            u'unable to download configuration')

        # Technically, it's JavaScript, not JSON
        configJSON = configJSON.replace("'", '"')

        try:
            config = json.loads(configJSON)
        except ValueError as err:
            raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))

        playlist = config['playlist']
        videoUrl = playlist[1]['url']

        info = {
            'id': videoId,
            'url': videoUrl,
            'uploader': showName,
            'upload_date': None,
            'title': title,
            'ext': 'mp4',
            'thumbnail': imgUrl,
            'description': videoDesc,
            'player_url': playerUrl,
        }

        return [info]
youtube_dl/extractor/facebook.py (new file, 120 lines)
@@ -0,0 +1,120 @@
import json
import netrc
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)


class FacebookIE(InfoExtractor):
    """Information Extractor for Facebook"""

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?facebook\.com/(?:video/video|photo)\.php\?(?:.*?)v=(?P<ID>\d+)(?:.*)'
    _LOGIN_URL = 'https://login.facebook.com/login.php?m&next=http%3A%2F%2Fm.facebook.com%2Fhome.php&'
    _NETRC_MACHINE = 'facebook'
    IE_NAME = u'facebook'
    _TEST = {
        u'url': u'https://www.facebook.com/photo.php?v=120708114770723',
        u'file': u'120708114770723.mp4',
        u'md5': u'48975a41ccc4b7a581abd68651c1a5a8',
        u'info_dict': {
            u"duration": 279,
            u"title": u"PEOPLE ARE AWESOME 2013"
        }
    }

    def report_login(self):
        """Report attempt to log in."""
        self.to_screen(u'Logging in')

    def _real_initialize(self):
        if self._downloader is None:
            return

        useremail = None
        password = None
        downloader_params = self._downloader.params

        # Attempt to use provided username and password or .netrc data
        if downloader_params.get('username', None) is not None:
            useremail = downloader_params['username']
            password = downloader_params['password']
        elif downloader_params.get('usenetrc', False):
            try:
                info = netrc.netrc().authenticators(self._NETRC_MACHINE)
                if info is not None:
                    useremail = info[0]
                    password = info[2]
                else:
                    raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
            except (IOError, netrc.NetrcParseError) as err:
                self._downloader.report_warning(u'parsing .netrc: %s' % compat_str(err))
                return

        if useremail is None:
            return

        # Log in
        login_form = {
            'email': useremail,
            'pass': password,
            'login': 'Log+In'
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        try:
            self.report_login()
            login_results = compat_urllib_request.urlopen(request).read()
            if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
                self._downloader.report_warning(u'unable to log in: bad username/password, or exceeded login rate limit (~3/min). Check credentials or wait.')
                return
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
            return

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('ID')

        url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
        webpage = self._download_webpage(url, video_id)

        BEFORE = '{swf.addParam(param[0], param[1]);});\n'
        AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
        m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
        if not m:
            raise ExtractorError(u'Cannot parse data')
        data = dict(json.loads(m.group(1)))
        params_raw = compat_urllib_parse.unquote(data['params'])
        params = json.loads(params_raw)
        video_data = params['video_data'][0]
        video_url = video_data.get('hd_src')
        if not video_url:
            video_url = video_data['sd_src']
        if not video_url:
            raise ExtractorError(u'Cannot find video URL')
        video_duration = int(video_data['video_duration'])
        thumbnail = video_data['thumbnail_src']

        video_title = self._html_search_regex('<h2 class="uiHeaderTitle">([^<]+)</h2>',
                                              webpage, u'title')

        info = {
            'id': video_id,
            'title': video_title,
            'url': video_url,
            'ext': 'mp4',
            'duration': video_duration,
            'thumbnail': thumbnail,
        }
        return [info]
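Since this extractor sets _NETRC_MACHINE = 'facebook' and honors the usenetrc parameter, credentials can be kept out of the command line in ~/.netrc. A sketch of the expected entry (the e-mail address and password are placeholders):

    machine facebook login user@example.com password hunter2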
youtube_dl/extractor/flickr.py (new file, 67 lines)
@@ -0,0 +1,67 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unescapeHTML,
)


class FlickrIE(InfoExtractor):
    """Information Extractor for Flickr videos"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
    _TEST = {
        u'url': u'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
        u'file': u'5645318632.mp4',
        u'md5': u'6fdc01adbc89d72fc9c4f15b4a4ba87b',
        u'info_dict': {
            u"description": u"Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
            u"uploader_id": u"forestwander-nature-pictures",
            u"title": u"Dark Hollow Waterfalls"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        video_uploader_id = mobj.group('uploader_id')
        webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')

        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')

        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
                                          first_xml, u'node_id')

        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')

        self.report_extraction(video_id)

        mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))

        video_title = self._html_search_regex(r'<meta property="og:title" content=(?:"([^"]+)"|\'([^\']+)\')',
                                              webpage, u'video title')

        video_description = self._html_search_regex(r'<meta property="og:description" content=(?:"([^"]+)"|\'([^\']+)\')',
                                                    webpage, u'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta property="og:image" content=(?:"([^"]+)"|\'([^\']+)\')',
                                            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
            'uploader_id': video_uploader_id,
        }]
youtube_dl/extractor/funnyordie.py (new file, 40 lines)
@@ -0,0 +1,40 @@
import re

from .common import InfoExtractor


class FunnyOrDieIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
    _TEST = {
        u'url': u'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
        u'file': u'0732f586d7.mp4',
        u'md5': u'f647e9e90064b53b6e046e75d0241fbd',
        u'info_dict': {
            u"description": u"Lyrics changed to match the video. Spoken cameo by Obscurus Lupa (from ThatGuyWithTheGlasses.com). Based on a concept by Dustin McLean (DustFilms.com). Performed, edited, and written by David A. Scott.",
            u"title": u"Heart-Shaped Box: Literal Video Version"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"',
                                            webpage, u'video URL', flags=re.DOTALL)

        title = self._html_search_regex((r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>",
                                         r'<title>(?P<title>[^<]+?)</title>'), webpage, 'title', flags=re.DOTALL)

        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
                                                    webpage, u'description', fatal=False, flags=re.DOTALL)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': video_description,
        }
        return [info]
youtube_dl/extractor/gamespot.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    unified_strdate,
    compat_urllib_parse,
)


class GameSpotIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
    _TEST = {
        u"url": u"http://www.gamespot.com/arma-iii/videos/arma-iii-community-guide-sitrep-i-6410818/",
        u"file": u"6410818.mp4",
        u"md5": u"b2a30deaa8654fcccd43713a6b6a4825",
        u"info_dict": {
            u"title": u"Arma III - Community Guide: SITREP I",
            u"upload_date": u"20130627",
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_id = mobj.group('page_id')
        webpage = self._download_webpage(url, page_id)
        video_id = self._html_search_regex([r'"og:video" content=".*?\?id=(\d+)"',
                                            r'http://www\.gamespot\.com/videoembed/(\d+)'],
                                           webpage, 'video id')
        data = compat_urllib_parse.urlencode({'id': video_id, 'newplayer': '1'})
        info_url = 'http://www.gamespot.com/pages/video_player/xml.php?' + data
        info_xml = self._download_webpage(info_url, video_id)
        doc = xml.etree.ElementTree.fromstring(info_xml)
        clip_el = doc.find('./playList/clip')

        http_urls = [{'url': node.find('filePath').text,
                      'rate': int(node.find('rate').text)}
                     for node in clip_el.find('./httpURI')]
        best_quality = sorted(http_urls, key=lambda f: f['rate'])[-1]
        video_url = best_quality['url']
        title = clip_el.find('./title').text
        ext = video_url.rpartition('.')[2]
        thumbnail_url = clip_el.find('./screenGrabURI').text
        view_count = int(clip_el.find('./views').text)
        upload_date = unified_strdate(clip_el.find('./postDate').text)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'upload_date': upload_date,
            'view_count': view_count,
        }]
youtube_dl/extractor/gametrailers.py (new file, 63 lines)
@@ -0,0 +1,63 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,

    ExtractorError,
)


class GametrailersIE(InfoExtractor):
    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
    _TEST = {
        u'url': u'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
        u'file': u'70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.flv',
        u'md5': u'c3edbc995ab4081976e16779bd96a878',
        u'info_dict': {
            u"title": u"E3 2013: Debut Trailer"
        },
        u'skip': u'Requires rtmpdump'
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        mgid = self._search_regex([r'data-video="(?P<mgid>mgid:.*?)"',
                                   r'data-contentId=\'(?P<mgid>mgid:.*?)\''],
                                  webpage, u'mgid')

        data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})
        info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
                                           video_id, u'Downloading video info')
        doc = xml.etree.ElementTree.fromstring(info_page.encode('utf-8'))
        default_thumb = doc.find('./channel/image/url').text

        media_namespace = {'media': 'http://search.yahoo.com/mrss/'}
        parts = [{
            'title': video_doc.find('title').text,
            # Videos are actually flv, not mp4
            'ext': 'flv',
            'id': video_doc.find('guid').text.rpartition(':')[2],
            'url': self._get_video_url(video_doc.find('media:group/media:content', media_namespace).attrib['url'], video_id),
            # The thumbnail may be missing, in which case the attribute is ''
            'thumbnail': video_doc.find('media:group/media:thumbnail', media_namespace).attrib['url'] or default_thumb,
            'description': video_doc.find('description').text,
        } for video_doc in doc.findall('./channel/item')]
        return parts

    def _get_video_url(self, mediagen_url, video_id):
        if 'acceptMethods' not in mediagen_url:
            mediagen_url += '&acceptMethods=fms'
        links_webpage = self._download_webpage(mediagen_url,
                                               video_id, u'Downloading video urls info')
        doc = xml.etree.ElementTree.fromstring(links_webpage)
        urls = list(doc.iter('src'))
        if len(urls) == 0:
            raise ExtractorError(u'Unable to extract video url')
        # They are sorted from worst to best quality
        return urls[-1].text
youtube_dl/extractor/generic.py (new file, 182 lines)
@@ -0,0 +1,182 @@
# encoding: utf-8

import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)
from .brightcove import BrightcoveIE


class GenericIE(InfoExtractor):
    IE_DESC = u'Generic downloader that works on some sites'
    _VALID_URL = r'.*'
    IE_NAME = u'generic'
    _TESTS = [
        {
            u'url': u'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            u'file': u'13601338388002.mp4',
            u'md5': u'85b90ccc9d73b4acd9138d3af4c27f89',
            u'info_dict': {
                u"uploader": u"www.hodiho.fr",
                u"title": u"R\u00e9gis plante sa Jeep"
            }
        },
        {
            u'url': u'http://www.8tv.cat/8aldia/videos/xavier-sala-i-martin-aquesta-tarda-a-8-al-dia/',
            u'file': u'2371591881001.mp4',
            u'md5': u'9e80619e0a94663f0bdc849b4566af19',
            u'note': u'Test Brightcove downloads and detection in GenericIE',
            u'info_dict': {
                u'title': u'Xavier Sala i Martín: “Un banc que no presta és un banc zombi que no serveix per a res”',
                u'uploader': u'8TV',
                u'description': u'md5:a950cc4285c43e44d763d036710cd9cd',
            }
        },
    ]

    def report_download_webpage(self, video_id):
        """Report webpage download."""
        if not self._downloader.params.get('test', False):
            self._downloader.report_warning(u'Falling back on generic information extractor.')
        super(GenericIE, self).report_download_webpage(video_id)

    def report_following_redirect(self, new_url):
        """Report information extraction."""
        self._downloader.to_screen(u'[redirect] Following redirect to %s' % new_url)

    def _test_redirect(self, url):
        """Check if it is a redirect, like url shorteners, in case return the new url."""
        class HeadRequest(compat_urllib_request.Request):
            def get_method(self):
                return "HEAD"

        class HEADRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
            """
            Subclass the HTTPRedirectHandler to make it use our
            HeadRequest also on the redirected URL
            """
            def redirect_request(self, req, fp, code, msg, headers, newurl):
                if code in (301, 302, 303, 307):
                    newurl = newurl.replace(' ', '%20')
                    newheaders = dict((k, v) for k, v in req.headers.items()
                                      if k.lower() not in ("content-length", "content-type"))
                    return HeadRequest(newurl,
                                       headers=newheaders,
                                       origin_req_host=req.get_origin_req_host(),
                                       unverifiable=True)
                else:
                    raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)

        class HTTPMethodFallback(compat_urllib_request.BaseHandler):
            """
            Fallback to GET if HEAD is not allowed (405 HTTP error)
            """
            def http_error_405(self, req, fp, code, msg, headers):
                fp.read()
                fp.close()

                newheaders = dict((k, v) for k, v in req.headers.items()
                                  if k.lower() not in ("content-length", "content-type"))
                return self.parent.open(compat_urllib_request.Request(req.get_full_url(),
                                                                      headers=newheaders,
                                                                      origin_req_host=req.get_origin_req_host(),
                                                                      unverifiable=True))

        # Build our opener
        opener = compat_urllib_request.OpenerDirector()
        for handler in [compat_urllib_request.HTTPHandler, compat_urllib_request.HTTPDefaultErrorHandler,
                        HTTPMethodFallback, HEADRedirectHandler,
                        compat_urllib_request.HTTPErrorProcessor, compat_urllib_request.HTTPSHandler]:
            opener.add_handler(handler())

        response = opener.open(HeadRequest(url))
        if response is None:
            raise ExtractorError(u'Invalid URL protocol')
        new_url = response.geturl()

        if url == new_url:
            return False

        self.report_following_redirect(new_url)
        return new_url

    def _real_extract(self, url):
        new_url = self._test_redirect(url)
        if new_url:
            return [self.url_result(new_url)]

        video_id = url.split('/')[-1]
        try:
            webpage = self._download_webpage(url, video_id)
        except ValueError:
            # since this is the last-resort InfoExtractor, if
            # this error is thrown, it'll be thrown here
            raise ExtractorError(u'Invalid URL: %s' % url)

        self.report_extraction(video_id)
        # Look for Brightcove:
        m_brightcove = re.search(r'<object.+?class=".*?BrightcoveExperience.*?".+?</object>', webpage, re.DOTALL)
        if m_brightcove is not None:
            self.to_screen(u'Brightcove video detected.')
            bc_url = BrightcoveIE._build_brighcove_url(m_brightcove.group())
            return self.url_result(bc_url, 'Brightcove')

        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit
            mobj = re.search(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http[^\'"&]*)', webpage)
        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
        if mobj is None:
            # We look for Open Graph info:
            # We have to match any number spaces between elements, some sites try to align them (eg.: statigr.am)
            m_video_type = re.search(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video, don't try if it's a Flash player:
            if m_video_type is not None:
                mobj = re.search(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # It's possible that one of the regexes
        # matched, but returned an empty group:
        if mobj.group(1) is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_url = compat_urllib_parse.unquote(mobj.group(1))
        video_id = os.path.basename(video_url)

        # here's a fun little line of code for you:
        video_extension = os.path.splitext(video_id)[1][1:]
        video_id = os.path.splitext(video_id)[0]

        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
        #   Site Name | Video Title
        #   Video Title - Tagline | Site Name
        # and so on and so forth; it's just not practical
        video_title = self._html_search_regex(r'<title>(.*)</title>',
                                              webpage, u'video title', default=u'video', flags=re.DOTALL)

        # video uploader is domain name
        video_uploader = self._search_regex(r'(?:https?://)?([^/]*)/.*',
                                            url, u'video uploader')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension,
        }]
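The HEAD-request trick used in _test_redirect is easy to try in isolation (a sketch, assuming Python 2's urllib2; the short URL below is a placeholder):

    import urllib2

    class HeadRequest(urllib2.Request):
        def get_method(self):
            return "HEAD"

    # geturl() differs from the input URL whenever a redirect was followed
    response = urllib2.urlopen(HeadRequest('http://bit.ly/some-short-url'))
    print(response.geturl())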
youtube_dl/extractor/googleplus.py (new file, 96 lines)
@@ -0,0 +1,96 @@
# coding: utf-8

import datetime
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class GooglePlusIE(InfoExtractor):
    IE_DESC = u'Google Plus'
    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'
    _TEST = {
        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
        u"file": u"ZButuJc6CtH.flv",
        u"info_dict": {
            u"upload_date": u"20120613",
            u"uploader": u"井上ヨシマサ",
            u"title": u"嘆きの天使 降臨"
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')

        self.report_extraction(video_id)

        # Extract upload date
        upload_date = self._html_search_regex('title="Timestamp">(.*?)</a>',
                                              webpage, u'upload date', fatal=False)
        if upload_date:
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
            upload_date = upload_date.strftime('%Y%m%d')

        # Extract uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
                                           webpage, u'uploader', fatal=False)

        # Extract title: use the first line of the description
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
                                              webpage, 'title', default=u'NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com'
        video_page = self._search_regex(r'<a href="((?:%s)?/photos/.*?)"' % re.escape(DOMAIN),
                                        webpage, u'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')

        # Extract video links of all sizes from the video page
        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            raise ExtractorError(u'Unable to extract video links')

        # Sort by resolution
        links = sorted(mobj)

        # Choose the last of the sort, i.e. the highest resolution
        video_url = links[-1]
        # Only keep the url; the resolution part of the tuple has no use anymore
        video_url = video_url[-1]
        # Treat escaped \u0026 style hex
        try:
            video_url = video_url.decode("unicode_escape")
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
youtube_dl/extractor/googlesearch.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import itertools
import re

from .common import SearchInfoExtractor
from ..utils import (
    compat_urllib_parse,
)


class GoogleSearchIE(SearchInfoExtractor):
    IE_DESC = u'Google Video search'
    _MORE_PAGES_INDICATOR = r'id="pnnext" class="pn"'
    _MAX_RESULTS = 1000
    IE_NAME = u'video.google:search'
    _SEARCH_KEY = 'gvsearch'

    def _get_n_results(self, query, n):
        """Get a specified number of results for a query"""

        res = {
            '_type': 'playlist',
            'id': query,
            'entries': []
        }

        for pagenum in itertools.count(1):
            result_url = u'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en' % (compat_urllib_parse.quote_plus(query), pagenum*10)
            webpage = self._download_webpage(result_url, u'gvsearch:' + query,
                                             note='Downloading result page ' + str(pagenum))

            for mobj in re.finditer(r'<h3 class="r"><a href="([^"]+)"', webpage):
                e = {
                    '_type': 'url',
                    'url': mobj.group(1)
                }
                res['entries'].append(e)

            if (pagenum * 10 > n) or not re.search(self._MORE_PAGES_INDICATOR, webpage):
                return res
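With _SEARCH_KEY = 'gvsearch', the usual search-key syntax from SearchInfoExtractor applies on the command line (the query string is just an example):

    youtube-dl "gvsearch3:open source documentaries"    # first 3 results
    youtube-dl "gvsearchall:open source documentaries"  # up to _MAX_RESULTS (1000)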
youtube_dl/extractor/hotnewhiphop.py (new file, 48 lines)
@@ -0,0 +1,48 @@
import re
import base64

from .common import InfoExtractor


class HotNewHipHopIE(InfoExtractor):
    _VALID_URL = r'http://www\.hotnewhiphop.com/.*\.(?P<id>.*)\.html'
    _TEST = {
        u'url': u"http://www.hotnewhiphop.com/freddie-gibbs-lay-it-down-song.1435540.html'",
        u'file': u'1435540.mp3',
        u'md5': u'2c2cd2f76ef11a9b3b581e8b232f3d96',
        u'info_dict': {
            u"title": u"Freddie Gibbs Songs - Lay It Down"
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage_src = self._download_webpage(url, video_id)

        video_url_base64 = self._search_regex(r'data-path="(.*?)"',
                                              webpage_src, u'video URL', fatal=False)

        if video_url_base64 is None:
            video_url = self._search_regex(r'"contentUrl" content="(.*?)"', webpage_src,
                                           u'video URL')
            return self.url_result(video_url, ie='Youtube')

        video_url = base64.b64decode(video_url_base64).decode('utf-8')

        video_title = self._html_search_regex(r"<title>(.*)</title>",
                                              webpage_src, u'title')

        # Get the thumbnail; if there is no thumbnail, set the correct title for WSHH candy videos.
        thumbnail = self._html_search_regex(r'"og:image" content="(.*)"',
                                            webpage_src, u'thumbnail', fatal=False)

        results = [{
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'thumbnail': thumbnail,
            'ext': 'mp3',
        }]
        return results
youtube_dl/extractor/howcast.py (new file, 46 lines)
@@ -0,0 +1,46 @@
import re

from .common import InfoExtractor


class HowcastIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _TEST = {
        u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        u'file': u'390161.mp4',
        u'md5': u'1d7ba54e2c9d7dc6935ef39e00529138',
        u'info_dict': {
            u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",
            u"title": u"How to Tie a Square Knot Properly"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.howcast.com/videos/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        self.report_extraction(video_id)

        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
                                       webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
                                              webpage, u'title')

        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
                                                    webpage, u'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
                                            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
        }]
youtube_dl/extractor/hypem.py (new file, 71 lines)
@@ -0,0 +1,71 @@
import json
import re
import time

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,
    compat_urllib_request,

    ExtractorError,
)


class HypemIE(InfoExtractor):
    """Information Extractor for hypem"""
    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'
    _TEST = {
        u'url': u'http://hypem.com/track/1v6ga/BODYWORK+-+TAME',
        u'file': u'1v6ga.mp3',
        u'md5': u'b9cc91b5af8995e9f0c1cee04c575828',
        u'info_dict': {
            u"title": u"Tame"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        track_id = mobj.group(1)

        data = {'ax': 1, 'ts': time.time()}
        data_encoded = compat_urllib_parse.urlencode(data)
        complete_url = url + "?" + data_encoded
        request = compat_urllib_request.Request(complete_url)
        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
        cookie = urlh.headers.get('Set-Cookie', '')

        self.report_extraction(track_id)

        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
                                              response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
        try:
            track_list = json.loads(html_tracks)
            track = track_list[u'tracks'][0]
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')

        key = track[u"key"]
        track_id = track[u"id"]
        artist = track[u"artist"]
        title = track[u"song"]

        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
        request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
        try:
            song_data = json.loads(song_data_json)
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')
        final_url = song_data[u"url"]

        return [{
            'id': track_id,
            'url': final_url,
            'ext': "mp3",
            'title': title,
            'artist': artist,
        }]
youtube_dl/extractor/ina.py (new file, 39 lines)
@@ -0,0 +1,39 @@
import re

from .common import InfoExtractor


class InaIE(InfoExtractor):
    """Information Extractor for Ina.fr"""
    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'
    _TEST = {
        u'url': u'www.ina.fr/video/I12055569/francois-hollande-je-crois-que-c-est-clair-video.html',
        u'file': u'I12055569.mp4',
        u'md5': u'a667021bf2b41f8dc6049479d9bb38a3',
        u'info_dict': {
            u"title": u"Fran\u00e7ois Hollande \"Je crois que c'est clair\""
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
        video_extension = 'mp4'
        webpage = self._download_webpage(mrss_url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
                                            webpage, u'video URL')

        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
                                         webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
        }]
62
youtube_dl/extractor/infoq.py
Normal file
62
youtube_dl/extractor/infoq.py
Normal file
@@ -0,0 +1,62 @@
import base64
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,

    ExtractorError,
)


class InfoQIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?infoq\.com/[^/]+/[^/]+$'
    _TEST = {
        u"name": u"InfoQ",
        u"url": u"http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
        u"file": u"12-jan-pythonthings.mp4",
        u"info_dict": {
            u"description": u"Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
            u"title": u"A Few of My Favorite [Python] Things"
        },
        u"params": {
            u"skip_download": True
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        webpage = self._download_webpage(url, video_id=url)
        self.report_extraction(url)

        # Extract video URL
        mobj = re.search(r"jsclassref ?= ?'([^']*)'", webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
        real_id = compat_urllib_parse.unquote(base64.b64decode(mobj.group(1).encode('ascii')).decode('utf-8'))
        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id

        # Extract title
        video_title = self._search_regex(r'contentTitle = "(.*?)";',
            webpage, u'title')

        # Extract description
        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
            webpage, u'description', fatal=False)

        video_filename = video_url.split('/')[-1]
        video_id, extension = video_filename.split('.')

        info = {
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
            'thumbnail': None,
            'description': video_description,
        }

        return [info]
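A note on the decode step above: `jsclassref` holds a percent-encoded play path wrapped in base64, and undoing both layers yields the RTMP URL. A minimal standalone sketch of that round-trip (Python 3 stdlib; the sample value is invented for illustration, not taken from a real InfoQ page):

import base64
import urllib.parse

def decode_jsclassref(value):
    # undo base64 first, then percent-encoding, mirroring _real_extract above
    real_id = urllib.parse.unquote(base64.b64decode(value.encode('ascii')).decode('utf-8'))
    return 'rtmpe://video.infoq.com/cfx/st/' + real_id

# hypothetical sample value
sample = base64.b64encode(b'flv%3Apresentations/sample.flv').decode('ascii')
print(decode_jsclassref(sample))  # rtmpe://video.infoq.com/cfx/st/flv:presentations/sample.flv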
42  youtube_dl/extractor/instagram.py  Normal file
@@ -0,0 +1,42 @@
import re

from .common import InfoExtractor

class InstagramIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?instagram\.com/p/(.*?)/'
    _TEST = {
        u'url': u'http://instagram.com/p/aye83DjauH/#',
        u'file': u'aye83DjauH.mp4',
        u'md5': u'0d2da106a9d2631273e192b372806516',
        u'info_dict': {
            u"uploader_id": u"naomipq",
            u"title": u"Video by naomipq"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'<meta property="og:video" content="(.+?)"',
            webpage, u'video URL')
        thumbnail_url = self._html_search_regex(
            r'<meta property="og:image" content="(.+?)" />',
            webpage, u'thumbnail URL', fatal=False)
        html_title = self._html_search_regex(
            r'<title>(.+?)</title>',
            webpage, u'title', flags=re.DOTALL)
        title = re.sub(u'(?: *\(Videos?\))? \u2022 Instagram$', '', html_title).strip()
        uploader_id = self._html_search_regex(r'content="(.*?)\'s video on Instagram',
            webpage, u'uploader name', fatal=False)
        ext = 'mp4'

        return [{
            'id': video_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'uploader_id': uploader_id
        }]
56  youtube_dl/extractor/jukebox.py  Normal file
@@ -0,0 +1,56 @@
# coding: utf-8
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unescapeHTML,
)

class JukeboxIE(InfoExtractor):
    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+).html'
    _IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'
    _VIDEO_URL = r'"config":{"file":"(?P<video_url>http:[^"]+[.](?P<video_ext>[^.?]+)[?]mdtk=[0-9]+)"'
    _TITLE = r'<h1 class="inline">(?P<title>[^<]+)</h1>.*<span id="infos_article_artist">(?P<artist>[^<]+)</span>'
    _IS_YOUTUBE = r'config":{"file":"(?P<youtube_url>http:[\\][/][\\][/]www[.]youtube[.]com[\\][/]watch[?]v=[^"]+)"'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        html = self._download_webpage(url, video_id)

        mobj = re.search(self._IFRAME, html)
        if mobj is None:
            raise ExtractorError(u'Cannot extract iframe url')
        iframe_url = unescapeHTML(mobj.group('iframe'))

        iframe_html = self._download_webpage(iframe_url, video_id, 'Downloading iframe')
        mobj = re.search(r'class="jkb_waiting"', iframe_html)
        if mobj is not None:
            raise ExtractorError(u'Video is not available (in your country?)!')

        self.report_extraction(video_id)

        mobj = re.search(self._VIDEO_URL, iframe_html)
        if mobj is None:
            mobj = re.search(self._IS_YOUTUBE, iframe_html)
            if mobj is None:
                raise ExtractorError(u'Cannot extract video url')
            youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\\/', '/')
            self.to_screen(u'Youtube video detected')
            return self.url_result(youtube_url, ie='Youtube')
        video_url = unescapeHTML(mobj.group('video_url')).replace('\\/', '/')
        video_ext = unescapeHTML(mobj.group('video_ext'))

        mobj = re.search(self._TITLE, html)
        if mobj is None:
            raise ExtractorError(u'Cannot extract title')
        title = unescapeHTML(mobj.group('title'))
        artist = unescapeHTML(mobj.group('artist'))

        return [{'id': video_id,
                 'url': video_url,
                 'title': artist + '-' + title,
                 'ext': video_ext
                 }]
155  youtube_dl/extractor/justintv.py  Normal file
@@ -0,0 +1,155 @@
import json
import os
import re
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    formatSeconds,
)


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = u'justin.tv'
    _TEST = {
        u'url': u'http://www.twitch.tv/thegamedevhub/b/296128360',
        u'file': u'296128360.flv',
        u'md5': u'ecaa8a790c22a40770901460af191c9a',
        u'info_dict': {
            u"upload_date": u"20110927",
            u"uploader_id": 25114803,
            u"uploader": u"thegamedevhub",
            u"title": u"Beginner Series - Scripting With Python Pt.1"
        }
    }

    def report_download_page(self, channel, offset):
        """Report attempt to download a single page of videos."""
        self.to_screen(u'%s: Downloading video information from %d to %d' %
                (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))

    # Return count of items, list of *valid* items
    def _parse_page(self, url, video_id):
        info_json = self._download_webpage(url, video_id,
                u'Downloading video info JSON',
                u'unable to download video info JSON')

        response = json.loads(info_json)
        if not isinstance(response, list):
            error_text = response.get('error', 'unknown error')
            raise ExtractorError(u'Justin.tv API: %s' % error_text)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                video_id = clip['id']
                video_title = clip.get('title', video_id)
                info.append({
                    'id': video_id,
                    'url': video_url,
                    'title': video_title,
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'invalid URL: %s' % url)

        api_base = 'http://api.justin.tv'
        paged = False
        if mobj.group('channelid'):
            paged = True
            video_id = mobj.group('channelid')
            api = api_base + '/channel/archives/%s.json' % video_id
        elif mobj.group('chapterid'):
            chapter_id = mobj.group('chapterid')

            webpage = self._download_webpage(url, chapter_id)
            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
            if not m:
                raise ExtractorError(u'Cannot find archive of a chapter')
            archive_id = m.group(1)

            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            chapter_info_xml = self._download_webpage(api, chapter_id,
                    note=u'Downloading chapter information',
                    errnote=u'Chapter information download failed')
            doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError(u'Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or u'flv'

            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
                    note='Downloading chapter metadata',
                    errnote='Download of chapter metadata failed')
            chapter_info = json.loads(chapter_info_json)

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            #video_url += u'?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
                    u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': u'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return [info]
        else:
            video_id = mobj.group('videoid')
            api = api_base + '/broadcast/by_archive/%s.json' % video_id

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        while True:
            if paged:
                self.report_download_page(video_id, offset)
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(page_url, video_id)
            info.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return info
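The `while True` loop at the end pages through the archive API in blocks of `_JUSTIN_PAGE_LIMIT`, stopping as soon as a page comes back short. The same pattern in isolation, with `fetch_page` as a hypothetical stand-in for `_parse_page` (this is a sketch of the loop shape, not the real API):

def fetch_all(fetch_page, limit=100):
    """Collect items from a paged endpoint until a short page signals the end."""
    items = []
    offset = 0
    while True:
        page = fetch_page(offset, limit)
        items.extend(page)
        if len(page) != limit:  # a short (or empty) page means we are done
            break
        offset += limit
    return items

# toy usage: a fake paged source with 250 items
data = list(range(250))
assert fetch_all(lambda o, l: data[o:o + l]) == data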
41  youtube_dl/extractor/keek.py  Normal file
@@ -0,0 +1,41 @@
import re

from .common import InfoExtractor


class KeekIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
    IE_NAME = u'keek'
    _TEST = {
        u'url': u'http://www.keek.com/ytdl/keeks/NODfbab',
        u'file': u'NODfbab.mp4',
        u'md5': u'9b0636f8c0f7614afa4ea5e4c6e57e83',
        u'info_dict': {
            u"uploader": u"ytdl",
            u"title": u"test chars: \"'/\\\u00e4<>This is a test video for youtube-dl.For more information, contact phihag@phihag.de ."
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
        webpage = self._download_webpage(url, video_id)

        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
            webpage, u'title')

        uploader = self._html_search_regex(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
            webpage, u'uploader', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': thumbnail,
            'uploader': uploader
        }
        return [info]
54  youtube_dl/extractor/liveleak.py  Normal file
@@ -0,0 +1,54 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class LiveLeakIE(InfoExtractor):

    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
    IE_NAME = u'liveleak'
    _TEST = {
        u'url': u'http://www.liveleak.com/view?i=757_1364311680',
        u'file': u'757_1364311680.mp4',
        u'md5': u'0813c2430bea7a46bf13acf3406992f4',
        u'info_dict': {
            u"description": u"extremely bad day for this guy..!",
            u"uploader": u"ljfriel2",
            u"title": u"Most unlucky car accident"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('video_id')

        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(r'file: "(.*?)",',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
            webpage, u'title').replace('LiveLeak.com -', '').strip()

        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
            webpage, u'description', fatal=False)

        video_uploader = self._html_search_regex(r'By:.*?(\w+)</a>',
            webpage, u'uploader', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'uploader': video_uploader
        }

        return [info]
123  youtube_dl/extractor/metacafe.py  Normal file
@@ -0,0 +1,123 @@
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_parse_qs,
    compat_urllib_error,
    compat_urllib_parse,
    compat_urllib_request,
    compat_str,

    ExtractorError,
)

class MetacafeIE(InfoExtractor):
    """Information Extractor for metacafe.com."""

    _VALID_URL = r'(?:http://)?(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
    _DISCLAIMER = 'http://www.metacafe.com/family_filter/'
    _FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
    IE_NAME = u'metacafe'
    _TEST = {
        u"add_ie": ["Youtube"],
        u"url": u"http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
        u"file": u"_aUehQsCQtM.flv",
        u"info_dict": {
            u"upload_date": u"20090102",
            u"title": u"The Electric Company | \"Short I\" | PBS KIDS GO!",
            u"description": u"md5:2439a8ef6d5a70e380c22f5ad323e5a8",
            u"uploader": u"PBS",
            u"uploader_id": u"PBS"
        }
    }


    def report_disclaimer(self):
        """Report disclaimer retrieval."""
        self.to_screen(u'Retrieving disclaimer')

    def _real_initialize(self):
        # Retrieve disclaimer
        request = compat_urllib_request.Request(self._DISCLAIMER)
        try:
            self.report_disclaimer()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to retrieve disclaimer: %s' % compat_str(err))

        # Confirm age
        disclaimer_form = {
            'filters': '0',
            'submit': "Continue - I'm over 18",
        }
        request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
        try:
            self.report_age_confirmation()
            compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to confirm age: %s' % compat_str(err))

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group(1)

        # Check if video comes from YouTube
        mobj2 = re.match(r'^yt-(.*)$', video_id)
        if mobj2 is not None:
            return [self.url_result('http://www.youtube.com/watch?v=%s' % mobj2.group(1), 'Youtube')]

        # Retrieve video webpage to extract further information
        webpage = self._download_webpage('http://www.metacafe.com/watch/%s/' % video_id, video_id)

        # Extract URL, uploader and title from webpage
        self.report_extraction(video_id)
        mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
        if mobj is not None:
            mediaURL = compat_urllib_parse.unquote(mobj.group(1))
            video_extension = mediaURL[-3:]

            # Extract gdaKey if available
            mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
            if mobj is None:
                video_url = mediaURL
            else:
                gdaKey = mobj.group(1)
                video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
        else:
            mobj = re.search(r' name="flashvars" value="(.*?)"', webpage)
            if mobj is None:
                raise ExtractorError(u'Unable to extract media URL')
            vardict = compat_parse_qs(mobj.group(1))
            if 'mediaData' not in vardict:
                raise ExtractorError(u'Unable to extract media URL')
            mobj = re.search(r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
            if mobj is None:
                raise ExtractorError(u'Unable to extract media URL')
            mediaURL = mobj.group('mediaURL').replace('\\/', '/')
            video_extension = mediaURL[-3:]
            video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))

        mobj = re.search(r'(?im)<title>(.*) - Video</title>', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = mobj.group(1).decode('utf-8')

        mobj = re.search(r'submitter=(.*?);', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract uploader nickname')
        video_uploader = mobj.group(1)

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader.decode('utf-8'),
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]
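`_real_initialize` above passes Metacafe's family filter by POSTing the same form a browser would, so that the session cookie carries the confirmation. A standalone sketch of that handshake in Python 3 stdlib terms (field names copied from the extractor; the endpoint's 2013 behaviour is assumed, not verified):

import urllib.parse
import urllib.request

FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'

def confirm_age():
    # filters=0 disables the family filter, as in the disclaimer_form above
    form = urllib.parse.urlencode({
        'filters': '0',
        'submit': "Continue - I'm over 18",
    }).encode('ascii')
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor())
    opener.open(urllib.request.Request(FILTER_POST, data=form)).read()
    return opener  # subsequent requests through this opener reuse the cookie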
115  youtube_dl/extractor/mixcloud.py  Normal file
@@ -0,0 +1,115 @@
import json
import re
import socket

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_request,

    ExtractorError,
)


class MixcloudIE(InfoExtractor):
    _WORKING = False  # New API, but it seems good http://www.mixcloud.com/developers/documentation/
    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
    IE_NAME = u'mixcloud'

    def report_download_json(self, file_id):
        """Report JSON download."""
        self.to_screen(u'Downloading json')

    def get_urls(self, jsonData, fmt, bitrate='best'):
        """Get urls from 'audio_formats' section in json"""
        try:
            bitrate_list = jsonData[fmt]
            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
                bitrate = max(bitrate_list)  # select highest

            url_list = jsonData[fmt][bitrate]
        except TypeError:  # we have no bitrate info.
            url_list = jsonData[fmt]
        return url_list

    def check_urls(self, url_list):
        """Returns 1st active url from list"""
        for url in url_list:
            try:
                compat_urllib_request.urlopen(url)
                return url
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error):
                url = None

        return None

    def _print_formats(self, formats):
        print('Available formats:')
        for fmt in formats.keys():
            for b in formats[fmt]:
                try:
                    ext = formats[fmt][b][0]
                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
                except TypeError:  # we have no bitrate info
                    ext = formats[fmt][0]
                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
                    break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        # extract uploader & filename from url
        uploader = mobj.group(1).decode('utf-8')
        file_id = uploader + "-" + mobj.group(2).decode('utf-8')

        # construct API request
        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
        # retrieve .json file with links to files
        request = compat_urllib_request.Request(file_url)
        try:
            self.report_download_json(file_url)
            jsonData = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))

        # parse JSON
        json_data = json.loads(jsonData)
        player_url = json_data['player_swf_url']
        formats = dict(json_data['audio_formats'])

        req_format = self._downloader.params.get('format', None)

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        if req_format is None or req_format == 'best':
            for format_param in formats.keys():
                url_list = self.get_urls(formats, format_param)
                # check urls
                file_url = self.check_urls(url_list)
                if file_url is not None:
                    break  # got it!
        else:
            if req_format not in formats:
                raise ExtractorError(u'Format is not available')

            url_list = self.get_urls(formats, req_format)
            file_url = self.check_urls(url_list)
            format_param = req_format

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': uploader.decode('utf-8'),
            'upload_date': None,
            'title': json_data['name'],
            'ext': file_url.split('.')[-1].decode('utf-8'),
            'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
            'thumbnail': json_data['thumbnail_url'],
            'description': json_data['description'],
            'player_url': player_url.decode('utf-8'),
        }]
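`get_urls` and `check_urls` together form a simple fallback chain: pick the highest bitrate, then return the first mirror that actually answers. The same idea in isolation (Python 3; `candidates` is illustrative data shaped like the old audio_formats JSON, not real Mixcloud output):

import urllib.error
import urllib.request

def first_live_url(url_list, timeout=5):
    """Return the first URL in the list that can actually be opened."""
    for url in url_list:
        try:
            urllib.request.urlopen(url, timeout=timeout)
            return url
        except (urllib.error.URLError, OSError):
            continue
    return None

# illustrative shape: format -> bitrate -> list of mirrors
candidates = {'mp3': {'128': ['http://a.example/x.mp3', 'http://b.example/x.mp3']}}
best = max(candidates['mp3'])                 # highest bitrate, as in get_urls above
url = first_live_url(candidates['mp3'][best])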
80  youtube_dl/extractor/mtv.py  Normal file
@@ -0,0 +1,80 @@
import re
import socket
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_request,

    ExtractorError,
)


class MTVIE(InfoExtractor):
    _VALID_URL = r'^(?P<proto>https?://)?(?:www\.)?mtv\.com/videos/[^/]+/(?P<videoid>[0-9]+)/[^/]+$'
    _WORKING = False

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        if not mobj.group('proto'):
            url = 'http://' + url
        video_id = mobj.group('videoid')

        webpage = self._download_webpage(url, video_id)

        # Some videos come from Vevo.com
        m_vevo = re.search(r'isVevoVideo = true;.*?vevoVideoId = "(.*?)";',
            webpage, re.DOTALL)
        if m_vevo:
            vevo_id = m_vevo.group(1)
            self.to_screen(u'Vevo video detected: %s' % vevo_id)
            return self.url_result('vevo:%s' % vevo_id, ie='Vevo')

        #song_name = self._html_search_regex(r'<meta name="mtv_vt" content="([^"]+)"/>',
        #    webpage, u'song name', fatal=False)

        video_title = self._html_search_regex(r'<meta name="mtv_an" content="([^"]+)"/>',
            webpage, u'title')

        mtvn_uri = self._html_search_regex(r'<meta name="mtvn_uri" content="([^"]+)"/>',
            webpage, u'mtvn_uri', fatal=False)

        content_id = self._search_regex(r'MTVN\.Player\.defaultPlaylistId = ([0-9]+);',
            webpage, u'content id', fatal=False)

        videogen_url = 'http://www.mtv.com/player/includes/mediaGen.jhtml?uri=' + mtvn_uri + '&id=' + content_id + '&vid=' + video_id + '&ref=www.mtvn.com&viewUri=' + mtvn_uri
        self.report_extraction(video_id)
        request = compat_urllib_request.Request(videogen_url)
        try:
            metadataXml = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to download video metadata: %s' % compat_str(err))

        mdoc = xml.etree.ElementTree.fromstring(metadataXml)
        renditions = mdoc.findall('.//rendition')

        # For now, always pick the highest quality.
        rendition = renditions[-1]

        try:
            _, _, ext = rendition.attrib['type'].partition('/')
            format = ext + '-' + rendition.attrib['width'] + 'x' + rendition.attrib['height'] + '_' + rendition.attrib['bitrate']
            video_url = rendition.find('./src').text
        except KeyError:
            raise ExtractorError('Invalid rendition field.')

        info = {
            'id': video_id,
            'url': video_url,
            'upload_date': None,
            'title': video_title,
            'ext': ext,
            'format': format,
        }

        return [info]
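The mediaGen document the extractor fetches is a small XML playlist of `<rendition>` nodes ordered by quality. The snippet below parses a hand-written sample the same way the code above does; the sample XML is illustrative, not captured from MTV:

import xml.etree.ElementTree as ET

sample = '''<package><video><item>
  <rendition width="480" height="270" bitrate="400" type="video/mp4">
    <src>rtmpe://example/low.mp4</src>
  </rendition>
  <rendition width="1280" height="720" bitrate="1200" type="video/mp4">
    <src>rtmpe://example/high.mp4</src>
  </rendition>
</item></video></package>'''

mdoc = ET.fromstring(sample)
rendition = mdoc.findall('.//rendition')[-1]       # last one = highest quality
ext = rendition.attrib['type'].partition('/')[2]
video_url = rendition.find('./src').text
print(ext, video_url)  # mp4 rtmpe://example/high.mp4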
73  youtube_dl/extractor/myspass.py  Normal file
@@ -0,0 +1,73 @@
import os.path
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse_urlparse,

    ExtractorError,
)


class MySpassIE(InfoExtractor):
    _VALID_URL = r'http://www\.myspass\.de/.*'
    _TEST = {
        u'url': u'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
        u'file': u'11741.mp4',
        u'md5': u'0b49f4844a068f8b33f4b7c88405862b',
        u'info_dict': {
            u"description": u"Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
            u"title": u"Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2"
        }
    }

    def _real_extract(self, url):
        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'

        # video id is the last path element of the URL
        # usually there is a trailing slash, so also try the second but last
        url_path = compat_urllib_parse_urlparse(url).path
        url_parent_path, video_id = os.path.split(url_path)
        if not video_id:
            _, video_id = os.path.split(url_parent_path)

        # get metadata
        metadata_url = META_DATA_URL_TEMPLATE % video_id
        metadata_text = self._download_webpage(metadata_url, video_id)
        metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))

        # extract values from metadata
        url_flv_el = metadata.find('url_flv')
        if url_flv_el is None:
            raise ExtractorError(u'Unable to extract download url')
        video_url = url_flv_el.text
        extension = os.path.splitext(video_url)[1][1:]
        title_el = metadata.find('title')
        if title_el is None:
            raise ExtractorError(u'Unable to extract title')
        title = title_el.text
        format_id_el = metadata.find('format_id')
        if format_id_el is None:
            format = 'mp4'
        else:
            format = format_id_el.text
        description_el = metadata.find('description')
        if description_el is not None:
            description = description_el.text
        else:
            description = None
        imagePreview_el = metadata.find('imagePreview')
        if imagePreview_el is not None:
            thumbnail = imagePreview_el.text
        else:
            thumbnail = None
        info = {
            'id': video_id,
            'url': video_url,
            'title': title,
            'ext': extension,
            'format': format,
            'thumbnail': thumbnail,
            'description': description
        }
        return [info]
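The id logic above takes the last path element of the URL and falls back to the second-to-last when the URL ends in a slash. Isolated, with a couple of checks (Python 3 stdlib):

import os.path
from urllib.parse import urlparse

def video_id_from_url(url):
    path = urlparse(url).path
    parent, vid = os.path.split(path)
    if not vid:  # trailing slash: the last element is empty
        _, vid = os.path.split(parent)
    return vid

assert video_id_from_url('http://www.myspass.de/x/y/11741/') == '11741'
assert video_id_from_url('http://www.myspass.de/x/y/11741') == '11741'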
172  youtube_dl/extractor/myvideo.py  Normal file
@@ -0,0 +1,172 @@
import binascii
import base64
import hashlib
import re

from .common import InfoExtractor
from ..utils import (
    compat_ord,
    compat_urllib_parse,

    ExtractorError,
)


class MyVideoIE(InfoExtractor):
    """Information Extractor for myvideo.de."""

    _VALID_URL = r'(?:http://)?(?:www\.)?myvideo\.de/watch/([0-9]+)/([^?/]+).*'
    IE_NAME = u'myvideo'
    _TEST = {
        u'url': u'http://www.myvideo.de/watch/8229274/bowling_fail_or_win',
        u'file': u'8229274.flv',
        u'md5': u'2d2753e8130479ba2cb7e0a37002053e',
        u'info_dict': {
            u"title": u"bowling-fail-or-win"
        }
    }

    # Original Code from: https://github.com/dersphere/plugin.video.myvideo_de.git
    # Released into the Public Domain by Tristan Fischer on 2013-05-19
    # https://github.com/rg3/youtube-dl/pull/842
    def __rc4crypt(self, data, key):
        x = 0
        box = list(range(256))
        for i in list(range(256)):
            x = (x + box[i] + compat_ord(key[i % len(key)])) % 256
            box[i], box[x] = box[x], box[i]
        x = 0
        y = 0
        out = ''
        for char in data:
            x = (x + 1) % 256
            y = (y + box[x]) % 256
            box[x], box[y] = box[y], box[x]
            out += chr(compat_ord(char) ^ box[(box[x] + box[y]) % 256])
        return out

    def __md5(self, s):
        return hashlib.md5(s).hexdigest().encode()

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'invalid URL: %s' % url)

        video_id = mobj.group(1)

        GK = (
            b'WXpnME1EZGhNRGhpTTJNM01XVmhOREU0WldNNVpHTTJOakpt'
            b'TW1FMU5tVTBNR05pWkRaa05XRXhNVFJoWVRVd1ptSXhaVEV3'
            b'TnpsbA0KTVRkbU1tSTRNdz09'
        )

        # Get video webpage
        webpage_url = 'http://www.myvideo.de/watch/%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id)

        mobj = re.search('source src=\'(.+?)[.]([^.]+)\'', webpage)
        if mobj is not None:
            self.report_extraction(video_id)
            video_url = mobj.group(1) + '.flv'

            video_title = self._html_search_regex('<title>([^<]+)</title>',
                webpage, u'title')

            video_ext = self._search_regex('[.](.+?)$', video_url, u'extension')

            return [{
                'id': video_id,
                'url': video_url,
                'uploader': None,
                'upload_date': None,
                'title': video_title,
                'ext': video_ext,
            }]

        # try encxml
        mobj = re.search('var flashvars={(.+?)}', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video')

        params = {}
        encxml = ''
        sec = mobj.group(1)
        for (a, b) in re.findall('(.+?):\'(.+?)\',?', sec):
            if a != '_encxml':
                params[a] = b
            else:
                encxml = compat_urllib_parse.unquote(b)
        if not params.get('domain'):
            params['domain'] = 'www.myvideo.de'
        xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
        if 'flash_playertype=MTV' in xmldata_url:
            self._downloader.report_warning(u'avoiding MTV player')
            xmldata_url = (
                'http://www.myvideo.de/dynamic/get_player_video_xml.php'
                '?flash_playertype=D&ID=%s&_countlimit=4&autorun=yes'
            ) % video_id

        # get enc data
        enc_data = self._download_webpage(xmldata_url, video_id).split('=')[1]
        enc_data_b = binascii.unhexlify(enc_data)
        sk = self.__md5(
            base64.b64decode(base64.b64decode(GK)) +
            self.__md5(
                str(video_id).encode('utf-8')
            )
        )
        dec_data = self.__rc4crypt(enc_data_b, sk)

        # extracting infos
        self.report_extraction(video_id)

        video_url = None
        mobj = re.search('connectionurl=\'(.*?)\'', dec_data)
        if mobj:
            video_url = compat_urllib_parse.unquote(mobj.group(1))
            if 'myvideo2flash' in video_url:
                self._downloader.report_warning(u'forcing RTMPT ...')
                video_url = video_url.replace('rtmpe://', 'rtmpt://')

        if not video_url:
            # extract non rtmp videos
            mobj = re.search('path=\'(http.*?)\' source=\'(.*?)\'', dec_data)
            if mobj is None:
                raise ExtractorError(u'unable to extract url')
            video_url = compat_urllib_parse.unquote(mobj.group(1)) + compat_urllib_parse.unquote(mobj.group(2))

        video_file = self._search_regex('source=\'(.*?)\'', dec_data, u'video file')
        video_file = compat_urllib_parse.unquote(video_file)

        if not video_file.endswith('f4m'):
            ppath, prefix = video_file.split('.')
            video_playpath = '%s:%s' % (prefix, ppath)
            video_hls_playlist = ''
        else:
            video_playpath = ''
            video_hls_playlist = (
                video_file
            ).replace('.f4m', '.m3u8')

        video_swfobj = self._search_regex('swfobject.embedSWF\(\'(.+?)\'', webpage, u'swfobj')
        video_swfobj = compat_urllib_parse.unquote(video_swfobj)

        video_title = self._html_search_regex("<h1(?: class='globalHd')?>(.*?)</h1>",
            webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'tc_url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': u'flv',
            'play_path': video_playpath,
            'video_file': video_file,
            'video_hls_playlist': video_hls_playlist,
            'player_url': video_swfobj,
        }]

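`__rc4crypt` above is a plain RC4 implementation: the key-scheduling loop followed by the keystream generator. Because RC4 is its own inverse, running it twice with the same key round-trips, which makes the routine easy to sanity-check in isolation (Python 3, bytes in/bytes out rather than the compat_ord shim):

def rc4(data, key):
    # key-scheduling algorithm (KSA)
    box = list(range(256))
    x = 0
    for i in range(256):
        x = (x + box[i] + key[i % len(key)]) % 256
        box[i], box[x] = box[x], box[i]
    # pseudo-random generation algorithm (PRGA)
    x = y = 0
    out = bytearray()
    for char in data:
        x = (x + 1) % 256
        y = (y + box[x]) % 256
        box[x], box[y] = box[y], box[x]
        out.append(char ^ box[(box[x] + box[y]) % 256])
    return bytes(out)

key = b'secret'
assert rc4(rc4(b'payload', key), key) == b'payload'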
49  youtube_dl/extractor/nba.py  Normal file
@@ -0,0 +1,49 @@
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
)


class NBAIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:watch\.|www\.)?nba\.com/(?:nba/)?video(/[^?]*?)(?:/index\.html)?(?:\?.*)?$'
    _TEST = {
        u'url': u'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
        u'file': u'0021200253-okc-bkn-recap.nba.mp4',
        u'md5': u'c0edcfc37607344e2ff8f13c378c88a4',
        u'info_dict': {
            u"description": u"Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.",
            u"title": u"Thunder vs. Nets"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group(1)

        webpage = self._download_webpage(url, video_id)

        video_url = u'http://ht-mobile.cdn.turner.com/nba/big' + video_id + '_nba_1280x720.mp4'

        shortened_video_id = video_id.rpartition('/')[2]
        title = self._html_search_regex(r'<meta property="og:title" content="(.*?)"',
            webpage, 'title', default=shortened_video_id).replace('NBA.com: ', '')

        # It isn't there in the HTML it returns to us
        # uploader_date = self._html_search_regex(r'<b>Date:</b> (.*?)</div>', webpage, 'upload_date', fatal=False)

        description = self._html_search_regex(r'<meta name="description" (?:content|value)="(.*?)" />', webpage, 'description', fatal=False)

        info = {
            'id': shortened_video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            # 'uploader_date': uploader_date,
            'description': description,
        }
        return [info]
76  youtube_dl/extractor/photobucket.py  Normal file
@@ -0,0 +1,76 @@
import datetime
import json
import re

from .common import InfoExtractor

from ..utils import (
    ExtractorError,
)

class PhotobucketIE(InfoExtractor):
    """Information extractor for photobucket.com."""

    # TODO: the original _VALID_URL was:
    # r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*[\?\&]current=(.*\.flv)'
    # Check if it's necessary to keep the old extraction process
    _VALID_URL = r'(?:http://)?(?:[a-z0-9]+\.)?photobucket\.com/.*(([\?\&]current=)|_)(?P<id>.*)\.(?P<ext>(flv)|(mp4))'
    IE_NAME = u'photobucket'
    _TEST = {
        u'url': u'http://media.photobucket.com/user/rachaneronas/media/TiredofLinkBuildingTryBacklinkMyDomaincom_zpsc0c3b9fa.mp4.html?filters[term]=search&filters[primary]=videos&filters[secondary]=images&sort=1&o=0',
        u'file': u'zpsc0c3b9fa.mp4',
        u'md5': u'7dabfb92b0a31f6c16cebc0f8e60ff99',
        u'info_dict': {
            u"upload_date": u"20130504",
            u"uploader": u"rachaneronas",
            u"title": u"Tired of Link Building? Try BacklinkMyDomain.com!"
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('id')

        video_extension = mobj.group('ext')

        # Retrieve video webpage to extract further information
        webpage = self._download_webpage(url, video_id)

        # Extract URL, uploader, and title from webpage
        self.report_extraction(video_id)
        # We try first by looking at the javascript code:
        mobj = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', webpage)
        if mobj is not None:
            info = json.loads(mobj.group('json'))
            return [{
                'id': video_id,
                'url': info[u'downloadUrl'],
                'uploader': info[u'username'],
                'upload_date': datetime.date.fromtimestamp(info[u'creationDate']).strftime('%Y%m%d'),
                'title': info[u'title'],
                'ext': video_extension,
                'thumbnail': info[u'thumbUrl'],
            }]

        # We try looking in other parts of the webpage
        video_url = self._search_regex(r'<link rel="video_src" href=".*\?file=([^"]+)" />',
            webpage, u'video URL')

        mobj = re.search(r'<title>(.*) video by (.*) - Photobucket</title>', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract title')
        video_title = mobj.group(1).decode('utf-8')
        video_uploader = mobj.group(2).decode('utf-8')

        return [{
            'id': video_id.decode('utf-8'),
            'url': video_url.decode('utf-8'),
            'uploader': video_uploader,
            'upload_date': None,
            'title': video_title,
            'ext': video_extension.decode('utf-8'),
        }]
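The primary path above pulls a JSON object straight out of the page's JavaScript with a single regex. As a sketch of that pattern (the embedded snippet is made up, but has the same shape the regex expects):

import json
import re

page = ('...Pb.Data.Shared.put(Pb.Data.Shared.MEDIA, '
        '{"downloadUrl": "http://example/v.mp4", "username": "someone", '
        '"creationDate": 1367625600, "title": "A title", "thumbUrl": "http://example/t.jpg"});...')

m = re.search(r'Pb\.Data\.Shared\.put\(Pb\.Data\.Shared\.MEDIA, (?P<json>.*?)\);', page)
info = json.loads(m.group('json'))
print(info['title'])  # A title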
50  youtube_dl/extractor/pornotube.py  Normal file
@@ -0,0 +1,50 @@
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,

    unified_strdate,
)


class PornotubeIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
    _TEST = {
        u'url': u'http://pornotube.com/c/173/m/1689755/Marilyn-Monroe-Bathing',
        u'file': u'1689755.flv',
        u'md5': u'374dd6dcedd24234453b295209aa69b6',
        u'info_dict': {
            u"upload_date": u"20090708",
            u"title": u"Marilyn-Monroe-Bathing"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('videoid')
        video_title = mobj.group('title')

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        # Get the video URL
        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9]\.pornotube\.com/.+\.flv)",'
        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
        video_url = compat_urllib_parse.unquote(video_url)

        # Get the upload date
        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
        if upload_date:
            upload_date = unified_strdate(upload_date)

        info = {'id': video_id,
                'url': video_url,
                'uploader': None,
                'upload_date': upload_date,
                'title': video_title,
                'ext': 'flv',
                'format': 'flv'}

        return [info]
56  youtube_dl/extractor/rbmaradio.py  Normal file
@@ -0,0 +1,56 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse_urlparse,

    ExtractorError,
)


class RBMARadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'
    _TEST = {
        u'url': u'http://www.rbmaradio.com/shows/ford-lopatin-live-at-primavera-sound-2011',
        u'file': u'ford-lopatin-live-at-primavera-sound-2011.mp3',
        u'md5': u'6bc6f9bcb18994b4c983bc3bf4384d95',
        u'info_dict': {
            u"uploader_id": u"ford-lopatin",
            u"location": u"Spain",
            u"description": u"Joel Ford and Daniel \u2019Oneohtrix Point Never\u2019 Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
            u"uploader": u"Ford & Lopatin",
            u"title": u"Live at Primavera Sound 2011"
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        webpage = self._download_webpage(url, video_id)

        json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
            webpage, u'json data', flags=re.MULTILINE)

        try:
            data = json.loads(json_data)
        except ValueError as e:
            raise ExtractorError(u'Invalid JSON: ' + str(e))

        video_url = data['akamai_url'] + '&cbr=256'
        url_parts = compat_urllib_parse_urlparse(video_url)
        video_ext = url_parts.path.rpartition('.')[2]
        info = {
            'id': video_id,
            'url': video_url,
            'ext': video_ext,
            'title': data['title'],
            'description': data.get('teaser_text'),
            'location': data.get('country_of_origin'),
            'uploader': data.get('host', {}).get('name'),
            'uploader_id': data.get('host', {}).get('slug'),
            'thumbnail': data.get('image', {}).get('large_url_2x'),
            'duration': data.get('duration'),
        }
        return [info]
37  youtube_dl/extractor/redtube.py  Normal file
@@ -0,0 +1,37 @@
import re

from .common import InfoExtractor


class RedTubeIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'
    _TEST = {
        u'url': u'http://www.redtube.com/66418',
        u'file': u'66418.mp4',
        u'md5': u'7b8c22b5e7098a3e1c09709df1126d2d',
        u'info_dict': {
            u"title": u"Sucked on a toilet"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        video_extension = 'mp4'
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<source src="(.+?)" type="video/mp4">',
            webpage, u'video URL')

        video_title = self._html_search_regex('<h1 class="videoTitle slidePanelMovable">(.+?)</h1>',
            webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
        }]
37  youtube_dl/extractor/ringtv.py  Normal file
@@ -0,0 +1,37 @@
import re

from .common import InfoExtractor


class RingTVIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?ringtv\.craveonline\.com/videos/video/([^/]+)'
    _TEST = {
        u"url": u"http://ringtv.craveonline.com/videos/video/746619-canelo-alvarez-talks-about-mayweather-showdown",
        u"file": u"746619.mp4",
        u"md5": u"7c46b4057d22de32e0a539f017e64ad3",
        u"info_dict": {
            u"title": u"Canelo Alvarez talks about Mayweather showdown",
            u"description": u"Saul \\\"Canelo\\\" Alvarez spoke to the media about his Sept. 14 showdown with Floyd Mayweather after their kick-off presser in NYC. Canelo is motivated and confident that he will have the speed and gameplan to beat the pound-for-pound king."
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1).split('-')[0]
        webpage = self._download_webpage(url, video_id)
        title = self._search_regex(r'<title>(.+?)</title>',
            webpage, 'video title').replace(' | RingTV', '')
        description = self._search_regex(r'<div class="blurb">(.+?)</div>',
            webpage, 'description')
        final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
        thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
        ext = final_url.split('.')[-1]
        return [{
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'description': description,
        }]

204  youtube_dl/extractor/soundcloud.py  Normal file
@@ -0,0 +1,204 @@
import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,

    ExtractorError,
    unified_strdate,
)


class SoundcloudIE(InfoExtractor):
    """Information extractor for soundcloud.com
       To access the media, the uid of the song and a stream token
       must be extracted from the page source and the script must make
       a request to media.soundcloud.com/crossdomain.xml. Then
       the media can be grabbed by requesting from a url composed
       of the stream token and uid
    """

    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)(?:[?].*)?$'
    IE_NAME = u'soundcloud'
    _TEST = {
        u'url': u'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
        u'file': u'62986583.mp3',
        u'md5': u'ebef0a451b909710ed1d7787dddbf0d7',
        u'info_dict': {
            u"upload_date": u"20121011",
            u"description": u"No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o'd",
            u"uploader": u"E.T. ExTerrestrial Music",
            u"title": u"Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1"
        }
    }

    def report_resolve(self, video_id):
        """Report information extraction."""
        self.to_screen(u'%s: Resolving id' % video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # extract uploader (which is in the url)
        uploader = mobj.group(1)
        # extract simple title (uploader + slug of song title)
        slug_title = mobj.group(2)
        full_title = '%s/%s' % (uploader, slug_title)

        self.report_resolve(full_title)

        url = 'http://soundcloud.com/%s/%s' % (uploader, slug_title)
        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
        info_json = self._download_webpage(resolv_url, full_title, u'Downloading info JSON')

        info = json.loads(info_json)
        video_id = info['id']
        self.report_extraction(full_title)

        streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
        stream_json = self._download_webpage(streams_url, full_title,
            u'Downloading stream definitions',
            u'unable to download stream definitions')

        streams = json.loads(stream_json)
        mediaURL = streams['http_mp3_128_url']
        upload_date = unified_strdate(info['created_at'])

        return [{
            'id': info['id'],
            'url': mediaURL,
            'uploader': info['user']['username'],
            'upload_date': upload_date,
            'title': info['title'],
            'ext': u'mp3',
            'description': info['description'],
        }]


class SoundcloudSetIE(InfoExtractor):
    """Information extractor for soundcloud.com sets
       To access the media, the uid of the song and a stream token
       must be extracted from the page source and the script must make
       a request to media.soundcloud.com/crossdomain.xml. Then
       the media can be grabbed by requesting from a url composed
       of the stream token and uid
    """

    _VALID_URL = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/sets/([\w\d-]+)(?:[?].*)?$'
    IE_NAME = u'soundcloud:set'
    _TEST = {
        u"url": u"https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep",
        u"playlist": [
            {
                u"file": u"30510138.mp3",
                u"md5": u"f9136bf103901728f29e419d2c70f55d",
                u"info_dict": {
                    u"upload_date": u"20111213",
                    u"description": u"The Royal Concept from Stockholm\r\nFilip / Povel / David / Magnus\r\nwww.royalconceptband.com",
                    u"uploader": u"The Royal Concept",
                    u"title": u"D-D-Dance"
                }
            },
            {
                u"file": u"47127625.mp3",
                u"md5": u"09b6758a018470570f8fd423c9453dd8",
                u"info_dict": {
                    u"upload_date": u"20120521",
                    u"description": u"The Royal Concept from Stockholm\r\nFilip / Povel / David / Magnus\r\nwww.royalconceptband.com",
                    u"uploader": u"The Royal Concept",
                    u"title": u"The Royal Concept - Gimme Twice"
                }
            },
            {
                u"file": u"47127627.mp3",
                u"md5": u"154abd4e418cea19c3b901f1e1306d9c",
                u"info_dict": {
                    u"upload_date": u"20120521",
                    u"uploader": u"The Royal Concept",
                    u"title": u"Goldrushed"
                }
            },
            {
                u"file": u"47127629.mp3",
                u"md5": u"2f5471edc79ad3f33a683153e96a79c1",
                u"info_dict": {
                    u"upload_date": u"20120521",
                    u"description": u"The Royal Concept from Stockholm\r\nFilip / Povel / David / Magnus\r\nwww.royalconceptband.com",
                    u"uploader": u"The Royal Concept",
                    u"title": u"In the End"
                }
            },
            {
                u"file": u"47127631.mp3",
                u"md5": u"f9ba87aa940af7213f98949254f1c6e2",
                u"info_dict": {
                    u"upload_date": u"20120521",
                    u"description": u"The Royal Concept from Stockholm\r\nFilip / David / Povel / Magnus\r\nwww.theroyalconceptband.com",
                    u"uploader": u"The Royal Concept",
                    u"title": u"Knocked Up"
                }
            },
            {
                u"file": u"75206121.mp3",
                u"md5": u"f9d1fe9406717e302980c30de4af9353",
                u"info_dict": {
                    u"upload_date": u"20130116",
                    u"description": u"The unreleased track World on Fire premiered on the CW's hit show Arrow (8pm/7pm central). \r\nAs a gift to our fans we would like to offer you a free download of the track! ",
                    u"uploader": u"The Royal Concept",
                    u"title": u"World On Fire"
                }
            }
        ]
    }

    def report_resolve(self, video_id):
        """Report information extraction."""
        self.to_screen(u'%s: Resolving id' % video_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        # extract uploader (which is in the url)
        uploader = mobj.group(1)
        # extract simple title (uploader + slug of song title)
        slug_title = mobj.group(2)
        full_title = '%s/sets/%s' % (uploader, slug_title)

        self.report_resolve(full_title)

        url = 'http://soundcloud.com/%s/sets/%s' % (uploader, slug_title)
        resolv_url = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
        info_json = self._download_webpage(resolv_url, full_title)

        videos = []
        info = json.loads(info_json)
        if 'errors' in info:
            for err in info['errors']:
                self._downloader.report_error(u'unable to download video webpage: %s' % compat_str(err['error_message']))
            return

        self.report_extraction(full_title)
        for track in info['tracks']:
            video_id = track['id']

            streams_url = 'https://api.sndcdn.com/i1/tracks/' + str(video_id) + '/streams?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
            stream_json = self._download_webpage(streams_url, video_id, u'Downloading track info JSON')

            self.report_extraction(video_id)
            streams = json.loads(stream_json)
            mediaURL = streams['http_mp3_128_url']

            videos.append({
                'id': video_id,
                'url': mediaURL,
                'uploader': track['user']['username'],
                'upload_date': unified_strdate(track['created_at']),
                'title': track['title'],
                'ext': u'mp3',
                'description': track['description'],
            })
        return videos
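Both extractors funnel through SoundCloud's resolve endpoint, which maps a permalink to an API object whose id then feeds the /streams endpoint. The shape of that first call, standalone (Python 3; the client_id is the one hard-coded above and may long since have been revoked):

import json
import urllib.parse
import urllib.request

CLIENT_ID = 'b45b1aa10f1ac2941910a7f0d10f8e28'  # from the extractor; may be stale

def resolve(permalink_url):
    api = 'http://api.soundcloud.com/resolve.json?' + urllib.parse.urlencode({
        'url': permalink_url,
        'client_id': CLIENT_ID,
    })
    with urllib.request.urlopen(api) as resp:
        return json.loads(resp.read().decode('utf-8'))

# info = resolve('http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy')
# info['id'] is what _real_extract above passes on to the /streams endpoint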
45  youtube_dl/extractor/spiegel.py  Normal file
@@ -0,0 +1,45 @@
import re
import xml.etree.ElementTree

from .common import InfoExtractor


class SpiegelIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'
    _TEST = {
        u'url': u'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
        u'file': u'1259285.mp4',
        u'md5': u'2c2754212136f35fb4b19767d242f66e',
        u'info_dict': {
            u"title": u"Vulkanausbruch in Ecuador: Der \"Feuerschlund\" ist wieder aktiv"
        }
    }

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        webpage = self._download_webpage(url, video_id)

        video_title = self._html_search_regex(r'<div class="module-title">(.*?)</div>',
            webpage, u'title')

        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
        xml_code = self._download_webpage(xml_url, video_id,
            note=u'Downloading XML', errnote=u'Failed to download XML')

        idoc = xml.etree.ElementTree.fromstring(xml_code)
        last_type = idoc[-1]
        filename = last_type.findall('./filename')[0].text
        duration = float(last_type.findall('./duration')[0].text)

        video_url = 'http://video2.spiegel.de/flash/' + filename
        video_ext = filename.rpartition('.')[2]
        info = {
            'id': video_id,
            'url': video_url,
            'ext': video_ext,
            'title': video_title,
            'duration': duration,
        }
        return [info]
119  youtube_dl/extractor/stanfordoc.py  Normal file
@@ -0,0 +1,119 @@
import re
import socket
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    compat_http_client,
    compat_str,
    compat_urllib_error,
    compat_urllib_request,

    ExtractorError,
    orderedSet,
    unescapeHTML,
)


class StanfordOpenClassroomIE(InfoExtractor):
    IE_NAME = u'stanfordoc'
    IE_DESC = u'Stanford Open ClassRoom'
    _VALID_URL = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
    _TEST = {
        u'url': u'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
        u'file': u'PracticalUnix_intro-environment.mp4',
        u'md5': u'544a9468546059d4e80d76265b0443b8',
        u'info_dict': {
            u"title": u"Intro Environment"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        if mobj.group('course') and mobj.group('video'):  # A specific video
            course = mobj.group('course')
            video = mobj.group('video')
            info = {
                'id': course + '_' + video,
                'uploader': None,
                'upload_date': None,
            }

            self.report_extraction(info['id'])
            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
            xmlUrl = baseUrl + video + '.xml'
            try:
                metaXml = compat_urllib_request.urlopen(xmlUrl).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download video info XML: %s' % compat_str(err))
            mdoc = xml.etree.ElementTree.fromstring(metaXml)
            try:
                info['title'] = mdoc.findall('./title')[0].text
                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
            except IndexError:
                raise ExtractorError(u'Invalid metadata XML file')
            info['ext'] = info['url'].rpartition('.')[2]
            return [info]
        elif mobj.group('course'):  # A course page
            course = mobj.group('course')
            info = {
                'id': course,
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            coursepage = self._download_webpage(url, info['id'],
                    note='Downloading course info page',
                    errnote='Unable to download course info page')

            info['title'] = self._html_search_regex('<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])

            info['description'] = self._html_search_regex('<description>([^<]+)</description>',
                coursepage, u'description', fatal=False)

            # Each course page links to its videos; recurse into every
            # VideoPage link so the single-video branch above handles it.
            links = orderedSet(re.findall(r'<a href="(VideoPage\.php\?[^"]+)">', coursepage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(vpage),
                }
                for vpage in links]
            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results
        else:  # Root page
            info = {
                'id': 'Stanford OpenClassroom',
                'type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            self.report_download_webpage(info['id'])
            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
            try:
                rootpage = compat_urllib_request.urlopen(rootURL).read()
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                raise ExtractorError(u'Unable to download course info page: ' + compat_str(err))

            info['title'] = info['id']

            # The root page links to every course; recurse into each
            # CoursePage link so the course branch above expands it in turn.
            links = orderedSet(re.findall(r'<a href="(CoursePage\.php\?[^"]+)">', rootpage))
            info['list'] = [
                {
                    'type': 'reference',
                    'url': 'http://openclassroom.stanford.edu/MainFolder/' + unescapeHTML(cpage),
                }
                for cpage in links]

            results = []
            for entry in info['list']:
                assert entry['type'] == 'reference'
                results += self.extract(entry['url'])
            return results
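_real_extract above dispatches on which named groups of _VALID_URL matched: course plus video yields a single lecture, course alone a course playlist, and neither the root catalogue; the two playlist branches recurse through self.extract until only single videos remain. A short sketch of that three-way dispatch, exercising the same pattern outside the extractor on sample URLs:

    import re

    # Same pattern as _VALID_URL above.
    pattern = r'^(?:https?://)?openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'

    for url in [
        'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment',
        'http://openclassroom.stanford.edu/MainFolder/CoursePage.php?course=PracticalUnix',
        'http://openclassroom.stanford.edu/',
    ]:
        m = re.match(pattern, url)
        if m.group('course') and m.group('video'):
            print('single video   :', m.group('course'), '/', m.group('video'))
        elif m.group('course'):
            print('course playlist:', m.group('course'))
        else:
            print('root catalogue : every course')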
youtube_dl/extractor/statigram.py (new file, 42 lines)
@@ -0,0 +1,42 @@
import re

from .common import InfoExtractor


class StatigramIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?statigr\.am/p/([^/]+)'
    _TEST = {
        u'url': u'http://statigr.am/p/484091715184808010_284179915',
        u'file': u'484091715184808010_284179915.mp4',
        u'md5': u'deda4ff333abe2e118740321e992605b',
        u'info_dict': {
            u"uploader_id": u"videoseconds",
            u"title": u"Instagram photo by @videoseconds"
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        webpage = self._download_webpage(url, video_id)
        video_url = self._html_search_regex(
            r'<meta property="og:video:secure_url" content="(.+?)">',
            webpage, u'video URL')
        thumbnail_url = self._html_search_regex(
            r'<meta property="og:image" content="(.+?)" />',
            webpage, u'thumbnail URL', fatal=False)
        html_title = self._html_search_regex(
            r'<title>(.+?)</title>',
            webpage, u'title')
        title = re.sub(r'(?: *\(Videos?\))? \| Statigram$', '', html_title)
        uploader_id = self._html_search_regex(
            r'@([^ ]+)', title, u'uploader name', fatal=False)
        ext = 'mp4'

        return [{
            'id': video_id,
            'url': video_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
            'uploader_id': uploader_id,
        }]
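The only non-obvious steps above are the two title regexes: re.sub strips the site suffix from the page <title>, and the @-handle left in the cleaned title doubles as the uploader id. A quick demonstration on a hypothetical title string shaped like the test case above:

    import re

    html_title = 'Instagram photo by @videoseconds (Videos) | Statigram'
    title = re.sub(r'(?: *\(Videos?\))? \| Statigram$', '', html_title)
    print(title)                                   # Instagram photo by @videoseconds
    print(re.search(r'@([^ ]+)', title).group(1))  # videoseconds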
Some files were not shown because too many files have changed in this diff.