Compare commits: 2014.02.06...2014.04.21 (747 commits)

MANIFEST.in

@@ -3,3 +3,4 @@ include test/*.py
 include test/*.json
 include youtube-dl.bash-completion
 include youtube-dl.1
+recursive-include docs Makefile conf.py *.rst

Makefile (3 lines changed)

@@ -72,8 +72,9 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-
 	--exclude '__pycache' \
 	--exclude '.git' \
 	--exclude 'testdata' \
+	--exclude 'docs/_build' \
 	-- \
-	bin devscripts test youtube_dl \
+	bin devscripts test youtube_dl docs \
 	CHANGELOG LICENSE README.md README.txt \
 	Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
 	youtube-dl

README.md (107 lines changed)

@@ -20,7 +20,7 @@ which means you can modify it, redistribute it or use it however you like.
 sure that you have sufficient permissions
 (run with sudo if needed)
 -i, --ignore-errors continue on download errors, for example to
-to skip unavailable videos in a playlist
+skip unavailable videos in a playlist
 --abort-on-error Abort downloading of further videos (in the
 playlist or the command line) if an error
 occurs

@@ -28,6 +28,9 @@ which means you can modify it, redistribute it or use it however you like.
 --user-agent UA specify a custom user agent
 --referer REF specify a custom referer, use if the video
 access is restricted to one domain
+--add-header FIELD:VALUE specify a custom HTTP header and its value,
+separated by a colon ':'. You can use this
+option multiple times
 --list-extractors List all supported extractors and the URLs
 they would handle
 --extractor-descriptions Output descriptions of all supported

@@ -36,6 +39,9 @@ which means you can modify it, redistribute it or use it however you like.
 an empty string (--proxy "") for direct
 connection
 --no-check-certificate Suppress HTTPS certificate validation.
+--prefer-insecure Use an unencrypted connection to retrieve
+information about the video. (Currently
+supported only for YouTube)
 --cache-dir DIR Location in the filesystem where youtube-dl
 can store some downloaded information
 permanently. By default $XDG_CACHE_HOME

@@ -59,6 +65,7 @@ which means you can modify it, redistribute it or use it however you like.
 configuration in ~/.config/youtube-dl.conf
 (%APPDATA%/youtube-dl/config.txt on
 Windows)
+--encoding ENCODING Force the specified encoding (experimental)

 ## Video Selection:
 --playlist-start NUMBER playlist video to start at (default is 1)

@@ -124,8 +131,12 @@ which means you can modify it, redistribute it or use it however you like.
 video id, %(playlist)s for the playlist the
 video is in, %(playlist_index)s for the
 position in the playlist and %% for a
-literal percent. Use - to output to stdout.
-Can also be used to download to a different
+literal percent. %(height)s and %(width)s
+for the width and height of the video
+format. %(resolution)s for a textual
+description of the resolution of the video
+format. Use - to output to stdout. Can also
+be used to download to a different
 directory, for example with -o '/my/downloa
 ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
 --autonumber-size NUMBER Specifies the number of digits in

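The new `%(height)s`, `%(width)s`, and `%(resolution)s` fields slot into `-o` like any other template key; for example (the output name is chosen purely for illustration):

    $ youtube-dl -o '%(title)s-%(resolution)s.%(ext)s' http://www.youtube.com/watch?v=BaW_jenozKc
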
@@ -159,6 +170,7 @@ which means you can modify it, redistribute it or use it however you like.

 ## Verbosity / Simulation Options:
 -q, --quiet activates quiet mode
+--no-warnings Ignore warnings
 -s, --simulate do not download the video and do not write
 anything to disk
 --skip-download do not download the video

@@ -170,7 +182,9 @@ which means you can modify it, redistribute it or use it however you like.
 --get-duration simulate, quiet but print video length
 --get-filename simulate, quiet but print output filename
 --get-format simulate, quiet but print output format
--j, --dump-json simulate, quiet but print JSON information
+-j, --dump-json simulate, quiet but print JSON information.
+See --output for a description of available
+keys.
 --newline output progress bar as new lines
 --no-progress do not print progress bar
 --console-title display progress in console titlebar

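Since `-j` prints a single JSON object on stdout, it composes with any JSON tool; one way to eyeball the available keys (the choice of pretty-printer is illustrative):

    $ youtube-dl -j http://www.youtube.com/watch?v=BaW_jenozKc | python -m json.tool
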
@@ -187,9 +201,9 @@ which means you can modify it, redistribute it or use it however you like.
 preference using slashes: "-f 22/17/18".
 "-f mp4" and "-f flv" are also supported.
 You can also use the special names "best",
-"bestaudio", "worst", and "worstaudio". By
-default, youtube-dl will pick the best
-quality.
+"bestvideo", "bestaudio", "worst",
+"worstvideo" and "worstaudio". By default,
+youtube-dl will pick the best quality.
 --all-formats download all available video formats
 --prefer-free-formats prefer free video formats unless a specific
 one is requested

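The two new special names complete the audio pair; for instance, to fetch only the best video-only stream, as exercised by the new `test_format_selection_video` test further below:

    $ youtube-dl -f bestvideo http://www.youtube.com/watch?v=BaW_jenozKc
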
@@ -236,6 +250,7 @@ which means you can modify it, redistribute it or use it however you like.
 default
 --embed-subs embed subtitles in the video (only for mp4
 videos)
+--embed-thumbnail embed thumbnail in the audio as cover art
 --add-metadata write metadata to the video file
 --xattrs write metadata to the video file's xattrs
 (using dublin core and xdg standards)

@@ -246,7 +261,7 @@ which means you can modify it, redistribute it or use it however you like.

 # CONFIGURATION

-You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl.conf`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`.
+You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.config/youtube-dl/config`. On Windows, the configuration file locations are `%APPDATA%\youtube-dl\config.txt` and `C:\Users\<Yourname>\youtube-dl.conf`.

 # OUTPUT TEMPLATE

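The hunk above moves the documented per-user location to `~/.config/youtube-dl/config`. A minimal example of such a file, using only options that appear in this README (the particular selection is illustrative):

    # ~/.config/youtube-dl/config
    --extract-audio
    --no-mtime
    -o '%(title)s-%(id)s.%(ext)s'
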
@@ -281,12 +296,14 @@ Videos can be filtered by their upload date using the options `--date`, `--dateb

 Examples:

-$ # Download only the videos uploaded in the last 6 months
-$ youtube-dl --dateafter now-6months
-$ # Download only the videos uploaded on January 1, 1970
-$ youtube-dl --date 19700101
-$ # will only download the videos uploaded in the 200x decade
-$ youtube-dl --dateafter 20000101 --datebefore 20091231
+# Download only the videos uploaded in the last 6 months
+$ youtube-dl --dateafter now-6months
+
+# Download only the videos uploaded on January 1, 1970
+$ youtube-dl --date 19700101
+
+$ # will only download the videos uploaded in the 200x decade
+$ youtube-dl --dateafter 20000101 --datebefore 20091231

 # FAQ

@@ -355,7 +372,67 @@ If you want to create a build of youtube-dl yourself, you'll need

 ### Adding support for a new site

-If you want to add support for a new site, copy *any* [recently modified](https://github.com/rg3/youtube-dl/commits/master/youtube_dl/extractor) file in `youtube_dl/extractor`, add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py). Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Don't forget to run the tests with `python test/test_download.py Test_Download.test_YourExtractor`! For a detailed tutorial, refer to [this blog post](http://filippo.io/add-support-for-a-new-video-site-to-youtube-dl/).
+If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+
+1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
+2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
+3. Start a new git branch with `cd youtube-dl; git checkout -b yourextractor`
+4. Start with this simple template and save it to `youtube_dl/extractor/yourextractor.py`:
+
+        # coding: utf-8
+        from __future__ import unicode_literals
+
+        import re
+
+        from .common import InfoExtractor
+
+
+        class YourExtractorIE(InfoExtractor):
+            _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
+            _TEST = {
+                'url': 'http://yourextractor.com/watch/42',
+                'md5': 'TODO: md5 sum of the first 10KiB of the video file',
+                'info_dict': {
+                    'id': '42',
+                    'ext': 'mp4',
+                    'title': 'Video title goes here',
+                    # TODO more properties, either as:
+                    # * A value
+                    # * MD5 checksum; start the string with md5:
+                    # * A regular expression; start the string with re:
+                    # * Any Python type (for example int or float)
+                }
+            }
+
+            def _real_extract(self, url):
+                mobj = re.match(self._VALID_URL, url)
+                video_id = mobj.group('id')
+
+                # TODO more code goes here, for example ...
+                webpage = self._download_webpage(url, video_id)
+                title = self._html_search_regex(r'<h1>(.*?)</h1>', webpage, 'title')
+
+                return {
+                    'id': video_id,
+                    'title': title,
+                    # TODO more properties (see youtube_dl/extractor/common.py)
+                }
+
+5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
+6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done.
+7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
+8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
+9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
+
+        $ git add youtube_dl/extractor/__init__.py
+        $ git add youtube_dl/extractor/yourextractor.py
+        $ git commit -m '[yourextractor] Add new extractor'
+        $ git push origin yourextractor
+
+10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+
+In any case, thank you very much for your contributions!

 # BUGS

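The `md5` field in the template refers to the checksum of just the first 10 KiB of the downloaded file. A minimal sketch of computing such a value (the file path and helper name are made up):

    import hashlib

    def md5_first_10kib(path):
        # Hash only the first 10 KiB, matching the 'md5' convention
        # described in the extractor template above.
        with open(path, 'rb') as f:
            return hashlib.md5(f.read(10 * 1024)).hexdigest()

    print(md5_first_10kib('downloaded_video.mp4'))
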
devscripts/release.sh

@@ -14,14 +14,20 @@

 set -e

-skip_tests=false
-if [ "$1" = '--skip-test' ]; then
-    skip_tests=true
+skip_tests=true
+if [ "$1" = '--run-tests' ]; then
+    skip_tests=false
     shift
 fi

 if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
 version="$1"
+major_version=$(echo "$version" | sed -n 's#^\([0-9]*\.[0-9]*\.[0-9]*\).*#\1#p')
+if test "$major_version" '!=' "$(date '+%Y.%m.%d')"; then
+    echo "$version does not start with today's date!"
+    exit 1
+fi

 if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
 if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
 useless_files=$(find youtube_dl -type f -not -name '*.py')

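The new guard reduces the version string to its date prefix and compares that against today. The sed expression is taken verbatim from the hunk; the sample input is hypothetical:

    $ echo "2014.04.21.1" | sed -n 's#^\([0-9]*\.[0-9]*\.[0-9]*\).*#\1#p'
    2014.04.21
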
@@ -70,7 +76,7 @@ RELEASE_FILES="youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz"
 git checkout HEAD -- youtube-dl youtube-dl.exe

 /bin/echo -e "\n### Signing and uploading the new binaries to yt-dl.org ..."
-for f in $RELEASE_FILES; do gpg --detach-sig "build/$version/$f"; done
+for f in $RELEASE_FILES; do gpg --passphrase-repeat 5 --detach-sig "build/$version/$f"; done
 scp -r "build/$version" ytdl@yt-dl.org:html/tmp/
 ssh ytdl@yt-dl.org "mv html/tmp/$version html/downloads/"
 ssh ytdl@yt-dl.org "sh html/update_latest.sh $version"

@@ -97,7 +103,7 @@ rm -rf build

 make pypi-files
 echo "Uploading to PyPi ..."
-python setup.py sdist upload
+python setup.py sdist bdist_wheel upload
 make clean

 /bin/echo -e "\n### DONE!"

docs/.gitignore (vendored, new file)

@@ -0,0 +1 @@
_build/

docs/Makefile (new file, 177 lines)

@@ -0,0 +1,177 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         =
BUILDDIR      = _build

# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  xml        to make Docutils-native XML files"
	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	rm -rf $(BUILDDIR)/*

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/youtube-dl.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/youtube-dl.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/youtube-dl"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/youtube-dl"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

latexpdfja:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through platex and dvipdfmx..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

docs/conf.py (new file, 71 lines)

@@ -0,0 +1,71 @@
# -*- coding: utf-8 -*-
#
# youtube-dl documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 14 21:05:43 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys
import os
# Allows to import youtube_dl
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# -- General configuration ------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'youtube-dl'
copyright = u'2014, Ricardo Garcia Gonzalez'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import youtube_dl
version = youtube_dl.__version__
# The full version, including alpha/beta/rc tags.
release = version

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'youtube-dldoc'

docs/index.rst (new file, 23 lines)

@@ -0,0 +1,23 @@
Welcome to youtube-dl's documentation!
======================================

*youtube-dl* is a command-line program to download videos from YouTube.com and more sites.
It can also be used in Python code.

Developer guide
---------------

This section contains information for using *youtube-dl* from Python programs.

.. toctree::
   :maxdepth: 2

   module_guide

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

docs/module_guide.rst (new file, 67 lines)

@@ -0,0 +1,67 @@
Using the ``youtube_dl`` module
===============================

When using the ``youtube_dl`` module, you start by creating an instance of :class:`YoutubeDL` and adding all the available extractors:

.. code-block:: python

    >>> from youtube_dl import YoutubeDL
    >>> ydl = YoutubeDL()
    >>> ydl.add_default_info_extractors()

Extracting video information
----------------------------

You use the :meth:`YoutubeDL.extract_info` method for getting the video information, which returns a dictionary:

.. code-block:: python

    >>> info = ydl.extract_info('http://www.youtube.com/watch?v=BaW_jenozKc', download=False)
    [youtube] Setting language
    [youtube] BaW_jenozKc: Downloading webpage
    [youtube] BaW_jenozKc: Downloading video info webpage
    [youtube] BaW_jenozKc: Extracting video information
    >>> info['title']
    'youtube-dl test video "\'/\\ä↭𝕐'
    >>> info['height'], info['width']
    (720, 1280)

If you want to download or play the video you can get its url:

.. code-block:: python

    >>> info['url']
    'https://...'

Extracting playlist information
-------------------------------

The playlist information is extracted in a similar way, but the dictionary is a bit different:

.. code-block:: python

    >>> playlist = ydl.extract_info('http://www.ted.com/playlists/13/open_source_open_world', download=False)
    [TED] open_source_open_world: Downloading playlist webpage
    ...
    >>> playlist['title']
    'Open-source, open world'

You can access the videos in the playlist with the ``entries`` field:

.. code-block:: python

    >>> for video in playlist['entries']:
    ...     print('Video #%d: %s' % (video['playlist_index'], video['title']))

    Video #1: How Arduino is open-sourcing imagination
    Video #2: The year open data went worldwide
    Video #3: Massive-scale online collaboration
    Video #4: The art of asking
    Video #5: How cognitive surplus will change the world
    Video #6: The birth of Wikipedia
    Video #7: Coding a better government
    Video #8: The era of open innovation
    Video #9: The currency of the new economy is trust

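The guide stops at inspecting metadata; a minimal sketch that drives an actual download through the same public API (assuming `outtmpl` is the usual output-template parameter):

    from youtube_dl import YoutubeDL

    # Sketch: reuse the API shown in the module guide to download a video.
    ydl = YoutubeDL({'outtmpl': '%(title)s-%(id)s.%(ext)s'})
    ydl.add_default_info_extractors()
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
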
test/helper.py

@@ -9,7 +9,10 @@ import sys

 import youtube_dl.extractor
 from youtube_dl import YoutubeDL
-from youtube_dl.utils import preferredencoding
+from youtube_dl.utils import (
+    compat_str,
+    preferredencoding,
+)


 def get_params(override=None):

@@ -71,15 +74,63 @@ class FakeYDL(YoutubeDL):
             old_report_warning(message)
         self.report_warning = types.MethodType(report_warning, self)

-def get_testcases():
+def gettestcases(include_onlymatching=False):
     for ie in youtube_dl.extractor.gen_extractors():
         t = getattr(ie, '_TEST', None)
         if t:
-            t['name'] = type(ie).__name__[:-len('IE')]
-            yield t
-        for t in getattr(ie, '_TESTS', []):
+            assert not hasattr(ie, '_TESTS'), \
+                '%s has _TEST and _TESTS' % type(ie).__name__
+            tests = [t]
+        else:
+            tests = getattr(ie, '_TESTS', [])
+        for t in tests:
+            if not include_onlymatching and t.get('only_matching', False):
+                continue
             t['name'] = type(ie).__name__[:-len('IE')]
             yield t


 md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()

+
+def expect_info_dict(self, expected_dict, got_dict):
+    for info_field, expected in expected_dict.items():
+        if isinstance(expected, compat_str) and expected.startswith('re:'):
+            got = got_dict.get(info_field)
+            match_str = expected[len('re:'):]
+            match_rex = re.compile(match_str)
+
+            self.assertTrue(
+                isinstance(got, compat_str) and match_rex.match(got),
+                u'field %s (value: %r) should match %r' % (info_field, got, match_str))
+        elif isinstance(expected, type):
+            got = got_dict.get(info_field)
+            self.assertTrue(isinstance(got, expected),
+                u'Expected type %r, but got value %r of type %r' % (expected, got, type(got)))
+        else:
+            if isinstance(expected, compat_str) and expected.startswith('md5:'):
+                got = 'md5:' + md5(got_dict.get(info_field))
+            else:
+                got = got_dict.get(info_field)
+            self.assertEqual(expected, got,
+                u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+
+    # Check for the presence of mandatory fields
+    for key in ('id', 'url', 'title', 'ext'):
+        self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
+    # Check for mandatory fields that are automatically set by YoutubeDL
+    for key in ['webpage_url', 'extractor', 'extractor_key']:
+        self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
+
+    # Are checkable fields missing from the test case definition?
+    test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
+        for key, value in got_dict.items()
+        if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
+    missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
+    if missing_keys:
+        sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
+        self.assertFalse(
+            missing_keys,
+            'Missing keys in test definition: %s' % (
+                ', '.join(sorted(missing_keys))))

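The new `expect_info_dict` gives test authors three ways to pin a field: an exact value, an `md5:` or `re:` string prefix, or a bare type. A hypothetical expected dict exercising each (the md5 shown is that of the string "test"):

    # Hypothetical test expectations for the conventions implemented above.
    expected = {
        'id': '42',
        'ext': 'mp4',
        'title': 're:^Episode [0-9]+',                          # matched as a regular expression
        'description': 'md5:098f6bcd4621d373cade4e832627b4f6',  # md5 of the full text
        'duration': int,                                        # only the type is checked
    }
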
test/test_InfoExtractor.py (new file, 44 lines)

@@ -0,0 +1,44 @@
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL
from youtube_dl.extractor.common import InfoExtractor
from youtube_dl.extractor import YoutubeIE, get_info_extractor


class TestIE(InfoExtractor):
    pass


class TestInfoExtractor(unittest.TestCase):
    def setUp(self):
        self.ie = TestIE(FakeYDL())

    def test_ie_key(self):
        self.assertEqual(get_info_extractor(YoutubeIE.ie_key()), YoutubeIE)

    def test_html_search_regex(self):
        html = '<p id="foo">Watch this <a href="http://www.youtube.com/watch?v=BaW_jenozKc">video</a></p>'
        search = lambda re, *args: self.ie._html_search_regex(re, html, *args)
        self.assertEqual(search(r'<p id="foo">(.+?)</p>', 'foo'), 'Watch this video')

    def test_opengraph(self):
        ie = self.ie
        html = '''
            <meta name="og:title" content='Foo'/>
            <meta content="Some video's description " name="og:description"/>
            <meta property='og:image' content='http://domain.com/pic.jpg?key1=val1&key2=val2'/>
            '''
        self.assertEqual(ie._og_search_title(html), 'Foo')
        self.assertEqual(ie._og_search_description(html), 'Some video\'s description ')
        self.assertEqual(ie._og_search_thumbnail(html), 'http://domain.com/pic.jpg?key1=val1&key2=val2')

if __name__ == '__main__':
    unittest.main()

test/test_YoutubeDL.py

@@ -26,16 +26,27 @@ class YDL(FakeYDL):
         self.msgs.append(msg)


+def _make_result(formats, **kwargs):
+    res = {
+        'formats': formats,
+        'id': 'testid',
+        'title': 'testttitle',
+        'extractor': 'testex',
+    }
+    res.update(**kwargs)
+    return res
+
+
 class TestFormatSelection(unittest.TestCase):
     def test_prefer_free_formats(self):
         # Same resolution => download webm
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {'ext': 'webm', 'height': 460},
-            {'ext': 'mp4', 'height': 460},
+            {'ext': 'webm', 'height': 460, 'url': 'x'},
+            {'ext': 'mp4', 'height': 460, 'url': 'y'},
         ]
-        info_dict = {'formats': formats, 'extractor': 'test'}
+        info_dict = _make_result(formats)
         yie = YoutubeIE(ydl)
         yie._sort_formats(info_dict['formats'])
         ydl.process_ie_result(info_dict)

@@ -46,8 +57,8 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = True
         formats = [
-            {'ext': 'webm', 'height': 720},
-            {'ext': 'mp4', 'height': 1080},
+            {'ext': 'webm', 'height': 720, 'url': 'a'},
+            {'ext': 'mp4', 'height': 1080, 'url': 'b'},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)

@@ -60,9 +71,9 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
-            {'ext': 'webm', 'height': 720},
-            {'ext': 'mp4', 'height': 720},
-            {'ext': 'flv', 'height': 720},
+            {'ext': 'webm', 'height': 720, 'url': '_'},
+            {'ext': 'mp4', 'height': 720, 'url': '_'},
+            {'ext': 'flv', 'height': 720, 'url': '_'},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)

@@ -74,8 +85,8 @@ class TestFormatSelection(unittest.TestCase):
         ydl = YDL()
         ydl.params['prefer_free_formats'] = False
         formats = [
-            {'ext': 'flv', 'height': 720},
-            {'ext': 'webm', 'height': 720},
+            {'ext': 'flv', 'height': 720, 'url': '_'},
+            {'ext': 'webm', 'height': 720, 'url': '_'},
         ]
         info_dict['formats'] = formats
         yie = YoutubeIE(ydl)

@@ -91,8 +102,7 @@ class TestFormatSelection(unittest.TestCase):
             {'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
             {'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
         ]
-        info_dict = {
-            'formats': formats, 'extractor': 'test', 'id': 'testvid'}
+        info_dict = _make_result(formats)

         ydl = YDL()
         ydl.process_ie_result(info_dict)

@@ -120,12 +130,12 @@ class TestFormatSelection(unittest.TestCase):

     def test_format_selection(self):
         formats = [
-            {'format_id': '35', 'ext': 'mp4', 'preference': 1},
-            {'format_id': '45', 'ext': 'webm', 'preference': 2},
-            {'format_id': '47', 'ext': 'webm', 'preference': 3},
-            {'format_id': '2', 'ext': 'flv', 'preference': 4},
+            {'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
+            {'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
+            {'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
+            {'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
         ]
-        info_dict = {'formats': formats, 'extractor': 'test'}
+        info_dict = _make_result(formats)

         ydl = YDL({'format': '20/47'})
         ydl.process_ie_result(info_dict.copy())

@@ -154,12 +164,12 @@ class TestFormatSelection(unittest.TestCase):

     def test_format_selection_audio(self):
         formats = [
-            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none'},
-            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none'},
-            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none'},
-            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4},
+            {'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
+            {'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
+            {'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
         ]
-        info_dict = {'formats': formats, 'extractor': 'test'}
+        info_dict = _make_result(formats)

         ydl = YDL({'format': 'bestaudio'})
         ydl.process_ie_result(info_dict.copy())

@@ -172,16 +182,34 @@ class TestFormatSelection(unittest.TestCase):
         self.assertEqual(downloaded['format_id'], 'audio-low')

         formats = [
-            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1},
-            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2},
+            {'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
+            {'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
         ]
-        info_dict = {'formats': formats, 'extractor': 'test'}
+        info_dict = _make_result(formats)

         ydl = YDL({'format': 'bestaudio/worstaudio/best'})
         ydl.process_ie_result(info_dict.copy())
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'vid-high')

+    def test_format_selection_video(self):
+        formats = [
+            {'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
+            {'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
+            {'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
+        ]
+        info_dict = _make_result(formats)
+
+        ydl = YDL({'format': 'bestvideo'})
+        ydl.process_ie_result(info_dict.copy())
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'dash-video-high')
+
+        ydl = YDL({'format': 'worstvideo'})
+        ydl.process_ie_result(info_dict.copy())
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'dash-video-low')
+
     def test_youtube_format_selection(self):
         order = [
             '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',

@@ -199,10 +227,12 @@ class TestFormatSelection(unittest.TestCase):
         for f1id, f2id in zip(order, order[1:]):
             f1 = YoutubeIE._formats[f1id].copy()
             f1['format_id'] = f1id
+            f1['url'] = 'url:' + f1id
             f2 = YoutubeIE._formats[f2id].copy()
             f2['format_id'] = f2id
+            f2['url'] = 'url:' + f2id

-            info_dict = {'formats': [f1, f2], 'extractor': 'youtube'}
+            info_dict = _make_result([f1, f2], extractor='youtube')
             ydl = YDL()
             yie = YoutubeIE(ydl)
             yie._sort_formats(info_dict['formats'])

@@ -210,7 +240,7 @@ class TestFormatSelection(unittest.TestCase):
             downloaded = ydl.downloaded_info_dicts[0]
             self.assertEqual(downloaded['format_id'], f1id)

-            info_dict = {'formats': [f2, f1], 'extractor': 'youtube'}
+            info_dict = _make_result([f2, f1], extractor='youtube')
             ydl = YDL()
             yie = YoutubeIE(ydl)
             yie._sort_formats(info_dict['formats'])

|
@ -1,5 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import unicode_literals
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
@@ -7,12 +9,13 @@ import unittest
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


-from test.helper import get_testcases
+from test.helper import gettestcases

 from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
     JustinTVIE,
+    PBSIE,
     YoutubeIE,
 )

@@ -29,21 +32,24 @@ class TestAllURLsMatching(unittest.TestCase):

     def test_youtube_playlist_matching(self):
         assertPlaylist = lambda url: self.assertMatch(url, ['youtube:playlist'])
-        assertPlaylist(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist(u'UUBABnxM4Ar9ten8Mdjj1j0Q') #585
-        assertPlaylist(u'PL63F0C78739B09958')
-        assertPlaylist(u'https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
-        assertPlaylist(u'https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
-        assertPlaylist(u'https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
-        assertPlaylist(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
-        self.assertFalse('youtube:playlist' in self.matching_ies(u'PLtS2H6bU1M'))
+        assertPlaylist('ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        assertPlaylist('UUBABnxM4Ar9ten8Mdjj1j0Q') #585
+        assertPlaylist('PL63F0C78739B09958')
+        assertPlaylist('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
+        assertPlaylist('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
+        assertPlaylist('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
+        assertPlaylist('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012') #668
+        self.assertFalse('youtube:playlist' in self.matching_ies('PLtS2H6bU1M'))
+        # Top tracks
+        assertPlaylist('https://www.youtube.com/playlist?list=MCUS.20142101')

     def test_youtube_matching(self):
-        self.assertTrue(YoutubeIE.suitable(u'PLtS2H6bU1M'))
-        self.assertFalse(YoutubeIE.suitable(u'https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
+        self.assertTrue(YoutubeIE.suitable('PLtS2H6bU1M'))
+        self.assertFalse(YoutubeIE.suitable('https://www.youtube.com/watch?v=AV6J6_AeFEQ&playnext=1&list=PL4023E734DA416012')) #668
         self.assertMatch('http://youtu.be/BaW_jenozKc', ['youtube'])
         self.assertMatch('http://www.youtube.com/v/BaW_jenozKc', ['youtube'])
         self.assertMatch('https://youtube.googleapis.com/v/BaW_jenozKc', ['youtube'])
+        self.assertMatch('http://www.cleanvideosearch.com/media/action/yt/watch?videoId=8v_4O44sfjM', ['youtube'])

     def test_youtube_channel_matching(self):
         assertChannel = lambda url: self.assertMatch(url, ['youtube:channel'])

@@ -63,24 +69,31 @@ class TestAllURLsMatching(unittest.TestCase):
     def test_youtube_show_matching(self):
         self.assertMatch('http://www.youtube.com/show/airdisasters', ['youtube:show'])

+    def test_youtube_truncated(self):
+        self.assertMatch('http://www.youtube.com/watch?', ['youtube:truncated_url'])
+
+    def test_youtube_search_matching(self):
+        self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
+        self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
+
     def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"www.justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"www.twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.justin.tv/vanillatv/"))
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/"))
+        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
+        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))

     def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/vanillatv/b/328087483"))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))

     def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable(u"http://www.twitch.tv/tsm_theoddone/c/2349361"))
+        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))

     def test_youtube_extract(self):
-        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE()._extract_id(url), id)
+        assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
         assertExtractId('http://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?&v=BaW_jenozKc', 'BaW_jenozKc')
         assertExtractId('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc', 'BaW_jenozKc')

@ -89,11 +102,11 @@ class TestAllURLsMatching(unittest.TestCase):
|
||||
assertExtractId('BaW_jenozKc', 'BaW_jenozKc')
|
||||
|
||||
def test_facebook_matching(self):
|
||||
self.assertTrue(FacebookIE.suitable(u'https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
|
||||
self.assertTrue(FacebookIE.suitable('https://www.facebook.com/Shiniknoh#!/photo.php?v=10153317450565268'))
|
||||
|
||||
def test_no_duplicates(self):
|
||||
ies = gen_extractors()
|
||||
for tc in get_testcases():
|
||||
for tc in gettestcases(include_onlymatching=True):
|
||||
url = tc['url']
|
||||
for ie in ies:
|
||||
if type(ie).__name__ in ('GenericIE', tc['name'] + 'IE'):
|
||||
@ -112,6 +125,8 @@ class TestAllURLsMatching(unittest.TestCase):
|
||||
|
||||
def test_vimeo_matching(self):
|
||||
self.assertMatch('http://vimeo.com/channels/tributes', ['vimeo:channel'])
|
||||
self.assertMatch('http://vimeo.com/channels/31259', ['vimeo:channel'])
|
||||
self.assertMatch('http://vimeo.com/channels/31259/53576664', ['vimeo'])
|
||||
self.assertMatch('http://vimeo.com/user7108434', ['vimeo:user'])
|
||||
self.assertMatch('http://vimeo.com/user7108434/videos', ['vimeo:user'])
|
||||
self.assertMatch('https://vimeo.com/user21297594/review/75524534/3c257a1b5d', ['vimeo:review'])
|
||||
@ -124,5 +139,43 @@ class TestAllURLsMatching(unittest.TestCase):
|
||||
self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', ['Tumblr'])
|
||||
self.assertMatch('http://tatianamaslanydaily.tumblr.com/post/54196191430', ['Tumblr'])
|
||||
|
||||
def test_pbs(self):
|
||||
# https://github.com/rg3/youtube-dl/issues/2350
|
||||
self.assertMatch('http://video.pbs.org/viralplayer/2365173446/', ['PBS'])
|
||||
self.assertMatch('http://video.pbs.org/widget/partnerplayer/980042464/', ['PBS'])
|
||||
|
||||
def test_ComedyCentralShows(self):
|
||||
self.assertMatch(
|
||||
'http://thedailyshow.cc.com/extended-interviews/xm3fnq/andrew-napolitano-extended-interview',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thecolbertreport.cc.com/videos/29w6fx/-realhumanpraise-for-fox-news',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thecolbertreport.cc.com/videos/gh6urb/neil-degrasse-tyson-pt--1?xrs=eml_col_031114',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thedailyshow.cc.com/guests/michael-lewis/3efna8/exclusive---michael-lewis-extended-interview-pt--3',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thedailyshow.cc.com/episodes/sy7yv0/april-8--2014---denis-leary',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thecolbertreport.cc.com/episodes/8ase07/april-8--2014---jane-goodall',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thedailyshow.cc.com/video-playlists/npde3s/the-daily-show-19088-highlights',
|
||||
['ComedyCentralShows'])
|
||||
self.assertMatch(
|
||||
'http://thedailyshow.cc.com/special-editions/2l8fdb/special-edition---a-look-back-at-food',
|
||||
['ComedyCentralShows'])
|
||||
|
||||
def test_yahoo_https(self):
|
||||
# https://github.com/rg3/youtube-dl/issues/2701
|
||||
self.assertMatch(
|
||||
'https://screen.yahoo.com/smartwatches-latest-wearable-gadgets-163745379-cbs.html',
|
||||
['Yahoo'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
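All of these matching tests funnel through InfoExtractor.suitable(), which in youtube-dl of this era is a cached regex match against the extractor's _VALID_URL pattern. A minimal sketch of that mechanism; ExampleIE and its pattern are invented for illustration, not the real YoutubeIE:

import re

class ExampleIE(object):
    # Illustrative pattern only; real extractors ship far more elaborate ones.
    _VALID_URL = r'https?://(?:www\.)?example\.com/watch\?v=(?P<id>[0-9A-Za-z_-]{11})'

    @classmethod
    def suitable(cls, url):
        # Compile lazily and cache on the class, as the real base class does.
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

assert ExampleIE.suitable('http://example.com/watch?v=BaW_jenozKc')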
@@ -8,10 +8,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    get_params,
    get_testcases,
    try_rm,
    gettestcases,
    expect_info_dict,
    md5,
    report_warning
    try_rm,
    report_warning,
)


@@ -50,7 +51,7 @@ def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

defs = get_testcases()
defs = gettestcases()


class TestDownload(unittest.TestCase):
@@ -72,9 +73,7 @@ def generator(test_case):
    if 'playlist' not in test_case:
        info_dict = test_case.get('info_dict', {})
        if not test_case.get('file') and not (info_dict.get('id') and info_dict.get('ext')):
            print_skipping('The output file cannot be know, the "file" '
                'key is missing or the info_dict is incomplete')
            return
            raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')
    if 'skip' in test_case:
        print_skipping(test_case['skip'])
        return
@@ -136,27 +135,8 @@ def generator(test_case):
            self.assertEqual(md5_for_file, tc['md5'])
        with io.open(info_json_fn, encoding='utf-8') as infof:
            info_dict = json.load(infof)
        for (info_field, expected) in tc.get('info_dict', {}).items():
            if isinstance(expected, compat_str) and expected.startswith('md5:'):
                got = 'md5:' + md5(info_dict.get(info_field))
            else:
                got = info_dict.get(info_field)
            self.assertEqual(expected, got,
                u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))

        # If checkable fields are missing from the test case, print the info_dict
        test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
            for key, value in info_dict.items()
            if value and key in ('title', 'description', 'uploader', 'upload_date', 'uploader_id', 'location'))
        if not all(key in tc.get('info_dict', {}).keys() for key in test_info_dict.keys()):
            sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')

        # Check for the presence of mandatory fields
        for key in ('id', 'url', 'title', 'ext'):
            self.assertTrue(key in info_dict.keys() and info_dict[key])
        # Check for mandatory fields that are automatically set by YoutubeDL
        for key in ['webpage_url', 'extractor', 'extractor_key']:
            self.assertTrue(info_dict.get(key), u'Missing field: %s' % key)
        expect_info_dict(self, tc.get('info_dict', {}), info_dict)
    finally:
        try_rm_tcs_files()
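The inline field-by-field comparison removed above now lives in test.helper.expect_info_dict, whose body is not shown in this diff. A sketch reconstructed from the removed checks, plus the 're:' regex convention the Instagram test below relies on; treat the details as an approximation:

import hashlib
import re

def md5(s):
    return hashlib.md5(s.encode('utf-8')).hexdigest()

def expect_info_dict(self, expected_dict, got_dict):
    for info_field, expected in expected_dict.items():
        got = got_dict.get(info_field)
        if isinstance(expected, str) and expected.startswith('re:'):
            # Regex-valued expectation, e.g. 'thumbnail': 're:^https?://.*\.jpg'
            self.assertTrue(
                isinstance(got, str) and re.match(expected[len('re:'):], got),
                'field %s: %r does not match %r' % (info_field, got, expected))
        elif isinstance(expected, str) and expected.startswith('md5:'):
            # Long fields are pinned by hash instead of verbatim text
            self.assertEqual(expected, 'md5:' + md5(got))
        else:
            self.assertEqual(
                expected, got,
                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))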
@@ -9,8 +9,10 @@ import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL

from test.helper import (
    expect_info_dict,
    FakeYDL,
)

from youtube_dl.extractor import (
    AcademicEarthCourseIE,
@@ -36,6 +38,12 @@ from youtube_dl.extractor import (
    RutubeChannelIE,
    GoogleSearchIE,
    GenericIE,
    TEDIE,
    ToypicsUserIE,
    XTubeUserIE,
    InstagramUserIE,
    CSpanIE,
    AolIE,
)


@@ -55,10 +63,10 @@ class TestPlaylists(unittest.TestCase):
    def test_dailymotion_user(self):
        dl = FakeYDL()
        ie = DailymotionUserIE(dl)
        result = ie.extract('http://www.dailymotion.com/user/generation-quoi/')
        result = ie.extract('https://www.dailymotion.com/user/nqtv')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'Génération Quoi')
        self.assertTrue(len(result['entries']) >= 26)
        self.assertEqual(result['title'], 'Rémi Gaillard')
        self.assertTrue(len(result['entries']) >= 100)

    def test_vimeo_channel(self):
        dl = FakeYDL()
@@ -98,7 +106,7 @@ class TestPlaylists(unittest.TestCase):
        result = ie.extract('http://www.ustream.tv/channel/young-americans-for-liberty')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], '5124905')
        self.assertTrue(len(result['entries']) >= 11)
        self.assertTrue(len(result['entries']) >= 6)

    def test_soundcloud_set(self):
        dl = FakeYDL()
@@ -170,12 +178,12 @@ class TestPlaylists(unittest.TestCase):
    def test_AcademicEarthCourse(self):
        dl = FakeYDL()
        ie = AcademicEarthCourseIE(dl)
        result = ie.extract('http://academicearth.org/courses/building-dynamic-websites/')
        result = ie.extract('http://academicearth.org/playlists/laws-of-nature/')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'building-dynamic-websites')
        self.assertEqual(result['title'], 'Building Dynamic Websites')
        self.assertEqual(result['description'], u"Today's websites are increasingly dynamic. Pages are no longer static HTML files but instead generated by scripts and database calls. User interfaces are more seamless, with technologies like Ajax replacing traditional page reloads. This course teaches students how to build dynamic websites with Ajax and with Linux, Apache, MySQL, and PHP (LAMP), one of today's most popular frameworks. Students learn how to set up domain names with DNS, how to structure pages with XHTML and CSS, how to program in JavaScript and PHP, how to configure Apache and MySQL, how to design and query databases with SQL, how to use Ajax with both XML and JSON, and how to build mashups. The course explores issues of security, scalability, and cross-browser support and also discusses enterprise-level deployments of websites, including third-party hosting, virtualization, colocation in data centers, firewalling, and load-balancing.")
        self.assertEqual(len(result['entries']), 10)
        self.assertEqual(result['id'], 'laws-of-nature')
        self.assertEqual(result['title'], 'Laws of Nature')
        self.assertEqual(result['description'],u'Introduce yourself to the laws of nature with these free online college lectures from Yale, Harvard, and MIT.')
        self.assertEqual(len(result['entries']), 4)

    def test_ivi_compilation(self):
        dl = FakeYDL()
@@ -184,8 +192,8 @@ class TestPlaylists(unittest.TestCase):
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'dezhurnyi_angel')
        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012)')
        self.assertTrue(len(result['entries']) >= 36)

        self.assertTrue(len(result['entries']) >= 23)

    def test_ivi_compilation_season(self):
        dl = FakeYDL()
        ie = IviCompilationIE(dl)
@@ -193,7 +201,7 @@ class TestPlaylists(unittest.TestCase):
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'dezhurnyi_angel/season2')
        self.assertEqual(result['title'], 'Дежурный ангел (2010 - 2012) 2 сезон')
        self.assertTrue(len(result['entries']) >= 20)
        self.assertTrue(len(result['entries']) >= 7)

    def test_imdb_list(self):
        dl = FakeYDL()
@@ -248,7 +256,88 @@ class TestPlaylists(unittest.TestCase):
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'python language')
        self.assertEqual(result['title'], 'python language')
        self.assertTrue(len(result['entries']) == 15)
        self.assertEqual(len(result['entries']), 15)

    def test_generic_rss_feed(self):
        dl = FakeYDL()
        ie = GenericIE(dl)
        result = ie.extract('http://phihag.de/2014/youtube-dl/rss.xml')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'http://phihag.de/2014/youtube-dl/rss.xml')
        self.assertEqual(result['title'], 'Zero Punctuation')
        self.assertTrue(len(result['entries']) > 10)

    def test_ted_playlist(self):
        dl = FakeYDL()
        ie = TEDIE(dl)
        result = ie.extract('http://www.ted.com/playlists/who_are_the_hackers')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], '10')
        self.assertEqual(result['title'], 'Who are the hackers?')
        self.assertTrue(len(result['entries']) >= 6)

    def test_toypics_user(self):
        dl = FakeYDL()
        ie = ToypicsUserIE(dl)
        result = ie.extract('http://videos.toypics.net/Mikey')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'Mikey')
        self.assertTrue(len(result['entries']) >= 17)

    def test_xtube_user(self):
        dl = FakeYDL()
        ie = XTubeUserIE(dl)
        result = ie.extract('http://www.xtube.com/community/profile.php?user=greenshowers')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'greenshowers')
        self.assertTrue(len(result['entries']) >= 155)

    def test_InstagramUser(self):
        dl = FakeYDL()
        ie = InstagramUserIE(dl)
        result = ie.extract('http://instagram.com/porsche')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], 'porsche')
        self.assertTrue(len(result['entries']) >= 2)
        test_video = next(
            e for e in result['entries']
            if e['id'] == '614605558512799803_462752227')
        dl.add_default_extra_info(test_video, ie, '(irrelevant URL)')
        dl.process_video_result(test_video, download=False)
        EXPECTED = {
            'id': '614605558512799803_462752227',
            'ext': 'mp4',
            'title': '#Porsche Intelligent Performance.',
            'thumbnail': 're:^https?://.*\.jpg',
            'uploader': 'Porsche',
            'uploader_id': 'porsche',
            'timestamp': 1387486713,
            'upload_date': '20131219',
        }
        expect_info_dict(self, EXPECTED, test_video)

    def test_CSpan_playlist(self):
        dl = FakeYDL()
        ie = CSpanIE(dl)
        result = ie.extract(
            'http://www.c-span.org/video/?318608-1/gm-ignition-switch-recall')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], '342759')
        self.assertEqual(
            result['title'], 'General Motors Ignition Switch Recall')
        whole_duration = sum(e['duration'] for e in result['entries'])
        self.assertEqual(whole_duration, 14855)

    def test_aol_playlist(self):
        dl = FakeYDL()
        ie = AolIE(dl)
        result = ie.extract(
            'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316')
        self.assertIsPlaylist(result)
        self.assertEqual(result['id'], '152147')
        self.assertEqual(
            result['title'], 'Brace Yourself - Today\'s Weirdest News')
        self.assertTrue(len(result['entries']) >= 10)

if __name__ == '__main__':
    unittest.main()
@@ -9,6 +9,8 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


# Various small unit tests
import io
import json
import xml.etree.ElementTree

#from youtube_dl.utils import htmlentity_transform
@@ -21,16 +23,22 @@ from youtube_dl.utils import (
    orderedSet,
    PagedList,
    parse_duration,
    read_batch_urls,
    sanitize_filename,
    shell_quote,
    smuggle_url,
    str_to_int,
    struct_unpack,
    timeconvert,
    unescapeHTML,
    unified_strdate,
    unsmuggle_url,
    url_basename,
    urlencode_postdata,
    xpath_with_ns,
    parse_iso8601,
    strip_jsonp,
    uppercase_escape,
)

if sys.version_info < (3, 0):
@@ -127,6 +135,7 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(unified_strdate('8/7/2009'), '20090708')
        self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
        self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
        self.assertEqual(unified_strdate('1968-12-10'), '19681210')

    def test_find_xpath_attr(self):
        testxml = u'''<root>
@@ -200,7 +209,16 @@ class TestUtil(unittest.TestCase):
        self.assertEqual(parse_duration('1'), 1)
        self.assertEqual(parse_duration('1337:12'), 80232)
        self.assertEqual(parse_duration('9:12:43'), 33163)
        self.assertEqual(parse_duration('12:00'), 720)
        self.assertEqual(parse_duration('00:01:01'), 61)
        self.assertEqual(parse_duration('x:y'), None)
        self.assertEqual(parse_duration('3h11m53s'), 11513)
        self.assertEqual(parse_duration('62m45s'), 3765)
        self.assertEqual(parse_duration('6m59s'), 419)
        self.assertEqual(parse_duration('49s'), 49)
        self.assertEqual(parse_duration('0h0m0s'), 0)
        self.assertEqual(parse_duration('0m0s'), 0)
        self.assertEqual(parse_duration('0s'), 0)
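The new cases pin down both colon-separated and h/m/s-suffixed spellings. A simplified parse_duration that satisfies every assertion above; the real youtube_dl.utils version covers more spellings, so treat this as a sketch:

import re

def parse_duration(s):
    if s is None:
        return None
    m = re.match(
        r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?$', s)
    if not m:
        return None  # e.g. 'x:y'
    res = int(m.group('secs'))
    if m.group('mins'):
        res += int(m.group('mins')) * 60  # '1337:12' -> 80232; minutes may exceed 59
    if m.group('hours'):
        res += int(m.group('hours')) * 3600  # '3h11m53s' -> 11513
    return res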

    def test_fix_xml_ampersands(self):
        self.assertEqual(
@@ -236,5 +254,35 @@ class TestUtil(unittest.TestCase):
        testPL(5, 2, (2, 99), [2, 3, 4])
        testPL(5, 2, (20, 99), [])

    def test_struct_unpack(self):
        self.assertEqual(struct_unpack(u'!B', b'\x00'), (0,))

    def test_read_batch_urls(self):
        f = io.StringIO(u'''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
        self.assertEqual(read_batch_urls(f), [u'foo', u'bar', u'baz', u'bam'])
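The fixture encodes what read_batch_urls must tolerate: a UTF-8 BOM, surrounding whitespace, CR line endings, and comment lines starting with '#' or ';'. A sketch consistent with this test; the shipped helper may differ in detail:

import contextlib
import io

def read_batch_urls(batch_fd):
    def fixup(url):
        # Strip a BOM that survived decoding, then whitespace and \r
        for bom in ('\xef\xbb\xbf', '\ufeff'):
            if url.startswith(bom):
                url = url[len(bom):]
        url = url.strip()
        if not url or url.startswith(('#', ';')):
            return None  # comment or blank line
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(fixup, fd) if url]

assert read_batch_urls(io.StringIO(u'\ufeff foo\nbar\r\n# comment\nbam')) == [u'foo', u'bar', u'bam']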

    def test_urlencode_postdata(self):
        data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
        self.assertTrue(isinstance(data, bytes))

    def test_parse_iso8601(self):
        self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
        self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
        self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)

    def test_strip_jsonp(self):
        stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
        d = json.loads(stripped)
        self.assertEqual(d, [{"id": "532cb", "x": 3}])

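That one case is enough to sketch strip_jsonp: peel the callback wrapper and keep the JSON payload. The regex here is an assumption, not the shipped implementation:

import json
import re

def strip_jsonp(code):
    # Drop a leading 'callback(' and a trailing ');'
    return re.sub(r'^\s*[A-Za-z0-9_$.]+\s*\(|\)\s*;?\s*$', '', code)

d = json.loads(strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);'))
assert d == [{"id": "532cb", "x": 3}]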
    def test_uppercase_escpae(self):
        self.assertEqual(uppercase_escape(u'aä'), u'aä')
        self.assertEqual(uppercase_escape(u'\\U0001d550'), u'𝕐')

if __name__ == '__main__':
    unittest.main()
@@ -16,6 +16,7 @@ from youtube_dl.extractor import (
    YoutubeChannelIE,
    YoutubeShowIE,
    YoutubeTopListIE,
    YoutubeSearchURLIE,
)


@@ -30,7 +31,7 @@ class TestYoutubeLists(unittest.TestCase):
        result = ie.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'ytdl test PL')
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
        self.assertEqual(ytie_results, [ 'bV9L5Ht9LgY', 'FXxLjLQi3Fg', 'tU3Bgo5qJZE'])

    def test_youtube_playlist_noplaylist(self):
@@ -39,7 +40,7 @@ class TestYoutubeLists(unittest.TestCase):
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/watch?v=FXxLjLQi3Fg&list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
        self.assertEqual(result['_type'], 'url')
        self.assertEqual(YoutubeIE()._extract_id(result['url']), 'FXxLjLQi3Fg')
        self.assertEqual(YoutubeIE().extract_id(result['url']), 'FXxLjLQi3Fg')

    def test_issue_673(self):
        dl = FakeYDL()
@@ -59,7 +60,7 @@ class TestYoutubeLists(unittest.TestCase):
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=PLwP_SiAcdui0KVebT0mU9Apz359a4ubsC')
        ytie_results = [YoutubeIE()._extract_id(url['url']) for url in result['entries']]
        ytie_results = [YoutubeIE().extract_id(url['url']) for url in result['entries']]
        self.assertFalse('pElCt5oNDuI' in ytie_results)
        self.assertFalse('KdPEApIVdWM' in ytie_results)

@@ -76,9 +77,9 @@ class TestYoutubeLists(unittest.TestCase):
        # TODO find a > 100 (paginating?) videos course
        result = ie.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
        entries = result['entries']
        self.assertEqual(YoutubeIE()._extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(YoutubeIE().extract_id(entries[0]['url']), 'j9WZyLZCBzs')
        self.assertEqual(len(entries), 25)
        self.assertEqual(YoutubeIE()._extract_id(entries[-1]['url']), 'rYefUsYuEp0')
        self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')

    def test_youtube_channel(self):
        dl = FakeYDL()
@@ -117,6 +118,15 @@ class TestYoutubeLists(unittest.TestCase):
        original_video = entries[0]
        self.assertEqual(original_video['id'], 'rjFaenf1T-Y')

    def test_youtube_toptracks(self):
        print('Skipping: The playlist page gives error 500')
        return
        dl = FakeYDL()
        ie = YoutubePlaylistIE(dl)
        result = ie.extract('https://www.youtube.com/playlist?list=MCUS')
        entries = result['entries']
        self.assertEqual(len(entries), 100)

    def test_youtube_toplist(self):
        dl = FakeYDL()
        ie = YoutubeTopListIE(dl)
@@ -124,5 +134,14 @@ class TestYoutubeLists(unittest.TestCase):
        entries = result['entries']
        self.assertTrue(len(entries) >= 5)

    def test_youtube_search_url(self):
        dl = FakeYDL()
        ie = YoutubeSearchURLIE(dl)
        result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
        entries = result['entries']
        self.assertIsPlaylist(result)
        self.assertEqual(result['title'], 'youtube-dl test video')
        self.assertTrue(len(entries) >= 5)

if __name__ == '__main__':
    unittest.main()
@@ -1,4 +0,0 @@
# Legacy file for backwards compatibility, use youtube_dl.extractor instead!

from .extractor.common import InfoExtractor, SearchInfoExtractor
from .extractor import gen_extractors, get_info_extractor
140
youtube_dl/YoutubeDL.py
Normal file → Executable file
@@ -4,9 +4,11 @@
from __future__ import absolute_import, unicode_literals

import collections
import datetime
import errno
import io
import json
import locale
import os
import platform
import re
@@ -93,6 +95,7 @@ class YoutubeDL(object):
    usenetrc: Use netrc for authentication instead.
    verbose: Print additional info to stdout.
    quiet: Do not print messages to stdout.
    no_warnings: Do not print out anything for warnings.
    forceurl: Force printing final URL.
    forcetitle: Force printing title.
    forceid: Force printing ID.
@@ -147,6 +150,8 @@ class YoutubeDL(object):
    again.
    cookiefile: File name where cookies should be read from and dumped to.
    nocheckcertificate:Do not verify SSL certificates
    prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
    At the moment, this is only supported by YouTube.
    proxy: URL of the proxy server to use
    socket_timeout: Time to wait for unresponsive hosts, in seconds
    bidi_workaround: Work around buggy terminals without bidirectional text
@@ -155,6 +160,7 @@ class YoutubeDL(object):
    include_ads: Download ads as well
    default_search: Prepend this string if an input url is not valid.
    'auto' for elaborate guessing
    encoding: Use this encoding instead of the system-specified.

    The following parameters are not used by YoutubeDL itself, they are used by
    the FileDownloader:
@@ -280,6 +286,9 @@ class YoutubeDL(object):
        """Print message to stdout if not in quiet mode."""
        return self.to_stdout(message, skip_eol, check_quiet=True)

    def _write_string(self, s, out=None):
        write_string(s, out=out, encoding=self.params.get('encoding'))

    def to_stdout(self, message, skip_eol=False, check_quiet=False):
        """Print message to stdout if not in quiet mode."""
        if self.params.get('logger'):
@@ -289,7 +298,7 @@ class YoutubeDL(object):
            terminator = ['\n', ''][skip_eol]
            output = message + terminator

            write_string(output, self._screen_file)
            self._write_string(output, self._screen_file)

    def to_stderr(self, message):
        """Print message to stderr."""
@@ -299,7 +308,7 @@ class YoutubeDL(object):
        else:
            message = self._bidi_workaround(message)
        output = message + '\n'
        write_string(output, self._err_file)
        self._write_string(output, self._err_file)

    def to_console_title(self, message):
        if not self.params.get('consoletitle', False):
@@ -309,21 +318,21 @@ class YoutubeDL(object):
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
        elif 'TERM' in os.environ:
            write_string('\033]0;%s\007' % message, self._screen_file)
            self._write_string('\033]0;%s\007' % message, self._screen_file)

    def save_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if 'TERM' in os.environ:
            # Save the title on stack
            write_string('\033[22;0t', self._screen_file)
            self._write_string('\033[22;0t', self._screen_file)

    def restore_console_title(self):
        if not self.params.get('consoletitle', False):
            return
        if 'TERM' in os.environ:
            # Restore the title from stack
            write_string('\033[23;0t', self._screen_file)
            self._write_string('\033[23;0t', self._screen_file)

    def __enter__(self):
        self.save_console_title()
@@ -370,12 +379,17 @@ class YoutubeDL(object):
        Print the message to stderr, it will be prefixed with 'WARNING:'
        If stderr is a tty file the 'WARNING:' will be colored
        '''
        if self._err_file.isatty() and os.name != 'nt':
            _msg_header = '\033[0;33mWARNING:\033[0m'
        if self.params.get('logger') is not None:
            self.params['logger'].warning(message)
        else:
            _msg_header = 'WARNING:'
        warning_message = '%s %s' % (_msg_header, message)
        self.to_stderr(warning_message)
            if self.params.get('no_warnings'):
                return
            if self._err_file.isatty() and os.name != 'nt':
                _msg_header = '\033[0;33mWARNING:\033[0m'
            else:
                _msg_header = 'WARNING:'
            warning_message = '%s %s' % (_msg_header, message)
            self.to_stderr(warning_message)

    def report_error(self, message, tb=None):
        '''
@@ -409,6 +423,13 @@ class YoutubeDL(object):
        template_dict['autonumber'] = autonumber_templ % self._num_downloads
        if template_dict.get('playlist_index') is not None:
            template_dict['playlist_index'] = '%05d' % template_dict['playlist_index']
        if template_dict.get('resolution') is None:
            if template_dict.get('width') and template_dict.get('height'):
                template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
            elif template_dict.get('height'):
                template_dict['resolution'] = '%sp' % template_dict['height']
            elif template_dict.get('width'):
                template_dict['resolution'] = '?x%d' % template_dict['width']

        sanitize = lambda k, v: sanitize_filename(
            compat_str(v),
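The new %(resolution)s fallback can be read off directly; a worked example of the three branches (the helper name is just for illustration):

def resolution_of(d):
    # Mirrors the fallback logic added above
    if d.get('width') and d.get('height'):
        return '%dx%d' % (d['width'], d['height'])
    elif d.get('height'):
        return '%sp' % d['height']
    elif d.get('width'):
        return '?x%d' % d['width']

assert resolution_of({'width': 1280, 'height': 720}) == '1280x720'
assert resolution_of({'height': 720}) == '720p'
assert resolution_of({'width': 1280}) == '?x1280'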
@@ -499,13 +520,7 @@ class YoutubeDL(object):
                '_type': 'compat_list',
                'entries': ie_result,
            }
        self.add_extra_info(ie_result,
            {
                'extractor': ie.IE_NAME,
                'webpage_url': url,
                'webpage_url_basename': url_basename(url),
                'extractor_key': ie.ie_key(),
            })
        self.add_default_extra_info(ie_result, ie, url)
        if process:
            return self.process_ie_result(ie_result, download, extra_info)
        else:
@@ -522,7 +537,15 @@ class YoutubeDL(object):
            else:
                raise
        else:
            self.report_error('no suitable InfoExtractor: %s' % url)
            self.report_error('no suitable InfoExtractor for URL %s' % url)

    def add_default_extra_info(self, ie_result, ie, url):
        self.add_extra_info(ie_result, {
            'extractor': ie.IE_NAME,
            'webpage_url': url,
            'webpage_url_basename': url_basename(url),
            'extractor_key': ie.ie_key(),
        })

    def process_ie_result(self, ie_result, download=True, extra_info={}):
        """
@@ -656,6 +679,18 @@ class YoutubeDL(object):
                if f.get('vcodec') == 'none']
            if audio_formats:
                return audio_formats[0]
        elif format_spec == 'bestvideo':
            video_formats = [
                f for f in available_formats
                if f.get('acodec') == 'none']
            if video_formats:
                return video_formats[-1]
        elif format_spec == 'worstvideo':
            video_formats = [
                f for f in available_formats
                if f.get('acodec') == 'none']
            if video_formats:
                return video_formats[0]
        else:
            extensions = ['mp4', 'flv', 'webm', '3gp']
            if format_spec in extensions:
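The bestvideo/worstvideo branches rely on the same convention as the existing bestaudio/worstaudio ones: available_formats is ordered worst-to-best, audio-only entries have vcodec == 'none', and video-only entries have acodec == 'none'. A small illustration with format dicts fabricated for the example:

available_formats = [
    {'format_id': 'audio-low', 'vcodec': 'none', 'acodec': 'aac'},
    {'format_id': 'video-240p', 'vcodec': 'h264', 'acodec': 'none'},
    {'format_id': 'video-720p', 'vcodec': 'h264', 'acodec': 'none'},
]
video_only = [f for f in available_formats if f.get('acodec') == 'none']
assert video_only[-1]['format_id'] == 'video-720p'  # 'bestvideo' picks the last
assert video_only[0]['format_id'] == 'video-240p'   # 'worstvideo' picks the first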
@@ -670,11 +705,24 @@ class YoutubeDL(object):
    def process_video_result(self, info_dict, download=True):
        assert info_dict.get('_type', 'video') == 'video'

        if 'id' not in info_dict:
            raise ExtractorError('Missing "id" field in extractor result')
        if 'title' not in info_dict:
            raise ExtractorError('Missing "title" field in extractor result')

        if 'playlist' not in info_dict:
            # It isn't part of a playlist
            info_dict['playlist'] = None
            info_dict['playlist_index'] = None

        if 'display_id' not in info_dict and 'id' in info_dict:
            info_dict['display_id'] = info_dict['id']

        if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
            upload_date = datetime.datetime.utcfromtimestamp(
                info_dict['timestamp'])
            info_dict['upload_date'] = upload_date.strftime('%Y%m%d')

        # This extractors handle format selection themselves
        if info_dict['extractor'] in ['Youku']:
            if download:
@@ -688,8 +736,14 @@ class YoutubeDL(object):
        else:
            formats = info_dict['formats']

        if not formats:
            raise ExtractorError('No video formats found!')

        # We check that all the formats have the format and format_id fields
        for (i, format) in enumerate(formats):
        for i, format in enumerate(formats):
            if 'url' not in format:
                raise ExtractorError('Missing "url" key in result (index %d)' % i)

            if format.get('format_id') is None:
                format['format_id'] = compat_str(i)
            if format.get('format') is None:
@@ -700,7 +754,7 @@ class YoutubeDL(object):
                )
            # Automatically determine file extension if missing
            if 'ext' not in format:
                format['ext'] = determine_ext(format['url'])
                format['ext'] = determine_ext(format['url']).lower()

        format_limit = self.params.get('format_limit', None)
        if format_limit:
@@ -825,7 +879,7 @@ class YoutubeDL(object):

        try:
            dn = os.path.dirname(encodeFilename(filename))
            if dn != '' and not os.path.exists(dn):
            if dn and not os.path.exists(dn):
                os.makedirs(dn)
        except (OSError, IOError) as err:
            self.report_error('unable to create directory ' + compat_str(err))
@@ -882,7 +936,7 @@ class YoutubeDL(object):
                with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8') as subfile:
                    subfile.write(sub)
            except (OSError, IOError):
                self.report_error('Cannot write subtitles file ' + descfn)
                self.report_error('Cannot write subtitles file ' + sub_filename)
                return

        if self.params.get('writeinfojson', False):
@@ -908,7 +962,7 @@ class YoutubeDL(object):
            self.to_screen('[%s] %s: Downloading thumbnail ...' %
                           (info_dict['extractor'], info_dict['id']))
            try:
                uf = compat_urllib_request.urlopen(info_dict['thumbnail'])
                uf = self.urlopen(info_dict['thumbnail'])
                with open(thumb_filename, 'wb') as thumbf:
                    shutil.copyfileobj(uf, thumbf)
                self.to_screen('[%s] %s: Writing thumbnail to: %s' %
@@ -1154,12 +1208,22 @@ class YoutubeDL(object):

    def urlopen(self, req):
        """ Start an HTTP download """
        return self._opener.open(req)
        return self._opener.open(req, timeout=self._socket_timeout)

    def print_debug_header(self):
        if not self.params.get('verbose'):
            return
        write_string('[debug] youtube-dl version ' + __version__ + '\n')

        write_string(
            '[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
                locale.getpreferredencoding(),
                sys.getfilesystemencoding(),
                sys.stdout.encoding,
                self.get_encoding()),
            encoding=None
        )

        self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
        try:
            sp = subprocess.Popen(
                ['git', 'rev-parse', '--short', 'HEAD'],
@@ -1168,24 +1232,24 @@ class YoutubeDL(object):
            out, err = sp.communicate()
            out = out.decode().strip()
            if re.match('[0-9a-f]+', out):
                write_string('[debug] Git HEAD: ' + out + '\n')
                self._write_string('[debug] Git HEAD: ' + out + '\n')
        except:
            try:
                sys.exc_clear()
            except:
                pass
        write_string('[debug] Python version %s - %s' %
        self._write_string('[debug] Python version %s - %s' %
                     (platform.python_version(), platform_name()) + '\n')

        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
        self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')

    def _setup_opener(self):
        timeout_val = self.params.get('socket_timeout')
        timeout = 600 if timeout_val is None else float(timeout_val)
        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)

        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')
@@ -1224,6 +1288,18 @@ class YoutubeDL(object):
        opener.addheaders = []
        self._opener = opener

        # TODO remove this global modification
        compat_urllib_request.install_opener(opener)
        socket.setdefaulttimeout(timeout)

    def encode(self, s):
        if isinstance(s, bytes):
            return s  # Already encoded

        try:
            return s.encode(self.get_encoding())
        except UnicodeEncodeError as err:
            err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
            raise

    def get_encoding(self):
        encoding = self.params.get('encoding')
        if encoding is None:
            encoding = preferredencoding()
        return encoding
@@ -41,12 +41,24 @@ __authors__ = (
    'Chris Gahan',
    'Saimadhav Heblikar',
    'Mike Col',
    'Oleg Prutz',
    'pulpe',
    'Andreas Schmitz',
    'Michael Kaiser',
    'Niklas Laxström',
    'David Triendl',
    'Anthony Weems',
    'David Wagner',
    'Juan C. Olivares',
    'Mattias Harrysson',
    'phaer',
    'Sainyam Kapoor',
)

__license__ = 'Public Domain'

import codecs
import getpass
import io
import locale
import optparse
import os
@@ -57,6 +69,7 @@ import sys


from .utils import (
    compat_getpass,
    compat_print,
    DateRange,
    decodeOption,
@@ -65,6 +78,7 @@ from .utils import (
    get_cachedir,
    MaxDownloadsReached,
    preferredencoding,
    read_batch_urls,
    SameFileError,
    setproctitle,
    std_headers,
@@ -78,6 +92,8 @@ from .extractor import gen_extractors
from .version import __version__
from .YoutubeDL import YoutubeDL
from .postprocessor import (
    AtomicParsleyPP,
    FFmpegAudioFixPP,
    FFmpegMetadataPP,
    FFmpegVideoConvertor,
    FFmpegExtractAudioPP,
@@ -203,7 +219,7 @@ def parseOpts(overrideArguments=None):
    general.add_option('-U', '--update',
        action='store_true', dest='update_self', help='update this program to latest version. Make sure that you have sufficient permissions (run with sudo if needed)')
    general.add_option('-i', '--ignore-errors',
        action='store_true', dest='ignoreerrors', help='continue on download errors, for example to to skip unavailable videos in a playlist', default=False)
        action='store_true', dest='ignoreerrors', help='continue on download errors, for example to skip unavailable videos in a playlist', default=False)
    general.add_option('--abort-on-error',
        action='store_false', dest='ignoreerrors',
        help='Abort downloading of further videos (in the playlist or the command line) if an error occurs')
@@ -215,6 +231,9 @@ def parseOpts(overrideArguments=None):
    general.add_option('--referer',
        dest='referer', help='specify a custom referer, use if the video access is restricted to one domain',
        metavar='REF', default=None)
    general.add_option('--add-header',
        dest='headers', help='specify a custom HTTP header and its value, separated by a colon \':\'. You can use this option multiple times', action="append",
        metavar='FIELD:VALUE')
    general.add_option('--list-extractors',
        action='store_true', dest='list_extractors',
        help='List all supported extractors and the URLs they would handle', default=False)
@@ -225,6 +244,9 @@ def parseOpts(overrideArguments=None):
        '--proxy', dest='proxy', default=None, metavar='URL',
        help='Use the specified HTTP/HTTPS proxy. Pass in an empty string (--proxy "") for direct connection')
    general.add_option('--no-check-certificate', action='store_true', dest='no_check_certificate', default=False, help='Suppress HTTPS certificate validation.')
    general.add_option(
        '--prefer-insecure', '--prefer-unsecure', action='store_true', dest='prefer_insecure',
        help='Use an unencrypted connection to retrieve information about the video. (Currently supported only for YouTube)')
    general.add_option(
        '--cache-dir', dest='cachedir', default=get_cachedir(), metavar='DIR',
        help='Location in the filesystem where youtube-dl can store some downloaded information permanently. By default $XDG_CACHE_HOME/youtube-dl or ~/.cache/youtube-dl . At the moment, only YouTube player files (for videos with obfuscated signatures) are cached, but that may change.')
@@ -237,14 +259,17 @@ def parseOpts(overrideArguments=None):
    general.add_option(
        '--bidi-workaround', dest='bidi_workaround', action='store_true',
        help=u'Work around terminals that lack bidirectional text support. Requires bidiv or fribidi executable in PATH')
    general.add_option('--default-search',
        dest='default_search', metavar='PREFIX',
        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
    general.add_option(
        '--default-search',
        dest='default_search', metavar='PREFIX',
        help='Use this prefix for unqualified URLs. For example "gvsearch2:" downloads two videos from google videos for youtube-dl "large apple". By default (with value "auto") youtube-dl guesses.')
    general.add_option(
        '--ignore-config',
        action='store_true',
        help='Do not read configuration files. When given in the global configuration file /etc/youtube-dl.conf: do not read the user configuration in ~/.config/youtube-dl.conf (%APPDATA%/youtube-dl/config.txt on Windows)')

    general.add_option(
        '--encoding', dest='encoding', metavar='ENCODING',
        help='Force the specified encoding (experimental)')

    selection.add_option(
        '--playlist-start',
@@ -304,7 +329,7 @@ def parseOpts(overrideArguments=None):

    video_format.add_option('-f', '--format',
        action='store', dest='format', metavar='FORMAT', default=None,
        help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestaudio", "worst", and "worstaudio". By default, youtube-dl will pick the best quality.')
        help='video format code, specify the order of preference using slashes: "-f 22/17/18". "-f mp4" and "-f flv" are also supported. You can also use the special names "best", "bestvideo", "bestaudio", "worst", "worstvideo" and "worstaudio". By default, youtube-dl will pick the best quality.')
    video_format.add_option('--all-formats',
        action='store_const', dest='format', help='download all available video formats', const='all')
    video_format.add_option('--prefer-free-formats',
@@ -347,6 +372,10 @@ def parseOpts(overrideArguments=None):

    verbosity.add_option('-q', '--quiet',
        action='store_true', dest='quiet', help='activates quiet mode', default=False)
    verbosity.add_option(
        '--no-warnings',
        dest='no_warnings', action='store_true', default=False,
        help='Ignore warnings')
    verbosity.add_option('-s', '--simulate',
        action='store_true', dest='simulate', help='do not download the video and do not write anything to disk', default=False)
    verbosity.add_option('--skip-download',
@@ -374,7 +403,7 @@ def parseOpts(overrideArguments=None):
        help='simulate, quiet but print output format', default=False)
    verbosity.add_option('-j', '--dump-json',
        action='store_true', dest='dumpjson',
        help='simulate, quiet but print JSON information', default=False)
        help='simulate, quiet but print JSON information. See --output for a description of available keys.', default=False)
    verbosity.add_option('--newline',
        action='store_true', dest='progress_with_newline', help='output progress bar as new lines', default=False)
    verbosity.add_option('--no-progress',
@@ -419,6 +448,8 @@ def parseOpts(overrideArguments=None):
              '%(extractor)s for the provider (youtube, metacafe, etc), '
              '%(id)s for the video id, %(playlist)s for the playlist the video is in, '
              '%(playlist_index)s for the position in the playlist and %% for a literal percent. '
              '%(height)s and %(width)s for the width and height of the video format. '
              '%(resolution)s for a textual description of the resolution of the video format. '
              'Use - to output to stdout. Can also be used to download to a different directory, '
              'for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .'))
    filesystem.add_option('--autonumber-size',
@@ -474,6 +505,8 @@ def parseOpts(overrideArguments=None):
        help='do not overwrite post-processed files; the post-processed files are overwritten by default')
    postproc.add_option('--embed-subs', action='store_true', dest='embedsubtitles', default=False,
        help='embed subtitles in the video (only for mp4 videos)')
    postproc.add_option('--embed-thumbnail', action='store_true', dest='embedthumbnail', default=False,
        help='embed thumbnail in the audio as cover art')
    postproc.add_option('--add-metadata', action='store_true', dest='addmetadata', default=False,
        help='write metadata to the video file')
    postproc.add_option('--xattrs', action='store_true', dest='xattrs', default=False,
@@ -516,8 +549,6 @@ def parseOpts(overrideArguments=None):
        write_string(u'[debug] System config: ' + repr(_hide_login_info(systemConf)) + '\n')
        write_string(u'[debug] User config: ' + repr(_hide_login_info(userConf)) + '\n')
        write_string(u'[debug] Command-line args: ' + repr(_hide_login_info(commandLineConf)) + '\n')
        write_string(u'[debug] Encodings: locale %r, fs %r, out %r, pref: %r\n' %
                     (locale.getpreferredencoding(), sys.getfilesystemencoding(), sys.stdout.encoding, preferredencoding()))

    return parser, opts, args

@@ -540,27 +571,35 @@ def _real_main(argv=None):
    if opts.referer is not None:
        std_headers['Referer'] = opts.referer

    # Custom HTTP headers
    if opts.headers is not None:
        for h in opts.headers:
            if h.find(':', 1) < 0:
                parser.error(u'wrong header formatting, it should be key:value, not "%s"'%h)
            key, value = h.split(':', 2)
            if opts.verbose:
                write_string(u'[debug] Adding header from command line option %s:%s\n'%(key, value))
            std_headers[key] = value

    # Dump user agent
    if opts.dump_user_agent:
        compat_print(std_headers['User-Agent'])
        sys.exit(0)

    # Batch file verification
    batchurls = []
    batch_urls = []
    if opts.batchfile is not None:
        try:
            if opts.batchfile == '-':
                batchfd = sys.stdin
            else:
                batchfd = open(opts.batchfile, 'r')
            batchurls = batchfd.readlines()
            batchurls = [x.strip() for x in batchurls]
            batchurls = [x for x in batchurls if len(x) > 0 and not re.search(r'^[#/;]', x)]
                batchfd = io.open(opts.batchfile, 'r', encoding='utf-8', errors='ignore')
            batch_urls = read_batch_urls(batchfd)
            if opts.verbose:
                write_string(u'[debug] Batch file urls: ' + repr(batchurls) + u'\n')
                write_string(u'[debug] Batch file urls: ' + repr(batch_urls) + u'\n')
        except IOError:
            sys.exit(u'ERROR: batch file could not be read')
    all_urls = batchurls + args
    all_urls = batch_urls + args
    all_urls = [url.strip() for url in all_urls]
    _enc = preferredencoding()
    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
@@ -599,7 +638,7 @@ def _real_main(argv=None):
    if opts.usetitle and opts.useid:
        parser.error(u'using title conflicts with using video ID')
    if opts.username is not None and opts.password is None:
        opts.password = getpass.getpass(u'Type account password and press return:')
        opts.password = compat_getpass(u'Type account password and press [Return]: ')
    if opts.ratelimit is not None:
        numeric_limit = FileDownloader.parse_bytes(opts.ratelimit)
        if numeric_limit is None:
@@ -643,7 +682,7 @@ def _real_main(argv=None):
        date = DateRange.day(opts.date)
    else:
        date = DateRange(opts.dateafter, opts.datebefore)
    if opts.default_search not in ('auto', None) and ':' not in opts.default_search:
    if opts.default_search not in ('auto', 'auto_warning', None) and ':' not in opts.default_search:
        parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')

    # Do not download videos when there are audio-only formats
@@ -681,6 +720,7 @@ def _real_main(argv=None):
        'password': opts.password,
        'videopassword': opts.videopassword,
        'quiet': (opts.quiet or any_printing),
        'no_warnings': opts.no_warnings,
        'forceurl': opts.geturl,
        'forcetitle': opts.gettitle,
        'forceid': opts.getid,
@@ -744,6 +784,7 @@ def _real_main(argv=None):
        'download_archive': download_archive_fn,
        'cookiefile': opts.cookiefile,
        'nocheckcertificate': opts.no_check_certificate,
        'prefer_insecure': opts.prefer_insecure,
        'proxy': opts.proxy,
        'socket_timeout': opts.socket_timeout,
        'bidi_workaround': opts.bidi_workaround,
@@ -752,6 +793,7 @@ def _real_main(argv=None):
        'include_ads': opts.include_ads,
        'default_search': opts.default_search,
        'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
        'encoding': opts.encoding,
    }

    with YoutubeDL(ydl_opts) as ydl:
@@ -770,6 +812,10 @@ def _real_main(argv=None):
            ydl.add_post_processor(FFmpegEmbedSubtitlePP(subtitlesformat=opts.subtitlesformat))
        if opts.xattrs:
            ydl.add_post_processor(XAttrMetadataPP())
        if opts.embedthumbnail:
            if not opts.addmetadata:
                ydl.add_post_processor(FFmpegAudioFixPP())
            ydl.add_post_processor(AtomicParsleyPP())

        # Update version
        if opts.update_self:
@@ -5,6 +5,7 @@ from .hls import HlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
from .f4m import F4mFD

from ..utils import (
    determine_ext,
@@ -22,5 +23,7 @@ def get_suitable_downloader(info_dict):
        return HlsFD
    if url.startswith('mms') or url.startswith('rtsp'):
        return MplayerFD
    if determine_ext(url) == 'f4m':
        return F4mFD
    else:
        return HttpFD
@@ -4,9 +4,10 @@ import sys
import time

from ..utils import (
    compat_str,
    encodeFilename,
    timeconvert,
    format_bytes,
    timeconvert,
)


@@ -173,7 +174,7 @@ class FileDownloader(object):
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error(u'unable to rename file: %s' % str(err))
            self.report_error(u'unable to rename file: %s' % compat_str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file."""
315
youtube_dl/downloader/f4m.py
Normal file
@@ -0,0 +1,315 @@
from __future__ import unicode_literals

import base64
import io
import itertools
import os
import time
import xml.etree.ElementTree as etree

from .common import FileDownloader
from .http import HttpFD
from ..utils import (
    struct_pack,
    struct_unpack,
    compat_urlparse,
    format_bytes,
    encodeFilename,
    sanitize_open,
)


class FlvReader(io.BytesIO):
    """
    Reader for Flv files
    The file format is documented in https://www.adobe.com/devnet/f4v.html
    """

    # Utility functions for reading numbers and strings
    def read_unsigned_long_long(self):
        return struct_unpack('!Q', self.read(8))[0]

    def read_unsigned_int(self):
        return struct_unpack('!I', self.read(4))[0]

    def read_unsigned_char(self):
        return struct_unpack('!B', self.read(1))[0]

    def read_string(self):
        res = b''
        while True:
            char = self.read(1)
            if char == b'\x00':
                break
            res += char
        return res

    def read_box_info(self):
        """
        Read a box and return the info as a tuple: (box_size, box_type, box_data)
        """
        real_size = size = self.read_unsigned_int()
        box_type = self.read(4)
        header_end = 8
        if size == 1:
            real_size = self.read_unsigned_long_long()
            header_end = 16
        return real_size, box_type, self.read(real_size-header_end)

    def read_asrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        quality_entry_count = self.read_unsigned_char()
        # QualityEntryCount
        for i in range(quality_entry_count):
            self.read_string()

        segment_run_count = self.read_unsigned_int()
        segments = []
        for i in range(segment_run_count):
            first_segment = self.read_unsigned_int()
            fragments_per_segment = self.read_unsigned_int()
            segments.append((first_segment, fragments_per_segment))

        return {
            'segment_run': segments,
        }

    def read_afrt(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)
        # time scale
        self.read_unsigned_int()

        quality_entry_count = self.read_unsigned_char()
        # QualitySegmentUrlModifiers
        for i in range(quality_entry_count):
            self.read_string()

        fragments_count = self.read_unsigned_int()
        fragments = []
        for i in range(fragments_count):
            first = self.read_unsigned_int()
            first_ts = self.read_unsigned_long_long()
            duration = self.read_unsigned_int()
            if duration == 0:
                discontinuity_indicator = self.read_unsigned_char()
            else:
                discontinuity_indicator = None
            fragments.append({
                'first': first,
                'ts': first_ts,
                'duration': duration,
                'discontinuity_indicator': discontinuity_indicator,
            })

        return {
            'fragments': fragments,
        }

    def read_abst(self):
        # version
        self.read_unsigned_char()
        # flags
        self.read(3)

        self.read_unsigned_int()  # BootstrapinfoVersion
        # Profile,Live,Update,Reserved
        self.read(1)
        # time scale
        self.read_unsigned_int()
        # CurrentMediaTime
        self.read_unsigned_long_long()
        # SmpteTimeCodeOffset
        self.read_unsigned_long_long()

        self.read_string()  # MovieIdentifier
        server_count = self.read_unsigned_char()
        # ServerEntryTable
        for i in range(server_count):
            self.read_string()
        quality_count = self.read_unsigned_char()
        # QualityEntryTable
        for i in range(quality_count):
            self.read_string()
        # DrmData
        self.read_string()
        # MetaData
        self.read_string()

        segments_count = self.read_unsigned_char()
        segments = []
        for i in range(segments_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'asrt'
            segment = FlvReader(box_data).read_asrt()
            segments.append(segment)
        fragments_run_count = self.read_unsigned_char()
        fragments = []
        for i in range(fragments_run_count):
            box_size, box_type, box_data = self.read_box_info()
            assert box_type == b'afrt'
            fragments.append(FlvReader(box_data).read_afrt())

        return {
            'segments': segments,
            'fragments': fragments,
        }

    def read_bootstrap_info(self):
        total_size, box_type, box_data = self.read_box_info()
        assert box_type == b'abst'
        return FlvReader(box_data).read_abst()


def read_bootstrap_info(bootstrap_bytes):
    return FlvReader(bootstrap_bytes).read_bootstrap_info()
|
||||
|
||||
def build_fragments_list(boot_info):
|
||||
""" Return a list of (segment, fragment) for each fragment in the video """
|
||||
res = []
|
||||
segment_run_table = boot_info['segments'][0]
|
||||
# I've only found videos with one segment
|
||||
segment_run_entry = segment_run_table['segment_run'][0]
|
||||
n_frags = segment_run_entry[1]
|
||||
fragment_run_entry_table = boot_info['fragments'][0]['fragments']
|
||||
first_frag_number = fragment_run_entry_table[0]['first']
|
||||
for (i, frag_number) in zip(range(1, n_frags+1), itertools.count(first_frag_number)):
|
||||
res.append((1, frag_number))
|
||||
return res
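
# Illustrative sketch (editor's example, not part of the patch): a bootstrap
# describing one segment run of three fragments starting at fragment 10.
# Only the 'first' key of the fragment run entries is consulted here.
_example_boot_info = {
    'segments': [{'segment_run': [(1, 3)]}],
    'fragments': [{'fragments': [{'first': 10}]}],
}
assert build_fragments_list(_example_boot_info) == [(1, 10), (1, 11), (1, 12)]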


def write_flv_header(stream, metadata):
    """Writes the FLV header and the metadata to stream"""
    # FLV header
    stream.write(b'FLV\x01')
    stream.write(b'\x05')
    stream.write(b'\x00\x00\x00\x09')
    # FLV File body
    stream.write(b'\x00\x00\x00\x00')
    # FLVTAG
    # Script data
    stream.write(b'\x12')
    # Size of the metadata with 3 bytes
    stream.write(struct_pack('!L', len(metadata))[1:])
    stream.write(b'\x00\x00\x00\x00\x00\x00\x00')
    stream.write(metadata)
    # Magic numbers extracted from the output files produced by AdobeHDS.php
    # (https://github.com/K-S-V/Scripts)
    stream.write(b'\x00\x00\x01\x73')
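
# Quick sanity check of the byte layout above (editor's illustration;
# struct_pack is imported from youtube_dl.utils elsewhere in this module):
#
#     import io
#     buf = io.BytesIO()
#     write_flv_header(buf, b'')
#     assert buf.getvalue() == (
#         b'FLV\x01\x05\x00\x00\x00\x09'   # 9-byte FLV header
#         + b'\x00\x00\x00\x00'            # PreviousTagSize0
#         + b'\x12' + b'\x00\x00\x00'      # script-data tag, 3-byte size 0
#         + b'\x00' * 7                    # timestamp + stream id
#         + b'\x00\x00\x01\x73')           # magic trailer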


def _add_ns(prop):
    return '{http://ns.adobe.com/f4m/1.0}%s' % prop


class HttpQuietDownloader(HttpFD):
    def to_screen(self, *args, **kargs):
        pass


class F4mFD(FileDownloader):
    """
    A downloader for f4m manifests or AdobeHDS.
    """

    def real_download(self, filename, info_dict):
        man_url = info_dict['url']
        self.to_screen('[download] Downloading f4m manifest')
        manifest = self.ydl.urlopen(man_url).read()
        self.report_destination(filename)
        http_dl = HttpQuietDownloader(self.ydl,
            {
                'continuedl': True,
                'quiet': True,
                'noprogress': True,
                'test': self.params.get('test', False),
            })

        doc = etree.fromstring(manifest)
        formats = [(int(f.attrib.get('bitrate', -1)), f) for f in doc.findall(_add_ns('media'))]
        formats = sorted(formats, key=lambda f: f[0])
        rate, media = formats[-1]
        base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
        bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
        metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
        boot_info = read_bootstrap_info(bootstrap)
        fragments_list = build_fragments_list(boot_info)
        if self.params.get('test', False):
            # We only download the first fragment
            fragments_list = fragments_list[:1]
        total_frags = len(fragments_list)

        tmpfilename = self.temp_name(filename)
        (dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
        write_flv_header(dest_stream, metadata)

        # This dict stores the download progress, it's updated by the progress
        # hook
        state = {
            'downloaded_bytes': 0,
            'frag_counter': 0,
        }
        start = time.time()

        def frag_progress_hook(status):
            frag_total_bytes = status.get('total_bytes', 0)
            estimated_size = (state['downloaded_bytes'] +
                (total_frags - state['frag_counter']) * frag_total_bytes)
            if status['status'] == 'finished':
                state['downloaded_bytes'] += frag_total_bytes
                state['frag_counter'] += 1
                progress = self.calc_percent(state['frag_counter'], total_frags)
                byte_counter = state['downloaded_bytes']
            else:
                frag_downloaded_bytes = status['downloaded_bytes']
                byte_counter = state['downloaded_bytes'] + frag_downloaded_bytes
                frag_progress = self.calc_percent(frag_downloaded_bytes,
                    frag_total_bytes)
                progress = self.calc_percent(state['frag_counter'], total_frags)
                progress += frag_progress / float(total_frags)

            eta = self.calc_eta(start, time.time(), estimated_size, byte_counter)
            self.report_progress(progress, format_bytes(estimated_size),
                status.get('speed'), eta)
        http_dl.add_progress_hook(frag_progress_hook)

        frags_filenames = []
        for (seg_i, frag_i) in fragments_list:
            name = 'Seg%d-Frag%d' % (seg_i, frag_i)
            url = base_url + name
            frag_filename = '%s-%s' % (tmpfilename, name)
            success = http_dl.download(frag_filename, {'url': url})
            if not success:
                return False
            with open(frag_filename, 'rb') as down:
                down_data = down.read()
                reader = FlvReader(down_data)
                while True:
                    _, box_type, box_data = reader.read_box_info()
                    if box_type == b'mdat':
                        dest_stream.write(box_data)
                        break
            frags_filenames.append(frag_filename)

        dest_stream.close()
        self.report_finish(format_bytes(state['downloaded_bytes']), time.time() - start)

        self.try_rename(tmpfilename, filename)
        for frag_file in frags_filenames:
            os.remove(frag_file)

        fsize = os.path.getsize(encodeFilename(filename))
        self._hook_progress({
            'downloaded_bytes': fsize,
            'total_bytes': fsize,
            'filename': filename,
            'status': 'finished',
        })

        return True
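As an aside (editor's illustration, not from the patch): the bitrate selection in F4mFD.real_download reduces to sorting (bitrate, media) pairs and taking the last one. On a toy manifest:

import xml.etree.ElementTree as etree

manifest = (
    '<manifest xmlns="http://ns.adobe.com/f4m/1.0">'
    '<media url="low" bitrate="400"/>'
    '<media url="high" bitrate="1200"/>'
    '</manifest>')
doc = etree.fromstring(manifest)
# Same namespace-qualified lookup that _add_ns('media') produces
formats = sorted(
    (int(f.attrib.get('bitrate', -1)), f)
    for f in doc.findall('{http://ns.adobe.com/f4m/1.0}media'))
rate, media = formats[-1]
assert (rate, media.attrib['url']) == (1200, 'high')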
youtube_dl/downloader/hls.py
@@ -13,8 +13,10 @@ class HlsFD(FileDownloader):
         self.report_destination(filename)
         tmpfilename = self.temp_name(filename)
 
-        args = ['-y', '-i', url, '-f', 'mp4', '-c', 'copy',
-            '-bsf:a', 'aac_adtstoasc', tmpfilename]
+        args = [
+            '-y', '-i', url, '-f', 'mp4', '-c', 'copy',
+            '-bsf:a', 'aac_adtstoasc',
+            encodeFilename(tmpfilename, for_subprocess=True)]
 
         for program in ['avconv', 'ffmpeg']:
             try:
youtube_dl/downloader/http.py
@@ -23,6 +23,8 @@ class HttpFD(FileDownloader):
         headers = {'Youtubedl-no-compression': 'True'}
         if 'user_agent' in info_dict:
             headers['Youtubedl-user-agent'] = info_dict['user_agent']
+        if 'http_referer' in info_dict:
+            headers['Referer'] = info_dict['http_referer']
         basic_request = compat_urllib_request.Request(url, None, headers)
         request = compat_urllib_request.Request(url, None, headers)
 
@@ -49,7 +51,7 @@ class HttpFD(FileDownloader):
         while count <= retries:
             # Establish connection
             try:
-                data = compat_urllib_request.urlopen(request)
+                data = self.ydl.urlopen(request)
                 break
             except (compat_urllib_error.HTTPError, ) as err:
                 if (err.code < 500 or err.code >= 600) and err.code != 416:
@@ -59,7 +61,7 @@ class HttpFD(FileDownloader):
                     # Unable to resume (requested range not satisfiable)
                     try:
                         # Open the connection again without the range header
-                        data = compat_urllib_request.urlopen(basic_request)
+                        data = self.ydl.urlopen(basic_request)
                         content_length = data.info()['Content-Length']
                     except (compat_urllib_error.HTTPError, ) as err:
                         if err.code < 500 or err.code >= 600:
@@ -85,6 +87,7 @@ class HttpFD(FileDownloader):
                     else:
                         # The length does not match, we start the download over
                         self.report_unable_to_resume()
+                        resume_len = 0
                         open_mode = 'wb'
                         break
             # Retry
youtube_dl/downloader/rtmp.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import os
 import re
 import subprocess
@@ -22,7 +24,7 @@ class RtmpFD(FileDownloader):
             proc_stderr_closed = False
             while not proc_stderr_closed:
                 # read line from stderr
-                line = u''
+                line = ''
                 while True:
                     char = proc.stderr.read(1)
                     if not char:
@@ -46,7 +48,7 @@ class RtmpFD(FileDownloader):
                     data_len = None
                     if percent > 0:
                         data_len = int(downloaded_data_len * 100 / percent)
-                    data_len_str = u'~' + format_bytes(data_len)
+                    data_len_str = '~' + format_bytes(data_len)
                     self.report_progress(percent, data_len_str, speed, eta)
                     cursor_in_new_line = False
                     self._hook_progress({
@@ -76,19 +78,21 @@ class RtmpFD(FileDownloader):
                    })
                elif self.params.get('verbose', False):
                    if not cursor_in_new_line:
-                        self.to_screen(u'')
+                        self.to_screen('')
                        cursor_in_new_line = True
-                    self.to_screen(u'[rtmpdump] '+line)
+                    self.to_screen('[rtmpdump] '+line)
            proc.wait()
            if not cursor_in_new_line:
-                self.to_screen(u'')
+                self.to_screen('')
            return proc.returncode
 
        url = info_dict['url']
        player_url = info_dict.get('player_url', None)
        page_url = info_dict.get('page_url', None)
        app = info_dict.get('app', None)
        play_path = info_dict.get('play_path', None)
        tc_url = info_dict.get('tc_url', None)
        flash_version = info_dict.get('flash_version', None)
        live = info_dict.get('rtmp_live', False)
+        conn = info_dict.get('rtmp_conn', None)
 
@@ -100,7 +104,7 @@ class RtmpFD(FileDownloader):
        try:
            subprocess.call(['rtmpdump', '-h'], stdout=(open(os.path.devnull, 'w')), stderr=subprocess.STDOUT)
        except (OSError, IOError):
-            self.report_error(u'RTMP download detected but "rtmpdump" could not be run')
+            self.report_error('RTMP download detected but "rtmpdump" could not be run')
            return False
 
        # Download using rtmpdump. rtmpdump returns exit code 2 when
@@ -111,17 +115,21 @@ class RtmpFD(FileDownloader):
            basic_args += ['--swfVfy', player_url]
        if page_url is not None:
            basic_args += ['--pageUrl', page_url]
        if app is not None:
            basic_args += ['--app', app]
        if play_path is not None:
            basic_args += ['--playpath', play_path]
        if tc_url is not None:
            basic_args += ['--tcUrl', url]
        if test:
            basic_args += ['--stop', '1']
        if flash_version is not None:
            basic_args += ['--flashVer', flash_version]
        if live:
            basic_args += ['--live']
+        if conn:
+            basic_args += ['--conn', conn]
-        args = basic_args + [[], ['--resume', '--skip', '1']][self.params.get('continuedl', False)]
+        args = basic_args + [[], ['--resume', '--skip', '1']][not live and self.params.get('continuedl', False)]
 
        if sys.platform == 'win32' and sys.version_info < (3, 0):
            # Windows subprocess module does not actually support Unicode
@@ -144,26 +152,35 @@ class RtmpFD(FileDownloader):
            shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
        except ImportError:
            shell_quote = repr
-        self.to_screen(u'[debug] rtmpdump command line: ' + shell_quote(str_args))
+        self.to_screen('[debug] rtmpdump command line: ' + shell_quote(str_args))
+
+        RD_SUCCESS = 0
+        RD_FAILED = 1
+        RD_INCOMPLETE = 2
+        RD_NO_CONNECT = 3
 
        retval = run_rtmpdump(args)
 
-        while (retval == 2 or retval == 1) and not test:
+        if retval == RD_NO_CONNECT:
+            self.report_error('[rtmpdump] Could not connect to RTMP server.')
+            return False
+
+        while (retval == RD_INCOMPLETE or retval == RD_FAILED) and not test and not live:
            prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'[rtmpdump] %s bytes' % prevsize)
+            self.to_screen('[rtmpdump] %s bytes' % prevsize)
            time.sleep(5.0)  # This seems to be needed
-            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == 1])
+            retval = run_rtmpdump(basic_args + ['-e'] + [[], ['-k', '1']][retval == RD_FAILED])
            cursize = os.path.getsize(encodeFilename(tmpfilename))
-            if prevsize == cursize and retval == 1:
+            if prevsize == cursize and retval == RD_FAILED:
                break
            # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those
-            if prevsize == cursize and retval == 2 and cursize > 1024:
-                self.to_screen(u'[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
-                retval = 0
+            if prevsize == cursize and retval == RD_INCOMPLETE and cursize > 1024:
+                self.to_screen('[rtmpdump] Could not download the whole video. This can happen for some advertisements.')
+                retval = RD_SUCCESS
                break
-        if retval == 0 or (test and retval == 2):
+        if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen(u'[rtmpdump] %s bytes' % fsize)
+            self.to_screen('[rtmpdump] %s bytes' % fsize)
            self.try_rename(tmpfilename, filename)
            self._hook_progress({
                'downloaded_bytes': fsize,
@@ -173,6 +190,6 @@ class RtmpFD(FileDownloader):
            })
            return True
        else:
-            self.to_stderr(u"\n")
-            self.report_error(u'rtmpdump exited with code %d' % retval)
+            self.to_stderr('\n')
+            self.report_error('rtmpdump exited with code %d' % retval)
            return False
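A note on the [[], [...]][flag] construction used in RtmpFD (editor's illustration): it selects one of two argument lists by indexing with a boolean, since False == 0 and True == 1:

resume = True
extra_args = [[], ['--resume', '--skip', '1']][resume]
assert extra_args == ['--resume', '--skip', '1']

The patched version indexes with not live and self.params.get('continuedl', False), so live streams never receive --resume.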
youtube_dl/extractor/__init__.py
@@ -1,6 +1,8 @@
 from .academicearth import AcademicEarthCourseIE
 from .addanime import AddAnimeIE
+from .aftonbladet import AftonbladetIE
 from .anitube import AnitubeIE
+from .aol import AolIE
 from .aparat import AparatIE
 from .appletrailers import AppleTrailersIE
 from .archiveorg import ArchiveOrgIE
@@ -9,28 +11,42 @@ from .arte import (
     ArteTvIE,
     ArteTVPlus7IE,
     ArteTVCreativeIE,
+    ArteTVConcertIE,
     ArteTVFutureIE,
     ArteTVDDCIE,
+    ArteTVEmbedIE,
 )
 from .auengine import AUEngineIE
 from .bambuser import BambuserIE, BambuserChannelIE
 from .bandcamp import BandcampIE, BandcampAlbumIE
+from .bbccouk import BBCCoUkIE
 from .blinkx import BlinkxIE
 from .bliptv import BlipTVIE, BlipTVUserIE
 from .bloomberg import BloombergIE
+from .br import BRIE
 from .breakcom import BreakIE
 from .brightcove import BrightcoveIE
+from .byutv import BYUtvIE
 from .c56 import C56IE
+from .canal13cl import Canal13clIE
 from .canalplus import CanalplusIE
 from .canalc2 import Canalc2IE
 from .cbs import CBSIE
+from .cbsnews import CBSNewsIE
+from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
+from .chilloutzone import ChilloutzoneIE
 from .cinemassacre import CinemassacreIE
 from .clipfish import ClipfishIE
 from .cliphunter import CliphunterIE
 from .clipsyndicate import ClipsyndicateIE
+from .clubic import ClubicIE
 from .cmt import CMTIE
-from .cnn import CNNIE
+from .cnet import CNETIE
+from .cnn import (
+    CNNIE,
+    CNNBlogsIE,
+)
 from .collegehumor import CollegeHumorIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .condenast import CondeNastIE
@@ -44,17 +60,18 @@ from .dailymotion import (
     DailymotionUserIE,
 )
 from .daum import DaumIE
-from .depositfiles import DepositFilesIE
 from .dotsub import DotsubIE
 from .dreisat import DreiSatIE
 from .defense import DefenseGouvFrIE
 from .discovery import DiscoveryIE
+from .divxstage import DivxStageIE
 from .dropbox import DropboxIE
 from .ebaumsworld import EbaumsWorldIE
 from .ehow import EHowIE
 from .eighttracks import EightTracksIE
 from .eitb import EitbIE
 from .elpais import ElPaisIE
+from .engadget import EngadgetIE
 from .escapist import EscapistIE
 from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
@@ -62,11 +79,15 @@ from .extremetube import ExtremeTubeIE
 from .facebook import FacebookIE
 from .faz import FazIE
+from .firstpost import FirstpostIE
+from .firsttv import FirstTVIE
+from .fivemin import FiveMinIE
 from .fktv import (
     FKTVIE,
     FKTVPosteckeIE,
 )
 from .flickr import FlickrIE
 from .fourtube import FourTubeIE
+from .franceculture import FranceCultureIE
 from .franceinter import FranceInterIE
 from .francetv import (
     PluzzIE,
@@ -81,10 +102,12 @@ from .funnyordie import FunnyOrDieIE
 from .gamekings import GamekingsIE
 from .gamespot import GameSpotIE
 from .gametrailers import GametrailersIE
+from .gdcvault import GDCVaultIE
 from .generic import GenericIE
 from .googleplus import GooglePlusIE
 from .googlesearch import GoogleSearchIE
 from .hark import HarkIE
+from .helsinki import HelsinkiIE
 from .hotnewhiphop import HotNewHipHopIE
 from .howcast import HowcastIE
 from .huffpost import HuffPostIE
@@ -96,13 +119,14 @@ from .imdb import (
 )
 from .ina import InaIE
 from .infoq import InfoQIE
-from .instagram import InstagramIE
+from .instagram import InstagramIE, InstagramUserIE
 from .internetvideoarchive import InternetVideoArchiveIE
 from .iprima import IPrimaIE
 from .ivi import (
     IviIE,
     IviCompilationIE
 )
+from .jadorecettepub import JadoreCettePubIE
 from .jeuxvideo import JeuxVideoIE
 from .jukebox import JukeboxIE
 from .justintv import JustinTVIE
@@ -112,6 +136,7 @@ from .keezmovies import KeezMoviesIE
 from .khanacademy import KhanAcademyIE
 from .kickstarter import KickStarterIE
 from .keek import KeekIE
+from .kontrtube import KontrTubeIE
 from .la7 import LA7IE
 from .lifenews import LifeNewsIE
 from .liveleak import LiveLeakIE
@@ -122,44 +147,59 @@ from .lynda import (
 )
 from .m6 import M6IE
 from .macgamestore import MacGameStoreIE
 from .mailru import MailRuIE
 from .malemotion import MalemotionIE
 from .mdr import MDRIE
 from .metacafe import MetacafeIE
 from .metacritic import MetacriticIE
-from .mit import TechTVMITIE, MITIE
+from .mit import TechTVMITIE, MITIE, OCWMITIE
 from .mixcloud import MixcloudIE
 from .mpora import MporaIE
 from .mofosex import MofosexIE
+from .mooshare import MooshareIE
+from .morningstar import MorningstarIE
+from .motorsport import MotorsportIE
+from .movshare import MovShareIE
 from .mtv import (
     MTVIE,
     MTVIggyIE,
 )
+from .musicplayon import MusicPlayOnIE
 from .muzu import MuzuTVIE
 from .myspace import MySpaceIE
 from .myspass import MySpassIE
 from .myvideo import MyVideoIE
 from .naver import NaverIE
 from .nba import NBAIE
-from .nbc import NBCNewsIE
+from .nbc import (
+    NBCIE,
+    NBCNewsIE,
+)
 from .ndr import NDRIE
 from .ndtv import NDTVIE
 from .newgrounds import NewgroundsIE
+from .nfb import NFBIE
 from .nhl import NHLIE, NHLVideocenterIE
 from .niconico import NiconicoIE
 from .ninegag import NineGagIE
+from .noco import NocoIE
 from .normalboots import NormalbootsIE
-from .novamov import NovamovIE
+from .novamov import NovaMovIE
 from .nowness import NownessIE
 from .nowvideo import NowVideoIE
+from .ntv import NTVIE
+from .oe1 import OE1IE
 from .ooyala import OoyalaIE
 from .orf import ORFIE
+from .parliamentliveuk import ParliamentLiveUKIE
 from .pbs import PBSIE
 from .photobucket import PhotobucketIE
+from .playvid import PlayvidIE
 from .podomatic import PodomaticIE
 from .pornhd import PornHdIE
 from .pornhub import PornHubIE
 from .pornotube import PornotubeIE
+from .prosiebensat1 import ProSiebenSat1IE
 from .pyvideo import PyvideoIE
 from .radiofrance import RadioFranceIE
 from .rbmaradio import RBMARadioIE
@@ -169,15 +209,18 @@ from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
 from .roxwel import RoxwelIE
 from .rtlnow import RTLnowIE
+from .rts import RTSIE
+from .rtve import RTVEALaCartaIE
 from .rutube import (
     RutubeIE,
     RutubeChannelIE,
     RutubeMovieIE,
     RutubePersonIE,
 )
+from .rutv import RUTVIE
 from .savefrom import SaveFromIE
 from .servingsys import ServingSysIE
 from .sina import SinaIE
 from .slashdot import SlashdotIE
 from .slideshare import SlideshareIE
 from .smotri import (
     SmotriIE,
@@ -199,33 +242,48 @@ from .stanfordoc import StanfordOpenClassroomIE
 from .statigram import StatigramIE
 from .steam import SteamIE
 from .streamcloud import StreamcloudIE
+from .streamcz import StreamCZIE
+from .syfy import SyfyIE
 from .sztvhu import SztvHuIE
 from .teamcoco import TeamcocoIE
 from .techtalks import TechTalksIE
 from .ted import TEDIE
 from .testurl import TestURLIE
 from .tf1 import TF1IE
 from .theplatform import ThePlatformIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
+from .tlc import TlcIE, TlcDeIE
 from .toutv import TouTvIE
+from .toypics import ToypicsUserIE, ToypicsIE
 from .traileraddict import TrailerAddictIE
 from .trilulilu import TriluliluIE
+from .trutube import TruTubeIE
 from .tube8 import Tube8IE
 from .tudou import TudouIE
 from .tumblr import TumblrIE
 from .tutv import TutvIE
+from .tvigle import TvigleIE
 from .tvp import TvpIE
+from .udemy import (
+    UdemyIE,
+    UdemyCourseIE
+)
 from .unistra import UnistraIE
+from .urort import UrortIE
 from .ustream import UstreamIE, UstreamChannelIE
 from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
 from .veoh import VeohIE
+from .vesti import VestiIE
 from .vevo import VevoIE
 from .vice import ViceIE
 from .viddler import ViddlerIE
+from .videobam import VideoBamIE
 from .videodetective import VideoDetectiveIE
+from .videolecturesnet import VideoLecturesNetIE
 from .videofyme import VideofyMeIE
 from .videopremium import VideoPremiumIE
+from .videoweed import VideoWeedIE
 from .vimeo import (
     VimeoIE,
     VimeoChannelIE,
@@ -238,15 +296,21 @@ from .vine import VineIE
 from .viki import VikiIE
 from .vk import VKIE
+from .vube import VubeIE
+from .washingtonpost import WashingtonPostIE
 from .wat import WatIE
-from .wdr import WDRIE
+from .wdr import (
+    WDRIE,
+    WDRMausIE,
+)
 from .weibo import WeiboIE
 from .wimp import WimpIE
 from .wistia import WistiaIE
 from .worldstarhiphop import WorldStarHipHopIE
+from .xbef import XBefIE
 from .xhamster import XHamsterIE
 from .xnxx import XNXXIE
 from .xvideos import XVideosIE
-from .xtube import XTubeIE
+from .xtube import XTubeUserIE, XTubeIE
 from .yahoo import (
     YahooIE,
     YahooNewsIE,
@@ -257,19 +321,20 @@ from .youku import YoukuIE
 from .youporn import YouPornIE
 from .youtube import (
     YoutubeIE,
-    YoutubePlaylistIE,
-    YoutubeSearchIE,
-    YoutubeSearchDateIE,
-    YoutubeUserIE,
     YoutubeChannelIE,
-    YoutubeShowIE,
-    YoutubeSubscriptionsIE,
-    YoutubeRecommendedIE,
-    YoutubeTruncatedURLIE,
-    YoutubeWatchLaterIE,
     YoutubeFavouritesIE,
     YoutubeHistoryIE,
+    YoutubePlaylistIE,
+    YoutubeRecommendedIE,
+    YoutubeSearchDateIE,
+    YoutubeSearchIE,
+    YoutubeSearchURLIE,
+    YoutubeShowIE,
+    YoutubeSubscriptionsIE,
+    YoutubeTopListIE,
+    YoutubeTruncatedURLIE,
+    YoutubeUserIE,
+    YoutubeWatchLaterIE,
 )
 from .zdf import ZDFIE
youtube_dl/extractor/academicearth.py
@@ -5,7 +5,7 @@ from .common import InfoExtractor
 
 
 class AcademicEarthCourseIE(InfoExtractor):
-    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/(?:courses|playlists)/(?P<id>[^?#/]+)'
+    _VALID_URL = r'^https?://(?:www\.)?academicearth\.org/playlists/(?P<id>[^?#/]+)'
     IE_NAME = 'AcademicEarth:Course'
 
     def _real_extract(self, url):
@@ -14,12 +14,12 @@ class AcademicEarthCourseIE(InfoExtractor):
 
         webpage = self._download_webpage(url, playlist_id)
         title = self._html_search_regex(
-            r'<h1 class="playlist-name">(.*?)</h1>', webpage, u'title')
+            r'<h1 class="playlist-name"[^>]*?>(.*?)</h1>', webpage, u'title')
         description = self._html_search_regex(
-            r'<p class="excerpt">(.*?)</p>',
+            r'<p class="excerpt"[^>]*?>(.*?)</p>',
             webpage, u'description', fatal=False)
         urls = re.findall(
-            r'<h3 class="lecture-title"><a target="_blank" href="([^"]+)">',
+            r'<li class="lecture-preview">\s*?<a target="_blank" href="([^"]+)">',
             webpage)
         entries = [self.url_result(u) for u in urls]
youtube_dl/extractor/addanime.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
@@ -14,14 +16,14 @@ from ..utils import (
 class AddAnimeIE(InfoExtractor):
 
     _VALID_URL = r'^http://(?:\w+\.)?add-anime\.net/watch_video\.php\?(?:.*?)v=(?P<video_id>[\w_]+)(?:.*)'
-    IE_NAME = u'AddAnime'
     _TEST = {
-        u'url': u'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
-        u'file': u'24MR3YO5SAS9.mp4',
-        u'md5': u'72954ea10bc979ab5e2eb288b21425a0',
-        u'info_dict': {
-            u"description": u"One Piece 606",
-            u"title": u"One Piece 606"
+        'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
+        'md5': '72954ea10bc979ab5e2eb288b21425a0',
+        'info_dict': {
+            'id': '24MR3YO5SAS9',
+            'ext': 'mp4',
+            'description': 'One Piece 606',
+            'title': 'One Piece 606',
         }
     }
 
@@ -38,10 +40,10 @@ class AddAnimeIE(InfoExtractor):
             redir_webpage = ee.cause.read().decode('utf-8')
             action = self._search_regex(
                 r'<form id="challenge-form" action="([^"]+)"',
-                redir_webpage, u'Redirect form')
+                redir_webpage, 'Redirect form')
             vc = self._search_regex(
                 r'<input type="hidden" name="jschl_vc" value="([^"]+)"/>',
-                redir_webpage, u'redirect vc value')
+                redir_webpage, 'redirect vc value')
             av = re.search(
                 r'a\.value = ([0-9]+)[+]([0-9]+)[*]([0-9]+);',
                 redir_webpage)
@@ -52,19 +54,19 @@ class AddAnimeIE(InfoExtractor):
             parsed_url = compat_urllib_parse_urlparse(url)
             av_val = av_res + len(parsed_url.netloc)
             confirm_url = (
-                parsed_url.scheme + u'://' + parsed_url.netloc +
+                parsed_url.scheme + '://' + parsed_url.netloc +
                 action + '?' +
                 compat_urllib_parse.urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,
-                note=u'Confirming after redirect')
+                note='Confirming after redirect')
             webpage = self._download_webpage(url, video_id)
 
         formats = []
         for format_id in ('normal', 'hq'):
             rex = r"var %s_video_file = '(.*?)';" % re.escape(format_id)
-            video_url = self._search_regex(rex, webpage, u'video file URLx',
+            video_url = self._search_regex(rex, webpage, 'video file URLx',
                                            fatal=False)
             if not video_url:
                 continue
@@ -72,14 +74,13 @@ class AddAnimeIE(InfoExtractor):
                 'format_id': format_id,
                 'url': video_url,
             })
-        if not formats:
-            raise ExtractorError(u'Cannot find any video format!')
+        self._sort_formats(formats)
         video_title = self._og_search_title(webpage)
         video_description = self._og_search_description(webpage)
 
         return {
             '_type': 'video',
-            'id':  video_id,
+            'id': video_id,
             'formats': formats,
             'title': video_title,
             'description': video_description
youtube_dl/extractor/aftonbladet.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# encoding: utf-8
from __future__ import unicode_literals

import datetime
import re

from .common import InfoExtractor


class AftonbladetIE(InfoExtractor):
    _VALID_URL = r'^http://tv\.aftonbladet\.se/webbtv.+?(?P<video_id>article[0-9]+)\.ab(?:$|[?#])'
    _TEST = {
        'url': 'http://tv.aftonbladet.se/webbtv/nyheter/vetenskap/rymden/article36015.ab',
        'info_dict': {
            'id': 'article36015',
            'ext': 'mp4',
            'title': 'Vulkanutbrott i rymden - nu släpper NASA bilderna',
            'description': 'Jupiters måne mest aktiv av alla himlakroppar',
            'upload_date': '20140306',
        },
    }

    def _real_extract(self, url):
        mobj = re.search(self._VALID_URL, url)

        video_id = mobj.group('video_id')
        webpage = self._download_webpage(url, video_id)

        # find internal video meta data
        META_URL = 'http://aftonbladet-play.drlib.aptoma.no/video/%s.json'
        internal_meta_id = self._html_search_regex(
            r'data-aptomaId="([\w\d]+)"', webpage, 'internal_meta_id')
        internal_meta_url = META_URL % internal_meta_id
        internal_meta_json = self._download_json(
            internal_meta_url, video_id, 'Downloading video meta data')

        # find internal video formats
        FORMATS_URL = 'http://aftonbladet-play.videodata.drvideo.aptoma.no/actions/video/?id=%s'
        internal_video_id = internal_meta_json['videoId']
        internal_formats_url = FORMATS_URL % internal_video_id
        internal_formats_json = self._download_json(
            internal_formats_url, video_id, 'Downloading video formats')

        formats = []
        for fmt in internal_formats_json['formats']['http']['pseudostreaming']['mp4']:
            p = fmt['paths'][0]
            formats.append({
                'url': 'http://%s:%d/%s/%s' % (p['address'], p['port'], p['path'], p['filename']),
                'ext': 'mp4',
                'width': fmt['width'],
                'height': fmt['height'],
                'tbr': fmt['bitrate'],
                'protocol': 'http',
            })
        self._sort_formats(formats)

        timestamp = datetime.datetime.fromtimestamp(internal_meta_json['timePublished'])
        upload_date = timestamp.strftime('%Y%m%d')

        return {
            'id': video_id,
            'title': internal_meta_json['title'],
            'formats': formats,
            'thumbnail': internal_meta_json['imageUrl'],
            'description': internal_meta_json['shortPreamble'],
            'upload_date': upload_date,
            'duration': internal_meta_json['duration'],
            'view_count': internal_meta_json['views'],
        }
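One caveat worth noting on the upload_date computation above (editor's illustration, not from the patch): datetime.datetime.fromtimestamp() converts the Unix timestamp in the machine's local timezone, so the derived date can differ by a day between hosts. The UTC variant is deterministic:

import datetime

# 1394110800 is 2014-03-06 13:00:00 UTC
assert datetime.datetime.utcfromtimestamp(1394110800).strftime('%Y%m%d') == '20140306'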
youtube_dl/extractor/aol.py (new file, 65 lines)
@@ -0,0 +1,65 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .fivemin import FiveMinIE


class AolIE(InfoExtractor):
    IE_NAME = 'on.aol.com'
    _VALID_URL = r'''(?x)
        (?:
            aol-video:|
            http://on\.aol\.com/
            (?:
                video/.*-|
                playlist/(?P<playlist_display_id>[^/?#]+?)-(?P<playlist_id>[0-9]+)[?#].*_videoid=
            )
        )
        (?P<id>[0-9]+)
        (?:$|\?)
    '''

    _TEST = {
        'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
        'md5': '18ef68f48740e86ae94b98da815eec42',
        'info_dict': {
            'id': '518167793',
            'ext': 'mp4',
            'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
        },
        'add_ie': ['FiveMin'],
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        playlist_id = mobj.group('playlist_id')
        if playlist_id and not self._downloader.params.get('noplaylist'):
            self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))

            webpage = self._download_webpage(url, playlist_id)
            title = self._html_search_regex(
                r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
            playlist_html = self._search_regex(
                r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
                'playlist HTML')
            entries = [{
                '_type': 'url',
                'url': 'aol-video:%s' % m.group('id'),
                'ie_key': 'Aol',
            } for m in re.finditer(
                r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
                playlist_html)]

            return {
                '_type': 'playlist',
                'id': playlist_id,
                'display_id': mobj.group('playlist_display_id'),
                'title': title,
                'entries': entries,
            }

        return FiveMinIE._build_result(video_id)
youtube_dl/extractor/appletrailers.py
@@ -6,7 +6,6 @@ import json
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
-    determine_ext,
 )
 
 
@@ -16,9 +15,10 @@ class AppleTrailersIE(InfoExtractor):
         "url": "http://trailers.apple.com/trailers/wb/manofsteel/",
         "playlist": [
             {
-                "file": "manofsteel-trailer4.mov",
                 "md5": "d97a8e575432dbcb81b7c3acb741f8a8",
                 "info_dict": {
+                    "id": "manofsteel-trailer4",
+                    "ext": "mov",
                     "duration": 111,
                     "title": "Trailer 4",
                     "upload_date": "20130523",
@@ -26,9 +26,10 @@ class AppleTrailersIE(InfoExtractor):
             },
             {
-                "file": "manofsteel-trailer3.mov",
                 "md5": "b8017b7131b721fb4e8d6f49e1df908c",
                 "info_dict": {
+                    "id": "manofsteel-trailer3",
+                    "ext": "mov",
                     "duration": 182,
                     "title": "Trailer 3",
                     "upload_date": "20130417",
@@ -36,9 +37,10 @@ class AppleTrailersIE(InfoExtractor):
             },
             {
-                "file": "manofsteel-trailer.mov",
                 "md5": "d0f1e1150989b9924679b441f3404d48",
                 "info_dict": {
+                    "id": "manofsteel-trailer",
+                    "ext": "mov",
                     "duration": 148,
                     "title": "Trailer",
                     "upload_date": "20121212",
@@ -46,15 +48,16 @@ class AppleTrailersIE(InfoExtractor):
             },
             {
-                "file": "manofsteel-teaser.mov",
                 "md5": "5fe08795b943eb2e757fa95cb6def1cb",
                 "info_dict": {
+                    "id": "manofsteel-teaser",
+                    "ext": "mov",
                     "duration": 93,
                     "title": "Teaser",
                     "upload_date": "20120721",
                     "uploader_id": "wb",
                 },
-            }
+            },
         ]
     }
 
@@ -65,16 +68,16 @@ class AppleTrailersIE(InfoExtractor):
         movie = mobj.group('movie')
         uploader_id = mobj.group('company')
 
-        playlist_url = compat_urlparse.urljoin(url, u'includes/playlists/itunes.inc')
+        playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
         def fix_html(s):
-            s = re.sub(r'(?s)<script[^<]*?>.*?</script>', u'', s)
+            s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
             s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
             # The ' in the onClick attributes are not escaped, it couldn't be parsed
             # like: http://trailers.apple.com/trailers/wb/gravity/
             def _clean_json(m):
-                return u'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
+                return 'iTunes.playURL(%s);' % m.group(1).replace('\'', '&#39;')
             s = re.sub(self._JSON_RE, _clean_json, s)
-            s = u'<html>' + s + u'</html>'
+            s = '<html>' + s + u'</html>'
             return s
         doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
 
@@ -82,7 +85,7 @@ class AppleTrailersIE(InfoExtractor):
         for li in doc.findall('./div/ul/li'):
             on_click = li.find('.//a').attrib['onClick']
             trailer_info_json = self._search_regex(self._JSON_RE,
-                on_click, u'trailer info')
+                on_click, 'trailer info')
             trailer_info = json.loads(trailer_info_json)
             title = trailer_info['title']
             video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
@@ -98,8 +101,7 @@ class AppleTrailersIE(InfoExtractor):
             first_url = trailer_info['url']
             trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
             settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
-            settings_json = self._download_webpage(settings_json_url, trailer_id, u'Downloading settings json')
-            settings = json.loads(settings_json)
+            settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
 
             formats = []
             for format in settings['metadata']['sizes']:
@@ -107,7 +109,6 @@ class AppleTrailersIE(InfoExtractor):
                 format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
                 formats.append({
                     'url': format_url,
-                    'ext': determine_ext(format_url),
                     'format': format['type'],
                     'width': format['width'],
                     'height': int(format['height']),
youtube_dl/extractor/arte.py
@@ -2,7 +2,6 @@
 from __future__ import unicode_literals
 
 import re
-import json
 
 from .common import InfoExtractor
 from ..utils import (
@@ -19,115 +18,46 @@ from ..utils import (
 # is different for each one. The videos usually expire in 7 days, so we can't
 # add tests.
 
-class ArteTvIE(InfoExtractor):
-    _VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
-    _LIVEWEB_URL = r'(?:http://)?liveweb\.arte\.tv/(?P<lang>fr|de)/(?P<subpage>.+?)/(?P<name>.+)'
-    _LIVE_URL = r'index-[0-9]+\.html$'
 
+class ArteTvIE(InfoExtractor):
+    _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
     IE_NAME = 'arte.tv'
 
-    @classmethod
-    def suitable(cls, url):
-        return any(re.match(regex, url) for regex in (cls._VIDEOS_URL, cls._LIVEWEB_URL))
-
-    # TODO implement Live Stream
-    # from ..utils import compat_urllib_parse
-    # def extractLiveStream(self, url):
-    #     video_lang = url.split('/')[-4]
-    #     info = self.grep_webpage(
-    #         url,
-    #         r'src="(.*?/videothek_js.*?\.js)',
-    #         0,
-    #         [
-    #             (1, 'url', 'Invalid URL: %s' % url)
-    #         ]
-    #     )
-    #     http_host = url.split('/')[2]
-    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
-    #     info = self.grep_webpage(
-    #         next_url,
-    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
-    #         '(http://.*?\.swf).*?' +
-    #         '(rtmp://.*?)\'',
-    #         re.DOTALL,
-    #         [
-    #             (1, 'path', 'could not extract video path: %s' % url),
-    #             (2, 'player', 'could not extract video player: %s' % url),
-    #             (3, 'url', 'could not extract video url: %s' % url)
-    #         ]
-    #     )
-    #     video_url = '%s/%s' % (info.get('url'), info.get('path'))
-
     def _real_extract(self, url):
-        mobj = re.match(self._VIDEOS_URL, url)
-        if mobj is not None:
-            id = mobj.group('id')
-            lang = mobj.group('lang')
-            return self._extract_video(url, id, lang)
+        mobj = re.match(self._VALID_URL, url)
+        lang = mobj.group('lang')
+        video_id = mobj.group('id')
 
-        mobj = re.match(self._LIVEWEB_URL, url)
-        if mobj is not None:
-            name = mobj.group('name')
-            lang = mobj.group('lang')
-            return self._extract_liveweb(url, name, lang)
-
-        if re.search(self._LIVE_URL, url) is not None:
-            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
-            # self.extractLiveStream(url)
-            # return
-
-    def _extract_video(self, url, video_id, lang):
-        """Extract from videos.arte.tv"""
         ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
         ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
-        ref_xml_doc = self._download_xml(ref_xml_url, video_id, note=u'Downloading metadata')
+        ref_xml_doc = self._download_xml(
+            ref_xml_url, video_id, note='Downloading metadata')
         config_node = find_xpath_attr(ref_xml_doc, './/video', 'lang', lang)
         config_xml_url = config_node.attrib['ref']
-        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')
+        config = self._download_xml(
+            config_xml_url, video_id, note='Downloading configuration')
 
-        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
-        def _key(m):
-            quality = m.group('quality')
-            if quality == 'hd':
-                return 2
-            else:
-                return 1
-        # We pick the best quality
-        video_urls = sorted(video_urls, key=_key)
-        video_url = list(video_urls)[-1].group('url')
-
-        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
-        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
-                                            config_xml, 'thumbnail')
-        return {'id': video_id,
-                'title': title,
-                'thumbnail': thumbnail,
-                'url': video_url,
-                'ext': 'flv',
-                }
+        formats = [{
+            'forma_id': q.attrib['quality'],
+            'url': q.text,
+            'ext': 'flv',
+            'quality': 2 if q.attrib['quality'] == 'hd' else 1,
+        } for q in config.findall('./urls/url')]
+        self._sort_formats(formats)
 
-    def _extract_liveweb(self, url, name, lang):
-        """Extract form http://liveweb.arte.tv/"""
-        webpage = self._download_webpage(url, name)
-        video_id = self._search_regex(r'eventId=(\d+?)("|&)', webpage, 'event id')
-        config_doc = self._download_xml('http://download.liveweb.arte.tv/o21/liveweb/events/event-%s.xml' % video_id,
-                                        video_id, 'Downloading information')
-        event_doc = config_doc.find('event')
-        url_node = event_doc.find('video').find('urlHd')
-        if url_node is None:
-            url_node = event_doc.find('urlSd')
-
-        return {'id': video_id,
-                'title': event_doc.find('name%s' % lang.capitalize()).text,
-                'url': url_node.text.replace('MP4', 'mp4'),
-                'ext': 'flv',
-                'thumbnail': self._og_search_thumbnail(webpage),
-                }
+        title = config.find('.//name').text
+        thumbnail = config.find('.//firstThumbnailUrl').text
+        return {
+            'id': video_id,
+            'title': title,
+            'thumbnail': thumbnail,
+            'formats': formats,
+        }
 
 
 class ArteTVPlus7IE(InfoExtractor):
     IE_NAME = 'arte.tv:+7'
-    _VALID_URL = r'https?://www\.arte.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
 
     @classmethod
     def _extract_url_info(cls, url):
@@ -144,13 +74,12 @@ class ArteTVPlus7IE(InfoExtractor):
         return self._extract_from_webpage(webpage, video_id, lang)
 
     def _extract_from_webpage(self, webpage, video_id, lang):
-        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')
+        json_url = self._html_search_regex(
+            r'arte_vp_url="(.*?)"', webpage, 'json vp url')
         return self._extract_from_json_url(json_url, video_id, lang)
 
     def _extract_from_json_url(self, json_url, video_id, lang):
-        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
-        self.report_extraction(video_id)
-        info = json.loads(json_info)
+        info = self._download_json(json_url, video_id)
         player_info = info['videoJsonPlayer']
 
         info_dict = {
@@ -172,6 +101,8 @@ class ArteTVPlus7IE(InfoExtractor):
             l = 'F'
         elif lang == 'de':
             l = 'A'
+        else:
+            l = lang
         regexes = [r'VO?%s' % l, r'VO?.-ST%s' % l]
         return any(re.match(r, f['versionCode']) for r in regexes)
     # Some formats may not be in the same language as the url
@@ -198,6 +129,8 @@ class ArteTVPlus7IE(InfoExtractor):
             re.match(r'VO-ST(F|A)', f.get('versionCode', '')) is None,
             # The version with sourds/mal subtitles has also lower relevance
             re.match(r'VO?(F|A)-STM\1', f.get('versionCode', '')) is None,
+            # Prefer http downloads over m3u8
+            0 if f['url'].endswith('m3u8') else 1,
         )
         formats = sorted(formats, key=sort_key)
         def _format(format_info):
@@ -238,8 +171,9 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
 
     _TEST = {
         'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
-        'file': '050489-002.mp4',
         'info_dict': {
+            'id': '050489-002',
+            'ext': 'mp4',
             'title': 'Agentur Amateur / Agence Amateur #2 : Corporate Design',
         },
     }
@@ -251,8 +185,9 @@ class ArteTVFutureIE(ArteTVPlus7IE):
 
     _TEST = {
         'url': 'http://future.arte.tv/fr/sujet/info-sciences#article-anchor-7081',
-        'file': '050940-003.mp4',
         'info_dict': {
+            'id': '050940-003',
+            'ext': 'mp4',
             'title': 'Les champignons au secours de la planète',
         },
     }
@@ -266,7 +201,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):
 
 class ArteTVDDCIE(ArteTVPlus7IE):
     IE_NAME = 'arte.tv:ddc'
-    _VALID_URL = r'http?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
+    _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
 
     def _real_extract(self, url):
         video_id, lang = self._extract_url_info(url)
@@ -280,3 +215,39 @@ class ArteTVDDCIE(ArteTVPlus7IE):
         javascriptPlayerGenerator = self._download_webpage(script_url, video_id, 'Download javascript player generator')
         json_url = self._search_regex(r"json_url=(.*)&rendering_place.*", javascriptPlayerGenerator, 'json url')
         return self._extract_from_json_url(json_url, video_id, lang)
+
+
+class ArteTVConcertIE(ArteTVPlus7IE):
+    IE_NAME = 'arte.tv:concert'
+    _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
+
+    _TEST = {
+        'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
+        'md5': '9ea035b7bd69696b67aa2ccaaa218161',
+        'info_dict': {
+            'id': '186',
+            'ext': 'mp4',
+            'title': 'The Notwist im Pariser Konzertclub "Divan du Monde"',
+            'upload_date': '20140128',
+            'description': 'md5:486eb08f991552ade77439fe6d82c305',
+        },
+    }
+
+
+class ArteTVEmbedIE(ArteTVPlus7IE):
+    IE_NAME = 'arte.tv:embed'
+    _VALID_URL = r'''(?x)
+        http://www\.arte\.tv
+        /playerv2/embed\.php\?json_url=
+        (?P<json_url>
+            http://arte\.tv/papi/tvguide/videos/stream/player/
+            (?P<lang>[^/]+)/(?P<id>[^/]+)[^&]*
+        )
+    '''
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        video_id = mobj.group('id')
+        lang = mobj.group('lang')
+        json_url = mobj.group('json_url')
+        return self._extract_from_json_url(json_url, video_id, lang)
youtube_dl/extractor/auengine.py
@@ -11,22 +11,24 @@ from ..utils import (
 
 
 class AUEngineIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?auengine\.com/embed\.php\?.*?file=(?P<id>[^&]+).*?'
+
     _TEST = {
         'url': 'http://auengine.com/embed.php?file=lfvlytY6&w=650&h=370',
-        'file': 'lfvlytY6.mp4',
         'md5': '48972bdbcf1a3a2f5533e62425b41d4f',
         'info_dict': {
+            'id': 'lfvlytY6',
+            'ext': 'mp4',
             'title': '[Commie]The Legend of the Legendary Heroes - 03 - Replication Eye (Alpha Stigma)[F9410F5A]'
         }
     }
-    _VALID_URL = r'(?:http://)?(?:www\.)?auengine\.com/embed\.php\?.*?file=([^&]+).*?'
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
+
         webpage = self._download_webpage(url, video_id)
-        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>',
-                                        webpage, 'title')
+        title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
         title = title.strip()
         links = re.findall(r'\s(?:file|url):\s*["\']([^\'"]+)["\']', webpage)
         links = map(compat_urllib_parse.unquote, links)
@@ -39,14 +41,15 @@ class AUEngineIE(InfoExtractor):
             elif '/videos/' in link:
                 video_url = link
         if not video_url:
-            raise ExtractorError(u'Could not find video URL')
+            raise ExtractorError('Could not find video URL')
         ext = '.' + determine_ext(video_url)
         if ext == title[-len(ext):]:
             title = title[:-len(ext)]
 
         return {
-            'id':        video_id,
-            'url':       video_url,
-            'title':     title,
+            'id': video_id,
+            'url': video_url,
+            'title': title,
+            'thumbnail': thumbnail,
+            'http_referer': 'http://www.auengine.com/flowplayer/flowplayer.commercial-3.2.14.swf',
         }
youtube_dl/extractor/bbccouk.py (new file, 223 lines)
@@ -0,0 +1,223 @@
from __future__ import unicode_literals

import re

from .subtitles import SubtitlesInfoExtractor
from ..utils import ExtractorError


class BBCCoUkIE(SubtitlesInfoExtractor):
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:programmes|iplayer/episode)/(?P<id>[\da-z]{8})'

    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Kaleidoscope: Leonard Cohen',
                'description': 'md5:db4755d7a665ae72343779f7dacb402c',
                'duration': 1740,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        }
    ]

    def _extract_asx_playlist(self, connection, programme_id):
        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
        return [ref.get('href') for ref in asx.findall('./Entry/ref')]

    def _extract_connection(self, connection, programme_id):
        formats = []
        protocol = connection.get('protocol')
        supplier = connection.get('supplier')
        if protocol == 'http':
            href = connection.get('href')
            # ASX playlist
            if supplier == 'asx':
                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                    formats.append({
                        'url': ref,
                        'format_id': 'ref%s_%s' % (i, supplier),
                    })
            # Direct link
            else:
                formats.append({
                    'url': href,
                    'format_id': supplier,
                })
        elif protocol == 'rtmp':
            application = connection.get('application', 'ondemand')
            auth_string = connection.get('authString')
            identifier = connection.get('identifier')
            server = connection.get('server')
            formats.append({
                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                'play_path': identifier,
                'app': '%s?%s' % (application, auth_string),
                'page_url': 'http://www.bbc.co.uk',
                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                'rtmp_live': False,
                'ext': 'flv',
                'format_id': supplier,
            })
        return formats

    def _extract_items(self, playlist):
        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')

    def _extract_medias(self, media_selection):
        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')

    def _extract_connections(self, media):
        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')

    def _extract_video(self, media, programme_id):
        formats = []
        vbr = int(media.get('bitrate'))
        vcodec = media.get('encoding')
        service = media.get('service')
        width = int(media.get('width'))
        height = int(media.get('height'))
        file_size = int(media.get('media_file_size'))
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'width': width,
                    'height': height,
                    'vbr': vbr,
                    'vcodec': vcodec,
                    'filesize': file_size,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_audio(self, media, programme_id):
        formats = []
        abr = int(media.get('bitrate'))
        acodec = media.get('encoding')
        service = media.get('service')
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'abr': abr,
                    'acodec': acodec,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_captions(self, media, programme_id):
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            ps = captions.findall('./{0}body/{0}div/{0}p'.format('{http://www.w3.org/2006/10/ttaf1}'))
            srt = ''
            for pos, p in enumerate(ps):
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (str(pos), p.get('begin'), p.get('end'),
                                                          p.text.strip() if p.text is not None else '')
            subtitles[lang] = srt
        return subtitles

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        group_id = mobj.group('id')

        webpage = self._download_webpage(url, group_id, 'Downloading video page')
        if re.search(r'id="emp-error" class="notinuk">', webpage):
            raise ExtractorError('Currently BBC iPlayer TV programmes are available to play in the UK only',
                                 expected=True)

        playlist = self._download_xml('http://www.bbc.co.uk/iplayer/playlist/%s' % group_id, group_id,
                                      'Downloading playlist XML')

        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
        if no_items is not None:
            reason = no_items.get('reason')
            if reason == 'preAvailability':
                msg = 'Episode %s is not yet available' % group_id
            elif reason == 'postAvailability':
                msg = 'Episode %s is no longer available' % group_id
            else:
                msg = 'Episode %s is not available: %s' % (group_id, reason)
            raise ExtractorError(msg, expected=True)

        formats = []
        subtitles = None

        for item in self._extract_items(playlist):
            kind = item.get('kind')
            if kind != 'programme' and kind != 'radioProgramme':
                continue
            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text

            programme_id = item.get('identifier')
            duration = int(item.get('duration'))

            media_selection = self._download_xml(
                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                programme_id, 'Downloading media selection XML')

            for media in self._extract_medias(media_selection):
                kind = media.get('kind')
                if kind == 'audio':
                    formats.extend(self._extract_audio(media, programme_id))
                elif kind == 'video':
                    formats.extend(self._extract_video(media, programme_id))
                elif kind == 'captions':
                    subtitles = self._extract_captions(media, programme_id)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(programme_id, subtitles)
            return

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
@ -1,22 +1,21 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .ooyala import OoyalaIE
|
||||
|
||||
|
||||
class BloombergIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.bloomberg\.com/video/(?P<name>.+?)\.html'
|
||||
|
||||
_TEST = {
|
||||
u'url': u'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
|
||||
u'file': u'12bzhqZTqQHmmlA8I-i0NpzJgcG5NNYX.mp4',
|
||||
u'info_dict': {
|
||||
u'title': u'Shah\'s Presentation on Foreign-Exchange Strategies',
|
||||
u'description': u'md5:abc86e5236f9f0e4866c59ad36736686',
|
||||
},
|
||||
u'params': {
|
||||
# Requires ffmpeg (m3u8 manifest)
|
||||
u'skip_download': True,
|
||||
'url': 'http://www.bloomberg.com/video/shah-s-presentation-on-foreign-exchange-strategies-qurhIVlJSB6hzkVi229d8g.html',
|
||||
'md5': '7bf08858ff7c203c870e8a6190e221e5',
|
||||
'info_dict': {
|
||||
'id': 'qurhIVlJSB6hzkVi229d8g',
|
||||
'ext': 'flv',
|
||||
'title': 'Shah\'s Presentation on Foreign-Exchange Strategies',
|
||||
'description': 'md5:0681e0d30dcdfc6abf34594961d8ea88',
|
||||
},
|
||||
}
|
||||
|
||||
@ -24,5 +23,16 @@ class BloombergIE(InfoExtractor):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
name = mobj.group('name')
|
||||
webpage = self._download_webpage(url, name)
|
||||
ooyala_url = self._twitter_search_player(webpage)
|
||||
return self.url_result(ooyala_url, OoyalaIE.ie_key())
|
||||
f4m_url = self._search_regex(
|
||||
r'<source src="(https?://[^"]+\.f4m.*?)"', webpage,
|
||||
'f4m url')
|
||||
title = re.sub(': Video$', '', self._og_search_title(webpage))
|
||||
|
||||
return {
|
||||
'id': name.split('-')[-1],
|
||||
'title': title,
|
||||
'url': f4m_url,
|
||||
'ext': 'flv',
|
||||
'description': self._og_search_description(webpage),
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
}
|
||||
|
139  youtube_dl/extractor/br.py  Normal file
@@ -0,0 +1,139 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
)


class BRIE(InfoExtractor):
    IE_DESC = 'Bayerischer Rundfunk Mediathek'
    _VALID_URL = r'https?://(?:www\.)?br\.de/(?:[a-z0-9\-]+/)+(?P<id>[a-z0-9\-]+)\.html'
    _BASE_URL = 'http://www.br.de'

    _TESTS = [
        {
            'url': 'http://www.br.de/mediathek/video/anselm-gruen-114.html',
            'md5': 'c4f83cf0f023ba5875aba0bf46860df2',
            'info_dict': {
                'id': '2c8d81c5-6fb7-4a74-88d4-e768e5856532',
                'ext': 'mp4',
                'title': 'Feiern und Verzichten',
                'description': 'Anselm Grün: Feiern und Verzichten',
                'uploader': 'BR/Birgit Baier',
                'upload_date': '20140301',
            }
        },
        {
            'url': 'http://www.br.de/mediathek/video/sendungen/unter-unserem-himmel/unter-unserem-himmel-alpen-ueber-den-pass-100.html',
            'md5': 'ab451b09d861dbed7d7cc9ab0be19ebe',
            'info_dict': {
                'id': '2c060e69-3a27-4e13-b0f0-668fac17d812',
                'ext': 'mp4',
                'title': 'Über den Pass',
                'description': 'Die Eroberung der Alpen: Über den Pass',
            }
        },
        {
            'url': 'http://www.br.de/nachrichten/schaeuble-haushaltsentwurf-bundestag-100.html',
            'md5': '3db0df1a9a9cd9fa0c70e6ea8aa8e820',
            'info_dict': {
                'id': 'c6aae3de-2cf9-43f2-957f-f17fef9afaab',
                'ext': 'aac',
                'title': '"Keine neuen Schulden im nächsten Jahr"',
                'description': 'Haushaltsentwurf: "Keine neuen Schulden im nächsten Jahr"',
            }
        },
        {
            'url': 'http://www.br.de/radio/bayern1/service/team/videos/team-video-erdelt100.html',
            'md5': 'dbab0aef2e047060ea7a21fc1ce1078a',
            'info_dict': {
                'id': '6ba73750-d405-45d3-861d-1ce8c524e059',
                'ext': 'mp4',
                'title': 'Umweltbewusster Häuslebauer',
                'description': 'Uwe Erdelt: Umweltbewusster Häuslebauer',
            }
        },
        {
            'url': 'http://www.br.de/fernsehen/br-alpha/sendungen/kant-fuer-anfaenger/kritik-der-reinen-vernunft/kant-kritik-01-metaphysik100.html',
            'md5': '23bca295f1650d698f94fc570977dae3',
            'info_dict': {
                'id': 'd982c9ce-8648-4753-b358-98abb8aec43d',
                'ext': 'mp4',
                'title': 'Folge 1 - Metaphysik',
                'description': 'Kant für Anfänger: Folge 1 - Metaphysik',
                'uploader': 'Eva Maria Steimle',
                'upload_date': '20140117',
            }
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')
        page = self._download_webpage(url, display_id)
        xml_url = self._search_regex(
            r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
        xml = self._download_xml(self._BASE_URL + xml_url, None)

        medias = []

        for xml_media in xml.findall('video') + xml.findall('audio'):
            media = {
                'id': xml_media.get('externalId'),
                'title': xml_media.find('title').text,
                'formats': self._extract_formats(xml_media.find('assets')),
                'thumbnails': self._extract_thumbnails(xml_media.find('teaserImage/variants')),
                'description': ' '.join(xml_media.find('shareTitle').text.splitlines()),
                'webpage_url': xml_media.find('permalink').text
            }
            if xml_media.find('author').text:
                media['uploader'] = xml_media.find('author').text
            if xml_media.find('broadcastDate').text:
                media['upload_date'] = ''.join(reversed(xml_media.find('broadcastDate').text.split('.')))
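                # broadcastDate comes as DD.MM.YYYY; reversing the dot-separated
                # parts yields the YYYYMMDD form, e.g. '01.03.2014' -> '20140301'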
            medias.append(media)

        if len(medias) > 1:
            self._downloader.report_warning(
                'found multiple medias; please '
                'report this with the video URL to http://yt-dl.org/bug')
        if not medias:
            raise ExtractorError('No media entries found')
        return medias[0]

    def _extract_formats(self, assets):

        def text_or_none(asset, tag):
            elem = asset.find(tag)
            return None if elem is None else elem.text

        formats = [{
            'url': text_or_none(asset, 'downloadUrl'),
            'ext': text_or_none(asset, 'mediaType'),
            'format_id': asset.get('type'),
            'width': int_or_none(text_or_none(asset, 'frameWidth')),
            'height': int_or_none(text_or_none(asset, 'frameHeight')),
            'tbr': int_or_none(text_or_none(asset, 'bitrateVideo')),
            'abr': int_or_none(text_or_none(asset, 'bitrateAudio')),
            'vcodec': text_or_none(asset, 'codecVideo'),
            'acodec': text_or_none(asset, 'codecAudio'),
            'container': text_or_none(asset, 'mediaType'),
            'filesize': int_or_none(text_or_none(asset, 'size')),
        } for asset in assets.findall('asset')
            if asset.find('downloadUrl') is not None]

        self._sort_formats(formats)
        return formats

    def _extract_thumbnails(self, variants):
        thumbnails = [{
            'url': self._BASE_URL + variant.find('url').text,
            'width': int_or_none(variant.find('width').text),
            'height': int_or_none(variant.find('height').text),
        } for variant in variants.findall('variant')]
        thumbnails.sort(key=lambda x: x['width'] * x['height'], reverse=True)
        return thumbnails
@@ -1,18 +1,20 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..utils import determine_ext


class BreakIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?break\.com/video/([^/]+)'
    _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
    _TEST = {
        u'url': u'http://www.break.com/video/when-girls-act-like-guys-2468056',
        u'file': u'2468056.mp4',
        u'md5': u'a3513fb1547fba4fb6cfac1bffc6c46b',
        u'info_dict': {
            u"title": u"When Girls Act Like D-Bags"
        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
        'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
        'info_dict': {
            'id': '2468056',
            'ext': 'mp4',
            'title': 'When Girls Act Like D-Bags',
        }
    }

@@ -21,18 +23,18 @@ class BreakIE(InfoExtractor):
        video_id = mobj.group(1).split("-")[-1]
        embed_url = 'http://www.break.com/embed/%s' % video_id
        webpage = self._download_webpage(embed_url, video_id)
        info_json = self._search_regex(r'var embedVars = ({.*?});', webpage,
                                       u'info json', flags=re.DOTALL)
        info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
                                       webpage, 'info json', flags=re.DOTALL)
        info = json.loads(info_json)
        video_url = info['videoUri']
        m_youtube = re.search(r'(https?://www\.youtube\.com/watch\?v=.*)', video_url)
        if m_youtube is not None:
            return self.url_result(m_youtube.group(1), 'Youtube')
        youtube_id = info.get('youtubeId')
        if youtube_id:
            return self.url_result(youtube_id, 'Youtube')

        final_url = video_url + '?' + info['AuthToken']
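        # the stream URL apparently only works with the per-request AuthToken
        # appended as its query string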
        return [{
            'id': video_id,
            'url': final_url,
            'ext': determine_ext(final_url),
            'title': info['contentName'],
        return {
            'id': video_id,
            'url': final_url,
            'title': info['contentName'],
            'thumbnail': info['thumbUri'],
        }]
        }

@@ -17,6 +17,7 @@ from ..utils import (
    ExtractorError,
    unsmuggle_url,
    unescapeHTML,
)


@@ -86,7 +87,7 @@ class BrightcoveIE(InfoExtractor):
        object_str = object_str.replace('<--', '<!--')
        object_str = fix_xml_ampersands(object_str)

        object_doc = xml.etree.ElementTree.fromstring(object_str)
        object_doc = xml.etree.ElementTree.fromstring(object_str.encode('utf-8'))
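        # parsing UTF-8 bytes rather than a unicode string, presumably to
        # sidestep ElementTree unicode issues when the embed markup contains
        # non-ASCII characters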

        fv_el = find_xpath_attr(object_doc, './param', 'name', 'flashVars')
        if fv_el is not None:
@@ -139,7 +140,11 @@ class BrightcoveIE(InfoExtractor):

        url_m = re.search(r'<meta\s+property="og:video"\s+content="(http://c.brightcove.com/[^"]+)"', webpage)
        if url_m:
            return [url_m.group(1)]
            url = unescapeHTML(url_m.group(1))
            # Some sites don't add it, we can't download with this url, for example:
            # http://www.ktvu.com/videos/news/raw-video-caltrain-releases-video-of-man-almost/vCTZdY/
            if 'playerKey' in url:
                return [url]

        matches = re.findall(
            r'''(?sx)<object

48  youtube_dl/extractor/byutv.py  Normal file
@@ -0,0 +1,48 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import ExtractorError


class BYUtvIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
        'info_dict': {
            'id': 'granite-flats-talking',
            'ext': 'mp4',
            'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
            'title': 'Talking',
            'thumbnail': 're:^https?://.*promo.*'
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('video_id')

        webpage = self._download_webpage(url, video_id)
        episode_code = self._search_regex(
            r'(?s)episode:(.*?\}),\s*\n', webpage, 'episode information')
        episode_json = re.sub(
            r'(\n\s+)([a-zA-Z]+):\s+\'(.*?)\'', r'\1"\2": "\3"', episode_code)
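        # the page embeds a JavaScript object literal; rewriting single-quoted
        # key: 'value' pairs into "key": "value" turns it into strict JSON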
        ep = json.loads(episode_json)

        if ep['providerType'] == 'Ooyala':
            return {
                '_type': 'url_transparent',
                'ie_key': 'Ooyala',
                'url': 'ooyala:%s' % ep['providerId'],
                'id': video_id,
                'title': ep['title'],
                'description': ep.get('description'),
                'thumbnail': ep.get('imageThumbnail'),
            }
        else:
            raise ExtractorError('Unsupported provider %s' % ep['provider'])

@@ -2,39 +2,46 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor


class C56IE(InfoExtractor):
    _VALID_URL = r'https?://((www|player)\.)?56\.com/(.+?/)?(v_|(play_album.+-))(?P<textid>.+?)\.(html|swf)'
    _VALID_URL = r'https?://(?:(?:www|player)\.)?56\.com/(?:.+?/)?(?:v_|(?:play_album.+-))(?P<textid>.+?)\.(?:html|swf)'
    IE_NAME = '56.com'
    _TEST = {
        'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
        'file': '93440716.flv',
        'md5': 'e59995ac63d0457783ea05f93f12a866',
        'info_dict': {
            'id': '93440716',
            'ext': 'flv',
            'title': '网事知多少 第32期:车怒',
            'duration': 283.813,
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
        text_id = mobj.group('textid')
        info_page = self._download_webpage('http://vxml.56.com/json/%s/' % text_id,
                                           text_id, 'Downloading video info')
        info = json.loads(info_page)['info']
        formats = [{
            'format_id': f['type'],
            'filesize': int(f['filesize']),
            'url': f['url']
        } for f in info['rfiles']]

        page = self._download_json(
            'http://vxml.56.com/json/%s/' % text_id, text_id, 'Downloading video info')

        info = page['info']

        formats = [
            {
                'format_id': f['type'],
                'filesize': int(f['filesize']),
                'url': f['url']
            } for f in info['rfiles']
        ]
        self._sort_formats(formats)

        return {
            'id': info['vid'],
            'title': info['Subject'],
            'duration': int(info['duration']) / 1000.0,
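            # the source JSON reports duration in milliseconds, hence the division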
            'formats': formats,
            'thumbnail': info.get('bimg') or info.get('img'),
        }

48  youtube_dl/extractor/canal13cl.py  Normal file
@@ -0,0 +1,48 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class Canal13clIE(InfoExtractor):
    _VALID_URL = r'^http://(?:www\.)?13\.cl/(?:[^/?#]+/)*(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.13.cl/t13/nacional/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
        'md5': '4cb1fa38adcad8fea88487a078831755',
        'info_dict': {
            'id': '1403022125',
            'display_id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
            'ext': 'mp4',
            'title': 'El "círculo de hierro" de Michelle Bachelet en su regreso a La Moneda',
            'description': '(Foto: Agencia Uno) En nueve días más, Michelle Bachelet va a asumir por segunda vez como presidenta de la República. Entre aquellos que la acompañarán hay caras que se repiten y otras que se consolidan en su entorno de colaboradores más cercanos.',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')

        webpage = self._download_webpage(url, display_id)

        title = self._html_search_meta(
            'twitter:title', webpage, 'title', fatal=True)
        description = self._html_search_meta(
            'twitter:description', webpage, 'description')
        url = self._html_search_regex(
            r'articuloVideo = \"(.*?)\"', webpage, 'url')
        real_id = self._search_regex(
            r'[^0-9]([0-9]{7,})[^0-9]', url, 'id', default=display_id)
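        # the canonical numeric id is a run of 7+ digits inside the media URL;
        # if none is found, the URL slug is used as the id instead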
        thumbnail = self._html_search_regex(
            r'articuloImagen = \"(.*?)\"', webpage, 'thumbnail')

        return {
            'id': real_id,
            'display_id': display_id,
            'url': url,
            'title': title,
            'description': description,
            'ext': 'mp4',
            'thumbnail': thumbnail,
        }

@@ -1,4 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -9,11 +11,12 @@ class Canalc2IE(InfoExtractor):
    _VALID_URL = r'http://.*?\.canalc2\.tv/video\.asp\?.*?idVideo=(?P<id>\d+)'

    _TEST = {
        u'url': u'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
        u'file': u'12163.mp4',
        u'md5': u'060158428b650f896c542dfbb3d6487f',
        u'info_dict': {
            u'title': u'Terrasses du Numérique'
        'url': 'http://www.canalc2.tv/video.asp?idVideo=12163&voir=oui',
        'md5': '060158428b650f896c542dfbb3d6487f',
        'info_dict': {
            'id': '12163',
            'ext': 'mp4',
            'title': 'Terrasses du Numérique'
        }
    }

@@ -28,10 +31,11 @@ class Canalc2IE(InfoExtractor):
        video_url = 'http://vod-flash.u-strasbg.fr:8080/' + file_name

        title = self._html_search_regex(
            r'class="evenement8">(.*?)</a>', webpage, u'title')

        return {'id': video_id,
                'ext': 'mp4',
                'url': video_url,
                'title': title,
                }
            r'class="evenement8">(.*?)</a>', webpage, 'title')

        return {
            'id': video_id,
            'ext': 'mp4',
            'url': video_url,
            'title': title,
        }

@@ -1,4 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -8,46 +10,56 @@ from ..utils import unified_strdate

class CanalplusIE(InfoExtractor):
    _VALID_URL = r'https?://(www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>\d+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
    IE_NAME = u'canalplus.fr'
    IE_NAME = 'canalplus.fr'

    _TEST = {
        u'url': u'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        u'file': u'922470.flv',
        u'info_dict': {
            u'title': u'Zapping - 26/08/13',
            u'description': u'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            u'upload_date': u'20130826',
        },
        u'params': {
            u'skip_download': True,
        'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        'md5': '60c29434a416a83c15dae2587d47027d',
        'info_dict': {
            'id': '922470',
            'ext': 'flv',
            'title': 'Zapping - 26/08/13',
            'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            'upload_date': '20130826',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')
        video_id = mobj.group('id')

        if video_id is None:
            webpage = self._download_webpage(url, mobj.group('path'))
            video_id = self._search_regex(r'videoId = "(\d+)";', webpage, u'video id')
            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')

        info_url = self._VIDEO_INFO_TEMPLATE % video_id
        doc = self._download_xml(info_url,video_id,
                                 u'Downloading video info')
        doc = self._download_xml(info_url, video_id, 'Downloading video XML')

        self.report_extraction(video_id)
        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        infos = video_info.find('INFOS')
        media = video_info.find('MEDIA')
        formats = [media.find('VIDEOS/%s' % format)
                   for format in ['BAS_DEBIT', 'HAUT_DEBIT', 'HD']]
        video_url = [format.text for format in formats if format is not None][-1]
        infos = video_info.find('INFOS')

        return {'id': video_id,
                'title': u'%s - %s' % (infos.find('TITRAGE/TITRE').text,
                                       infos.find('TITRAGE/SOUS_TITRE').text),
                'url': video_url,
                'ext': 'flv',
                'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
                'thumbnail': media.find('IMAGES/GRAND').text,
                'description': infos.find('DESCRIPTION').text,
                'view_count': int(infos.find('NB_VUES').text),
                }
        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
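        # the index into this list becomes the format preference below, so later
        # entries (HLS, HDS) rank higher; unknown tags fall back to -1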

        formats = [
            {
                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
                'format_id': fmt.tag,
                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
            } for fmt in media.find('VIDEOS') if fmt.text
        ]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': '%s - %s' % (infos.find('TITRAGE/TITRE').text,
                                  infos.find('TITRAGE/SOUS_TITRE').text),
            'upload_date': unified_strdate(infos.find('PUBLICATION/DATE').text),
            'thumbnail': media.find('IMAGES/GRAND').text,
            'description': infos.find('DESCRIPTION').text,
            'view_count': int(infos.find('NB_VUES').text),
            'like_count': int(infos.find('NB_LIKES').text),
            'comment_count': int(infos.find('NB_COMMENTS').text),
            'formats': formats,
        }

87  youtube_dl/extractor/cbsnews.py  Normal file
@@ -0,0 +1,87 @@
# encoding: utf-8
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor


class CBSNewsIE(InfoExtractor):
    IE_DESC = 'CBS News'
    _VALID_URL = r'http://(?:www\.)?cbsnews\.com/(?:[^/]+/)+(?P<id>[\da-z_-]+)'

    _TESTS = [
        {
            'url': 'http://www.cbsnews.com/news/tesla-and-spacex-elon-musks-industrial-empire/',
            'info_dict': {
                'id': 'tesla-and-spacex-elon-musks-industrial-empire',
                'ext': 'flv',
                'title': 'Tesla and SpaceX: Elon Musk\'s industrial empire',
                'thumbnail': 'http://beta.img.cbsnews.com/i/2014/03/30/60147937-2f53-4565-ad64-1bdd6eb64679/60-0330-pelley-640x360.jpg',
                'duration': 791,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.cbsnews.com/videos/fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack/',
            'info_dict': {
                'id': 'fort-hood-shooting-army-downplays-mental-illness-as-cause-of-attack',
                'ext': 'flv',
                'title': 'Fort Hood shooting: Army downplays mental illness as cause of attack',
                'thumbnail': 'http://cbsnews2.cbsistatic.com/hub/i/r/2014/04/04/0c9fbc66-576b-41ca-8069-02d122060dd2/thumbnail/140x90/6dad7a502f88875ceac38202984b6d58/en-0404-werner-replace-640x360.jpg',
                'duration': 205,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
        },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        video_info = json.loads(self._html_search_regex(
            r'(?:<ul class="media-list items" id="media-related-items"><li data-video-info|<div id="cbsNewsVideoPlayer" data-video-player-options)=\'({.+?})\'',
            webpage, 'video JSON info'))

        item = video_info['item'] if 'item' in video_info else video_info
        title = item.get('articleTitle') or item.get('hed')
        duration = item.get('duration')
        thumbnail = item.get('mediaImage') or item.get('thumbnail')

        formats = []
        for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
            uri = item.get('media' + format_id + 'URI')
            if not uri:
                continue
            fmt = {
                'url': uri,
                'format_id': format_id,
            }
            if uri.startswith('rtmp'):
                fmt.update({
                    'app': 'ondemand?auth=cbs',
                    'play_path': 'mp4:' + uri.split('<break>')[-1],
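                    # the rtmp URI carries a literal '<break>' marker; everything
                    # after it is apparently the play path, prefixed here with
                    # 'mp4:' for rtmpdump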
                    'player_url': 'http://www.cbsnews.com/[[IMPORT]]/vidtech.cbsinteractive.com/player/3_3_0/CBSI_PLAYER_HD.swf',
                    'page_url': 'http://www.cbsnews.com',
                    'ext': 'flv',
                })
            elif uri.endswith('.m3u8'):
                fmt['ext'] = 'mp4'
            formats.append(fmt)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }

126  youtube_dl/extractor/ceskatelevize.py  Normal file
@@ -0,0 +1,126 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_request,
    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    ExtractorError,
)


class CeskaTelevizeIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ceskatelevize\.cz/(porady|ivysilani)/(.+/)?(?P<id>[^?#]+)'

    _TESTS = [
        {
            'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/213512120230004-spanelska-chripka',
            'info_dict': {
                'id': '213512120230004',
                'ext': 'flv',
                'title': 'První republika: Španělská chřipka',
                'duration': 3107.4,
            },
            'params': {
                'skip_download': True,  # requires rtmpdump
            },
            'skip': 'Works only from Czech Republic.',
        },
        {
            'url': 'http://www.ceskatelevize.cz/ivysilani/1030584952-tsatsiki-maminka-a-policajt',
            'info_dict': {
                'id': '20138143440',
                'ext': 'flv',
                'title': 'Tsatsiki, maminka a policajt',
                'duration': 6754.1,
            },
            'params': {
                'skip_download': True,  # requires rtmpdump
            },
            'skip': 'Works only from Czech Republic.',
        },
        {
            'url': 'http://www.ceskatelevize.cz/ivysilani/10532695142-prvni-republika/bonus/14716-zpevacka-z-duparny-bobina',
            'info_dict': {
                'id': '14716',
                'ext': 'flv',
                'title': 'První republika: Zpěvačka z Dupárny Bobina',
                'duration': 90,
            },
            'params': {
                'skip_download': True,  # requires rtmpdump
            },
        },
    ]

    def _real_extract(self, url):
        url = url.replace('/porady/', '/ivysilani/').replace('/video/', '')

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        NOT_AVAILABLE_STRING = 'This content is not available at your territory due to limited copyright.'
        if '%s</p>' % NOT_AVAILABLE_STRING in webpage:
            raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)

        typ = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":"(.+?)","id":".+?"\}\],', webpage, 'type')
        episode_id = self._html_search_regex(r'getPlaylistUrl\(\[\{"type":".+?","id":"(.+?)"\}\],', webpage, 'episode_id')

        data = {
            'playlist[0][type]': typ,
            'playlist[0][id]': episode_id,
            'requestUrl': compat_urllib_parse_urlparse(url).path,
            'requestSource': 'iVysilani',
        }

        req = compat_urllib_request.Request('http://www.ceskatelevize.cz/ivysilani/ajax/get-playlist-url',
                                            data=compat_urllib_parse.urlencode(data))

        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        req.add_header('x-addr', '127.0.0.1')
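        # the x-addr header is set to a loopback address, apparently to satisfy
        # the site's AJAX origin check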
        req.add_header('X-Requested-With', 'XMLHttpRequest')
        req.add_header('Referer', url)

        playlistpage = self._download_json(req, video_id)

        req = compat_urllib_request.Request(compat_urllib_parse.unquote(playlistpage['url']))
        req.add_header('Referer', url)

        playlist = self._download_xml(req, video_id)

        formats = []
        for i in playlist.find('smilRoot/body'):
            if 'AD' not in i.attrib['id']:
                base_url = i.attrib['base']
                parsedurl = compat_urllib_parse_urlparse(base_url)
                duration = i.attrib['duration']

                for video in i.findall('video'):
                    if video.attrib['label'] != 'AD':
                        format_id = video.attrib['label']
                        play_path = video.attrib['src']
                        vbr = int(video.attrib['system-bitrate'])

                        formats.append({
                            'format_id': format_id,
                            'url': base_url,
                            'vbr': vbr,
                            'play_path': play_path,
                            'app': parsedurl.path[1:] + '?' + parsedurl.query,
                            'rtmp_live': True,
                            'ext': 'flv',
                        })

        self._sort_formats(formats)

        return {
            'id': episode_id,
            'title': self._html_search_regex(r'<title>(.+?) — iVysílání — Česká televize</title>', webpage, 'title'),
            'duration': float(duration),
            'formats': formats,
        }

@@ -15,14 +15,15 @@ class Channel9IE(InfoExtractor):
    '''
    IE_DESC = 'Channel 9'
    IE_NAME = 'channel9'
    _VALID_URL = r'^https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'
    _VALID_URL = r'https?://(?:www\.)?channel9\.msdn\.com/(?P<contentpath>.+)/?'

    _TESTS = [
        {
            'url': 'http://channel9.msdn.com/Events/TechEd/Australia/2013/KOS002',
            'file': 'Events_TechEd_Australia_2013_KOS002.mp4',
            'md5': 'bbd75296ba47916b754e73c3a4bbdf10',
            'info_dict': {
                'id': 'Events/TechEd/Australia/2013/KOS002',
                'ext': 'mp4',
                'title': 'Developer Kick-Off Session: Stuff We Love',
                'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
                'duration': 4576,
@@ -35,9 +36,10 @@ class Channel9IE(InfoExtractor):
        },
        {
            'url': 'http://channel9.msdn.com/posts/Self-service-BI-with-Power-BI-nuclear-testing',
            'file': 'posts_Self-service-BI-with-Power-BI-nuclear-testing.mp4',
            'md5': 'b43ee4529d111bc37ba7ee4f34813e68',
            'info_dict': {
                'id': 'posts/Self-service-BI-with-Power-BI-nuclear-testing',
                'ext': 'mp4',
                'title': 'Self-service BI with Power BI - nuclear testing',
                'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
                'duration': 1540,

97  youtube_dl/extractor/chilloutzone.py  Normal file
@@ -0,0 +1,97 @@
from __future__ import unicode_literals

import re
import base64
import json

from .common import InfoExtractor
from ..utils import (
    clean_html,
    ExtractorError
)


class ChilloutzoneIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?chilloutzone\.net/video/(?P<id>[\w|-]+)\.html'
    _TESTS = [{
        'url': 'http://www.chilloutzone.net/video/enemene-meck-alle-katzen-weg.html',
        'md5': 'a76f3457e813ea0037e5244f509e66d1',
        'info_dict': {
            'id': 'enemene-meck-alle-katzen-weg',
            'ext': 'mp4',
            'title': 'Enemene Meck - Alle Katzen weg',
            'description': 'Ist das der Umkehrschluss des Niesenden Panda-Babys?',
        },
    }, {
        'note': 'Video hosted at YouTube',
        'url': 'http://www.chilloutzone.net/video/eine-sekunde-bevor.html',
        'info_dict': {
            'id': '1YVQaAgHyRU',
            'ext': 'mp4',
            'title': '16 Photos Taken 1 Second Before Disaster',
            'description': 'md5:58a8fcf6a459fe0a08f54140f0ad1814',
            'uploader': 'BuzzFeedVideo',
            'uploader_id': 'BuzzFeedVideo',
            'upload_date': '20131105',
        },
    }, {
        'note': 'Video hosted at Vimeo',
        'url': 'http://www.chilloutzone.net/video/icon-blending.html',
        'md5': '2645c678b8dc4fefcc0e1b60db18dac1',
        'info_dict': {
            'id': '85523671',
            'ext': 'mp4',
            'title': 'The Sunday Times - Icons',
            'description': 'md5:3e1c0dc6047498d6728dcdaad0891762',
            'uploader': 'Us',
            'uploader_id': 'usfilms',
            'upload_date': '20140131'
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)

        base64_video_info = self._html_search_regex(
            r'var cozVidData = "(.+?)";', webpage, 'video data')
        decoded_video_info = base64.b64decode(base64_video_info).decode("utf-8")
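        # the page ships its video metadata as a base64-encoded JSON blob
        # inside an inline script variable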
        video_info_dict = json.loads(decoded_video_info)

        # get video information from dict
        video_url = video_info_dict['mediaUrl']
        description = clean_html(video_info_dict.get('description'))
        title = video_info_dict['title']
        native_platform = video_info_dict['nativePlatform']
        native_video_id = video_info_dict['nativeVideoId']
        source_priority = video_info_dict['sourcePriority']

        # If nativePlatform is None a fallback mechanism is used (i.e. youtube embed)
        if native_platform is None:
            youtube_url = self._html_search_regex(
                r'<iframe.* src="((?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
                webpage, 'fallback video URL', default=None)
            if youtube_url is not None:
                return self.url_result(youtube_url, ie='Youtube')

        # Not a fallback: decide between the native source (e.g. YouTube or
        # Vimeo) and the site's own CDN
        if source_priority == 'native':
            if native_platform == 'youtube':
                return self.url_result(native_video_id, ie='Youtube')
            if native_platform == 'vimeo':
                return self.url_result(
                    'http://vimeo.com/' + native_video_id, ie='Vimeo')

        if not video_url:
            raise ExtractorError('No video found')

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': description,
        }

@@ -1,4 +1,5 @@
# encoding: utf-8
from __future__ import unicode_literals
import re

from .common import InfoExtractor
@@ -8,77 +9,71 @@ from ..utils import (


class CinemassacreIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?(?:www\.)?(?P<url>cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/.+?)(?:[/?].*)?'
    _TESTS = [{
        u'url': u'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
        u'file': u'19911.flv',
        u'info_dict': {
            u'upload_date': u'20121110',
            u'title': u'“Angry Video Game Nerd: The Movie” – Trailer',
            u'description': u'md5:fb87405fcb42a331742a0dce2708560b',
    _VALID_URL = r'http://(?:www\.)?cinemassacre\.com/(?P<date_Y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
    _TESTS = [
        {
            'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
            'file': '19911.mp4',
            'md5': '782f8504ca95a0eba8fc9177c373eec7',
            'info_dict': {
                'upload_date': '20121110',
                'title': '“Angry Video Game Nerd: The Movie” – Trailer',
                'description': 'md5:fb87405fcb42a331742a0dce2708560b',
            },
        },
        u'params': {
            # rtmp download
            u'skip_download': True,
        },
    },
    {
        u'url': u'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
        u'file': u'521be8ef82b16.flv',
        u'info_dict': {
            u'upload_date': u'20131002',
            u'title': u'The Mummy’s Hand (1940)',
        },
        u'params': {
            # rtmp download
            u'skip_download': True,
        },
    }]
        {
            'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
            'file': '521be8ef82b16.mp4',
            'md5': 'dec39ee5118f8d9cc067f45f9cbe3a35',
            'info_dict': {
                'upload_date': '20131002',
                'title': 'The Mummy’s Hand (1940)',
            },
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')

        webpage_url = u'http://' + mobj.group('url')
        webpage = self._download_webpage(webpage_url, None)  # Don't know video id yet
        webpage = self._download_webpage(url, display_id)
        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
        if not mobj:
            raise ExtractorError(u'Can\'t extract embed url and video id')
        playerdata_url = mobj.group(u'embed_url')
        video_id = mobj.group(u'video_id')
            raise ExtractorError('Can\'t extract embed url and video id')
        playerdata_url = mobj.group('embed_url')
        video_id = mobj.group('video_id')

        video_title = self._html_search_regex(r'<title>(?P<title>.+?)\|',
            webpage, u'title')
        video_description = self._html_search_regex(r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, u'description', flags=re.DOTALL, fatal=False)
        if len(video_description) == 0:
            video_description = None
        video_title = self._html_search_regex(
            r'<title>(?P<title>.+?)\|', webpage, 'title')
        video_description = self._html_search_regex(
            r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, 'description', flags=re.DOTALL, fatal=False)

        playerdata = self._download_webpage(playerdata_url, video_id)
        url = self._html_search_regex(r'\'streamer\': \'(?P<url>[^\']+)\'', playerdata, u'url')

        sd_file = self._html_search_regex(r'\'file\': \'(?P<sd_file>[^\']+)\'', playerdata, u'sd_file')
        hd_file = self._html_search_regex(r'\'?file\'?: "(?P<hd_file>[^"]+)"', playerdata, u'hd_file')
        video_thumbnail = self._html_search_regex(r'\'image\': \'(?P<thumbnail>[^\']+)\'', playerdata, u'thumbnail', fatal=False)
        sd_url = self._html_search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
        hd_url = self._html_search_regex(
            r'file: \'([^\']+)\', label: \'HD\'', playerdata, 'hd_file',
            default=None)
        video_thumbnail = self._html_search_regex(r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)

        formats = [
            {
                'url': url,
                'play_path': 'mp4:' + sd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
                'format': 'sd',
                'format_id': 'sd',
            },
            {
                'url': url,
                'play_path': 'mp4:' + hd_file,
                'rtmp_live': True,  # workaround
                'ext': 'flv',
        formats = [{
            'url': sd_url,
            'ext': 'mp4',
            'format': 'sd',
            'format_id': 'sd',
            'quality': 1,
        }]
        if hd_url:
            formats.append({
                'url': hd_url,
                'ext': 'mp4',
                'format': 'hd',
                'format_id': 'hd',
            },
        ]
                'quality': 2,
            })
        self._sort_formats(formats)

        return {
            'id': video_id,

@@ -1,22 +1,28 @@
from __future__ import unicode_literals

import re
import time
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import ExtractorError
from ..utils import (
    ExtractorError,
    parse_duration,
)


class ClipfishIE(InfoExtractor):
    IE_NAME = u'clipfish'
    IE_NAME = 'clipfish'

    _VALID_URL = r'^https?://(?:www\.)?clipfish\.de/.*?/video/(?P<id>[0-9]+)/'
    _TEST = {
        u'url': u'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
        u'file': u'3966754.mp4',
        u'md5': u'2521cd644e862936cf2e698206e47385',
        u'info_dict': {
            u'title': u'FIFA 14 - E3 2013 Trailer',
            u'duration': 82,
        'url': 'http://www.clipfish.de/special/game-trailer/video/3966754/fifa-14-e3-2013-trailer/',
        'md5': '2521cd644e862936cf2e698206e47385',
        'info_dict': {
            'id': '3966754',
            'ext': 'mp4',
            'title': 'FIFA 14 - E3 2013 Trailer',
            'duration': 82,
        },
        u'skip': 'Blocked in the US'
    }
@@ -33,21 +39,10 @@ class ClipfishIE(InfoExtractor):
        video_url = doc.find('filename').text
        if video_url is None:
            xml_bytes = xml.etree.ElementTree.tostring(doc)
            raise ExtractorError(u'Cannot find video URL in document %r' %
            raise ExtractorError('Cannot find video URL in document %r' %
                                 xml_bytes)
        thumbnail = doc.find('imageurl').text
        duration_str = doc.find('duration').text
        m = re.match(
            r'^(?P<hours>[0-9]+):(?P<minutes>[0-9]{2}):(?P<seconds>[0-9]{2}):(?P<ms>[0-9]*)$',
            duration_str)
        if m:
            duration = (
                (int(m.group('hours')) * 60 * 60) +
                (int(m.group('minutes')) * 60) +
                (int(m.group('seconds')))
            )
        else:
            duration = None
        duration = parse_duration(doc.find('duration').text)
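        # parse_duration replaces the hand-rolled HH:MM:SS:ms regex above,
        # returning the length in seconds (or None if it cannot parse)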

        return {
            'id': video_id,

@@ -1,3 +1,5 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -11,13 +13,14 @@ class ClipsyndicateIE(InfoExtractor):
    _VALID_URL = r'http://www\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'

    _TEST = {
        u'url': u'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',
        u'md5': u'4d7d549451bad625e0ff3d7bd56d776c',
        u'info_dict': {
            u'id': u'4629301',
            u'ext': u'mp4',
            u'title': u'Brick Briscoe',
            u'duration': 612,
        'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',
        'md5': '4d7d549451bad625e0ff3d7bd56d776c',
        'info_dict': {
            'id': '4629301',
            'ext': 'mp4',
            'title': 'Brick Briscoe',
            'duration': 612,
            'thumbnail': 're:^https?://.+\.jpg',
        },
    }

@@ -26,13 +29,13 @@ class ClipsyndicateIE(InfoExtractor):
        video_id = mobj.group('id')
        js_player = self._download_webpage(
            'http://eplayer.clipsyndicate.com/embed/player.js?va_id=%s' % video_id,
            video_id, u'Downloading player')
            video_id, 'Downloading player')
        # it includes a required token
        flvars = self._search_regex(r'flvars: "(.*?)"', js_player, u'flvars')
        flvars = self._search_regex(r'flvars: "(.*?)"', js_player, 'flvars')

        pdoc = self._download_xml(
            'http://eplayer.clipsyndicate.com/osmf/playlist?%s' % flvars,
            video_id, u'Downloading video info',
            video_id, 'Downloading video info',
            transform_source=fix_xml_ampersands)

        track_doc = pdoc.find('trackList/track')

58  youtube_dl/extractor/clubic.py  Normal file
@@ -0,0 +1,58 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    qualities,
)


class ClubicIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?clubic\.com/video/[^/]+/video.*-(?P<id>[0-9]+)\.html'

    _TEST = {
        'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',
        'md5': '1592b694ba586036efac1776b0b43cd3',
        'info_dict': {
            'id': '448474',
            'ext': 'mp4',
            'title': 'Clubic Week 2.0 : le FBI se lance dans la photo d\u0092identité',
            'description': 're:Gueule de bois chez Nokia. Le constructeur a indiqué cette.*',
            'thumbnail': 're:^http://img\.clubic\.com/.*\.jpg$',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        player_url = 'http://player.m6web.fr/v1/player/clubic/%s.html' % video_id
        player_page = self._download_webpage(player_url, video_id)

        config_json = self._search_regex(
            r'(?m)M6\.Player\.config\s*=\s*(\{.+?\});$', player_page,
            'configuration')
        config = json.loads(config_json)

        video_info = config['videoInfo']
        sources = config['sources']
        quality_order = qualities(['sd', 'hq'])
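        # qualities() maps 'sd' -> 0 and 'hq' -> 1, so 'hq' sorts higher when
        # the formats are ordered below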

        formats = [{
            'format_id': src['streamQuality'],
            'url': src['src'],
            'quality': quality_order(src['streamQuality']),
        } for src in sources]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_info['title'],
            'formats': formats,
            'description': clean_html(video_info.get('description')),
            'thumbnail': config.get('poster'),
        }

75  youtube_dl/extractor/cnet.py  Normal file
@@ -0,0 +1,75 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
)


class CNETIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?cnet\.com/videos/(?P<id>[^/]+)/'
    _TEST = {
        'url': 'http://www.cnet.com/videos/hands-on-with-microsofts-windows-8-1-update/',
        'md5': '041233212a0d06b179c87cbcca1577b8',
        'info_dict': {
            'id': '56f4ea68-bd21-4852-b08c-4de5b8354c60',
            'ext': 'mp4',
            'title': 'Hands-on with Microsoft Windows 8.1 Update',
            'description': 'The new update to the Windows 8 OS brings improved performance for mouse and keyboard users.',
            'thumbnail': 're:^http://.*/flmswindows8.jpg$',
            'uploader_id': 'sarah.mitroff@cbsinteractive.com',
            'uploader': 'Sarah Mitroff',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')

        webpage = self._download_webpage(url, display_id)
        data_json = self._html_search_regex(
            r"<div class=\"cnetVideoPlayer\"\s+.*?data-cnet-video-options='([^']+)'",
            webpage, 'data json')
        data = json.loads(data_json)
        vdata = data['video']
        if not vdata:
            vdata = data['videos'][0]
        if not vdata:
            raise ExtractorError('Cannot find video data')

        video_id = vdata['id']
        title = vdata['headline']
        description = vdata.get('dek')
        thumbnail = vdata.get('image', {}).get('path')
        author = vdata.get('author')
        if author:
            uploader = '%s %s' % (author['firstName'], author['lastName'])
            uploader_id = author.get('email')
        else:
            uploader = None
            uploader_id = None

        formats = [{
            'format_id': '%s-%s-%s' % (
                f['type'], f['format'],
                int_or_none(f.get('bitrate'), 1000, default='')),
            'url': f['uri'],
            'tbr': int_or_none(f.get('bitrate'), 1000),
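            # bitrate comes in bit/s; the scale argument of 1000 converts it
            # to the kbit/s that tbr expects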
        } for f in vdata['files']['data']]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'formats': formats,
            'description': description,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'thumbnail': thumbnail,
        }

@@ -6,6 +6,7 @@ from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_duration,
    url_basename,
)


@@ -98,3 +99,28 @@ class CNNIE(InfoExtractor):
            'duration': duration,
            'upload_date': upload_date,
        }


class CNNBlogsIE(InfoExtractor):
    _VALID_URL = r'https?://[^\.]+\.blogs\.cnn\.com/.+'
    _TEST = {
        'url': 'http://reliablesources.blogs.cnn.com/2014/02/09/criminalizing-journalism/',
        'md5': '3e56f97b0b6ffb4b79f4ea0749551084',
        'info_dict': {
            'id': 'bestoftv/2014/02/09/criminalizing-journalism.cnn',
            'ext': 'mp4',
            'title': 'Criminalizing journalism?',
            'description': 'Glenn Greenwald responds to comments made this week on Capitol Hill that journalists could be criminal accessories.',
            'upload_date': '20140209',
        },
        'add_ie': ['CNN'],
    }

    def _real_extract(self, url):
        webpage = self._download_webpage(url, url_basename(url))
        cnn_url = self._html_search_regex(r'data-url="(.+?)"', webpage, 'cnn url')
        return {
            '_type': 'url',
            'url': cnn_url,
            'ie_key': CNNIE.ie_key(),
        }

@@ -17,8 +17,9 @@ class CollegeHumorIE(InfoExtractor):
            'id': '6902724',
            'ext': 'mp4',
            'title': 'Comic-Con Cosplay Catastrophe',
            'description': 'Fans get creative this year',
            'description': "Fans get creative this year at San Diego. Too creative. And yes, that's really Joss Whedon.",
            'age_limit': 13,
            'duration': 187,
        },
    },
    {
@@ -28,22 +29,22 @@ class CollegeHumorIE(InfoExtractor):
            'id': '3505939',
            'ext': 'mp4',
            'title': 'Font Conference',
            'description': 'This video wasn\'t long enough,',
            'description': "This video wasn't long enough, so we made it double-spaced.",
            'age_limit': 10,
            'duration': 179,
        },
    },
    # embedded youtube video
    {
        'url': 'http://www.collegehumor.com/embed/6950457',
        'url': 'http://www.collegehumor.com/embed/6950306',
        'info_dict': {
            'id': 'W5gMp3ZjYg4',
            'id': 'Z-bao9fg6Yc',
            'ext': 'mp4',
            'title': 'Funny Dogs Protecting Babies Compilation 2014 [NEW HD]',
            'uploader': 'Funnyplox TV',
            'uploader_id': 'funnyploxtv',
            'description': 'md5:11812366244110c3523968aa74f02521',
            'upload_date': '20140128',
            'title': 'Young Americans Think President John F. Kennedy Died THIS MORNING IN A CAR ACCIDENT!!!',
            'uploader': 'Mark Dice',
            'uploader_id': 'MarkDice',
            'description': 'md5:62c3dab9351fac7bb44b53b69511d87f',
            'upload_date': '20140127',
        },
        'params': {
            'skip_download': True,
@@ -87,6 +88,7 @@ class CollegeHumorIE(InfoExtractor):
        self._sort_formats(formats)

        duration = int_or_none(vdata.get('duration'), 1000)
        like_count = int_or_none(vdata.get('likes'))

        return {
            'id': video_id,
@@ -96,4 +98,5 @@ class CollegeHumorIE(InfoExtractor):
            'formats': formats,
            'age_limit': age_limit,
            'duration': duration,
            'like_count': like_count,
        }

@@ -7,21 +7,21 @@ from .mtv import MTVServicesInfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,
    ExtractorError,
    float_or_none,
    unified_strdate,
)


class ComedyCentralIE(MTVServicesInfoExtractor):
    _VALID_URL = r'''(?x)https?://(?:www\.)?comedycentral\.com/
    _VALID_URL = r'''(?x)https?://(?:www\.)?(comedycentral|cc)\.com/
        (video-clips|episodes|cc-studios|video-collections)
        /(?P<title>.*)'''
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'

    _TEST = {
        'url': 'http://www.comedycentral.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
        'md5': '4167875aae411f903b751a21f357f1ee',
        'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
        'info_dict': {
            'id': 'cef0cbb3-e776-4bc9-b62e-8016deccb354',
            'ext': 'mp4',
@@ -32,31 +32,34 @@ class ComedyCentralIE(MTVServicesInfoExtractor):


class ComedyCentralShowsIE(InfoExtractor):
    IE_DESC = 'The Daily Show / Colbert Report'
    IE_DESC = 'The Daily Show / The Colbert Report'
    # urls can be abbreviations like :thedailyshow or :colbert
    # urls for episodes like:
    # or urls for clips like: http://www.thedailyshow.com/watch/mon-december-10-2012/any-given-gun-day
    # or: http://www.colbertnation.com/the-colbert-report-videos/421667/november-29-2012/moon-shattering-news
    # or: http://www.colbertnation.com/the-colbert-report-collections/422008/festival-of-lights/79524
    _VALID_URL = r"""^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
        |(https?://)?(www\.)?
        (?P<showname>thedailyshow|colbertnation)\.com/
        (full-episodes/(?P<episode>.*)|
    _VALID_URL = r'''(?x)^(:(?P<shortname>tds|thedailyshow|cr|colbert|colbertnation|colbertreport)
        |https?://(:www\.)?
        (?P<showname>thedailyshow|thecolbertreport)\.(?:cc\.)?com/
        ((?:full-)?episodes/(?:[0-9a-z]{6}/)?(?P<episode>.*)|
        (?P<clip>
            (the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
            |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*)))|
            (?:(?:guests/[^/]+|videos|video-playlists|special-editions)/[^/]+/(?P<videotitle>[^/?#]+))
            |(the-colbert-report-(videos|collections)/(?P<clipID>[0-9]+)/[^/]*/(?P<cntitle>.*?))
            |(watch/(?P<date>[^/]*)/(?P<tdstitle>.*))
        )|
        (?P<interview>
            extended-interviews/(?P<interID>[0-9]+)/playlist_tds_extended_(?P<interview_title>.*?)/.*?)))
        $"""
            extended-interviews/(?P<interID>[0-9a-z]+)/(?:playlist_tds_extended_)?(?P<interview_title>.*?)(/.*?)?)))
        (?:[?#].*|$)'''
    _TEST = {
        'url': 'http://www.thedailyshow.com/watch/thu-december-13-2012/kristen-stewart',
        'file': '422212.mp4',
        'url': 'http://thedailyshow.cc.com/watch/thu-december-13-2012/kristen-stewart',
        'md5': '4e2f5cb088a83cd8cdb7756132f9739d',
        'info_dict': {
            "upload_date": "20121214",
            "description": "Kristen Stewart",
            "uploader": "thedailyshow",
            "title": "thedailyshow-kristen-stewart part 1"
            'id': 'ab9ab3e7-5a98-4dbe-8b21-551dc0523d55',
            'ext': 'mp4',
            'upload_date': '20121213',
            'description': 'Kristen Stewart learns to let loose in "On the Road."',
            'uploader': 'thedailyshow',
            'title': 'thedailyshow kristen-stewart part 1',
        }
    }

@@ -79,11 +82,6 @@ class ComedyCentralShowsIE(InfoExtractor):
        '400': (384, 216),
    }

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    @staticmethod
    def _transform_rtmp_url(rtmp_video_url):
        m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\.comedystor/.*)$', rtmp_video_url)
@@ -99,14 +97,16 @@ class ComedyCentralShowsIE(InfoExtractor):

        if mobj.group('shortname'):
            if mobj.group('shortname') in ('tds', 'thedailyshow'):
                url = 'http://www.thedailyshow.com/full-episodes/'
                url = 'http://thedailyshow.cc.com/full-episodes/'
            else:
                url = 'http://www.colbertnation.com/full-episodes/'
                url = 'http://thecolbertreport.cc.com/full-episodes/'
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
            assert mobj is not None

        if mobj.group('clip'):
            if mobj.group('showname') == 'thedailyshow':
            if mobj.group('videotitle'):
                epTitle = mobj.group('videotitle')
            elif mobj.group('showname') == 'thedailyshow':
                epTitle = mobj.group('tdstitle')
            else:
                epTitle = mobj.group('cntitle')
@@ -120,9 +120,9 @@ class ComedyCentralShowsIE(InfoExtractor):
            epTitle = mobj.group('showname')
        else:
            epTitle = mobj.group('episode')
        show_name = mobj.group('showname')

        self.report_extraction(epTitle)
        webpage,htmlHandle = self._download_webpage_handle(url, epTitle)
        webpage, htmlHandle = self._download_webpage_handle(url, epTitle)
        if dlNewest:
            url = htmlHandle.geturl()
            mobj = re.match(self._VALID_URL, url, re.VERBOSE)
@@ -130,71 +130,86 @@ class ComedyCentralShowsIE(InfoExtractor):
                raise ExtractorError('Invalid redirected URL: ' + url)
            if mobj.group('episode') == '':
                raise ExtractorError('Redirected URL is still not specific: ' + url)
            epTitle = mobj.group('episode')
            epTitle = mobj.group('episode').rpartition('/')[-1]

        mMovieParams = re.findall('(?:<param name="movie" value="|var url = ")(http://media.mtvnservices.com/([^"]*(?:episode|video).*?:.*?))"', webpage)

        if len(mMovieParams) == 0:
            # The Colbert Report embeds the information without a URL prefix;
            # so extract the alternate reference and then add the URL prefix
            # manually.
            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video).*?:.*?)"', webpage)
            altMovieParams = re.findall('data-mgid="([^"]*(?:episode|video|playlist).*?:.*?)"', webpage)
            if len(altMovieParams) == 0:
                raise ExtractorError('unable to find Flash URL in webpage ' + url)
            else:
                mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]

        uri = mMovieParams[0][1]
        indexUrl = 'http://shadow.comedycentral.com/feeds/video_player/mrss/?' + compat_urllib_parse.urlencode({'uri': uri})
        idoc = self._download_xml(indexUrl, epTitle,
                                  'Downloading show index',
                                  'unable to download episode index')
        # Correct cc.com in uri
        uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.cc.com', uri)
|
||||
|
||||
results = []
|
||||
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
|
||||
idoc = self._download_xml(
|
||||
index_url, epTitle,
|
||||
'Downloading show index', 'Unable to download episode index')
|
||||
|
||||
itemEls = idoc.findall('.//item')
|
||||
for partNum,itemEl in enumerate(itemEls):
|
||||
mediaId = itemEl.findall('./guid')[0].text
|
||||
shortMediaId = mediaId.split(':')[-1]
|
||||
showId = mediaId.split(':')[-2].replace('.com', '')
|
||||
officialTitle = itemEl.findall('./title')[0].text
|
||||
officialDate = unified_strdate(itemEl.findall('./pubDate')[0].text)
|
||||
title = idoc.find('./channel/title').text
|
||||
description = idoc.find('./channel/description').text
|
||||
|
||||
configUrl = ('http://www.comedycentral.com/global/feeds/entertainment/media/mediaGenEntertainment.jhtml?' +
|
||||
compat_urllib_parse.urlencode({'uri': mediaId}))
|
||||
cdoc = self._download_xml(configUrl, epTitle,
|
||||
'Downloading configuration for %s' % shortMediaId)
|
||||
entries = []
|
||||
item_els = idoc.findall('.//item')
|
||||
for part_num, itemEl in enumerate(item_els):
|
||||
upload_date = unified_strdate(itemEl.findall('./pubDate')[0].text)
|
||||
thumbnail = itemEl.find('.//{http://search.yahoo.com/mrss/}thumbnail').attrib.get('url')
|
||||
|
||||
content = itemEl.find('.//{http://search.yahoo.com/mrss/}content')
|
||||
duration = float_or_none(content.attrib.get('duration'))
|
||||
mediagen_url = content.attrib['url']
|
||||
guid = itemEl.find('./guid').text.rpartition(':')[-1]
|
||||
|
||||
cdoc = self._download_xml(
|
||||
mediagen_url, epTitle,
|
||||
'Downloading configuration for segment %d / %d' % (part_num + 1, len(item_els)))
|
||||
|
||||
turls = []
|
||||
for rendition in cdoc.findall('.//rendition'):
|
||||
finfo = (rendition.attrib['bitrate'], rendition.findall('./src')[0].text)
|
||||
turls.append(finfo)
|
||||
|
||||
if len(turls) == 0:
|
||||
self._downloader.report_error('unable to download ' + mediaId + ': No videos found')
|
||||
continue
|
||||
|
||||
formats = []
|
||||
for format, rtmp_video_url in turls:
|
||||
w, h = self._video_dimensions.get(format, (None, None))
|
||||
formats.append({
|
||||
'format_id': 'vhttp-%s' % format,
|
||||
'url': self._transform_rtmp_url(rtmp_video_url),
|
||||
'ext': self._video_extensions.get(format, 'mp4'),
|
||||
'format_id': format,
|
||||
'height': h,
|
||||
'width': w,
|
||||
})
|
||||
formats.append({
|
||||
'format_id': 'rtmp-%s' % format,
|
||||
'url': rtmp_video_url,
|
||||
'ext': self._video_extensions.get(format, 'mp4'),
|
||||
'height': h,
|
||||
'width': w,
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
effTitle = showId + '-' + epTitle + ' part ' + compat_str(partNum+1)
|
||||
results.append({
|
||||
'id': shortMediaId,
|
||||
virtual_id = show_name + ' ' + epTitle + ' part ' + compat_str(part_num + 1)
|
||||
entries.append({
|
||||
'id': guid,
|
||||
'title': virtual_id,
|
||||
'formats': formats,
|
||||
'uploader': showId,
|
||||
'upload_date': officialDate,
|
||||
'title': effTitle,
|
||||
'thumbnail': None,
|
||||
'description': compat_str(officialTitle),
|
||||
'uploader': show_name,
|
||||
'upload_date': upload_date,
|
||||
'duration': duration,
|
||||
'thumbnail': thumbnail,
|
||||
'description': description,
|
||||
})
|
||||
|
||||
return results
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'entries': entries,
|
||||
'title': show_name + ' ' + title,
|
||||
'description': description,
|
||||
}
|
||||
|
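The last hunk above replaces the old `return results` list with a single playlist info dict. A minimal sketch of the shape the downloader expects (the helper name and its arguments are illustrative, not part of the diff):

def build_show_playlist(show_name, show_title, description, entries):
    # '_type': 'playlist' tells youtube-dl to treat the result as a
    # collection; each entry is itself a normal video info dict.
    return {
        '_type': 'playlist',
        'entries': entries,
        'title': '%s %s' % (show_name, show_title),
        'description': description,
    }
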
@@ -74,7 +74,7 @@ class InfoExtractor(object):
"http", "https", "rtsp", "rtmp", "m3u8" or so.
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field.
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
* quality Order number of the video quality of this
@@ -88,12 +88,18 @@ class InfoExtractor(object):

The following fields are optional:

display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries (with the entries "resolution" and
"url") for the varying thumbnails
thumbnail: Full URL to a video thumbnail image.
description: One-line video description.
uploader: Full name of the video uploader.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
location: Physical location of the video.
subtitles: The subtitle file contents as a dictionary in the format
@@ -114,9 +120,6 @@ class InfoExtractor(object):
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.

_real_extract() must return a *list* of information dictionaries as
described above.

Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
@@ -248,7 +251,21 @@ class InfoExtractor(object):
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)

content = webpage_bytes.decode(encoding, 'replace')
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')

if (u'<title>Access to this site is blocked</title>' in content and
u'Websense' in content[:512]):
msg = u'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
u'Websense information URL', default=None)
if blocked_iframe:
msg += u' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)

return (content, urlh)

def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
@@ -271,8 +288,11 @@ class InfoExtractor(object):

def _download_json(self, url_or_request, video_id,
note=u'Downloading JSON metadata',
errnote=u'Unable to download JSON metadata'):
errnote=u'Unable to download JSON metadata',
transform_source=None):
json_string = self._download_webpage(url_or_request, video_id, note, errnote)
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
@@ -429,14 +449,14 @@ class InfoExtractor(object):
if secure: regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)

def _html_search_meta(self, name, html, display_name=None):
def _html_search_meta(self, name, html, display_name=None, fatal=False):
if display_name is None:
display_name = name
return self._html_search_regex(
r'''(?ix)<meta
(?=[^>]+(?:itemprop|name|property)=["\']%s["\'])
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
html, display_name, fatal=False)
html, display_name, fatal=fatal)

def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')

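The two hunks above add a transform_source hook to _download_json() and a fatal flag to _html_search_meta(). A hedged sketch of how an extractor might use both; the class, URL and field names here are made up for illustration:

import re

from youtube_dl.extractor.common import InfoExtractor


class ExampleIE(InfoExtractor):  # hypothetical extractor, not in the diff
    _VALID_URL = r'https?://example\.com/video/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)
        # transform_source runs on the raw response before json.loads(),
        # e.g. to turn JavaScript-ish config into strict JSON:
        data = self._download_json(
            'http://example.com/config/%s.js' % video_id, video_id,
            transform_source=lambda s: s.replace("'", '"'))
        # With fatal=True a missing <meta> now raises instead of
        # silently returning None:
        uploader = self._html_search_meta('uploader', webpage, fatal=True)
        return {
            'id': video_id,
            'url': data['file'],
            'title': self._og_search_title(webpage),
            'uploader': uploader,
        }
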
@@ -28,16 +28,18 @@ class CondeNastIE(InfoExtractor):
'glamour': 'Glamour',
'wmagazine': 'W Magazine',
'vanityfair': 'Vanity Fair',
'cnevids': 'Condé Nast',
}

_VALID_URL = r'http://(video|www)\.(?P<site>%s)\.com/(?P<type>watch|series|video)/(?P<id>.+)' % '|'.join(_SITES.keys())
_VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

_TEST = {
'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
'file': '5171b343c2b4c00dd0c1ccb3.mp4',
'md5': '1921f713ed48aabd715691f774c451f7',
'info_dict': {
'id': '5171b343c2b4c00dd0c1ccb3',
'ext': 'mp4',
'title': '3D Printed Speakers Lit With LED',
'description': 'Check out these beautiful 3D printed LED speakers. You can\'t actually buy them, but LumiGeek is working on a board that will let you make you\'re own.',
}
@@ -55,12 +57,16 @@ class CondeNastIE(InfoExtractor):
entries = [self.url_result(build_url(path), 'CondeNast') for path in paths]
return self.playlist_result(entries, playlist_title=title)

def _extract_video(self, webpage):
description = self._html_search_regex([r'<div class="cne-video-description">(.+?)</div>',
r'<div class="video-post-content">(.+?)</div>',
],
webpage, 'description',
fatal=False, flags=re.DOTALL)
def _extract_video(self, webpage, url_type):
if url_type != 'embed':
description = self._html_search_regex(
[
r'<div class="cne-video-description">(.+?)</div>',
r'<div class="video-post-content">(.+?)</div>',
],
webpage, 'description', fatal=False, flags=re.DOTALL)
else:
description = None
params = self._search_regex(r'var params = {(.+?)}[;,]', webpage,
'player params', flags=re.DOTALL)
video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
@@ -99,12 +105,12 @@ class CondeNastIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
site = mobj.group('site')
url_type = mobj.group('type')
id = mobj.group('id')
item_id = mobj.group('id')

self.to_screen(u'Extracting from %s with the Condé Nast extractor' % self._SITES[site])
webpage = self._download_webpage(url, id)
self.to_screen('Extracting from %s with the Condé Nast extractor' % self._SITES[site])
webpage = self._download_webpage(url, item_id)

if url_type == 'series':
return self._extract_series(url, webpage)
else:
return self._extract_video(webpage)
return self._extract_video(webpage, url_type)

@@ -1,7 +1,11 @@
# encoding: utf-8
from __future__ import unicode_literals

import re, base64, zlib
import re
import json
import base64
import zlib

from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
@@ -19,13 +23,15 @@ from ..aes import (
inc,
)


class CrunchyrollIE(InfoExtractor):
_VALID_URL = r'(?:https?://)?(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
_TEST = {
'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
'file': '645513.flv',
#'md5': 'b1639fd6ddfaa43788c85f6d1dddd412',
'info_dict': {
'id': '645513',
'ext': 'flv',
'title': 'Wanna be the Strongest in the World Episode 1 – An Idol-Wrestler is Born!',
'description': 'md5:2d17137920c64f2f49981a7797d275ef',
'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
@@ -36,7 +42,7 @@ class CrunchyrollIE(InfoExtractor):
# rtmp
'skip_download': True,
},
}]
}

_FORMAT_IDS = {
'360': ('60', '106'),
@@ -68,7 +74,7 @@ class CrunchyrollIE(InfoExtractor):
shaHash = bytes_to_intlist(sha1(prefix + str(num4).encode('ascii')).digest())
# Extend 160 Bit hash to 256 Bit
return shaHash + [0] * 12


key = obfuscate_key(id)
class Counter:
__value = iv
@@ -80,9 +86,8 @@ class CrunchyrollIE(InfoExtractor):
return zlib.decompress(decrypted_data)

def _convert_subtitles_to_srt(self, subtitles):
i=1
output = ''
for start, end, text in re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles):
for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
start = start.replace('.', ',')
end = end.replace('.', ',')
text = clean_html(text)
@@ -90,7 +95,6 @@ class CrunchyrollIE(InfoExtractor):
if not text:
continue
output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
i+=1
return output

def _real_extract(self,url):
@@ -108,6 +112,12 @@ class CrunchyrollIE(InfoExtractor):
if note_m:
raise ExtractorError(note_m)

mobj = re.search(r'Page\.messaging_box_controller\.addItems\(\[(?P<msg>{.+?})\]\)', webpage)
if mobj:
msg = json.loads(mobj.group('msg'))
if msg.get('type') == 'error':
raise ExtractorError('crunchyroll returned error: %s' % msg['message_body'], expected=True)

video_title = self._html_search_regex(r'<h1[^>]*>(.+?)</h1>', webpage, 'video_title', flags=re.DOTALL)
video_title = re.sub(r' {2,}', ' ', video_title)
video_description = self._html_search_regex(r'"description":"([^"]+)', webpage, 'video_description', default='')
@@ -123,7 +133,7 @@ class CrunchyrollIE(InfoExtractor):
playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')


stream_id = self._search_regex(r'<media_id>([^<]+)', playerdata, 'stream_id')
video_thumbnail = self._search_regex(r'<episode_image_url>([^<]+)', playerdata, 'thumbnail', fatal=False)

@@ -161,7 +171,7 @@ class CrunchyrollIE(InfoExtractor):
data = base64.b64decode(data)

subtitle = self._decrypt_subtitles(data, iv, id).decode('utf-8')
lang_code = self._search_regex(r'lang_code=\'([^\']+)', subtitle, 'subtitle_lang_code', fatal=False)
lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
if not lang_code:
continue
subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)

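The subtitle hunk above drops the manual i = 1 ... i += 1 counter in favour of enumerate(..., 1). A standalone sketch of the same SRT numbering, with illustrative event tuples:

def events_to_srt(events):
    # events: iterable of (start, end, text) tuples with timestamps
    # like '0:00:01.18'; SRT wants a comma as the decimal separator.
    output = ''
    for i, (start, end, text) in enumerate(events, 1):
        output += '%d\n%s --> %s\n%s\n\n' % (
            i, start.replace('.', ','), end.replace('.', ','), text)
    return output

print(events_to_srt([('0:00:01.18', '0:00:03.50', 'Hello')]))
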
@@ -4,15 +4,16 @@ import re

from .common import InfoExtractor
from ..utils import (
int_or_none,
unescapeHTML,
find_xpath_attr,
)


class CSpanIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>\d+)'
_VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)'
IE_DESC = 'C-SPAN'
_TEST = {
_TESTS = [{
'url': 'http://www.c-span.org/video/?313572-1/HolderonV',
'md5': '8e44ce11f0f725527daccc453f553eb0',
'info_dict': {
@@ -22,13 +23,24 @@ class CSpanIE(InfoExtractor):
'description': 'Attorney General Eric Holder spoke to reporters following the Supreme Court decision in Shelby County v. Holder in which the court ruled that the preclearance provisions of the Voting Rights Act could not be enforced until Congress established new guidelines for review.',
},
'skip': 'Regularly fails on travis, for unknown reasons',
}
}, {
'url': 'http://www.c-span.org/video/?c4486943/cspan-international-health-care-models',
# For whatever reason, the served video alternates between
# two different ones
#'md5': 'dbb0f047376d457f2ab8b3929cbb2d0c',
'info_dict': {
'id': '340723',
'ext': 'mp4',
'title': 'International Health Care Models',
'description': 'md5:7a985a2d595dba00af3d9c9f0783c967',
}
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_id = mobj.group('id')
webpage = self._download_webpage(url, page_id)
video_id = self._search_regex(r'data-progid=\'(\d+)\'>', webpage, 'video id')
video_id = self._search_regex(r'progid=\'?([0-9]+)\'?>', webpage, 'video id')

description = self._html_search_regex(
[
@@ -43,18 +55,29 @@ class CSpanIE(InfoExtractor):
info_url = 'http://c-spanvideo.org/videoLibrary/assets/player/ajax-player.php?os=android&html5=program&id=' + video_id
data = self._download_json(info_url, video_id)

url = unescapeHTML(data['video']['files'][0]['path']['#text'])

doc = self._download_xml('http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
doc = self._download_xml(
'http://www.c-span.org/common/services/flashXml.php?programid=' + video_id,
video_id)

def find_string(s):
return find_xpath_attr(doc, './/string', 'name', s).text
title = find_xpath_attr(doc, './/string', 'name', 'title').text
thumbnail = find_xpath_attr(doc, './/string', 'name', 'poster').text

files = data['video']['files']

entries = [{
'id': '%s_%d' % (video_id, partnum + 1),
'title': (
title if len(files) == 1 else
'%s part %d' % (title, partnum + 1)),
'url': unescapeHTML(f['path']['#text']),
'description': description,
'thumbnail': thumbnail,
'duration': int_or_none(f.get('length', {}).get('#text')),
} for partnum, f in enumerate(files)]

return {
'_type': 'playlist',
'entries': entries,
'title': title,
'id': video_id,
'title': find_string('title'),
'url': url,
'description': description,
'thumbnail': find_string('poster'),
}

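The rewritten C-SPAN extractor splits one program into several part entries. A reduced sketch of the id/title scheme it uses; the files structure is simplified for illustration:

def build_part_entries(video_id, title, files):
    # One entry per served file; suffix ids and titles only when a
    # program actually has more than one part.
    return [{
        'id': '%s_%d' % (video_id, n + 1),
        'title': title if len(files) == 1 else '%s part %d' % (title, n + 1),
        'url': f['url'],
    } for n, f in enumerate(files)]
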
@@ -8,10 +8,9 @@ from .subtitles import SubtitlesInfoExtractor
from ..utils import (
compat_urllib_request,
compat_str,
get_element_by_attribute,
get_element_by_id,
orderedSet,
str_to_int,
int_or_none,

ExtractorError,
)
@@ -124,7 +123,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = m_size.group(1), m_size.group(2)
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
@@ -179,7 +178,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = u'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/playlist/.+?".*?>.*?</a>.*?</div>'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'

def _extract_entries(self, id):
@@ -189,10 +188,9 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
webpage = self._download_webpage(request,
id, u'Downloading page %s' % pagenum)

playlist_el = get_element_by_attribute(u'class', u'row video_list', webpage)
video_ids.extend(re.findall(r'data-id="(.+?)"', playlist_el))
video_ids.extend(re.findall(r'data-id="(.+?)"', webpage))

if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
for video_id in orderedSet(video_ids)]
@@ -202,17 +200,17 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)

return {'_type': 'playlist',
'id': playlist_id,
'title': get_element_by_id(u'playlist_name', webpage),
'entries': self._extract_entries(playlist_id),
}
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}


class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = u'dailymotion:user'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_MORE_PAGES_INDICATOR = r'<div class="next">.*?<a.*?href="/user/.+?".*?>.*?</a>.*?</div>'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/user/(?P<user>[^/]+)'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'

def _real_extract(self, url):

@@ -1,25 +1,28 @@
# encoding: utf-8

from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
determine_ext,
)


class DaumIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
IE_NAME = u'daum.net'
IE_NAME = 'daum.net'

_TEST = {
u'url': u'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
u'file': u'52554690.mp4',
u'info_dict': {
u'title': u'DOTA 2GETHER 시즌2 6회 - 2부',
u'description': u'DOTA 2GETHER 시즌2 6회 - 2부',
u'upload_date': u'20130831',
u'duration': 3868,
'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
'info_dict': {
'id': '52554690',
'ext': 'mp4',
'title': 'DOTA 2GETHER 시즌2 6회 - 2부',
'description': 'DOTA 2GETHER 시즌2 6회 - 2부',
'upload_date': '20130831',
'duration': 3868,
},
}

@@ -30,14 +33,14 @@ class DaumIE(InfoExtractor):
webpage = self._download_webpage(canonical_url, video_id)
full_id = self._search_regex(
r'<iframe src="http://videofarm.daum.net/controller/video/viewer/Video.html\?.*?vid=(.+?)[&"]',
webpage, u'full id')
webpage, 'full id')
query = compat_urllib_parse.urlencode({'vid': full_id})
info = self._download_xml(
'http://tvpot.daum.net/clip/ClipInfoXml.do?' + query, video_id,
u'Downloading video info')
'Downloading video info')
urls = self._download_xml(
'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
video_id, u'Downloading video formats info')
video_id, 'Downloading video formats info')

self.to_screen(u'%s: Getting video urls' % video_id)
formats = []
@@ -53,7 +56,6 @@ class DaumIE(InfoExtractor):
format_url = url_doc.find('result/url').text
formats.append({
'url': format_url,
'ext': determine_ext(format_url),
'format_id': profile,
})

@@ -1,60 +0,0 @@
import re
import os
import socket

from .common import InfoExtractor
from ..utils import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,

ExtractorError,
)


class DepositFilesIE(InfoExtractor):
"""Information extractor for depositfiles.com"""

_VALID_URL = r'(?:http://)?(?:\w+\.)?depositfiles\.com/(?:../(?#locale))?files/(.+)'

def _real_extract(self, url):
file_id = url.split('/')[-1]
# Rebuild url in english locale
url = 'http://depositfiles.com/en/files/' + file_id

# Retrieve file webpage with 'Free download' button pressed
free_download_indication = {'gateway_result' : '1'}
request = compat_urllib_request.Request(url, compat_urllib_parse.urlencode(free_download_indication))
try:
self.report_download_webpage(file_id)
webpage = compat_urllib_request.urlopen(request).read()
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
raise ExtractorError(u'Unable to retrieve file webpage: %s' % compat_str(err))

# Search for the real file URL
mobj = re.search(r'<form action="(http://fileshare.+?)"', webpage)
if (mobj is None) or (mobj.group(1) is None):
# Try to figure out reason of the error.
mobj = re.search(r'<strong>(Attention.*?)</strong>', webpage, re.DOTALL)
if (mobj is not None) and (mobj.group(1) is not None):
restriction_message = re.sub('\s+', ' ', mobj.group(1)).strip()
raise ExtractorError(u'%s' % restriction_message)
else:
raise ExtractorError(u'Unable to extract download URL from: %s' % url)

file_url = mobj.group(1)
file_extension = os.path.splitext(file_url)[1][1:]

# Search for file title
file_title = self._search_regex(r'<b title="(.*?)">', webpage, u'title')

return [{
'id': file_id.decode('utf-8'),
'url': file_url.decode('utf-8'),
'uploader': None,
'upload_date': None,
'title': file_title,
'ext': file_extension.decode('utf-8'),
}]

@@ -10,9 +10,10 @@ class DiscoveryIE(InfoExtractor):
_VALID_URL = r'http://dsc\.discovery\.com\/[a-zA-Z0-9\-]*/[a-zA-Z0-9\-]*/videos/(?P<id>[a-zA-Z0-9\-]*)(.htm)?'
_TEST = {
'url': 'http://dsc.discovery.com/tv-shows/mythbusters/videos/mission-impossible-outtakes.htm',
'file': '614784.mp4',
'md5': 'e12614f9ee303a6ccef415cb0793eba2',
'info_dict': {
'id': '614784',
'ext': 'mp4',
'title': 'MythBusters: Mission Impossible Outtakes',
'description': ('Watch Jamie Hyneman and Adam Savage practice being'
' each other -- to the point of confusing Jamie\'s dog -- and '
@@ -34,7 +35,7 @@ class DiscoveryIE(InfoExtractor):
formats = []
for f in info['mp4']:
formats.append(
{'url': f['src'], r'ext': r'mp4', 'tbr': int(f['bitrate'][:-1])})
{'url': f['src'], 'ext': 'mp4', 'tbr': int(f['bitrate'][:-1])})

return {
'id': info['contentId'],

youtube_dl/extractor/divxstage.py (new file, 27 lines)
@@ -0,0 +1,27 @@
from __future__ import unicode_literals

from .novamov import NovaMovIE


class DivxStageIE(NovaMovIE):
IE_NAME = 'divxstage'
IE_DESC = 'DivxStage'

_VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag)'}

_HOST = 'www.divxstage.eu'

_FILE_DELETED_REGEX = r'>This file no longer exists on our servers.<'
_TITLE_REGEX = r'<div class="video_det">\s*<strong>([^<]+)</strong>'
_DESCRIPTION_REGEX = r'<div class="video_det">\s*<strong>[^<]+</strong>\s*<p>([^<]+)</p>'

_TEST = {
'url': 'http://www.divxstage.eu/video/57f238e2e5e01',
'md5': '63969f6eb26533a1968c4d325be63e72',
'info_dict': {
'id': '57f238e2e5e01',
'ext': 'flv',
'title': 'youtubedl test video',
'description': 'This is a test video for youtubedl.',
}
}

@@ -1,41 +1,42 @@
from __future__ import unicode_literals

import re
import json
import time

from .common import InfoExtractor


class DotsubIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?dotsub\.com/view/([^/]+)'
_VALID_URL = r'http://(?:www\.)?dotsub\.com/view/(?P<id>[^/]+)'
_TEST = {
u'url': u'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
u'file': u'aed3b8b2-1889-4df5-ae63-ad85f5572f27.flv',
u'md5': u'0914d4d69605090f623b7ac329fea66e',
u'info_dict': {
u"title": u"Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary",
u"uploader": u"4v4l0n42",
u'description': u'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
u'thumbnail': u'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
u'upload_date': u'20101213',
'url': 'http://dotsub.com/view/aed3b8b2-1889-4df5-ae63-ad85f5572f27',
'md5': '0914d4d69605090f623b7ac329fea66e',
'info_dict': {
'id': 'aed3b8b2-1889-4df5-ae63-ad85f5572f27',
'ext': 'flv',
'title': 'Pyramids of Waste (2010), AKA The Lightbulb Conspiracy - Planned obsolescence documentary',
'uploader': '4v4l0n42',
'description': 'Pyramids of Waste (2010) also known as "The lightbulb conspiracy" is a documentary about how our economic system based on consumerism and planned obsolescence is breaking our planet down.\r\n\r\nSolutions to this can be found at:\r\nhttp://robotswillstealyourjob.com\r\nhttp://www.federicopistono.org\r\n\r\nhttp://opensourceecology.org\r\nhttp://thezeitgeistmovement.com',
'thumbnail': 'http://dotsub.com/media/aed3b8b2-1889-4df5-ae63-ad85f5572f27/p',
'upload_date': '20101213',
}
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group(1)
info_url = "https://dotsub.com/api/media/%s/metadata" %(video_id)
webpage = self._download_webpage(info_url, video_id)
info = json.loads(webpage)
video_id = mobj.group('id')
info_url = "https://dotsub.com/api/media/%s/metadata" % video_id
info = self._download_json(info_url, video_id)
date = time.gmtime(info['dateCreated']/1000) # The timestamp is in miliseconds

return [{
'id': video_id,
'url': info['mediaURI'],
'ext': 'flv',
'title': info['title'],
'thumbnail': info['screenshotURI'],
return {
'id': video_id,
'url': info['mediaURI'],
'ext': 'flv',
'title': info['title'],
'thumbnail': info['screenshotURI'],
'description': info['description'],
'uploader': info['user'],
'view_count': info['numberOfViews'],
'upload_date': u'%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
}]
'uploader': info['user'],
'view_count': info['numberOfViews'],
'upload_date': '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday),
}

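The dotsub API reports dateCreated in milliseconds since the epoch; the extractor above converts it to youtube-dl's YYYYMMDD upload_date string. The conversion in isolation:

import time

def upload_date_from_ms(ms):
    date = time.gmtime(ms / 1000)  # API value is in milliseconds
    return '%04i%02i%02i' % (date.tm_year, date.tm_mon, date.tm_mday)

# upload_date_from_ms(1292198400000) == '20101213'
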
@@ -10,11 +10,12 @@ from .common import InfoExtractor
class DropboxIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
_TEST = {
'url': 'https://www.dropbox.com/s/mcnzehi9wo55th4/20131219_085616.mp4',
'file': 'mcnzehi9wo55th4.mp4',
'md5': 'f6d65b1b326e82fd7ab7720bea3dacae',
'url': 'https://www.dropbox.com/s/0qr9sai2veej4f8/THE_DOCTOR_GAMES.mp4',
'md5': '8ae17c51172fb7f93bdd6a214cc8c896',
'info_dict': {
'title': '20131219_085616'
'id': '0qr9sai2veej4f8',
'ext': 'mp4',
'title': 'THE_DOCTOR_GAMES'
}
}

@@ -1,23 +1,25 @@
from __future__ import unicode_literals

import re

from ..utils import (
compat_urllib_parse,
determine_ext
)
from .common import InfoExtractor


class EHowIE(InfoExtractor):
IE_NAME = u'eHow'
_VALID_URL = r'(?:https?://)?(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
IE_NAME = 'eHow'
_VALID_URL = r'https?://(?:www\.)?ehow\.com/[^/_?]*_(?P<id>[0-9]+)'
_TEST = {
u'url': u'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
u'file': u'12245069.flv',
u'md5': u'9809b4e3f115ae2088440bcb4efbf371',
u'info_dict': {
u"title": u"Hardwood Flooring Basics",
u"description": u"Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...",
u"uploader": u"Erick Nathan"
'url': 'http://www.ehow.com/video_12245069_hardwood-flooring-basics.html',
'md5': '9809b4e3f115ae2088440bcb4efbf371',
'info_dict': {
'id': '12245069',
'ext': 'flv',
'title': 'Hardwood Flooring Basics',
'description': 'Hardwood flooring may be time consuming, but its ultimately a pretty straightforward concept. Learn about hardwood flooring basics with help from a hardware flooring business owner in this free video...',
'uploader': 'Erick Nathan',
}
}

@@ -26,21 +28,16 @@ class EHowIE(InfoExtractor):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
video_url = self._search_regex(r'(?:file|source)=(http[^\'"&]*)',
webpage, u'video URL')
final_url = compat_urllib_parse.unquote(video_url)
uploader = self._search_regex(r'<meta name="uploader" content="(.+?)" />',
webpage, u'uploader')
webpage, 'video URL')
final_url = compat_urllib_parse.unquote(video_url)
uploader = self._html_search_meta('uploader', webpage)
title = self._og_search_title(webpage).replace(' | eHow', '')
ext = determine_ext(final_url)

return {
'_type': 'video',
'id': video_id,
'url': final_url,
'ext': ext,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'id': video_id,
'url': final_url,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
'uploader': uploader,
'uploader': uploader,
}

@@ -9,7 +9,7 @@ from ..utils import unified_strdate

class ElPaisIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
IE_DESCR = 'El País'
IE_DESC = 'El País'

_TEST = {
'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',

youtube_dl/extractor/engadget.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .fivemin import FiveMinIE
from ..utils import (
url_basename,
)


class EngadgetIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://www.engadget.com/
(?:video/5min/(?P<id>\d+)|
[\d/]+/.*?)
'''

_TEST = {
'url': 'http://www.engadget.com/video/5min/518153925/',
'md5': 'c6820d4828a5064447a4d9fc73f312c9',
'info_dict': {
'id': '518153925',
'ext': 'mp4',
'title': 'Samsung Galaxy Tab Pro 8.4 Review',
},
'add_ie': ['FiveMin'],
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')

if video_id is not None:
return FiveMinIE._build_result(video_id)
else:
title = url_basename(url)
webpage = self._download_webpage(url, title)
ids = re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
return {
'_type': 'playlist',
'title': title,
'entries': [FiveMinIE._build_result(id) for id in ids]
}

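The new Engadget extractor never parses video data itself; it either delegates a single id to FiveMinIE._build_result() or wraps every embedded player id in a playlist. An equivalent method-body excerpt using the generic url_result() helper, with webpage and title as in the code above; the '5min:<id>' shorthand is an assumption about what FiveMinIE's _VALID_URL accepted at the time:

entries = [
    self.url_result('5min:%s' % vid, 'FiveMin')
    for vid in re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
]
return self.playlist_result(entries, playlist_title=title)
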
@@ -1,9 +1,9 @@
import json
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
compat_str,
compat_urllib_parse,

ExtractorError,
@@ -11,70 +11,68 @@ from ..utils import (


class EscapistIE(InfoExtractor):
_VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<episode>[^/?]+)[/?]?.*$'
_VALID_URL = r'^https?://?(www\.)?escapistmagazine\.com/videos/view/(?P<showname>[^/]+)/(?P<id>[0-9]+)-'
_TEST = {
u'url': u'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
u'file': u'6618-Breaking-Down-Baldurs-Gate.mp4',
u'md5': u'ab3a706c681efca53f0a35f1415cf0d1',
u'info_dict': {
u"description": u"Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
u"uploader": u"the-escapist-presents",
u"title": u"Breaking Down Baldur's Gate"
'url': 'http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate',
'md5': 'ab3a706c681efca53f0a35f1415cf0d1',
'info_dict': {
'id': '6618',
'ext': 'mp4',
'description': "Baldur's Gate: Original, Modded or Enhanced Edition? I'll break down what you can expect from the new Baldur's Gate: Enhanced Edition.",
'uploader': 'the-escapist-presents',
'title': "Breaking Down Baldur's Gate",
}
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
showName = mobj.group('showname')
videoId = mobj.group('episode')
video_id = mobj.group('id')

self.report_extraction(videoId)
webpage = self._download_webpage(url, videoId)
self.report_extraction(video_id)
webpage = self._download_webpage(url, video_id)

videoDesc = self._html_search_regex(
r'<meta name="description" content="([^"]*)"',
webpage, u'description', fatal=False)
webpage, 'description', fatal=False)

playerUrl = self._og_search_video_url(webpage, name=u'player URL')

title = self._html_search_regex(
r'<meta name="title" content="([^"]*)"',
webpage, u'title').split(' : ')[-1]
webpage, 'title').split(' : ')[-1]

configUrl = self._search_regex('config=(.*)$', playerUrl, u'config URL')
configUrl = self._search_regex('config=(.*)$', playerUrl, 'config URL')
configUrl = compat_urllib_parse.unquote(configUrl)

formats = []

def _add_format(name, cfgurl):
configJSON = self._download_webpage(
cfgurl, videoId,
u'Downloading ' + name + ' configuration',
u'Unable to download ' + name + ' configuration')
def _add_format(name, cfgurl, quality):
config = self._download_json(
cfgurl, video_id,
'Downloading ' + name + ' configuration',
'Unable to download ' + name + ' configuration',
transform_source=lambda s: s.replace("'", '"'))

# Technically, it's JavaScript, not JSON
configJSON = configJSON.replace("'", '"')

try:
config = json.loads(configJSON)
except (ValueError,) as err:
raise ExtractorError(u'Invalid JSON in configuration file: ' + compat_str(err))
playlist = config['playlist']
formats.append({
'url': playlist[1]['url'],
'format_id': name,
'quality': quality,
})

_add_format(u'normal', configUrl)
_add_format('normal', configUrl, quality=0)
hq_url = (configUrl +
('&hq=1' if '?' in configUrl else configUrl + '?hq=1'))
try:
_add_format(u'hq', hq_url)
_add_format('hq', hq_url, quality=1)
except ExtractorError:
pass # That's fine, we'll just use normal quality

self._sort_formats(formats)

return {
'id': videoId,
'id': video_id,
'formats': formats,
'uploader': showName,
'title': title,

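The quality value threaded through _add_format() above feeds InfoExtractor._sort_formats(), which orders formats worst-to-best so that the last entry is picked by default. A toy illustration with placeholder URLs:

formats = [
    {'url': 'http://example.com/normal.mp4', 'format_id': 'normal', 'quality': 0},
    {'url': 'http://example.com/hq.mp4', 'format_id': 'hq', 'quality': 1},
]
# After self._sort_formats(formats) inside an extractor, the 'hq' entry
# sorts last and becomes the default download.
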
@@ -1,56 +1,58 @@
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor


class ExfmIE(InfoExtractor):
IE_NAME = u'exfm'
IE_DESC = u'ex.fm'
_VALID_URL = r'(?:http://)?(?:www\.)?ex\.fm/song/([^/]+)'
_SOUNDCLOUD_URL = r'(?:http://)?(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
IE_NAME = 'exfm'
IE_DESC = 'ex.fm'
_VALID_URL = r'http://(?:www\.)?ex\.fm/song/(?P<id>[^/]+)'
_SOUNDCLOUD_URL = r'http://(?:www\.)?api\.soundcloud\.com/tracks/([^/]+)/stream'
_TESTS = [
{
u'url': u'http://ex.fm/song/eh359',
u'file': u'44216187.mp3',
u'md5': u'e45513df5631e6d760970b14cc0c11e7',
u'info_dict': {
u"title": u"Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive",
u"uploader": u"deadjournalist",
u'upload_date': u'20120424',
u'description': u'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
'url': 'http://ex.fm/song/eh359',
'md5': 'e45513df5631e6d760970b14cc0c11e7',
'info_dict': {
'id': '44216187',
'ext': 'mp3',
'title': 'Test House "Love Is Not Enough" (Extended Mix) DeadJournalist Exclusive',
'uploader': 'deadjournalist',
'upload_date': '20120424',
'description': 'Test House \"Love Is Not Enough\" (Extended Mix) DeadJournalist Exclusive',
},
u'note': u'Soundcloud song',
u'skip': u'The site is down too often',
'note': 'Soundcloud song',
'skip': 'The site is down too often',
},
{
u'url': u'http://ex.fm/song/wddt8',
u'file': u'wddt8.mp3',
u'md5': u'966bd70741ac5b8570d8e45bfaed3643',
u'info_dict': {
u'title': u'Safe and Sound',
u'uploader': u'Capital Cities',
'url': 'http://ex.fm/song/wddt8',
'md5': '966bd70741ac5b8570d8e45bfaed3643',
'info_dict': {
'id': 'wddt8',
'ext': 'mp3',
'title': 'Safe and Sound',
'uploader': 'Capital Cities',
},
u'skip': u'The site is down too often',
'skip': 'The site is down too often',
},
]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
song_id = mobj.group(1)
info_url = "http://ex.fm/api/v3/song/%s" %(song_id)
webpage = self._download_webpage(info_url, song_id)
info = json.loads(webpage)
song_url = info['song']['url']
song_id = mobj.group('id')
info_url = "http://ex.fm/api/v3/song/%s" % song_id
info = self._download_json(info_url, song_id)['song']
song_url = info['url']
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
self.to_screen('Soundcloud song detected')
return self.url_result(song_url.replace('/stream',''), 'Soundcloud')
return [{
'id': song_id,
'url': song_url,
'ext': 'mp3',
'title': info['song']['title'],
'thumbnail': info['song']['image']['large'],
'uploader': info['song']['artist'],
'view_count': info['song']['loved_count'],
}]
return self.url_result(song_url.replace('/stream', ''), 'Soundcloud')
return {
'id': song_id,
'url': song_url,
'ext': 'mp3',
'title': info['title'],
'thumbnail': info['image']['large'],
'uploader': info['artist'],
'view_count': info['loved_count'],
}

@@ -1,4 +1,5 @@
import os
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -8,18 +9,23 @@ from ..utils import (
compat_urllib_parse,
)


class ExtremeTubeIE(InfoExtractor):
_VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
_TEST = {
u'url': u'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
u'file': u'652431.mp4',
u'md5': u'1fb9228f5e3332ec8c057d6ac36f33e0',
u'info_dict': {
u"title": u"Music Video 14 british euro brit european cumshots swallow",
u"uploader": u"unknown",
u"age_limit": 18,
_VALID_URL = r'^(?:https?://)?(?:www\.)?(?P<url>extremetube\.com/.*?video/.+?(?P<videoid>[0-9]+))(?:[/?&]|$)'
_TESTS = [{
'url': 'http://www.extremetube.com/video/music-video-14-british-euro-brit-european-cumshots-swallow-652431',
'md5': '1fb9228f5e3332ec8c057d6ac36f33e0',
'info_dict': {
'id': '652431',
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
'age_limit': 18,
}
}
}, {
'url': 'http://www.extremetube.com/gay/video/abcde-1234',
'only_matching': True,
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
@@ -30,11 +36,14 @@ class ExtremeTubeIE(InfoExtractor):
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, video_id)

video_title = self._html_search_regex(r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, u'title')
uploader = self._html_search_regex(r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, u'uploader', fatal=False)
video_url = compat_urllib_parse.unquote(self._html_search_regex(r'video_url=(.+?)&', webpage, u'video_url'))
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>\1<', webpage, 'title')
uploader = self._html_search_regex(
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
fatal=False)
video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
format = "-".join(format)

@@ -43,7 +52,6 @@ class ExtremeTubeIE(InfoExtractor):
'title': video_title,
'uploader': uploader,
'url': video_url,
'ext': extension,
'format': format,
'format_id': format,
'age_limit': 18,

@@ -1,3 +1,5 @@
from __future__ import unicode_literals

import json
import re
import socket
@@ -9,16 +11,15 @@ from ..utils import (
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
urlencode_postdata,

ExtractorError,
)


class FacebookIE(InfoExtractor):
"""Information Extractor for Facebook"""

_VALID_URL = r'''(?x)
(?:https?://)?(?:\w+\.)?facebook\.com/
https?://(?:\w+\.)?facebook\.com/
(?:[^#?]*\#!/)?
(?:video/video\.php|photo\.php|video/embed)\?(?:.*?)
(?:v|video_id)=(?P<id>[0-9]+)
@@ -26,21 +27,18 @@ class FacebookIE(InfoExtractor):
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = u'facebook'
IE_NAME = 'facebook'
_TEST = {
u'url': u'https://www.facebook.com/photo.php?v=120708114770723',
u'file': u'120708114770723.mp4',
u'md5': u'48975a41ccc4b7a581abd68651c1a5a8',
u'info_dict': {
u"duration": 279,
u"title": u"PEOPLE ARE AWESOME 2013"
'url': 'https://www.facebook.com/photo.php?v=120708114770723',
'md5': '48975a41ccc4b7a581abd68651c1a5a8',
'info_dict': {
'id': '120708114770723',
'ext': 'mp4',
'duration': 279,
'title': 'PEOPLE ARE AWESOME 2013',
}
}

def report_login(self):
"""Report attempt to log in."""
self.to_screen(u'Logging in')

def _login(self):
(useremail, password) = self._get_login_info()
if useremail is None:
@@ -48,11 +46,13 @@ class FacebookIE(InfoExtractor):

login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
login_page_req.add_header('Cookie', 'locale=en_US')
self.report_login()
login_page = self._download_webpage(login_page_req, None, note=False,
errnote=u'Unable to download login page')
lsd = self._search_regex(r'"lsd":"(\w*?)"', login_page, u'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, u'lgnrnd')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')

login_form = {
'email': useremail,
@@ -65,27 +65,29 @@ class FacebookIE(InfoExtractor):
'timezone': '-60',
'trynum': '1',
}
request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = compat_urllib_request.urlopen(request).read()
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
self._downloader.report_warning(u'unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
return

check_form = {
'fb_dtsg': self._search_regex(r'"fb_dtsg":"(.*?)"', login_results, u'fb_dtsg'),
'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, u'nh'),
'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
'nh': self._search_regex(r'name="nh" value="(\w*?)"', login_results, 'nh'),
'name_action_selected': 'dont_save',
'submit[Continue]': self._search_regex(r'<input value="(.*?)" name="submit\[Continue\]"', login_results, u'continue'),
'submit[Continue]': self._search_regex(r'<button[^>]+value="(.*?)"[^>]+name="submit\[Continue\]"', login_results, 'continue'),
}
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, compat_urllib_parse.urlencode(check_form))
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = compat_urllib_request.urlopen(check_req).read()
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning(u'Unable to confirm login, you have to login in your brower and authorize the login.')
self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning(u'unable to log in: %s' % compat_str(err))
self._downloader.report_warning('unable to log in: %s' % compat_str(err))
return

def _real_initialize(self):
@@ -93,8 +95,6 @@ class FacebookIE(InfoExtractor):

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError(u'Invalid URL: %s' % url)
video_id = mobj.group('id')

url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
@@ -107,10 +107,10 @@ class FacebookIE(InfoExtractor):
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
u'The video is not available, Facebook said: "%s"' % m_msg.group(1),
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
else:
raise ExtractorError(u'Cannot parse data')
raise ExtractorError('Cannot parse data')
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse.unquote(data['params'])
params = json.loads(params_raw)
@@ -119,19 +119,15 @@ class FacebookIE(InfoExtractor):
if not video_url:
video_url = video_data['sd_src']
if not video_url:
raise ExtractorError(u'Cannot find video URL')
video_duration = int(video_data['video_duration'])
thumbnail = video_data['thumbnail_src']
raise ExtractorError('Cannot find video URL')

video_title = self._html_search_regex(
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, u'title')
r'<h2 class="uiHeaderTitle">([^<]*)</h2>', webpage, 'title')

info = {
return {
'id': video_id,
'title': video_title,
'url': video_url,
'ext': 'mp4',
'duration': video_duration,
'thumbnail': thumbnail,
'duration': int(video_data['video_duration']),
'thumbnail': video_data['thumbnail_src'],
}
return [info]

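The login code above swaps compat_urllib_parse.urlencode() for urlencode_postdata(), which returns the encoded form as bytes so the same request body works on both Python 2 and 3. A reduced sketch with dummy credentials:

from youtube_dl.utils import compat_urllib_request, urlencode_postdata

login_form = {'email': 'user@example.com', 'pass': 'hunter2'}  # dummy values
request = compat_urllib_request.Request(
    'https://www.facebook.com/login.php', urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
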
@@ -6,7 +6,6 @@ from .common import InfoExtractor


class FirstpostIE(InfoExtractor):
IE_NAME = 'Firstpost.com'
_VALID_URL = r'http://(?:www\.)?firstpost\.com/[^/]+/.*-(?P<id>[0-9]+)\.html'

_TEST = {
@@ -16,7 +15,6 @@ class FirstpostIE(InfoExtractor):
'id': '1025403',
'ext': 'mp4',
'title': 'India to launch indigenous aircraft carrier INS Vikrant today',
'description': 'Its flight deck is over twice the size of a football field, its power unit can light up the entire Kochi city and the cabling is enough to cover the distance between here to Delhi.',
}
}

@@ -24,15 +22,26 @@ class FirstpostIE(InfoExtractor):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')

webpage = self._download_webpage(url, video_id)
video_url = self._html_search_regex(
r'<div.*?name="div_video".*?flashvars="([^"]+)">',
webpage, 'video URL')
data = self._download_xml(
'http://www.firstpost.com/getvideoxml-%s.xml' % video_id, video_id,
'Downloading video XML')

item = data.find('./playlist/item')
thumbnail = item.find('./image').text
title = item.find('./title').text

formats = [
{
'url': details.find('./file').text,
'format_id': details.find('./label').text.strip(),
'width': int(details.find('./width').text.strip()),
'height': int(details.find('./height').text.strip()),
} for details in item.findall('./source/file_details') if details.find('./file').text
]

return {
'id': video_id,
'url': video_url,
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'title': title,
'thumbnail': thumbnail,
'formats': formats,
}

60  youtube_dl/extractor/firsttv.py  Normal file
@ -0,0 +1,60 @@
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import int_or_none


class FirstTVIE(InfoExtractor):
    IE_NAME = 'firsttv'
    IE_DESC = 'Видеоархив - Первый канал'
    _VALID_URL = r'http://(?:www\.)?1tv\.ru/videoarchive/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.1tv.ru/videoarchive/73390',
        'md5': '3de6390cf0cca4a5eae1d1d83895e5ad',
        'info_dict': {
            'id': '73390',
            'ext': 'mp4',
            'title': 'Олимпийские канатные дороги',
            'description': 'md5:cc730d2bf4215463e37fff6a1e277b13',
            'thumbnail': 'http://img1.1tv.ru/imgsize640x360/PR20140210114657.JPG',
            'duration': 149,
        },
        'skip': 'Only works from Russia',
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id, 'Downloading page')

        video_url = self._html_search_regex(
            r'''(?s)jwplayer\('flashvideoportal_1'\)\.setup\({.*?'file': '([^']+)'.*?}\);''', webpage, 'video URL')

        title = self._html_search_regex(
            r'<div class="tv_translation">\s*<h1><a href="[^"]+">([^<]*)</a>', webpage, 'title')
        description = self._html_search_regex(
            r'<div class="descr">\s*<div> </div>\s*<p>([^<]*)</p></div>', webpage, 'description', fatal=False)

        thumbnail = self._og_search_thumbnail(webpage)
        duration = self._og_search_property('video:duration', webpage, 'video duration', fatal=False)

        like_count = self._html_search_regex(r'title="Понравилось".*?/></label> \[(\d+)\]',
            webpage, 'like count', fatal=False)
        dislike_count = self._html_search_regex(r'title="Не понравилось".*?/></label> \[(\d+)\]',
            webpage, 'dislike count', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'thumbnail': thumbnail,
            'title': title,
            'description': description,
            'duration': int_or_none(duration),
            'like_count': int_or_none(like_count),
            'dislike_count': int_or_none(dislike_count),
        }

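int_or_none, used for the fields scraped with fatal=False above, simply tolerates the None those helpers return on a miss. A minimal sketch of the idea (the real youtube_dl.utils version also takes scaling arguments):

    def int_or_none(value):
        # Pass None through untouched; otherwise coerce strings like '149' to int.
        return int(value) if value is not None else None
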
79  youtube_dl/extractor/fivemin.py  Normal file
@ -0,0 +1,79 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    compat_urllib_parse,
)


class FiveMinIE(InfoExtractor):
    IE_NAME = '5min'
    _VALID_URL = r'''(?x)
        (?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(.*?&)?playList=|
            5min:)
        (?P<id>\d+)
        '''

    _TESTS = [
        {
            # From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/
            'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791',
            'md5': '4f7b0b79bf1a470e5004f7112385941d',
            'info_dict': {
                'id': '518013791',
                'ext': 'mp4',
                'title': 'iPad Mini with Retina Display Review',
            },
        },
        {
            # From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247
            'url': '5min:518086247',
            'md5': 'e539a9dd682c288ef5a498898009f69e',
            'info_dict': {
                'id': '518086247',
                'ext': 'mp4',
                'title': 'How to Make a Next-Level Fruit Salad',
            },
        },
    ]

    @classmethod
    def _build_result(cls, video_id):
        return cls.url_result('5min:%s' % video_id, cls.ie_key())

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
        embed_page = self._download_webpage(embed_url, video_id,
            'Downloading embed page')
        sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
        query = compat_urllib_parse.urlencode({
            'func': 'GetResults',
            'playlist': video_id,
            'sid': sid,
            'isPlayerSeed': 'true',
            'url': embed_url,
        })
        info = self._download_json(
            'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
            video_id)['binding'][0]

        second_id = compat_str(int(video_id[:-2]) + 1)
        formats = []
        for quality, height in [(1, 320), (2, 480), (4, 720), (8, 1080)]:
            if any(r['ID'] == quality for r in info['Renditions']):
                formats.append({
                    'format_id': compat_str(quality),
                    'url': 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (second_id[-3:], second_id, video_id, quality),
                    'height': height,
                })

        return {
            'id': video_id,
            'title': info['Title'],
            'formats': formats,
        }

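The second_id arithmetic above derives the CDN path from the video id. A quick worked example for the first test id (a sketch of the observed URL scheme, not an official API):

    video_id = '518013791'
    second_id = str(int(video_id[:-2]) + 1)   # '5180137' + 1 -> '5180138'
    quality = 1
    url = 'http://avideos.5min.com/%s/%s/%s_%s.mp4' % (
        second_id[-3:], second_id, video_id, quality)
    # -> http://avideos.5min.com/138/5180138/518013791_1.mp4
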
95  youtube_dl/extractor/fourtube.py  Normal file
@ -0,0 +1,95 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_request,
    unified_strdate,
    str_to_int,
    parse_duration,
    clean_html,
)


class FourTubeIE(InfoExtractor):
    IE_NAME = '4tube'
    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
        'md5': '6516c8ac63b03de06bc8eac14362db4f',
        'info_dict': {
            'id': '209733',
            'ext': 'mp4',
            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
            'uploader': 'WCP Club',
            'uploader_id': 'wcp-club',
            'upload_date': '20131031',
            'duration': 583,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.4tube.com/videos/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        self.report_extraction(video_id)

        playlist_json = self._html_search_regex(r'var playerConfigPlaylist\s+=\s+([^;]+)', webpage, 'Playlist')
        media_id = self._search_regex(r'idMedia:\s*(\d+)', playlist_json, 'Media Id')
        sources = self._search_regex(r'sources:\s*\[([^\]]*)\]', playlist_json, 'Sources').split(',')
        title = self._search_regex(r'title:\s*"([^"]*)', playlist_json, 'Title')
        thumbnail_url = self._search_regex(r'image:\s*"([^"]*)', playlist_json, 'Thumbnail', fatal=False)

        uploader_str = self._search_regex(r'<span>Uploaded by</span>(.*?)<span>', webpage, 'uploader', fatal=False)
        mobj = re.search(r'<a href="/sites/(?P<id>[^"]+)"><strong>(?P<name>[^<]+)</strong></a>', uploader_str)
        (uploader, uploader_id) = (mobj.group('name'), mobj.group('id')) if mobj else (clean_html(uploader_str), None)

        upload_date = None
        view_count = None
        duration = None
        description = self._html_search_meta('description', webpage, 'description')
        if description:
            upload_date = self._search_regex(r'Published Date: (\d{2} [a-zA-Z]{3} \d{4})', description, 'upload date',
                fatal=False)
            if upload_date:
                upload_date = unified_strdate(upload_date)
            view_count = self._search_regex(r'Views: ([\d,\.]+)', description, 'view count', fatal=False)
            if view_count:
                view_count = str_to_int(view_count)
            duration = parse_duration(self._search_regex(r'Length: (\d+m\d+s)', description, 'duration', fatal=False))

        token_url = "http://tkn.4tube.com/{0}/desktop/{1}".format(media_id, "+".join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)

        formats = [{
            'url': tokens[format]['token'],
            'format_id': format + 'p',
            'resolution': format + 'p',
            'quality': int(format),
        } for format in sources]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'thumbnail': thumbnail_url,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'upload_date': upload_date,
            'view_count': view_count,
            'duration': duration,
            'age_limit': 18,
            'webpage_url': webpage_url,
        }

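The tkn.4tube.com request above only answers to a POST, hence the dummy b'{}' body passed to Request. A standalone sketch of the same call with the standard library alone (the media id and source labels here are hypothetical):

    import json
    try:
        from urllib.request import Request, urlopen  # Python 3
    except ImportError:
        from urllib2 import Request, urlopen  # Python 2

    media_id = '123456'              # hypothetical
    sources = ['240', '360', '480']  # hypothetical
    token_url = 'http://tkn.4tube.com/%s/desktop/%s' % (media_id, '+'.join(sources))
    # Supplying a body (even an empty JSON object) forces urllib to issue a POST.
    req = Request(token_url, b'{}', {'Origin': 'http://www.4tube.com'})
    tokens = json.loads(urlopen(req).read().decode('utf-8'))
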
77  youtube_dl/extractor/franceculture.py  Normal file
@ -0,0 +1,77 @@
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    compat_parse_qs,
    compat_urlparse,
)


class FranceCultureIE(InfoExtractor):
    _VALID_URL = r'(?P<baseurl>http://(?:www\.)?franceculture\.fr/)player/reecouter\?play=(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.franceculture.fr/player/reecouter?play=4795174',
        'info_dict': {
            'id': '4795174',
            'ext': 'mp3',
            'title': 'Rendez-vous au pays des geeks',
            'vcodec': 'none',
            'uploader': 'Colette Fellous',
            'upload_date': '20140301',
            'duration': 3601,
            'thumbnail': r're:^http://www\.franceculture\.fr/.*/images/player/Carnet-nomade\.jpg$',
            'description': 'Avec :Jean-Baptiste Péretié pour son documentaire sur Arte "La revanche des « geeks », une enquête menée aux Etats-Unis dans la S ...',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        baseurl = mobj.group('baseurl')

        webpage = self._download_webpage(url, video_id)
        params_code = self._search_regex(
            r"<param name='movie' value='/sites/all/modules/rf/rf_player/swf/loader.swf\?([^']+)' />",
            webpage, 'parameter code')
        params = compat_parse_qs(params_code)
        video_url = compat_urlparse.urljoin(baseurl, params['urlAOD'][0])

        title = self._html_search_regex(
            r'<h1 class="title[^"]+">(.+?)</h1>', webpage, 'title')
        uploader = self._html_search_regex(
            r'(?s)<div id="emission".*?<span class="author">(.*?)</span>',
            webpage, 'uploader', fatal=False)
        thumbnail_part = self._html_search_regex(
            r'(?s)<div id="emission".*?<img src="([^"]+)"', webpage,
            'thumbnail', fatal=False)
        if thumbnail_part is None:
            thumbnail = None
        else:
            thumbnail = compat_urlparse.urljoin(baseurl, thumbnail_part)
        description = self._html_search_regex(
            r'(?s)<p class="desc">(.*?)</p>', webpage, 'description')

        info = json.loads(params['infoData'][0])[0]
        duration = info.get('media_length')
        upload_date_candidate = info.get('media_section5')
        upload_date = (
            upload_date_candidate
            if (upload_date_candidate is not None and
                re.match(r'[0-9]{8}$', upload_date_candidate))
            else None)

        return {
            'id': video_id,
            'url': video_url,
            'vcodec': 'none' if video_url.lower().endswith('.mp3') else None,
            'duration': duration,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
        }

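The extractor above leans on compat_parse_qs to pull both the audio URL and a JSON blob out of the Flash loader's query string. A minimal standalone illustration with made-up query values:

    try:
        from urllib.parse import parse_qs  # Python 3
    except ImportError:
        from urlparse import parse_qs  # Python 2

    params_code = 'urlAOD=/path/to/show.mp3&infoData=%5B%7B%22media_length%22%3A%203601%7D%5D'
    params = parse_qs(params_code)
    # parse_qs maps each key to a *list* of values, hence the [0] indexing above.
    print(params['urlAOD'][0])   # /path/to/show.mp3
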
@ -184,6 +184,7 @@ class GenerationQuoiIE(InfoExtractor):
            # It uses Dailymotion
            'skip_download': True,
        },
        'skip': 'Only available from France',
    }

    def _real_extract(self, url):

@ -1,18 +1,21 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import determine_ext


class FreesoundIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
    _VALID_URL = r'https?://(?:www\.)?freesound\.org/people/([^/]+)/sounds/(?P<id>[^/]+)'
    _TEST = {
        u'url': u'http://www.freesound.org/people/miklovan/sounds/194503/',
        u'file': u'194503.mp3',
        u'md5': u'12280ceb42c81f19a515c745eae07650',
        u'info_dict': {
            u"title": u"gulls in the city.wav",
            u"uploader" : u"miklovan",
            u'description': u'the sounds of seagulls in the city',
        'url': 'http://www.freesound.org/people/miklovan/sounds/194503/',
        'md5': '12280ceb42c81f19a515c745eae07650',
        'info_dict': {
            'id': '194503',
            'ext': 'mp3',
            'title': 'gulls in the city.wav',
            'uploader': 'miklovan',
            'description': 'the sounds of seagulls in the city',
        }
    }

@ -20,17 +23,17 @@ class FreesoundIE(InfoExtractor):
        mobj = re.match(self._VALID_URL, url)
        music_id = mobj.group('id')
        webpage = self._download_webpage(url, music_id)
        title = self._html_search_regex(r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
            webpage, 'music title', flags=re.DOTALL)
        music_url = self._og_search_property('audio', webpage, 'music url')
        description = self._html_search_regex(r'<div id="sound_description">(.*?)</div>',
            webpage, 'description', fatal=False, flags=re.DOTALL)
        title = self._html_search_regex(
            r'<div id="single_sample_header">.*?<a href="#">(.+?)</a>',
            webpage, 'music title', flags=re.DOTALL)
        description = self._html_search_regex(
            r'<div id="sound_description">(.*?)</div>', webpage, 'description',
            fatal=False, flags=re.DOTALL)

        return [{
            'id': music_id,
            'title': title,
            'url': music_url,
        return {
            'id': music_id,
            'title': title,
            'url': self._og_search_property('audio', webpage, 'music url'),
            'uploader': self._og_search_property('audio:artist', webpage, 'music uploader'),
            'ext': determine_ext(music_url),
            'description': description,
        }]
        }

@ -1,12 +1,13 @@
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor


class FunnyOrDieIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'
    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
    _TEST = {
        'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
        'file': '0732f586d7.mp4',
@ -30,10 +31,23 @@ class FunnyOrDieIE(InfoExtractor):
            [r'type="video/mp4" src="(.*?)"', r'src="([^>]*?)" type=\'video/mp4\''],
            webpage, 'video URL', flags=re.DOTALL)

        if mobj.group('type') == 'embed':
            post_json = self._search_regex(
                r'fb_post\s*=\s*(\{.*?\});', webpage, 'post details')
            post = json.loads(post_json)
            title = post['name']
            description = post.get('description')
            thumbnail = post.get('picture')
        else:
            title = self._og_search_title(webpage)
            description = self._og_search_description(webpage)
            thumbnail = None

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }

@ -1,3 +1,5 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@ -6,13 +8,14 @@ from .common import InfoExtractor
class GamekingsIE(InfoExtractor):
    _VALID_URL = r'http://www\.gamekings\.tv/videos/(?P<name>[0-9a-z\-]+)'
    _TEST = {
        u"url": u"http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/",
        u'file': u'20130811.mp4',
        'url': 'http://www.gamekings.tv/videos/phoenix-wright-ace-attorney-dual-destinies-review/',
        # MD5 is flaky, seems to change regularly
        #u'md5': u'2f32b1f7b80fdc5cb616efb4f387f8a3',
        # 'md5': '2f32b1f7b80fdc5cb616efb4f387f8a3',
        u'info_dict': {
            u"title": u"Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review",
            u"description": u"Melle en Steven hebben voor de review een week in de rechtbank doorbracht met Phoenix Wright: Ace Attorney - Dual Destinies.",
            'id': '20130811',
            'ext': 'mp4',
            'title': 'Phoenix Wright: Ace Attorney \u2013 Dual Destinies Review',
            'description': 'md5:632e61a9f97d700e83f43d77ddafb6a4',
        }
    }

@ -7,10 +7,11 @@ class GametrailersIE(MTVServicesInfoExtractor):
    _VALID_URL = r'http://www\.gametrailers\.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'
    _TEST = {
        'url': 'http://www.gametrailers.com/videos/zbvr8i/mirror-s-edge-2-e3-2013--debut-trailer',
        'file': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d.mp4',
        'md5': '4c8e67681a0ea7ec241e8c09b3ea8cf7',
        'info_dict': {
            'title': 'Mirror\'s Edge 2|E3 2013: Debut Trailer',
            'id': '70e9a5d7-cf25-4a10-9104-6f3e7342ae0d',
            'ext': 'mp4',
            'title': 'E3 2013: Debut Trailer',
            'description': 'Faith is back!  Check out the World Premiere trailer for Mirror\'s Edge 2 straight from the EA Press Conference at E3 2013!',
        },
    }

134  youtube_dl/extractor/gdcvault.py  Normal file
@ -0,0 +1,134 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
)

class GDCVaultIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?gdcvault\.com/play/(?P<id>\d+)/(?P<name>(\w|-)+)'
    _TESTS = [
        {
            'url': 'http://www.gdcvault.com/play/1019721/Doki-Doki-Universe-Sweet-Simple',
            'md5': '7ce8388f544c88b7ac11c7ab1b593704',
            'info_dict': {
                'id': '1019721',
                'ext': 'mp4',
                'title': 'Doki-Doki Universe: Sweet, Simple and Genuine (GDC Next 10)'
            }
        },
        {
            'url': 'http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of',
            'info_dict': {
                'id': '1015683',
                'ext': 'flv',
                'title': 'Embracing the Dark Art of Mathematical Modeling in AI'
            },
            'params': {
                'skip_download': True,  # Requires rtmpdump
            }
        },
    ]

    def _parse_mp4(self, xml_description):
        video_formats = []
        mp4_video = xml_description.find('./metadata/mp4video')
        if mp4_video is None:
            return None

        mobj = re.match(r'(?P<root>https?://.*?/).*', mp4_video.text)
        video_root = mobj.group('root')
        formats = xml_description.findall('./metadata/MBRVideos/MBRVideo')
        for format in formats:
            mobj = re.match(r'mp4\:(?P<path>.*)', format.find('streamName').text)
            url = video_root + mobj.group('path')
            vbr = format.find('bitrate').text
            video_formats.append({
                'url': url,
                'vbr': int(vbr),
            })
        return video_formats

    def _parse_flv(self, xml_description):
        video_formats = []
        akami_url = xml_description.find('./metadata/akamaiHost').text
        slide_video_path = xml_description.find('./metadata/slideVideo').text
        video_formats.append({
            'url': 'rtmp://' + akami_url + '/' + slide_video_path,
            'format_note': 'slide deck video',
            'quality': -2,
            'preference': -2,
            'format_id': 'slides',
        })
        speaker_video_path = xml_description.find('./metadata/speakerVideo').text
        video_formats.append({
            'url': 'rtmp://' + akami_url + '/' + speaker_video_path,
            'format_note': 'speaker video',
            'quality': -1,
            'preference': -1,
            'format_id': 'speaker',
        })
        return video_formats

    def _login(self, webpage_url, video_id):
        (username, password) = self._get_login_info()
        if username is None or password is None:
            self.report_warning('It looks like ' + webpage_url + ' requires a login. Try specifying a username and password and try again.')
            return None

        mobj = re.match(r'(?P<root_url>https?://.*?/).*', webpage_url)
        login_url = mobj.group('root_url') + 'api/login.php'
        logout_url = mobj.group('root_url') + 'logout'

        login_form = {
            'email': username,
            'password': password,
        }

        request = compat_urllib_request.Request(login_url, compat_urllib_parse.urlencode(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        self._download_webpage(request, video_id, 'Logging in')
        start_page = self._download_webpage(webpage_url, video_id, 'Getting authenticated video page')
        self._download_webpage(logout_url, video_id, 'Logging out')

        return start_page

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.gdcvault.com/play/' + video_id
        start_page = self._download_webpage(webpage_url, video_id)

        xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root', None, False)

        if xml_root is None:
            # Probably need to authenticate
            start_page = self._login(webpage_url, video_id)
            if start_page is None:
                self.report_warning('Could not login.')
            else:
                # Grab the url from the authenticated page
                xml_root = self._html_search_regex(r'<iframe src="(?P<xml_root>.*?)player.html.*?".*?</iframe>', start_page, 'xml root')

        xml_name = self._html_search_regex(r'<iframe src=".*?\?xml=(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename', None, False)
        if xml_name is None:
            # Fallback to the older format
            xml_name = self._html_search_regex(r'<iframe src=".*?\?xmlURL=xml/(?P<xml_file>.+?\.xml).*?".*?</iframe>', start_page, 'xml filename')

        xml_decription_url = xml_root + 'xml/' + xml_name
        xml_description = self._download_xml(xml_decription_url, video_id)

        video_title = xml_description.find('./metadata/title').text
        video_formats = self._parse_mp4(xml_description)
        if video_formats is None:
            video_formats = self._parse_flv(xml_description)

        return {
            'id': video_id,
            'title': video_title,
            'formats': video_formats,
        }

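The _login helper above is a plain form POST followed by a re-fetch of the page using the session cookie that the login response sets. A stripped-down sketch of the same round trip with only the standard library (the credentials here are placeholders):

    try:
        from urllib.parse import urlencode
        from urllib.request import build_opener, HTTPCookieProcessor
    except ImportError:  # Python 2
        from urllib import urlencode
        from urllib2 import build_opener, HTTPCookieProcessor

    # The cookie processor keeps the session cookie from the login response.
    opener = build_opener(HTTPCookieProcessor())
    login_form = urlencode({'email': 'user@example.com', 'password': 'hunter2'}).encode('ascii')
    opener.open('http://www.gdcvault.com/api/login.php', login_form)
    start_page = opener.open('http://www.gdcvault.com/play/1015683/Embracing-the-Dark-Art-of').read()
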
@ -12,9 +12,11 @@ from ..utils import (
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    compat_xml_parse_error,
    ExtractorError,
    HEADRequest,
    parse_xml,
    smuggle_url,
    unescapeHTML,
    unified_strdate,
@ -22,6 +24,8 @@ from ..utils import (
)
from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .smotri import SmotriIE


class GenericIE(InfoExtractor):
@ -31,9 +35,10 @@ class GenericIE(InfoExtractor):
    _TESTS = [
        {
            'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
            'file': '13601338388002.mp4',
            'md5': '6e15c93721d7ec9e9ca3fdbf07982cfd',
            'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
            'info_dict': {
                'id': '13601338388002',
                'ext': 'mp4',
                'uploader': 'www.hodiho.fr',
                'title': 'R\u00e9gis plante sa Jeep',
            }
@ -42,8 +47,9 @@ class GenericIE(InfoExtractor):
        {
            'add_ie': ['Bandcamp'],
            'url': 'http://bronyrock.com/track/the-pony-mash',
            'file': '3235767654.mp3',
            'info_dict': {
                'id': '3235767654',
                'ext': 'mp3',
                'title': 'The Pony Mash',
                'uploader': 'M_Pallante',
            },
@ -69,22 +75,34 @@ class GenericIE(InfoExtractor):
        {
            # https://github.com/rg3/youtube-dl/issues/2253
            'url': 'http://bcove.me/i6nfkrc3',
            'file': '3101154703001.mp4',
            'md5': '0ba9446db037002366bab3b3eb30c88c',
            'info_dict': {
                'id': '3101154703001',
                'ext': 'mp4',
                'title': 'Still no power',
                'uploader': 'thestar.com',
                'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
            },
            'add_ie': ['Brightcove'],
        },
        {
            'url': 'http://www.championat.com/video/football/v/87/87499.html',
            'md5': 'fb973ecf6e4a78a67453647444222983',
            'info_dict': {
                'id': '3414141473001',
                'ext': 'mp4',
                'title': 'Видео. Удаление Дзагоева (ЦСКА)',
                'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
                'uploader': 'Championat',
            },
        },
        # Direct link to a video
        {
            'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
            'file': 'trailer.mp4',
            'md5': '67d406c2bcb6af27fa886f31aa934bbe',
            'info_dict': {
                'id': 'trailer',
                'ext': 'mp4',
                'title': 'trailer',
                'upload_date': '20100513',
            }
@ -92,7 +110,6 @@ class GenericIE(InfoExtractor):
        # ooyala video
        {
            'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
            'file': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ.mp4',
            'md5': '5644c6ca5d5782c1d0d350dad9bd840c',
            'info_dict': {
                'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
@ -100,6 +117,138 @@ class GenericIE(InfoExtractor):
                'title': '2cc213299525360.mov',  # that's what we get
            },
        },
        # google redirect
        {
            'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
            'info_dict': {
                'id': 'cmQHVoWB5FY',
                'ext': 'mp4',
                'upload_date': '20130224',
                'uploader_id': 'TheVerge',
                'description': 'Chris Ziegler takes a look at the Alcatel OneTouch Fire and the ZTE Open; two of the first Firefox OS handsets to be officially announced.',
                'uploader': 'The Verge',
                'title': 'First Firefox OS phones side-by-side',
            },
            'params': {
                'skip_download': False,
            }
        },
        # embed.ly video
        {
            'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
            'info_dict': {
                'id': '9ODmcdjQcHQ',
                'ext': 'mp4',
                'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
                'upload_date': '20140225',
                'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
                'uploader': 'Tested',
                'uploader_id': 'testedcom',
            },
            # No need to test YoutubeIE here
            'params': {
                'skip_download': True,
            },
        },
        # funnyordie embed
        {
            'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
            'md5': '7cf780be104d40fea7bae52eed4a470e',
            'info_dict': {
                'id': '18e820ec3f',
                'ext': 'mp4',
                'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
                'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
            },
        },
        # RUTV embed
        {
            'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
            'info_dict': {
                'id': '776940',
                'ext': 'mp4',
                'title': 'Охотское море стало целиком российским',
                'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
            },
            'params': {
                # m3u8 download
                'skip_download': True,
            },
        },
        # Embedded TED video
        {
            'url': 'http://en.support.wordpress.com/videos/ted-talks/',
            'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
            'info_dict': {
                'id': '981',
                'ext': 'mp4',
                'title': 'My web playroom',
                'uploader': 'Ze Frank',
                'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
            }
        },
        # Embeded Ustream video
        {
            'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
            'md5': '27b99cdb639c9b12a79bca876a073417',
            'info_dict': {
                'id': '45734260',
                'ext': 'flv',
                'uploader': 'AU SPA: The NSA and Privacy',
                'title': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman'
            }
        },
        # nowvideo embed hidden behind percent encoding
        {
            'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
            'md5': '2baf4ddd70f697d94b1c18cf796d5107',
            'info_dict': {
                'id': '06e53103ca9aa',
                'ext': 'flv',
                'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
                'description': 'No description',
            },
        },
        # arte embed
        {
            'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
            'md5': '7653032cbb25bf6c80d80f217055fa43',
            'info_dict': {
                'id': '048195-004_PLUS7-F',
                'ext': 'flv',
                'title': 'X:enius',
                'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
                'upload_date': '20140320',
            },
            'params': {
                'skip_download': 'Requires rtmpdump'
            }
        },
        # smotri embed
        {
            'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
            'md5': 'ec40048448e9284c9a1de77bb188108b',
            'info_dict': {
                'id': 'v27008541fad',
                'ext': 'mp4',
                'title': 'Крым и Севастополь вошли в состав России',
                'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
                'duration': 900,
                'upload_date': '20140318',
                'uploader': 'rbctv_2012_4',
                'uploader_id': 'rbctv_2012_4',
            },
        },
        # Condé Nast embed
        {
            'url': 'http://www.wired.com/2014/04/honda-asimo/',
            'md5': 'ba0dfe966fa007657bd1443ee672db0f',
            'info_dict': {
                'id': '53501be369702d3275860000',
                'ext': 'mp4',
                'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
            }
        }
    ]

    def report_download_webpage(self, video_id):
@ -125,9 +274,14 @@ class GenericIE(InfoExtractor):
            newurl = newurl.replace(' ', '%20')
            newheaders = dict((k,v) for k,v in req.headers.items()
                              if k.lower() not in ("content-length", "content-type"))
            try:
                # This function was deprecated in python 3.3 and removed in 3.4
                origin_req_host = req.get_origin_req_host()
            except AttributeError:
                origin_req_host = req.origin_req_host
            return HEADRequest(newurl,
                               headers=newheaders,
                               origin_req_host=req.get_origin_req_host(),
                               origin_req_host=origin_req_host,
                               unverifiable=True)
        else:
            raise compat_urllib_error.HTTPError(req.get_full_url(), code, msg, headers, fp)
@ -159,23 +313,45 @@ class GenericIE(InfoExtractor):
            raise ExtractorError('Invalid URL protocol')
        return response

    def _extract_rss(self, url, video_id, doc):
        playlist_title = doc.find('./channel/title').text
        playlist_desc_el = doc.find('./channel/description')
        playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text

        entries = [{
            '_type': 'url',
            'url': e.find('link').text,
            'title': e.find('title').text,
        } for e in doc.findall('./channel/item')]

        return {
            '_type': 'playlist',
            'id': url,
            'title': playlist_title,
            'description': playlist_desc,
            'entries': entries,
        }

    def _real_extract(self, url):
        parsed_url = compat_urlparse.urlparse(url)
        if not parsed_url.scheme:
            default_search = self._downloader.params.get('default_search')
            if default_search is None:
                default_search = 'auto'
                default_search = 'auto_warning'

            if default_search == 'auto':
            if default_search in ('auto', 'auto_warning'):
                if '/' in url:
                    self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
                    return self.url_result('http://' + url)
                else:
                    if default_search == 'auto_warning':
                        self._downloader.report_warning(
                            'Falling back to youtube search for %s . Set --default-search to "auto" to suppress this warning.' % url)
                    return self.url_result('ytsearch:' + url)
            else:
                assert ':' in default_search
                return self.url_result(default_search + url)
        video_id = os.path.splitext(url.split('/')[-1])[0]
        video_id = os.path.splitext(url.rstrip('/').split('/')[-1])[0]

        self.to_screen('%s: Requesting header' % video_id)

@ -219,6 +395,19 @@ class GenericIE(InfoExtractor):

        self.report_extraction(video_id)

        # Is it an RSS feed?
        try:
            doc = parse_xml(webpage)
            if doc.tag == 'rss':
                return self._extract_rss(url, video_id, doc)
        except compat_xml_parse_error:
            pass

        # Sometimes embedded video player is hidden behind percent encoding
        # (e.g. https://github.com/rg3/youtube-dl/issues/2448)
        # Unescaping the whole page allows to handle those cases in a generic way
        webpage = compat_urllib_parse.unquote(webpage)

        # it's tempting to parse this further, but you would
        # have to take into account all the variations like
        #   Video Title - Site Name
@ -252,9 +441,9 @@ class GenericIE(InfoExtractor):

        # Look for embedded (iframe) Vimeo player
        mobj = re.search(
            r'<iframe[^>]+?src="((?:https?:)?//player\.vimeo\.com/video/.+?)"', webpage)
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//player\.vimeo\.com/video/.+?)\1', webpage)
        if mobj:
            player_url = unescapeHTML(mobj.group(1))
            player_url = unescapeHTML(mobj.group('url'))
            surl = smuggle_url(player_url, {'Referer': url})
            return self.url_result(surl, 'Vimeo')

@ -306,6 +495,22 @@ class GenericIE(InfoExtractor):
        if mobj:
            return self.url_result(mobj.group(1), 'BlipTV')

        # Look for embedded condenast player
        matches = re.findall(
            r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
            webpage)
        if matches:
            return {
                '_type': 'playlist',
                'entries': [{
                    '_type': 'url',
                    'ie_key': 'CondeNast',
                    'url': ma,
                } for ma in matches],
                'title': video_title,
                'id': video_id,
            }

        # Look for Bandcamp pages with custom domain
        mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
        if mobj is not None:
@ -320,12 +525,13 @@ class GenericIE(InfoExtractor):
            return self.url_result(mobj.group('url'))

        # Look for Ooyala videos
        mobj = re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=([^"&]+)', webpage)
        mobj = (re.search(r'player.ooyala.com/[^"?]+\?[^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
                re.search(r'OO.Player.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage))
        if mobj is not None:
            return OoyalaIE._build_url_result(mobj.group(1))
            return OoyalaIE._build_url_result(mobj.group('ec'))

        # Look for Aparat videos
        mobj = re.search(r'<iframe src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
        mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Aparat')

@ -334,11 +540,18 @@ class GenericIE(InfoExtractor):
        if mobj is not None:
            return self.url_result(mobj.group(1), 'Mpora')

        # Look for embedded Novamov player
        # Look for embedded NovaMov-based player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://(?:(?:embed|www)\.)?novamov\.com/embed\.php.+?)\1', webpage)
            r'''(?x)<iframe[^>]+?src=(["\'])
                    (?P<url>http://(?:(?:embed|www)\.)?
                        (?:novamov\.com|
                           nowvideo\.(?:ch|sx|eu|at|ag|co)|
                           videoweed\.(?:es|com)|
                           movshare\.(?:net|sx|ag)|
                           divxstage\.(?:eu|net|ch|co|at|ag))
                        /embed\.php.+?)\1''', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Novamov')
            return self.url_result(mobj.group('url'))

        # Look for embedded Facebook player
        mobj = re.search(
@ -346,12 +559,62 @@ class GenericIE(InfoExtractor):
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Facebook')

        # Look for embedded VK player
        mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'VK')

        # Look for embedded Huffington Post player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'HuffPost')

        # Look for embed.ly
        mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'))
        mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
        if mobj is not None:
            return self.url_result(compat_urllib_parse.unquote(mobj.group('url')))

        # Look for funnyordie embed
        matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
        if matches:
            urlrs = [self.url_result(unescapeHTML(eurl), 'FunnyOrDie')
                     for eurl in matches]
            return self.playlist_result(
                urlrs, playlist_id=video_id, playlist_title=video_title)

        # Look for embedded RUTV player
        rutv_url = RUTVIE._extract_url(webpage)
        if rutv_url:
            return self.url_result(rutv_url, 'RUTV')

        # Look for embedded TED player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://embed\.ted\.com/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'TED')

        # Look for embedded Ustream videos
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Ustream')

        # Look for embedded arte.tv player
        mobj = re.search(
            r'<script [^>]*?src="(?P<url>http://www\.arte\.tv/playerv2/embed[^"]+)"',
            webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'ArteTVEmbed')

        # Look for embedded smotri.com player
        smotri_url = SmotriIE._extract_url(webpage)
        if smotri_url:
            return self.url_result(smotri_url, 'Smotri')

        # Start with something easy: JW Player in SWFObject
        mobj = re.search(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
        if mobj is None:
@ -363,6 +626,7 @@ class GenericIE(InfoExtractor):
        if mobj is None:
            # Broaden the search a little bit: JWPlayer JS loader
            mobj = re.search(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)

        if mobj is None:
            # Try to find twitter cards info
            mobj = re.search(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
@ -376,6 +640,18 @@ class GenericIE(InfoExtractor):
        if mobj is None:
            # HTML5 video
            mobj = re.search(r'<video[^<]*(?:>.*?<source.*?)? src="([^"]+)"', webpage, flags=re.DOTALL)
        if mobj is None:
            mobj = re.search(
                r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
                r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
                webpage)
            if mobj:
                new_url = mobj.group(1)
                self.report_following_redirect(new_url)
                return {
                    '_type': 'url',
                    'url': new_url,
                }
        if mobj is None:
            raise ExtractorError('Unsupported URL: %s' % url)

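The new _extract_rss path above turns any RSS feed into a flat playlist of url results. A standalone sketch of the same mapping with plain ElementTree (the feed contents are invented for illustration):

    import xml.etree.ElementTree as ET

    rss = '''<rss><channel>
      <title>Example feed</title>
      <item><title>Episode 1</title><link>http://example.com/ep1</link></item>
      <item><title>Episode 2</title><link>http://example.com/ep2</link></item>
    </channel></rss>'''

    doc = ET.fromstring(rss)
    entries = [{
        '_type': 'url',
        'url': item.find('link').text,
        'title': item.find('title').text,
    } for item in doc.findall('./channel/item')]
    # Each entry is later re-dispatched through the extractors like any other URL.
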
@ -1,4 +1,5 @@
# coding: utf-8
from __future__ import unicode_literals

import datetime
import re
@ -10,32 +11,28 @@ from ..utils import (


class GooglePlusIE(InfoExtractor):
    IE_DESC = u'Google Plus'
    _VALID_URL = r'(?:https://)?plus\.google\.com/(?:[^/]+/)*?posts/(\w+)'
    IE_NAME = u'plus.google'
    IE_DESC = 'Google Plus'
    _VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
    IE_NAME = 'plus.google'
    _TEST = {
        u"url": u"https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
        u"file": u"ZButuJc6CtH.flv",
        u"info_dict": {
            u"upload_date": u"20120613",
            u"uploader": u"井上ヨシマサ",
            u"title": u"嘆きの天使 降臨"
        'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
        'info_dict': {
            'id': 'ZButuJc6CtH',
            'ext': 'flv',
            'upload_date': '20120613',
            'uploader': '井上ヨシマサ',
            'title': '嘆きの天使 降臨',
        }
    }

    def _real_extract(self, url):
        # Extract id from URL
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        post_url = mobj.group(0)
        video_id = mobj.group(1)

        video_extension = 'flv'
        video_id = mobj.group('id')

        # Step 1, Retrieve post webpage to extract further information
        webpage = self._download_webpage(post_url, video_id, u'Downloading entry webpage')
        webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')

        self.report_extraction(video_id)

@ -43,7 +40,7 @@ class GooglePlusIE(InfoExtractor):
        upload_date = self._html_search_regex(
            r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                    ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
            webpage, u'upload date', fatal=False, flags=re.VERBOSE)
            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
        if upload_date:
            # Convert timestring to a format suitable for filename
            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
@ -51,28 +48,27 @@ class GooglePlusIE(InfoExtractor):

        # Extract uploader
        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
            webpage, u'uploader', fatal=False)
            webpage, 'uploader', fatal=False)

        # Extract title
        # Get the first line for title
        video_title = self._html_search_regex(r'<meta name\=\"Description\" content\=\"(.*?)[\n<"]',
            webpage, 'title', default=u'NA')
            webpage, 'title', default='NA')

        # Step 2, Simulate clicking the image box to launch video
        DOMAIN = 'https://plus.google.com/'
        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
            webpage, u'video page URL')
            webpage, 'video page URL')
        if not video_page.startswith(DOMAIN):
            video_page = DOMAIN + video_page

        webpage = self._download_webpage(video_page, video_id, u'Downloading video page')
        webpage = self._download_webpage(video_page, video_id, 'Downloading video page')

        # Extract video links on video page
        """Extract video links of all sizes"""
        # Extract video links all sizes
        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
        mobj = re.findall(pattern, webpage)
        if len(mobj) == 0:
            raise ExtractorError(u'Unable to extract video links')
            raise ExtractorError('Unable to extract video links')

        # Sort in resolution
        links = sorted(mobj)
@ -87,12 +83,11 @@ class GooglePlusIE(InfoExtractor):
        except AttributeError:  # Python 3
            video_url = bytes(video_url, 'ascii').decode('unicode-escape')

        return [{
            'id': video_id,
            'url': video_url,
        return {
            'id': video_id,
            'url': video_url,
            'uploader': uploader,
            'upload_date': upload_date,
            'title': video_title,
            'ext': video_extension,
        }]
            'upload_date': upload_date,
            'title': video_title,
            'ext': 'flv',
        }

@ -46,6 +46,6 @@ class GoogleSearchIE(SearchInfoExtractor):
                'url': mobj.group(1)
            })

        if (len(entries) >= n) or not re.search(r'class="pn" id="pnnext"', webpage):
        if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
            res['entries'] = entries[:n]
            return res

62  youtube_dl/extractor/helsinki.py  Normal file
@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-

from __future__ import unicode_literals

import re

from .common import InfoExtractor


class HelsinkiIE(InfoExtractor):
    IE_DESC = 'helsinki.fi'
    _VALID_URL = r'https?://video\.helsinki\.fi/Arkisto/flash\.php\?id=(?P<id>\d+)'
    _TEST = {
        'url': 'http://video.helsinki.fi/Arkisto/flash.php?id=20258',
        'info_dict': {
            'id': '20258',
            'ext': 'mp4',
            'title': 'Tietotekniikkafoorumi-iltapäivä',
            'description': 'md5:f5c904224d43c133225130fe156a5ee0',
        },
        'params': {
            'skip_download': True,  # RTMP
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)
        formats = []

        mobj = re.search(r'file=((\w+):[^&]+)', webpage)
        if mobj:
            formats.append({
                'ext': mobj.group(2),
                'play_path': mobj.group(1),
                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
                'player_url': 'http://video.helsinki.fi/player.swf',
                'format_note': 'sd',
                'quality': 0,
            })

        mobj = re.search(r'hd\.file=((\w+):[^&]+)', webpage)
        if mobj:
            formats.append({
                'ext': mobj.group(2),
                'play_path': mobj.group(1),
                'url': 'rtmp://flashvideo.it.helsinki.fi/vod/',
                'player_url': 'http://video.helsinki.fi/player.swf',
                'format_note': 'hd',
                'quality': 1,
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage).replace('Video: ', ''),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }

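The two RTMP variants above rely on _sort_formats and the relative 'quality' keys (0 for SD, 1 for HD) to order the list from worst to best. A minimal illustration of that ordering rule (simplified; the real sorter weighs many more fields):

    formats = [
        {'format_note': 'hd', 'quality': 1},
        {'format_note': 'sd', 'quality': 0},
    ]
    # youtube-dl keeps formats sorted ascending, so the best format comes last.
    formats.sort(key=lambda f: f['quality'])
    print([f['format_note'] for f in formats])  # ['sd', 'hd']
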
@ -1,17 +1,20 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class HowcastIE(InfoExtractor):
    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _TEST = {
        u'url': u'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        u'file': u'390161.mp4',
        u'md5': u'8b743df908c42f60cf6496586c7f12c3',
        u'info_dict': {
            u"description": u"The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here's the proper way to tie a square knot.",
            u"title": u"How to Tie a Square Knot Properly"
        'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        'md5': '8b743df908c42f60cf6496586c7f12c3',
        'info_dict': {
            'id': '390161',
            'ext': 'mp4',
            'description': 'The square knot, also known as the reef knot, is one of the oldest, most basic knots to tie, and can be used in many different ways. Here\'s the proper way to tie a square knot.',
            'title': 'How to Tie a Square Knot Properly',
        }
    }

@ -24,22 +27,15 @@ class HowcastIE(InfoExtractor):
        self.report_extraction(video_id)

        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
            webpage, u'title')
            webpage, 'video URL')

        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
            webpage, u'description', fatal=False)
            webpage, 'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
        return {
            'id': video_id,
            'url': video_url,
            'title': self._og_search_title(webpage),
            'description': video_description,
            'thumbnail': thumbnail,
        }]
            'thumbnail': self._og_search_thumbnail(webpage),
        }

@ -21,9 +21,10 @@ class HuffPostIE(InfoExtractor):

    _TEST = {
        'url': 'http://live.huffingtonpost.com/r/segment/legalese-it/52dd3e4b02a7602131000677',
        'file': '52dd3e4b02a7602131000677.mp4',
        'md5': '55f5e8981c1c80a64706a44b74833de8',
        'info_dict': {
            'id': '52dd3e4b02a7602131000677',
            'ext': 'mp4',
            'title': 'Legalese It! with @MikeSacksHP',
            'description': 'This week on Legalese It, Mike talks to David Bosco about his new book on the ICC, "Rough Justice," he also discusses the Virginia AG\'s historic stance on gay marriage, the execution of Edgar Tamayo, the ICC\'s delay of Kenya\'s President and more. ',
            'duration': 1549,

@ -1,10 +1,8 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
)
|
||||
|
||||
|
||||
class IGNIE(InfoExtractor):
|
||||
@ -14,52 +12,57 @@ class IGNIE(InfoExtractor):
|
||||
"""
|
||||
|
||||
_VALID_URL = r'https?://.+?\.ign\.com/(?P<type>videos|show_videos|articles|(?:[^/]*/feature))(/.+)?/(?P<name_or_id>.+)'
|
||||
IE_NAME = u'ign.com'
|
youtube_dl/extractor/ign.py
     IE_NAME = 'ign.com'
 
     _CONFIG_URL_TEMPLATE = 'http://www.ign.com/videos/configs/id/%s.config'
-    _DESCRIPTION_RE = [r'<span class="page-object-description">(.+?)</span>',
-        r'id="my_show_video">.*?<p>(.*?)</p>',
-    ]
+    _DESCRIPTION_RE = [
+        r'<span class="page-object-description">(.+?)</span>',
+        r'id="my_show_video">.*?<p>(.*?)</p>',
+    ]
 
     _TESTS = [
         {
-            u'url': u'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
-            u'file': u'8f862beef863986b2785559b9e1aa599.mp4',
-            u'md5': u'eac8bdc1890980122c3b66f14bdd02e9',
-            u'info_dict': {
-                u'title': u'The Last of Us Review',
-                u'description': u'md5:c8946d4260a4d43a00d5ae8ed998870c',
+            'url': 'http://www.ign.com/videos/2013/06/05/the-last-of-us-review',
+            'md5': 'eac8bdc1890980122c3b66f14bdd02e9',
+            'info_dict': {
+                'id': '8f862beef863986b2785559b9e1aa599',
+                'ext': 'mp4',
+                'title': 'The Last of Us Review',
+                'description': 'md5:c8946d4260a4d43a00d5ae8ed998870c',
             }
         },
         {
-            u'url': u'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
-            u'playlist': [
+            'url': 'http://me.ign.com/en/feature/15775/100-little-things-in-gta-5-that-will-blow-your-mind',
+            'playlist': [
                 {
-                    u'file': u'5ebbd138523268b93c9141af17bec937.mp4',
-                    u'info_dict': {
-                        u'title': u'GTA 5 Video Review',
-                        u'description': u'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
+                    'info_dict': {
+                        'id': '5ebbd138523268b93c9141af17bec937',
+                        'ext': 'mp4',
+                        'title': 'GTA 5 Video Review',
+                        'description': 'Rockstar drops the mic on this generation of games. Watch our review of the masterly Grand Theft Auto V.',
                     },
                 },
                 {
-                    u'file': u'638672ee848ae4ff108df2a296418ee2.mp4',
-                    u'info_dict': {
-                        u'title': u'26 Twisted Moments from GTA 5 in Slow Motion',
-                        u'description': u'The twisted beauty of GTA 5 in stunning slow motion.',
+                    'info_dict': {
+                        'id': '638672ee848ae4ff108df2a296418ee2',
+                        'ext': 'mp4',
+                        'title': '26 Twisted Moments from GTA 5 in Slow Motion',
+                        'description': 'The twisted beauty of GTA 5 in stunning slow motion.',
                     },
                 },
             ],
-            u'params': {
-                u'skip_download': True,
+            'params': {
+                'skip_download': True,
             },
         },
     ]
 
     def _find_video_id(self, webpage):
-        res_id = [r'data-video-id="(.+?)"',
-            r'<object id="vid_(.+?)"',
-            r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
-        ]
+        res_id = [
+            r'data-video-id="(.+?)"',
+            r'<object id="vid_(.+?)"',
+            r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',
+        ]
         return self._search_regex(res_id, webpage, 'video id')
 
     def _real_extract(self, url):
@@ -68,7 +71,7 @@ class IGNIE(InfoExtractor):
         page_type = mobj.group('type')
         webpage = self._download_webpage(url, name_or_id)
         if page_type == 'articles':
-            video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, u'video url')
+            video_url = self._search_regex(r'var videoUrl = "(.+?)"', webpage, 'video url')
             return self.url_result(video_url, ie='IGN')
         elif page_type != 'video':
             multiple_urls = re.findall(
@@ -80,50 +83,42 @@ class IGNIE(InfoExtractor):
         video_id = self._find_video_id(webpage)
         result = self._get_video_info(video_id)
         description = self._html_search_regex(self._DESCRIPTION_RE,
-                                              webpage, 'video description',
-                                              flags=re.DOTALL)
+            webpage, 'video description', flags=re.DOTALL)
         result['description'] = description
         return result
 
     def _get_video_info(self, video_id):
         config_url = self._CONFIG_URL_TEMPLATE % video_id
-        config = json.loads(self._download_webpage(config_url, video_id,
-            u'Downloading video info'))
+        config = self._download_json(config_url, video_id)
         media = config['playlist']['media']
-        video_url = media['url']
 
-        return {'id': media['metadata']['videoId'],
-                'url': video_url,
-                'ext': determine_ext(video_url),
-                'title': media['metadata']['title'],
-                'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
-                }
+        return {
+            'id': media['metadata']['videoId'],
+            'url': media['url'],
+            'title': media['metadata']['title'],
+            'thumbnail': media['poster'][0]['url'].replace('{size}', 'grande'),
+        }
 
 
 class OneUPIE(IGNIE):
     """Extractor for 1up.com, it uses the ign videos system."""
 
     _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
     IE_NAME = '1up.com'
 
     _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
 
-    _TEST = {
-        u'url': u'http://gamevideos.1up.com/video/id/34976',
-        u'file': u'34976.mp4',
-        u'md5': u'68a54ce4ebc772e4b71e3123d413163d',
-        u'info_dict': {
-            u'title': u'Sniper Elite V2 - Trailer',
-            u'description': u'md5:5d289b722f5a6d940ca3136e9dae89cf',
+    _TESTS = [{
+        'url': 'http://gamevideos.1up.com/video/id/34976',
+        'md5': '68a54ce4ebc772e4b71e3123d413163d',
+        'info_dict': {
+            'id': '34976',
+            'ext': 'mp4',
+            'title': 'Sniper Elite V2 - Trailer',
+            'description': 'md5:5d289b722f5a6d940ca3136e9dae89cf',
         }
-    }
-
-    # Override IGN tests
-    _TESTS = []
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        id = mobj.group('name_or_id')
         result = super(OneUPIE, self)._real_extract(url)
-        result['id'] = id
+        result['id'] = mobj.group('name_or_id')
        return result
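Note on the `_get_video_info` hunk: `self._download_json(config_url, video_id)` folds the old `json.loads(self._download_webpage(...))` pairing into a single call. A rough standalone sketch of what such a helper abstracts; `fetch_json` and its logging are illustrative stand-ins, not youtube-dl's actual implementation:

import json

try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen  # Python 2


def fetch_json(url, video_id, note='Downloading JSON metadata'):
    # Stand-in for InfoExtractor._download_json: download, decode and
    # parse in one step, logging which video the request belongs to.
    print('[%s] %s' % (video_id, note))
    data = urlopen(url).read().decode('utf-8')
    return json.loads(data)

# The hunk above then reduces to:
#   config = fetch_json(config_url, video_id)
#   media = config['playlist']['media']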
youtube_dl/extractor/infoq.py
@@ -11,16 +11,15 @@ from ..utils import (
 class InfoQIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?infoq\.com/[^/]+/(?P<id>[^/]+)$'
 
     _TEST = {
-        "name": "InfoQ",
-        "url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
-        "file": "12-jan-pythonthings.mp4",
-        "info_dict": {
-            "description": "Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.",
-            "title": "A Few of My Favorite [Python] Things",
-        },
-        "params": {
-            "skip_download": True,
+        'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
+        'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
+        'info_dict': {
+            'id': '12-jan-pythonthings',
+            'ext': 'mp4',
+            'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
+            'title': 'A Few of My Favorite [Python] Things',
         },
     }
 
@@ -30,26 +29,39 @@ class InfoQIE(InfoExtractor):
 
         webpage = self._download_webpage(url, video_id)
 
+        video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
+        video_description = self._html_search_meta('description', webpage, 'description')
+
-        # The server URL is hardcoded
-        video_url = 'rtmpe://video.infoq.com/cfx/st/'
-
         # Extract video URL
-        encoded_id = self._search_regex(r"jsclassref ?= ?'([^']*)'", webpage, 'encoded id')
+        encoded_id = self._search_regex(
+            r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
         real_id = compat_urllib_parse.unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
+        video_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
+        playpath = 'mp4:' + real_id
 
-        # Extract title
-        video_title = self._search_regex(r'contentTitle = "(.*?)";',
-                                         webpage, 'title')
-
-        # Extract description
-        video_description = self._html_search_regex(r'<meta name="description" content="(.*)"(?:\s*/)?>',
-                                                    webpage, 'description', fatal=False)
-
-        video_filename = video_url.split('/')[-1]
+        video_filename = playpath.split('/')[-1]
         video_id, extension = video_filename.split('.')
 
+        http_base = self._search_regex(
+            r'EXPRESSINSTALL_SWF\s*=\s*"(https?://[^/"]+/)', webpage,
+            'HTTP base URL')
+
+        formats = [{
+            'format_id': 'rtmp',
+            'url': video_url,
+            'ext': extension,
+            'play_path': playpath,
+        }, {
+            'format_id': 'http',
+            'url': http_base + real_id,
+        }]
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'url': video_url,
             'title': video_title,
-            'ext': extension,  # Extension is always(?) mp4, but seems to be flv
             'description': video_description,
+            'formats': formats,
         }
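Note: the rewritten InfoQ extractor derives both the RTMP play path and an HTTP mirror URL from a base64-encoded id embedded in the page. A self-contained sketch of that decoding chain; the sample payload below is made up purely to show the shape of the result:

import base64

try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote  # Python 2


def decode_infoq_id(encoded_id):
    # Same chain as the hunk above: base64 -> UTF-8 -> URL-unquote.
    return unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))

# Round-trip with a made-up payload:
encoded = base64.b64encode(b'presentations%2F12-jan-pythonthings.mp4').decode('ascii')
real_id = decode_infoq_id(encoded)  # 'presentations/12-jan-pythonthings.mp4'
rtmp_url = 'rtmpe://video.infoq.com/cfx/st/' + real_id
play_path = 'mp4:' + real_id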
youtube_dl/extractor/instagram.py
@@ -1,35 +1,107 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor
+from ..utils import (
+    int_or_none,
+)
 
 
 class InstagramIE(InfoExtractor):
-    _VALID_URL = r'(?:http://)?instagram\.com/p/(.*?)/'
+    _VALID_URL = r'http://instagram\.com/p/(?P<id>.*?)/'
     _TEST = {
-        u'url': u'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
-        u'file': u'aye83DjauH.mp4',
-        u'md5': u'0d2da106a9d2631273e192b372806516',
-        u'info_dict': {
-            u"uploader_id": u"naomipq",
-            u"title": u"Video by naomipq",
-            u'description': u'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
+        'url': 'http://instagram.com/p/aye83DjauH/?foo=bar#abc',
+        'md5': '0d2da106a9d2631273e192b372806516',
+        'info_dict': {
+            'id': 'aye83DjauH',
+            'ext': 'mp4',
+            'uploader_id': 'naomipq',
+            'title': 'Video by naomipq',
+            'description': 'md5:1f17f0ab29bd6fe2bfad705f58de3cb8',
         }
     }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
+        video_id = mobj.group('id')
         webpage = self._download_webpage(url, video_id)
         uploader_id = self._search_regex(r'"owner":{"username":"(.+?)"',
-                                         webpage, u'uploader id', fatal=False)
-        desc = self._search_regex(r'"caption":"(.*?)"', webpage, u'description',
+                                         webpage, 'uploader id', fatal=False)
+        desc = self._search_regex(r'"caption":"(.*?)"', webpage, 'description',
             fatal=False)
 
-        return [{
-            'id': video_id,
-            'url': self._og_search_video_url(webpage, secure=False),
-            'ext': 'mp4',
-            'title': u'Video by %s' % uploader_id,
+        return {
+            'id': video_id,
+            'url': self._og_search_video_url(webpage, secure=False),
+            'ext': 'mp4',
+            'title': 'Video by %s' % uploader_id,
             'thumbnail': self._og_search_thumbnail(webpage),
-            'uploader_id' : uploader_id,
+            'uploader_id': uploader_id,
             'description': desc,
-        }]
+        }
+
+
+class InstagramUserIE(InfoExtractor):
+    _VALID_URL = r'http://instagram\.com/(?P<username>[^/]{2,})/?(?:$|[?#])'
+    IE_DESC = 'Instagram user profile'
+    IE_NAME = 'instagram:user'
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        uploader_id = mobj.group('username')
+
+        entries = []
+        page_count = 0
+        media_url = 'http://instagram.com/%s/media' % uploader_id
+        while True:
+            page = self._download_json(
+                media_url, uploader_id,
+                note='Downloading page %d ' % (page_count + 1),
+            )
+            page_count += 1
+
+            for it in page['items']:
+                if it.get('type') != 'video':
+                    continue
+                like_count = int_or_none(it.get('likes', {}).get('count'))
+                user = it.get('user', {})
+
+                formats = [{
+                    'format_id': k,
+                    'height': v.get('height'),
+                    'width': v.get('width'),
+                    'url': v['url'],
+                } for k, v in it['videos'].items()]
+                self._sort_formats(formats)
+
+                thumbnails_el = it.get('images', {})
+                thumbnail = thumbnails_el.get('thumbnail', {}).get('url')
+
+                title = it.get('caption', {}).get('text', it['id'])
+
+                entries.append({
+                    'id': it['id'],
+                    'title': title,
+                    'formats': formats,
+                    'thumbnail': thumbnail,
+                    'webpage_url': it.get('link'),
+                    'uploader': user.get('full_name'),
+                    'uploader_id': user.get('username'),
+                    'like_count': like_count,
+                    'timestamp': int_or_none(it.get('created_time')),
+                })
+
+            if not page['items']:
+                break
+            max_id = page['items'][-1]['id']
+            media_url = (
+                'http://instagram.com/%s/media?max_id=%s' % (
+                    uploader_id, max_id))
+
+        return {
+            '_type': 'playlist',
+            'entries': entries,
+            'id': uploader_id,
+            'title': uploader_id,
+        }
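Note: the new `InstagramUserIE` pages through the profile's `/media` endpoint, using the last item's id as a `max_id` cursor until an empty page comes back. A generator sketch of that pagination scheme; `fetch_page` is a placeholder for the JSON download, not youtube-dl's API:

def iter_user_videos(uploader_id, fetch_page):
    # fetch_page(url) -> parsed JSON dict with an 'items' list.
    # Follows the same cursor scheme as the loop above: request pages
    # until one is empty, passing the last item's id as max_id.
    url = 'http://instagram.com/%s/media' % uploader_id
    while True:
        items = fetch_page(url).get('items') or []
        if not items:
            return
        for item in items:
            if item.get('type') == 'video':
                yield item
        url = 'http://instagram.com/%s/media?max_id=%s' % (
            uploader_id, items[-1]['id'])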
youtube_dl/extractor/iprima.py
@@ -6,11 +6,14 @@ from random import random
 from math import floor
 
 from .common import InfoExtractor
-from ..utils import compat_urllib_request
+from ..utils import (
+    compat_urllib_request,
+    ExtractorError,
+)
 
 
 class IPrimaIE(InfoExtractor):
-    _VALID_URL = r'https?://play\.iprima\.cz/(?P<videogroup>.+)/(?P<videoid>.+)'
+    _VALID_URL = r'https?://play\.iprima\.cz/[^?#]+/(?P<id>[^?#]+)'
 
     _TESTS = [{
         'url': 'http://play.iprima.cz/particka/particka-92',
@@ -22,20 +25,37 @@ class IPrimaIE(InfoExtractor):
             'thumbnail': 'http://play.iprima.cz/sites/default/files/image_crops/image_620x349/3/491483_particka-92_image_620x349.jpg',
         },
         'params': {
-            'skip_download': True,
+            'skip_download': True,  # requires rtmpdump
         },
-    },
-    ]
+    }, {
+        'url': 'http://play.iprima.cz/particka/tchibo-particka-jarni-moda',
+        'info_dict': {
+            'id': '9718337',
+            'ext': 'flv',
+            'title': 'Tchibo Partička - Jarní móda',
+            'description': 'md5:589f8f59f414220621ff8882eb3ce7be',
+            'thumbnail': 're:^http:.*\.jpg$',
+        },
+        'params': {
+            'skip_download': True,  # requires rtmpdump
+        },
+        'skip': 'Do not have permission to access this page',
+    }]
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('videoid')
+        video_id = mobj.group('id')
 
         webpage = self._download_webpage(url, video_id)
 
-        player_url = 'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' % (
-            floor(random()*1073741824),
-            floor(random()*1073741824))
+        if re.search(r'Nemáte oprávnění přistupovat na tuto stránku\.\s*</div>', webpage):
+            raise ExtractorError(
+                '%s said: You do not have permission to access this page' % self.IE_NAME, expected=True)
+
+        player_url = (
+            'http://embed.livebox.cz/iprimaplay/player-embed-v2.js?__tok%s__=%s' %
+            (floor(random()*1073741824), floor(random()*1073741824))
+        )
 
         req = compat_urllib_request.Request(player_url)
         req.add_header('Referer', url)
@@ -44,18 +64,20 @@ class IPrimaIE(InfoExtractor):
         base_url = ''.join(re.findall(r"embed\['stream'\] = '(.+?)'.+'(\?auth=)'.+'(.+?)';", playerpage)[1])
 
         zoneGEO = self._html_search_regex(r'"zoneGEO":(.+?),', webpage, 'zoneGEO')
 
         if zoneGEO != '0':
-            base_url = base_url.replace('token', 'token_'+zoneGEO)
+            base_url = base_url.replace('token', 'token_' + zoneGEO)
 
         formats = []
         for format_id in ['lq', 'hq', 'hd']:
-            filename = self._html_search_regex(r'"%s_id":(.+?),' % format_id, webpage, 'filename')
+            filename = self._html_search_regex(
+                r'"%s_id":(.+?),' % format_id, webpage, 'filename')
 
             if filename == 'null':
                 continue
 
-            real_id = self._search_regex(r'Prima-[0-9]{10}-([0-9]+)_', filename, 'real video id')
+            real_id = self._search_regex(
+                r'Prima-(?:[0-9]{10}|WEB)-([0-9]+)[-_]',
+                filename, 'real video id')
 
             if format_id == 'lq':
                 quality = 0
@@ -63,13 +85,13 @@ class IPrimaIE(InfoExtractor):
                 quality = 1
             elif format_id == 'hd':
                 quality = 2
-                filename = 'hq/'+filename
+                filename = 'hq/' + filename
 
             formats.append({
                 'format_id': format_id,
                 'url': base_url,
                 'quality': quality,
-                'play_path': 'mp4:'+filename.replace('"', '')[:-4],
+                'play_path': 'mp4:' + filename.replace('"', '')[:-4],
                 'rtmp_live': True,
                 'ext': 'flv',
             })
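Note: the IPrima loop maps the page's 'lq'/'hq'/'hd' ids onto ascending `quality` values so `_sort_formats` can rank them. A compact sketch of the same idea with a dict lookup instead of the if/elif chain; `renditions` is an assumed pre-scraped mapping, not something the extractor actually builds:

QUALITY = {'lq': 0, 'hq': 1, 'hd': 2}  # ascending, worst to best

def build_formats(renditions, base_url):
    # renditions: {format_id: filename} pairs scraped from the player page.
    formats = []
    for format_id in ('lq', 'hq', 'hd'):
        filename = renditions.get(format_id)
        if filename is None or filename == 'null':
            continue  # the page uses the string 'null' for missing renditions
        if format_id == 'hd':
            filename = 'hq/' + filename
        formats.append({
            'format_id': format_id,
            'url': base_url,
            'quality': QUALITY[format_id],
            'play_path': 'mp4:' + filename.replace('"', '')[:-4],
            'rtmp_live': True,
            'ext': 'flv',
        })
    # sorted ascending, so the best format comes last (youtube-dl's convention)
    return sorted(formats, key=lambda f: f['quality'])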
youtube_dl/extractor/ivi.py
@@ -14,15 +14,16 @@ from ..utils import (
 class IviIE(InfoExtractor):
     IE_DESC = 'ivi.ru'
     IE_NAME = 'ivi'
-    _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch(?:/(?P<compilationid>[^/]+))?/(?P<videoid>\d+)'
 
     _TESTS = [
         # Single movie
         {
             'url': 'http://www.ivi.ru/watch/53141',
-            'file': '53141.mp4',
             'md5': '6ff5be2254e796ed346251d117196cf4',
             'info_dict': {
+                'id': '53141',
+                'ext': 'mp4',
                 'title': 'Иван Васильевич меняет профессию',
                 'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
                 'duration': 5498,
@@ -33,9 +34,10 @@ class IviIE(InfoExtractor):
         # Serial's serie
         {
             'url': 'http://www.ivi.ru/watch/dezhurnyi_angel/74791',
-            'file': '74791.mp4',
             'md5': '3e6cc9a848c1d2ebcc6476444967baa9',
             'info_dict': {
+                'id': '74791',
+                'ext': 'mp4',
                 'title': 'Дежурный ангел - 1 серия',
                 'duration': 2490,
                 'thumbnail': 'http://thumbs.ivi.ru/f7.vcp.digitalaccess.ru/contents/8/e/bc2f6c2b6e5d291152fdd32c059141.jpg',
@@ -124,7 +126,7 @@ class IviIE(InfoExtractor):
 class IviCompilationIE(InfoExtractor):
     IE_DESC = 'ivi.ru compilations'
     IE_NAME = 'ivi:compilation'
-    _VALID_URL = r'^https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
+    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
 
     def _extract_entries(self, html, compilation_id):
         return [self.url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), 'Ivi')
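Note: `_extract_entries` turns every series id found in a compilation page into a `url_result`, re-dispatching each one to the single-video Ivi extractor. A sketch of that delegation pattern, with a plain-dict stand-in for youtube-dl's helper:

def url_result(url, ie=None):
    # Minimal stand-in for InfoExtractor.url_result: a result dict that
    # tells the framework to hand this URL to another extractor.
    result = {'_type': 'url', 'url': url}
    if ie is not None:
        result['ie_key'] = ie
    return result

def extract_entries(series_ids, compilation_id):
    # One playlist entry per series id scraped from the compilation page.
    return [
        url_result('http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie),
                   ie='Ivi')
        for serie in series_ids
    ]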
youtube_dl/extractor/jadorecettepub.py (new file, 48 lines)
@@ -0,0 +1,48 @@
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from .youtube import YoutubeIE
+
+
+class JadoreCettePubIE(InfoExtractor):
+    _VALID_URL = r'http://(?:www\.)?jadorecettepub\.com/[0-9]{4}/[0-9]{2}/(?P<id>.*?)\.html'
+
+    _TEST = {
+        'url': 'http://www.jadorecettepub.com/2010/12/star-wars-massacre-par-les-japonais.html',
+        'md5': '401286a06067c70b44076044b66515de',
+        'info_dict': {
+            'id': 'jLMja3tr7a4',
+            'ext': 'mp4',
+            'title': 'La pire utilisation de Star Wars',
+            'description': "Jadorecettepub.com vous a gratifié de plusieurs pubs géniales utilisant Star Wars et Dark Vador plus particulièrement... Mais l'heure est venue de vous proposer une version totalement massacrée, venue du Japon. Quand les Japonais détruisent l'image de Star Wars pour vendre du thon en boite, ça promet...",
+        },
+    }
+
+    def _real_extract(self, url):
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+
+        webpage = self._download_webpage(url, display_id)
+
+        title = self._html_search_regex(
+            r'<span style="font-size: x-large;"><b>(.*?)</b></span>',
+            webpage, 'title')
+        description = self._html_search_regex(
+            r'(?s)<div id="fb-root">(.*?)<script>', webpage, 'description',
+            fatal=False)
+        real_url = self._search_regex(
+            r'\[/postlink\](.*)endofvid', webpage, 'video URL')
+        video_id = YoutubeIE.extract_id(real_url)
+
+        return {
+            '_type': 'url_transparent',
+            'url': real_url,
+            'id': video_id,
+            'title': title,
+            'description': description,
+        }
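Note: the new extractor returns a `url_transparent` result, so the actual download is delegated to YoutubeIE while the title and description scraped from the blog post override the delegate's metadata. A sketch of how such a result dict is assembled; the helper name and example values are illustrative:

def wrap_youtube_video(youtube_url, video_id, title, description):
    # '_type': 'url_transparent' means: run the matching extractor on
    # 'url', then overlay these fields on top of whatever it returns.
    return {
        '_type': 'url_transparent',
        'url': youtube_url,
        'id': video_id,
        'title': title,
        'description': description,
    }

# e.g. wrap_youtube_video('http://www.youtube.com/watch?v=jLMja3tr7a4',
#                         'jLMja3tr7a4', 'La pire utilisation de Star Wars', None)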
youtube_dl/extractor/jeuxvideo.py
@@ -1,5 +1,7 @@
 # coding: utf-8
+
+from __future__ import unicode_literals
 
 import json
 import re
@@ -10,12 +12,13 @@ class JeuxVideoIE(InfoExtractor):
     _VALID_URL = r'http://.*?\.jeuxvideo\.com/.*/(.*?)-\d+\.htm'
 
     _TEST = {
-        u'url': u'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
-        u'file': u'5182.mp4',
-        u'md5': u'046e491afb32a8aaac1f44dd4ddd54ee',
-        u'info_dict': {
-            u'title': u'GC 2013 : Tearaway nous présente ses papiers d\'identité',
-            u'description': u'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
+        'url': 'http://www.jeuxvideo.com/reportages-videos-jeux/0004/00046170/tearaway-playstation-vita-gc-2013-tearaway-nous-presente-ses-papiers-d-identite-00115182.htm',
+        'md5': '046e491afb32a8aaac1f44dd4ddd54ee',
+        'info_dict': {
+            'id': '5182',
+            'ext': 'mp4',
+            'title': 'GC 2013 : Tearaway nous présente ses papiers d\'identité',
+            'description': 'Lorsque les développeurs de LittleBigPlanet proposent un nouveau titre, on ne peut que s\'attendre à un résultat original et fort attrayant.\n',
         },
     }
 
@@ -25,14 +28,14 @@ class JeuxVideoIE(InfoExtractor):
         webpage = self._download_webpage(url, title)
         xml_link = self._html_search_regex(
             r'<param name="flashvars" value="config=(.*?)" />',
-            webpage, u'config URL')
+            webpage, 'config URL')
 
         video_id = self._search_regex(
             r'http://www\.jeuxvideo\.com/config/\w+/\d+/(.*?)/\d+_player\.xml',
-            xml_link, u'video ID')
+            xml_link, 'video ID')
 
         config = self._download_xml(
-            xml_link, title, u'Downloading XML config')
+            xml_link, title, 'Downloading XML config')
         info_json = config.find('format.json').text
         info = json.loads(info_json)['versions'][0]
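Note: JeuxVideo's config is XML with a JSON payload nested inside its `format.json` node, hence the two-stage parse in the hunk above. A self-contained sketch of that pattern; the sample document below is made up to match the shape the extractor expects:

import json
import xml.etree.ElementTree as ET

sample = (
    '<config>'
    '<format.json>{"versions": [{"file": "http://example.invalid/5182.mp4"}]}'
    '</format.json>'
    '</config>'
)

config = ET.fromstring(sample)
# find() locates the child element, then the JSON text inside it is parsed.
info = json.loads(config.find('format.json').text)['versions'][0]
video_url = info['file']  # 'http://example.invalid/5182.mp4'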
youtube_dl/extractor/jukebox.py
@@ -1,56 +1,61 @@
 # coding: utf-8
+from __future__ import unicode_literals
 
 import re
 
 from .common import InfoExtractor
 from ..utils import (
     ExtractorError,
+    RegexNotFoundError,
     unescapeHTML,
 )
 
 
 class JukeboxIE(InfoExtractor):
     _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
-    _IFRAME = r'<iframe .*src="(?P<iframe>[^"]*)".*>'
-    _VIDEO_URL = r'"config":{"file":"(?P<video_url>http:[^"]+[.](?P<video_ext>[^.?]+)[?]mdtk=[0-9]+)"'
-    _TITLE = r'<h1 class="inline">(?P<title>[^<]+)</h1>.*<span id="infos_article_artist">(?P<artist>[^<]+)</span>'
-    _IS_YOUTUBE = r'config":{"file":"(?P<youtube_url>http:[\\][/][\\][/]www[.]youtube[.]com[\\][/]watch[?]v=[^"]+)"'
+
+    _TEST = {
+        'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
+        'md5': '5dc6477e74b1e37042ac5acedd8413e5',
+        'info_dict': {
+            'id': 'r303r',
+            'ext': 'flv',
+            'title': 'Kosheen-En Vivo Pride',
+            'uploader': 'Kosheen',
+        },
+    }
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         video_id = mobj.group('video_id')
 
         html = self._download_webpage(url, video_id)
-
-        mobj = re.search(self._IFRAME, html)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract iframe url')
-        iframe_url = unescapeHTML(mobj.group('iframe'))
+        iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
 
         iframe_html = self._download_webpage(iframe_url, video_id, 'Downloading iframe')
-        mobj = re.search(r'class="jkb_waiting"', iframe_html)
-        if mobj is not None:
-            raise ExtractorError(u'Video is not available(in your country?)!')
+        if re.search(r'class="jkb_waiting"', iframe_html) is not None:
+            raise ExtractorError('Video is not available(in your country?)!')
 
         self.report_extraction(video_id)
 
-        mobj = re.search(self._VIDEO_URL, iframe_html)
-        if mobj is None:
-            mobj = re.search(self._IS_YOUTUBE, iframe_html)
-            if mobj is None:
-                raise ExtractorError(u'Cannot extract video url')
-            youtube_url = unescapeHTML(mobj.group('youtube_url')).replace('\/','/')
-            self.to_screen(u'Youtube video detected')
-            return self.url_result(youtube_url,ie='Youtube')
-        video_url = unescapeHTML(mobj.group('video_url')).replace('\/','/')
-        video_ext = unescapeHTML(mobj.group('video_ext'))
+        try:
+            video_url = self._search_regex(r'"config":{"file":"(?P<video_url>http:[^"]+\?mdtk=[0-9]+)"',
+                                           iframe_html, 'video url')
+            video_url = unescapeHTML(video_url).replace('\/', '/')
+        except RegexNotFoundError:
+            youtube_url = self._search_regex(
+                r'config":{"file":"(http:\\/\\/www\.youtube\.com\\/watch\?v=[^"]+)"',
+                iframe_html, 'youtube url')
+            youtube_url = unescapeHTML(youtube_url).replace('\/', '/')
+            self.to_screen('Youtube video detected')
+            return self.url_result(youtube_url, ie='Youtube')
 
-        mobj = re.search(self._TITLE, html)
-        if mobj is None:
-            raise ExtractorError(u'Cannot extract title')
-        title = unescapeHTML(mobj.group('title'))
-        artist = unescapeHTML(mobj.group('artist'))
+        title = self._html_search_regex(r'<h1 class="inline">([^<]+)</h1>',
+                                        html, 'title')
+        artist = self._html_search_regex(r'<span id="infos_article_artist">([^<]+)</span>',
+                                         html, 'artist')
 
-        return [{'id': video_id,
-                 'url': video_url,
-                 'title': artist + '-' + title,
-                 'ext': video_ext
-                 }]
+        return {
+            'id': video_id,
+            'url': video_url,
+            'title': artist + '-' + title,
+            'uploader': artist,
+        }
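Note: the jukebox rewrite drops the manual `re.search`-and-check-for-`None` boilerplate in favour of `_search_regex`, catching `RegexNotFoundError` to fall through to the embedded-YouTube branch. A standalone sketch of that fallback pattern; `search_regex` and the simplified regexes are stand-ins, not the library's helpers:

import re

class RegexNotFoundError(Exception):
    # youtube-dl's _search_regex raises an error of this name when nothing
    # matches; redefined here only so the sketch is self-contained.
    pass

def search_regex(pattern, text, name):
    mobj = re.search(pattern, text)
    if mobj is None:
        raise RegexNotFoundError('Unable to extract %s' % name)
    return mobj.group(1)

def extract_media_url(iframe_html):
    # Prefer the direct media URL; fall back to an embedded YouTube link,
    # mirroring the try/except flow of the rewritten extractor above.
    try:
        return ('direct', search_regex(
            r'"config":\{"file":"(http:[^"]+\?mdtk=[0-9]+)"',
            iframe_html, 'video url'))
    except RegexNotFoundError:
        return ('youtube', search_regex(
            r'"file":"(http:[^"]*youtube\.com[^"]+)"',
            iframe_html, 'youtube url'))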