Compare commits: youtube-dl 2014.09.14 … 2014.11.13 (539 commits)
--- a/.gitignore
+++ b/.gitignore
@@ -11,6 +11,7 @@ MANIFEST
 README.txt
 youtube-dl.1
 youtube-dl.bash-completion
+youtube-dl.fish
 youtube-dl
 youtube-dl.exe
 youtube-dl.tar.gz
@@ -29,3 +30,4 @@ updates_key.pem
 *.swp
 test/testdata
 .tox
+youtube-dl.zsh
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,83 @@
+Ricardo Garcia Gonzalez
+Danny Colligan
+Benjamin Johnson
+Vasyl' Vavrychuk
+Witold Baryluk
+Paweł Paprota
+Gergely Imreh
+Rogério Brito
+Philipp Hagemeister
+Sören Schulze
+Kevin Ngo
+Ori Avtalion
+shizeeg
+Filippo Valsorda
+Christian Albrecht
+Dave Vasilevsky
+Jaime Marquínez Ferrándiz
+Jeff Crouse
+Osama Khalid
+Michael Walter
+M. Yasoob Ullah Khalid
+Julien Fraichard
+Johny Mo Swag
+Axel Noack
+Albert Kim
+Pierre Rudloff
+Huarong Huo
+Ismael Mejía
+Steffan 'Ruirize' James
+Andras Elso
+Jelle van der Waa
+Marcin Cieślak
+Anton Larionov
+Takuya Tsuchida
+Sergey M.
+Michael Orlitzky
+Chris Gahan
+Saimadhav Heblikar
+Mike Col
+Oleg Prutz
+pulpe
+Andreas Schmitz
+Michael Kaiser
+Niklas Laxström
+David Triendl
+Anthony Weems
+David Wagner
+Juan C. Olivares
+Mattias Harrysson
+phaer
+Sainyam Kapoor
+Nicolas Évrard
+Jason Normore
+Hoje Lee
+Adam Thalhammer
+Georg Jähnig
+Ralf Haring
+Koki Takahashi
+Ariset Llerena
+Adam Malcontenti-Wilson
+Tobias Bell
+Naglis Jonaitis
+Charles Chen
+Hassaan Ali
+Dobrosław Żybort
+David Fabijan
+Sebastian Haas
+Alexander Kirk
+Erik Johnson
+Keith Beckman
+Ole Ernst
+Aaron McDaniel (mcd1992)
+Magnus Kolstad
+Hari Padmanaban
+Carlos Ramos
+5moufl
+lenaten
+Dennis Scheiba
+Damon Timm
+winwon
+Xavier Beynon
+Gabriel Schubiner
+xantares
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,5 +2,6 @@ include README.md
 include test/*.py
 include test/*.json
 include youtube-dl.bash-completion
+include youtube-dl.fish
 include youtube-dl.1
 recursive-include docs Makefile conf.py *.rst
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
-all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 
 clean:
-	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz
+	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part
 
 cleanall: clean
 	rm -f youtube-dl youtube-dl.exe
@@ -9,6 +9,7 @@ cleanall: clean
 PREFIX ?= /usr/local
 BINDIR ?= $(PREFIX)/bin
 MANDIR ?= $(PREFIX)/man
+SHAREDIR ?= $(PREFIX)/share
 PYTHON ?= /usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
@@ -22,13 +23,17 @@ else
 endif
 endif
 
-install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
+install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	install -d $(DESTDIR)$(BINDIR)
 	install -m 755 youtube-dl $(DESTDIR)$(BINDIR)
 	install -d $(DESTDIR)$(MANDIR)/man1
 	install -m 644 youtube-dl.1 $(DESTDIR)$(MANDIR)/man1
 	install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
 	install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl
+	install -d $(DESTDIR)$(SHAREDIR)/zsh/site-functions
+	install -m 644 youtube-dl.zsh $(DESTDIR)$(SHAREDIR)/zsh/site-functions/_youtube-dl
+	install -d $(DESTDIR)$(SYSCONFDIR)/fish/completions
+	install -m 644 youtube-dl.fish $(DESTDIR)$(SYSCONFDIR)/fish/completions/youtube-dl.fish
 
 test:
 	#nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
@@ -36,9 +41,9 @@ test:
 
 tar: youtube-dl.tar.gz
 
-.PHONY: all clean install test tar bash-completion pypi-files
+.PHONY: all clean install test tar bash-completion pypi-files zsh-completion fish-completion
 
-pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1
+pypi-files: youtube-dl.bash-completion README.txt youtube-dl.1 youtube-dl.fish
 
 youtube-dl: youtube_dl/*.py youtube_dl/*/*.py
 	zip --quiet youtube-dl youtube_dl/*.py youtube_dl/*/*.py
@@ -64,7 +69,17 @@ youtube-dl.bash-completion: youtube_dl/*.py youtube_dl/*/*.py devscripts/bash-completion.in
 
 bash-completion: youtube-dl.bash-completion
 
-youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
+youtube-dl.zsh: youtube_dl/*.py youtube_dl/*/*.py devscripts/zsh-completion.in
+	python devscripts/zsh-completion.py
+
+zsh-completion: youtube-dl.zsh
+
+youtube-dl.fish: youtube_dl/*.py youtube_dl/*/*.py devscripts/fish-completion.in
+	python devscripts/fish-completion.py
+
+fish-completion: youtube-dl.fish
+
+youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	@tar -czf youtube-dl.tar.gz --transform "s|^|youtube-dl/|" --owner 0 --group 0 \
 		--exclude '*.DS_Store' \
 		--exclude '*.kate-swp' \
@@ -78,5 +93,6 @@ youtube-dl.tar.gz: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion
 		-- \
 		bin devscripts test youtube_dl docs \
 		LICENSE README.md README.txt \
-		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion setup.py \
+		Makefile MANIFEST.in youtube-dl.1 youtube-dl.bash-completion \
+		youtube-dl.zsh youtube-dl.fish setup.py \
 		youtube-dl
--- a/README.md
+++ b/README.md
@@ -69,6 +69,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      configuration in ~/.config/youtube-dl.conf
                                      (%APPDATA%/youtube-dl/config.txt on
                                      Windows)
+    --flat-playlist                  Do not extract the videos of a playlist,
+                                     only list them.
 
 ## Video Selection:
     --playlist-start NUMBER          playlist video to start at (default is 1)
@@ -99,8 +101,6 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloaded videos in it.
     --include-ads                    Download advertisements as well
                                      (experimental)
-    --youtube-include-dash-manifest  Try to download the DASH manifest on
-                                     YouTube videos (experimental)
 
 ## Download Options:
     -r, --rate-limit LIMIT           maximum download rate in bytes per second
@@ -131,17 +131,19 @@ which means you can modify it, redistribute it or use it however you like.
                                      %(upload_date)s for the upload date
                                      (YYYYMMDD), %(extractor)s for the provider
                                      (youtube, metacafe, etc), %(id)s for the
-                                     video id, %(playlist)s for the playlist the
+                                     video id, %(playlist_title)s,
+                                     %(playlist_id)s, or %(playlist)s (=title if
+                                     present, ID otherwise) for the playlist the
                                      video is in, %(playlist_index)s for the
-                                     position in the playlist and %% for a
-                                     literal percent. %(height)s and %(width)s
-                                     for the width and height of the video
-                                     format. %(resolution)s for a textual
+                                     position in the playlist. %(height)s and
+                                     %(width)s for the width and height of the
+                                     video format. %(resolution)s for a textual
                                      description of the resolution of the video
-                                     format. Use - to output to stdout. Can also
-                                     be used to download to a different
-                                     directory, for example with -o '/my/downloa
-                                     ds/%(uploader)s/%(title)s-%(id)s.%(ext)s' .
+                                     format. %% for a literal percent. Use - to
+                                     output to stdout. Can also be used to
+                                     download to a different directory, for
+                                     example with -o '/my/downloads/%(uploader)s
+                                     /%(title)s-%(id)s.%(ext)s' .
     --autonumber-size NUMBER         Specifies the number of digits in
                                      %(autonumber)s when it is present in output
                                      filename template or --auto-number option
@@ -158,7 +160,8 @@ which means you can modify it, redistribute it or use it however you like.
                                      downloads if possible.
     --no-continue                    do not resume partially downloaded files
                                      (restart from beginning)
-    --no-part                        do not use .part files
+    --no-part                        do not use .part files - write directly
+                                     into output file
     --no-mtime                       do not use the Last-modified header to set
                                      the file modification time
     --write-description              write video description to a .description
@@ -198,6 +201,10 @@ which means you can modify it, redistribute it or use it however you like.
     -j, --dump-json                  simulate, quiet but print JSON information.
                                      See --output for a description of available
                                      keys.
+    -J, --dump-single-json           simulate, quiet but print JSON information
+                                     for each command-line argument. If the URL
+                                     refers to a playlist, dump the whole
+                                     playlist information in a single line.
     --newline                        output progress bar as new lines
     --no-progress                    do not print progress bar
     --console-title                  display progress in console titlebar
@@ -216,7 +223,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      information about the video. (Currently
                                      supported only for YouTube)
     --user-agent UA                  specify a custom user agent
-    --referer REF                    specify a custom referer, use if the video
+    --referer URL                    specify a custom referer, use if the video
                                      access is restricted to one domain
     --add-header FIELD:VALUE         specify a custom HTTP header and its value,
                                      separated by a colon ':'. You can use this
@@ -227,17 +234,27 @@ which means you can modify it, redistribute it or use it however you like.
 
 ## Video Format Options:
     -f, --format FORMAT              video format code, specify the order of
-                                     preference using slashes: "-f 22/17/18".
-                                     "-f mp4" and "-f flv" are also supported.
-                                     You can also use the special names "best",
-                                     "bestvideo", "bestaudio", "worst",
-                                     "worstvideo" and "worstaudio". By default,
-                                     youtube-dl will pick the best quality.
+                                     preference using slashes: -f 22/17/18 . -f
+                                     mp4 , -f m4a and -f flv are also
+                                     supported. You can also use the special
+                                     names "best", "bestvideo", "bestaudio",
+                                     "worst", "worstvideo" and "worstaudio". By
+                                     default, youtube-dl will pick the best
+                                     quality. Use commas to download multiple
+                                     audio formats, such as -f
+                                     136/137/mp4/bestvideo,140/m4a/bestaudio.
+                                     You can merge the video and audio of two
+                                     formats into a single file using -f <video-
+                                     format>+<audio-format> (requires ffmpeg or
+                                     avconv), for example -f
+                                     bestvideo+bestaudio.
     --all-formats                    download all available video formats
     --prefer-free-formats            prefer free video formats unless a specific
                                      one is requested
     --max-quality FORMAT             highest quality format to download
     -F, --list-formats               list all available formats
+    --youtube-skip-dash-manifest     Do not download the DASH manifest on
+                                     YouTube videos
 
 ## Subtitle Options:
     --write-sub                      write subtitle file
@@ -253,7 +270,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      language tags like 'en,pt'
 
 ## Authentication Options:
-    -u, --username USERNAME          account username
+    -u, --username USERNAME          login with this account ID
     -p, --password PASSWORD          account password
     -2, --twofactor TWOFACTOR        two-factor auth code
     -n, --netrc                      use .netrc authentication data
@@ -264,7 +281,7 @@ which means you can modify it, redistribute it or use it however you like.
                                      (requires ffmpeg or avconv and ffprobe or
                                      avprobe)
     --audio-format FORMAT            "best", "aac", "vorbis", "mp3", "m4a",
-                                     "opus", or "wav"; best by default
+                                     "opus", or "wav"; "best" by default
     --audio-quality QUALITY          ffmpeg/avconv audio quality specification,
                                      insert a value between 0 (better) and 9
                                      (worse) for VBR or a specific bitrate like
@@ -345,21 +362,34 @@ $ youtube-dl --dateafter 20000101 --datebefore 20091231
 
 # FAQ
 
-### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
+### How do I update youtube-dl?
 
-YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
+If you've followed [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html), you can simply run `youtube-dl -U` (or, on Linux, `sudo youtube-dl -U`).
 
-If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to report bugs to the Ubuntu packaging guys - all they have to do is update the package to a somewhat recent version.
+If you have used pip, a simple `sudo pip install -U youtube-dl` is sufficient to update.
 
-Alternatively, uninstall the youtube-dl package and follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html). In a pinch, this should do if you used `apt-get` before to install youtube-dl:
+If you have installed youtube-dl using a package manager like *apt-get* or *yum*, use the standard system update mechanism to update. Note that distribution packages are often outdated. As a rule of thumb, youtube-dl releases at least once a month, and often weekly or even daily. Simply go to http://yt-dl.org/ to find out the current version. Unfortunately, there is nothing we youtube-dl developers can do if your distributions serves a really outdated version. You can (and should) complain to your distribution in their bugtracker or support forum.
+
+As a last resort, you can also uninstall the version installed by your package manager and follow our manual installation instructions. For that, remove the distribution's package, with a line like
+
+    sudo apt-get remove -y youtube-dl
+
+Afterwards, simply follow [our manual installation instructions](http://rg3.github.io/youtube-dl/download.html):
 
 ```
-sudo apt-get remove -y youtube-dl
 sudo wget https://yt-dl.org/latest/youtube-dl -O /usr/local/bin/youtube-dl
 sudo chmod a+x /usr/local/bin/youtube-dl
 hash -r
 ```
 
+Again, from then on you'll be able to update with `sudo youtube-dl -U`.
+
+### I'm getting an error `Unable to extract OpenGraph title` on YouTube playlists
+
+YouTube changed their playlist format in March 2014 and later on, so you'll need at least youtube-dl 2014.07.25 to download all YouTube videos.
+
+If you have installed youtube-dl with a package manager, pip, setup.py or a tarball, please use that to update. Note that Ubuntu packages do not seem to get updated anymore. Since we are not affiliated with Ubuntu, there is little we can do. Feel free to [report bugs](https://bugs.launchpad.net/ubuntu/+source/youtube-dl/+filebug) to the [Ubuntu packaging guys](mailto:ubuntu-motu@lists.ubuntu.com?subject=outdated%20version%20of%20youtube-dl) - all they have to do is update the package to a somewhat recent version. See above for a way to update.
+
 ### Do I always have to pass in `--max-quality FORMAT`, or `-citw`?
 
 By default, youtube-dl intends to have the best options (incidentally, if you have a convincing case that these should be different, [please file an issue where you explain that](https://yt-dl.org/bug)). Therefore, it is unnecessary and sometimes harmful to copy long option strings from webpages. In particular, `--max-quality` *limits* the video quality (so if you want the best quality, do NOT pass it in), and the only option out of `-citw` that is regularly useful is `-i`.
@@ -439,8 +469,6 @@ If you want to add support for a new site
     # coding: utf-8
     from __future__ import unicode_literals
 
-    import re
-
     from .common import InfoExtractor
 
 
@@ -448,7 +476,7 @@ If you want to add support for a new site
         _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
         _TEST = {
             'url': 'http://yourextractor.com/watch/42',
-            'md5': 'TODO: md5 sum of the first 10KiB of the video file',
+            'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
             'info_dict': {
                 'id': '42',
                 'ext': 'mp4',
@@ -463,8 +491,7 @@ If you want to add support for a new site
         }
 
         def _real_extract(self, url):
-            mobj = re.match(self._VALID_URL, url)
-            video_id = mobj.group('id')
+            video_id = self._match_id(url)
 
             # TODO more code goes here, for example ...
             webpage = self._download_webpage(url, video_id)
@@ -480,7 +507,7 @@ If you want to add support for a new site
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will be then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
 7. Have a look at [`youtube_dl/common/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L38). Add tests and code for as many as you want.
 8. If you can, check the code with [pyflakes](https://pypi.python.org/pypi/pyflakes) (a good idea) and [pep8](https://pypi.python.org/pypi/pep8) (optional, ignore E501).
-9. When the tests pass, [add](https://www.kernel.org/pub/software/scm/git/docs/git-add.html) the new files and [commit](https://www.kernel.org/pub/software/scm/git/docs/git-commit.html) them and [push](https://www.kernel.org/pub/software/scm/git/docs/git-push.html) the result, like this:
+9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
@@ -491,6 +518,20 @@ If you want to add support for a new site
 
 In any case, thank you very much for your contributions!
 
+# EMBEDDING YOUTUBE-DL
+
+youtube-dl makes the best effort to be a good command-line program, and thus should be callable from any programming language. If you encounter any problems parsing its output, feel free to [create a report](https://github.com/rg3/youtube-dl/issues/new).
+
+From a Python program, you can embed youtube-dl in a more powerful fashion, like this:
+
+    import youtube_dl
+
+    ydl_opts = {}
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
+
+Most likely, you'll want to use various options. For a list of what can be done, have a look at [youtube_dl/YoutubeDL.py](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/YoutubeDL.py#L69). For a start, if you want to intercept youtube-dl's output, set a `logger` object.
+
 # BUGS
 
 Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues> . Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email.
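
Side note on the EMBEDDING YOUTUBE-DL section added above: the README only says that a `logger` object lets you intercept youtube-dl's output. A minimal sketch of such a logger (the class name and method bodies are illustrative assumptions, not part of this diff; YoutubeDL calls `debug`, `warning` and `error` on whatever object is supplied):

```
import youtube_dl

class MyLogger(object):  # hypothetical name; any object with these methods works
    def debug(self, msg):
        pass  # drop youtube-dl's routine debug/progress chatter

    def warning(self, msg):
        pass

    def error(self, msg):
        print(msg)  # surface only real problems

ydl_opts = {'logger': MyLogger()}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['http://www.youtube.com/watch?v=BaW_jenozKc'])
```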
--- /dev/null
+++ b/devscripts/fish-completion.in
@@ -0,0 +1,5 @@
+
+{{commands}}
+
+
+complete --command youtube-dl --arguments ":ytfavorites :ytrecommended :ytsubscriptions :ytwatchlater :ythistory"
new file mode 100755
--- /dev/null
+++ b/devscripts/fish-completion.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+from __future__ import unicode_literals
+
+import optparse
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+from youtube_dl.utils import shell_quote
+
+FISH_COMPLETION_FILE = 'youtube-dl.fish'
+FISH_COMPLETION_TEMPLATE = 'devscripts/fish-completion.in'
+
+EXTRA_ARGS = {
+    'recode-video': ['--arguments', 'mp4 flv ogg webm mkv', '--exclusive'],
+
+    # Options that need a file parameter
+    'download-archive': ['--require-parameter'],
+    'cookies': ['--require-parameter'],
+    'load-info': ['--require-parameter'],
+    'batch-file': ['--require-parameter'],
+}
+
+def build_completion(opt_parser):
+    commands = []
+
+    for group in opt_parser.option_groups:
+        for option in group.option_list:
+            long_option = option.get_opt_string().strip('-')
+            help_msg = shell_quote([option.help])
+            complete_cmd = ['complete', '--command', 'youtube-dl', '--long-option', long_option]
+            if option._short_opts:
+                complete_cmd += ['--short-option', option._short_opts[0].strip('-')]
+            if option.help != optparse.SUPPRESS_HELP:
+                complete_cmd += ['--description', option.help]
+            complete_cmd.extend(EXTRA_ARGS.get(long_option, []))
+            commands.append(shell_quote(complete_cmd))
+
+    with open(FISH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+    filled_template = template.replace('{{commands}}', '\n'.join(commands))
+    with open(FISH_COMPLETION_FILE, 'w') as f:
+        f.write(filled_template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
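
Both completion generators in this diff follow the same pattern: derive strings from the option parser, then substitute `{{...}}` placeholders in a checked-in `.in` template. A stripped-down, self-contained sketch of that pattern (the hard-coded option list and inline template are stand-ins; the real script walks `youtube_dl.parseOpts()` and reads `devscripts/fish-completion.in`):

```
# Stand-in data: the real script derives options from youtube_dl.parseOpts().
options = ['--format', '--output', '--batch-file']

# One fish `complete` invocation per long option.
commands = '\n'.join(
    'complete --command youtube-dl --long-option %s' % opt.lstrip('-')
    for opt in options)

# Inline stand-in for the devscripts/fish-completion.in template file.
template = '\n{{commands}}\n'
print(template.replace('{{commands}}', commands))
```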
--- /dev/null
+++ b/devscripts/zsh-completion.in
@@ -0,0 +1,28 @@
+#compdef youtube-dl
+
+__youtube_dl() {
+    local curcontext="$curcontext" fileopts diropts cur prev
+    typeset -A opt_args
+    fileopts="{{fileopts}}"
+    diropts="{{diropts}}"
+    cur=$words[CURRENT]
+    case $cur in
+        :)
+            _arguments '*: :(::ytfavorites ::ytrecommended ::ytsubscriptions ::ytwatchlater ::ythistory)'
+        ;;
+        *)
+            prev=$words[CURRENT-1]
+            if [[ ${prev} =~ ${fileopts} ]]; then
+                _path_files
+            elif [[ ${prev} =~ ${diropts} ]]; then
+                _path_files -/
+            elif [[ ${prev} == "--recode-video" ]]; then
+                _arguments '*: :(mp4 flv ogg webm mkv)'
+            else
+                _arguments '*: :({{flags}})'
+            fi
+        ;;
+    esac
+}
+
+__youtube_dl
new file mode 100755
--- /dev/null
+++ b/devscripts/zsh-completion.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+import os
+from os.path import dirname as dirn
+import sys
+
+sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
+import youtube_dl
+
+ZSH_COMPLETION_FILE = "youtube-dl.zsh"
+ZSH_COMPLETION_TEMPLATE = "devscripts/zsh-completion.in"
+
+
+def build_completion(opt_parser):
+    opts = [opt for group in opt_parser.option_groups
+            for opt in group.option_list]
+    opts_file = [opt for opt in opts if opt.metavar == "FILE"]
+    opts_dir = [opt for opt in opts if opt.metavar == "DIR"]
+
+    fileopts = []
+    for opt in opts_file:
+        if opt._short_opts:
+            fileopts.extend(opt._short_opts)
+        if opt._long_opts:
+            fileopts.extend(opt._long_opts)
+
+    diropts = []
+    for opt in opts_dir:
+        if opt._short_opts:
+            diropts.extend(opt._short_opts)
+        if opt._long_opts:
+            diropts.extend(opt._long_opts)
+
+    flags = [opt.get_opt_string() for opt in opts]
+
+    with open(ZSH_COMPLETION_TEMPLATE) as f:
+        template = f.read()
+
+    template = template.replace("{{fileopts}}", "|".join(fileopts))
+    template = template.replace("{{diropts}}", "|".join(diropts))
+    template = template.replace("{{flags}}", " ".join(flags))
+
+    with open(ZSH_COMPLETION_FILE, "w") as f:
+        f.write(template)
+
+parser = youtube_dl.parseOpts()[0]
+build_completion(parser)
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -44,8 +44,8 @@ copyright = u'2014, Ricardo Garcia Gonzalez'
 # built documents.
 #
 # The short X.Y version.
-import youtube_dl
-version = youtube_dl.__version__
+from youtube_dl.version import __version__
+version = __version__
 # The full version, including alpha/beta/rc tags.
 release = version
 
--- a/setup.py
+++ b/setup.py
@@ -48,6 +48,7 @@ if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
 else:
     files_spec = [
         ('etc/bash_completion.d', ['youtube-dl.bash-completion']),
+        ('etc/fish/completions', ['youtube-dl.fish']),
         ('share/doc/youtube_dl', ['README.txt']),
         ('share/man/man1', ['youtube-dl.1'])
     ]
--- a/test/helper.py
+++ b/test/helper.py
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import errno
 import io
 import hashlib
@@ -12,6 +14,7 @@ from youtube_dl import YoutubeDL
 from youtube_dl.utils import (
     compat_str,
     preferredencoding,
+    write_string,
 )
 
 
@@ -40,10 +43,10 @@ def report_warning(message):
     If stderr is a tty file the 'WARNING:' will be colored
     '''
     if sys.stderr.isatty() and os.name != 'nt':
-        _msg_header = u'\033[0;33mWARNING:\033[0m'
+        _msg_header = '\033[0;33mWARNING:\033[0m'
     else:
-        _msg_header = u'WARNING:'
-    output = u'%s %s\n' % (_msg_header, message)
+        _msg_header = 'WARNING:'
+    output = '%s %s\n' % (_msg_header, message)
     if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
         output = output.encode(preferredencoding())
     sys.stderr.write(output)
@@ -54,7 +57,7 @@ class FakeYDL(YoutubeDL):
         # Different instances of the downloader can't share the same dictionary
         # some test set the "sublang" parameter, which would break the md5 checks.
         params = get_params(override=override)
-        super(FakeYDL, self).__init__(params)
+        super(FakeYDL, self).__init__(params, auto_init=False)
        self.result = []
 
     def to_screen(self, s, skip_eol=None):
@@ -103,22 +106,22 @@ def expect_info_dict(self, expected_dict, got_dict):
 
             self.assertTrue(
                 isinstance(got, compat_str),
-                u'Expected a %s object, but got %s for field %s' % (
+                'Expected a %s object, but got %s for field %s' % (
                     compat_str.__name__, type(got).__name__, info_field))
             self.assertTrue(
                 match_rex.match(got),
-                u'field %s (value: %r) should match %r' % (info_field, got, match_str))
+                'field %s (value: %r) should match %r' % (info_field, got, match_str))
         elif isinstance(expected, type):
             got = got_dict.get(info_field)
             self.assertTrue(isinstance(got, expected),
-                u'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
+                'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
         else:
             if isinstance(expected, compat_str) and expected.startswith('md5:'):
                 got = 'md5:' + md5(got_dict.get(info_field))
             else:
                 got = got_dict.get(info_field)
             self.assertEqual(expected, got,
-                u'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
+                'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
 
     # Check for the presence of mandatory fields
     if got_dict.get('_type') != 'playlist':
@@ -126,7 +129,7 @@ def expect_info_dict(self, expected_dict, got_dict):
             self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
     # Check for mandatory fields that are automatically set by YoutubeDL
     for key in ['webpage_url', 'extractor', 'extractor_key']:
-        self.assertTrue(got_dict.get(key), u'Missing field: %s' % key)
+        self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
 
     # Are checkable fields missing from the test case definition?
     test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
@@ -134,7 +137,15 @@ def expect_info_dict(self, expected_dict, got_dict):
         if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
     missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
     if missing_keys:
-        sys.stderr.write(u'\n"info_dict": ' + json.dumps(test_info_dict, ensure_ascii=False, indent=4) + u'\n')
+        def _repr(v):
+            if isinstance(v, compat_str):
+                return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
+            else:
+                return repr(v)
+        info_dict_str = ''.join(
+            '    %s: %s,\n' % (_repr(k), _repr(v))
+            for k, v in test_info_dict.items())
+        write_string('\n"info_dict": {\n' + info_dict_str + '}\n', out=sys.stderr)
         self.assertFalse(
             missing_keys,
             'Missing keys in test definition: %s' % (
@@ -160,3 +171,13 @@ def assertGreaterEqual(self, got, expected, msg=None):
     if msg is None:
         msg = '%r not greater than or equal to %r' % (got, expected)
     self.assertTrue(got >= expected, msg)
+
+
+def expect_warnings(ydl, warnings_re):
+    real_warning = ydl.report_warning
+
+    def _report_warning(w):
+        if not any(re.search(w_re, w) for w_re in warnings_re):
+            real_warning(w)
+
+    ydl.report_warning = _report_warning
--- a/test/test_all_urls.py
+++ b/test/test_all_urls.py
@@ -14,7 +14,7 @@ from test.helper import gettestcases
 from youtube_dl.extractor import (
     FacebookIE,
     gen_extractors,
-    JustinTVIE,
+    TwitchIE,
     YoutubeIE,
 )
 
@@ -72,21 +72,17 @@ class TestAllURLsMatching(unittest.TestCase):
         self.assertMatch('http://www.youtube.com/results?search_query=making+mustard', ['youtube:search_url'])
         self.assertMatch('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video', ['youtube:search_url'])
 
-    def test_justin_tv_channelid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv'))
-        self.assertTrue(JustinTVIE.suitable('http://www.justin.tv/vanillatv/'))
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/'))
+    def test_twitch_channelid_matching(self):
+        self.assertTrue(TwitchIE.suitable('twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv'))
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/'))
 
-    def test_justintv_videoid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
+    def test_twitch_videoid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/vanillatv/b/328087483'))
 
-    def test_justin_tv_chapterid_matching(self):
-        self.assertTrue(JustinTVIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
+    def test_twitch_chapterid_matching(self):
+        self.assertTrue(TwitchIE.suitable('http://www.twitch.tv/tsm_theoddone/c/2349361'))
 
     def test_youtube_extract(self):
         assertExtractId = lambda url, id: self.assertEqual(YoutubeIE.extract_id(url), id)
--- /dev/null
+++ b/test/test_compat.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+from youtube_dl.utils import get_filesystem_encoding
+from youtube_dl.compat import (
+    compat_getenv,
+    compat_expanduser,
+)
+
+
+class TestCompat(unittest.TestCase):
+    def test_compat_getenv(self):
+        test_str = 'тест'
+        os.environ['YOUTUBE-DL-TEST'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_getenv('YOUTUBE-DL-TEST'), test_str)
+
+    def test_compat_expanduser(self):
+        test_str = 'C:\Documents and Settings\тест\Application Data'
+        os.environ['HOME'] = (
+            test_str if sys.version_info >= (3, 0)
+            else test_str.encode(get_filesystem_encoding()))
+        self.assertEqual(compat_expanduser('~'), test_str)
+
+    def test_all_present(self):
+        import youtube_dl.compat
+        all_names = youtube_dl.compat.__all__
+        present_names = set(filter(
+            lambda c: '_' in c and not c.startswith('_'),
+            dir(youtube_dl.compat))) - set(['unicode_literals'])
+        self.assertEqual(all_names, sorted(present_names))
+
+if __name__ == '__main__':
+    unittest.main()
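
A gloss on why the two wrappers tested above exist: on Python 2, `os.environ` holds byte strings, so non-ASCII environment values (and a non-ASCII home directory) must be decoded with the filesystem encoding, while on Python 3 they are already text. A usage sketch, relying only on the API the test itself imports:

```
from youtube_dl.compat import compat_getenv, compat_expanduser

# Both calls return the text type on Python 2 and 3 alike, as the test asserts.
home = compat_expanduser('~')
path = compat_getenv('PATH')
print(home, path)
```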
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -8,6 +8,7 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 from test.helper import (
     assertGreaterEqual,
+    expect_warnings,
     get_params,
     gettestcases,
     expect_info_dict,
@@ -22,10 +23,12 @@ import json
 import socket
 
 import youtube_dl.YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
     compat_http_client,
     compat_urllib_error,
     compat_HTTPError,
+)
+from youtube_dl.utils import (
     DownloadError,
     ExtractorError,
     format_bytes,
@@ -93,13 +96,14 @@ def generator(test_case):
         params.setdefault('extract_flat', True)
         params.setdefault('skip_download', True)
 
-        ydl = YoutubeDL(params)
+        ydl = YoutubeDL(params, auto_init=False)
         ydl.add_default_info_extractors()
         finished_hook_called = set()
         def _hook(status):
             if status['status'] == 'finished':
                 finished_hook_called.add(status['filename'])
         ydl.add_progress_hook(_hook)
+        expect_warnings(ydl, test_case.get('expected_warnings', []))
 
         def get_tc_filename(tc):
             return tc.get('file') or ydl.prepare_filename(tc.get('info_dict', {}))
@@ -139,7 +143,9 @@ def generator(test_case):
 
             if is_playlist:
                 self.assertEqual(res_dict['_type'], 'playlist')
+                self.assertTrue('entries' in res_dict)
                 expect_info_dict(self, test_case.get('info_dict', {}), res_dict)
+
             if 'playlist_mincount' in test_case:
                 assertGreaterEqual(
                     self,
@@ -181,14 +187,16 @@ def generator(test_case):
                     md5_for_file = _file_md5(tc_filename)
                     self.assertEqual(md5_for_file, tc['md5'])
                 info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
-                self.assertTrue(os.path.exists(info_json_fn))
+                self.assertTrue(
+                    os.path.exists(info_json_fn),
+                    'Missing info file %s' % info_json_fn)
                 with io.open(info_json_fn, encoding='utf-8') as infof:
                     info_dict = json.load(infof)
 
                 expect_info_dict(self, tc.get('info_dict', {}), info_dict)
         finally:
             try_rm_tcs_files()
-            if is_playlist and res_dict is not None:
+            if is_playlist and res_dict is not None and res_dict.get('entries'):
                 # Remove all other files that may have been extracted if the
                 # extractor returns full results even with extract_flat
                 res_tcs = [{'info_dict': e} for e in res_dict['entries']]
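
The `auto_init=False` argument introduced above lets a caller build a `YoutubeDL` without the usual startup work (no debug header, no default extractor registration) and then attach extractors explicitly. A sketch of that pattern, using only calls that appear in this diff:

```
from youtube_dl import YoutubeDL

params = {'skip_download': True}
ydl = YoutubeDL(params, auto_init=False)  # skip debug header / default setup
ydl.add_default_info_extractors()         # register extractors explicitly
```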
--- a/test/test_subtitles.py
+++ b/test/test_subtitles.py
@@ -15,6 +15,7 @@ from youtube_dl.extractor import (
     DailymotionIE,
     TEDIE,
     VimeoIE,
+    WallaIE,
 )
 
 
@@ -279,5 +280,32 @@ class TestVimeoSubtitles(BaseTestSubtitles):
         self.assertTrue(subtitles.get(lang) is not None, u'Subtitles for \'%s\' not extracted' % lang)
 
 
+class TestWallaSubtitles(BaseTestSubtitles):
+    url = 'http://vod.walla.co.il/movie/2705958/the-yes-men'
+    IE = WallaIE
+
+    def test_list_subtitles(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['listsubtitles'] = True
+        info_dict = self.getInfoDict()
+        self.assertEqual(info_dict, None)
+
+    def test_allsubtitles(self):
+        self.DL.expect_warning(u'Automatic Captions not supported by this server')
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(set(subtitles.keys()), set(['heb']))
+        self.assertEqual(md5(subtitles['heb']), 'e758c5d7cb982f6bef14f377ec7a3920')
+
+    def test_nosubtitles(self):
+        self.DL.expect_warning(u'video doesn\'t have subtitles')
+        self.url = 'http://vod.walla.co.il/movie/2642630/one-direction-all-for-one'
+        self.DL.params['writesubtitles'] = True
+        self.DL.params['allsubtitles'] = True
+        subtitles = self.getSubtitles()
+        self.assertEqual(len(subtitles), 0)
+
+
 if __name__ == '__main__':
     unittest.main()
--- a/test/test_swfinterp.py
+++ b/test/test_swfinterp.py
@@ -37,7 +37,9 @@ def _make_testfunc(testfile):
             or os.path.getmtime(swf_file) < os.path.getmtime(as_file)):
         # Recompile
         try:
-            subprocess.check_call(['mxmlc', '-output', swf_file, as_file])
+            subprocess.check_call([
+                'mxmlc', '-output', swf_file,
+                '-static-link-runtime-shared-libraries', as_file])
         except OSError as ose:
             if ose.errno == errno.ENOENT:
                 print('mxmlc not found! Skipping test.')
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -20,9 +20,9 @@ from youtube_dl.utils import (
     encodeFilename,
     find_xpath_attr,
     fix_xml_ampersands,
-    get_meta_content,
     orderedSet,
-    PagedList,
+    OnDemandPagedList,
+    InAdvancePagedList,
     parse_duration,
     read_batch_urls,
     sanitize_filename,
@@ -40,6 +40,11 @@ from youtube_dl.utils import (
     parse_iso8601,
     strip_jsonp,
     uppercase_escape,
+    limit_length,
+    escape_rfc3986,
+    escape_url,
+    js_to_json,
+    get_filesystem_encoding,
 )
 
 
@@ -134,6 +139,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
         self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
         self.assertEqual(unified_strdate('1968-12-10'), '19681210')
+        self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
 
     def test_find_xpath_attr(self):
         testxml = '''<root>
@@ -148,17 +154,6 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
         self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
 
-    def test_meta_parser(self):
-        testhtml = '''
-        <head>
-            <meta name="description" content="foo &amp; bar">
-            <meta content='Plato' name='author'/>
-        </head>
-        '''
-        get_meta = lambda name: get_meta_content(name, testhtml)
-        self.assertEqual(get_meta('description'), 'foo & bar')
-        self.assertEqual(get_meta('author'), 'Plato')
-
     def test_xpath_with_ns(self):
         testxml = '''<root xmlns:media="http://example.com/">
             <media:song>
@@ -243,10 +238,14 @@ class TestUtil(unittest.TestCase):
             for i in range(firstid, upto):
                 yield i
 
-        pl = PagedList(get_page, pagesize)
+        pl = OnDemandPagedList(get_page, pagesize)
         got = pl.getslice(*sliceargs)
         self.assertEqual(got, expected)
 
+        iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
+        got = iapl.getslice(*sliceargs)
+        self.assertEqual(got, expected)
+
     testPL(5, 2, (), [0, 1, 2, 3, 4])
     testPL(5, 2, (1,), [1, 2, 3, 4])
     testPL(5, 2, (2,), [2, 3, 4])
@@ -276,6 +275,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
         self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
+        self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
 
     def test_strip_jsonp(self):
         stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
@@ -286,5 +286,64 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(uppercase_escape('aä'), 'aä')
         self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
 
+    def test_limit_length(self):
+        self.assertEqual(limit_length(None, 12), None)
+        self.assertEqual(limit_length('foo', 12), 'foo')
+        self.assertTrue(
+            limit_length('foo bar baz asd', 12).startswith('foo bar'))
+        self.assertTrue('...' in limit_length('foo bar baz asd', 12))
+
+    def test_escape_rfc3986(self):
+        reserved = "!*'();:@&=+$,/?#[]"
+        unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
+        self.assertEqual(escape_rfc3986(reserved), reserved)
+        self.assertEqual(escape_rfc3986(unreserved), unreserved)
+        self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
+        self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
+        self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
+        self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
+
+    def test_escape_url(self):
+        self.assertEqual(
+            escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
+            'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
+        )
+        self.assertEqual(
+            escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
+            'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
+        )
+        self.assertEqual(
+            escape_url('http://тест.рф/фрагмент'),
+            'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
+        )
+        self.assertEqual(
+            escape_url('http://тест.рф/абв?абв=абв#абв'),
+            'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
+        )
+        self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
+
+    def test_js_to_json_realworld(self):
+        inp = '''{
+            'clip':{'provider':'pseudo'}
+        }'''
+        self.assertEqual(js_to_json(inp), '''{
+            "clip":{"provider":"pseudo"}
+        }''')
+        json.loads(js_to_json(inp))
+
+        inp = '''{
+            'playlist':[{'controls':{'all':null}}]
+        }'''
+        self.assertEqual(js_to_json(inp), '''{
+            "playlist":[{"controls":{"all":null}}]
+        }''')
+
+    def test_js_to_json_edgecases(self):
+        on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
+        self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
+
+        on = js_to_json('{"abc": true}')
+        self.assertEqual(json.loads(on), {'abc': True})
+
 if __name__ == '__main__':
     unittest.main()
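
On the `PagedList` split exercised above: `OnDemandPagedList` keeps the old two-argument constructor (page fetcher, page size), while `InAdvancePagedList` additionally takes the page count up front; both expose `getslice`. A sketch mirroring the `testPL` helper, assuming nothing beyond what the test itself uses:

```
from youtube_dl.utils import OnDemandPagedList, InAdvancePagedList

size, pagesize = 5, 2

def get_page(pagenum):
    # One page of consecutive integers, exactly as in testPL above.
    firstid = pagenum * pagesize
    upto = min(size, pagenum * pagesize + pagesize)
    for i in range(firstid, upto):
        yield i

ondemand = OnDemandPagedList(get_page, pagesize)
inadvance = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
assert ondemand.getslice() == inadvance.getslice() == [0, 1, 2, 3, 4]
```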
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -10,7 +10,6 @@ from test.helper import FakeYDL
 
 
 from youtube_dl.extractor import (
-    YoutubeUserIE,
     YoutubePlaylistIE,
     YoutubeIE,
     YoutubeChannelIE,
@@ -43,28 +42,6 @@ class TestYoutubeLists(unittest.TestCase):
         self.assertEqual(len(entries), 25)
         self.assertEqual(YoutubeIE().extract_id(entries[-1]['url']), 'rYefUsYuEp0')
 
-    def test_youtube_channel(self):
-        dl = FakeYDL()
-        ie = YoutubeChannelIE(dl)
-        #test paginated channel
-        result = ie.extract('https://www.youtube.com/channel/UCKfVa3S1e4PHvxWcwyMMg8w')
-        self.assertTrue(len(result['entries']) > 90)
-        #test autogenerated channel
-        result = ie.extract('https://www.youtube.com/channel/HCtnHdj3df7iM/videos')
-        self.assertTrue(len(result['entries']) >= 18)
-
-    def test_youtube_user(self):
-        dl = FakeYDL()
-        ie = YoutubeUserIE(dl)
-        result = ie.extract('https://www.youtube.com/user/TheLinuxFoundation')
-        self.assertTrue(len(result['entries']) >= 320)
-
-    def test_youtube_show(self):
-        dl = FakeYDL()
-        ie = YoutubeShowIE(dl)
-        result = ie.extract('http://www.youtube.com/show/airdisasters')
-        self.assertTrue(len(result) >= 3)
-
     def test_youtube_mix(self):
         dl = FakeYDL()
         ie = YoutubePlaylistIE(dl)
@@ -83,21 +60,5 @@ class TestYoutubeLists(unittest.TestCase):
         entries = result['entries']
         self.assertEqual(len(entries), 100)
 
-    def test_youtube_toplist(self):
-        dl = FakeYDL()
-        ie = YoutubeTopListIE(dl)
-        result = ie.extract('yttoplist:music:Trending')
-        entries = result['entries']
-        self.assertTrue(len(entries) >= 5)
-
-    def test_youtube_search_url(self):
-        dl = FakeYDL()
-        ie = YoutubeSearchURLIE(dl)
-        result = ie.extract('https://www.youtube.com/results?baz=bar&search_query=youtube-dl+test+video&filters=video&lclk=video')
-        entries = result['entries']
-        self.assertIsPlaylist(result)
-        self.assertEqual(result['title'], 'youtube-dl test video')
-        self.assertTrue(len(entries) >= 5)
-
 if __name__ == '__main__':
     unittest.main()
--- a/test/test_youtube_signature.py
+++ b/test/test_youtube_signature.py
@@ -14,7 +14,7 @@ import re
 import string
 
 from youtube_dl.extractor import YoutubeIE
-from youtube_dl.utils import compat_str, compat_urlretrieve
+from youtube_dl.compat import compat_str, compat_urlretrieve
 
 _TESTS = [
     (
@@ -47,18 +47,6 @@ _TESTS = [
         '2ACFC7A61CA478CD21425E5A57EBD73DDC78E22A.2094302436B2D377D14A3BBA23022D023B8BC25AA',
         'A52CB8B320D22032ABB3A41D773D2B6342034902.A22E87CDD37DBE75A5E52412DC874AC16A7CFCA2',
     ),
-    (
-        'http://s.ytimg.com/yts/swfbin/player-vfl5vIhK2/watch_as3.swf',
-        'swf',
-        86,
-        'O1I3456789abcde0ghijklmnopqrstuvwxyzABCDEFGHfJKLMN2PQRSTUVWXY\\!"#$%&\'()*+,-./:;<=>?'
-    ),
-    (
-        'http://s.ytimg.com/yts/swfbin/player-vflmDyk47/watch_as3.swf',
-        'swf',
-        'F375F75BF2AFDAAF2666E43868D46816F83F13E81C46.3725A8218E446A0DECD33F79DC282994D6AA92C92C9',
-        '9C29AA6D499282CD97F33DCED0A644E8128A5273.64C18E31F38361864D86834E6662FAADFA2FB57F'
-    ),
     (
         'https://s.ytimg.com/yts/jsbin/html5player-en_US-vflBb0OQx.js',
         'js',
@@ -22,12 +22,16 @@ import traceback
if os.name == 'nt':
import ctypes

from .utils import (
from .compat import (
compat_cookiejar,
compat_expanduser,
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_request,
)
from .utils import (
escape_url,
ContentTooShortError,
date_from_str,
DateRange,
@@ -60,7 +64,8 @@ from .utils import (
from .cache import Cache
from .extractor import get_info_extractor, gen_extractors
from .downloader import get_suitable_downloader
from .postprocessor import FFmpegMergerPP
from .downloader.rtmp import rtmpdump_version
from .postprocessor import FFmpegMergerPP, FFmpegPostProcessor
from .version import __version__


@@ -106,6 +111,8 @@ class YoutubeDL(object):
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code.
format_limit: Highest quality format to try.
@@ -164,6 +171,8 @@ class YoutubeDL(object):
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.

The following parameters are not used by YoutubeDL itself, they are used by
the FileDownloader:
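A quick illustration of the extract_flat switch documented above (a sketch; the playlist URL is hypothetical and not part of the diff):

from youtube_dl import YoutubeDL

# 'in_playlist' leaves playlist entries as unresolved 'url' results
# instead of running each entry through its own extractor.
ydl = YoutubeDL({'extract_flat': 'in_playlist'})
info = ydl.extract_info('https://www.youtube.com/playlist?list=EXAMPLE', download=False)
for entry in info['entries']:
    print(entry.get('url'))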
@@ -183,7 +192,7 @@ class YoutubeDL(object):
_num_downloads = None
_screen_file = None

def __init__(self, params=None):
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
@@ -227,11 +236,11 @@ class YoutubeDL(object):

if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
and not params['restrictfilenames']):
and not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all charactes. '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True

@@ -240,6 +249,10 @@ class YoutubeDL(object):

self._setup_opener()

if auto_init:
self.print_debug_header()
self.add_default_info_extractors()

def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
@@ -446,7 +459,7 @@ class YoutubeDL(object):
template_dict = collections.defaultdict(lambda: 'NA', template_dict)

outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
tmpl = os.path.expanduser(outtmpl)
tmpl = compat_expanduser(outtmpl)
filename = tmpl % template_dict
return filename
except ValueError as err:
@@ -567,8 +580,12 @@ class YoutubeDL(object):

result_type = ie_result.get('_type', 'video')

if self.params.get('extract_flat', False):
if result_type in ('url', 'url_transparent'):
if result_type in ('url', 'url_transparent'):
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
extract_flat is True):
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(ie_result))
return ie_result

if result_type == 'video':
@@ -641,6 +658,8 @@ class YoutubeDL(object):
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
@@ -707,7 +726,7 @@ class YoutubeDL(object):
if video_formats:
return video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp']
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
@@ -808,28 +827,36 @@ class YoutubeDL(object):
if req_format in ('-1', 'all'):
formats_to_download = formats
else:
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = req_format.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
for rfstr in req_format.split(','):
# We can accept formats requested in the format: 34/5/best, we pick
# the first that is available, starting from left
req_formats = rfstr.split('/')
for rf in req_formats:
if re.match(r'.+?\+.+?', rf) is not None:
# Two formats have been requested like '137+139'
format_1, format_2 = rf.split('+')
formats_info = (self.select_format(format_1, formats),
self.select_format(format_2, formats))
if all(formats_info):
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
selected_format = {
'requested_formats': formats_info,
'format': rf,
'ext': formats_info[0]['ext'],
}
else:
selected_format = None
else:
selected_format = None
else:
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download = [selected_format]
break
selected_format = self.select_format(rf, formats)
if selected_format is not None:
formats_to_download.append(selected_format)
break
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
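In short, the --format grammar this hunk implements: ',' separates independent downloads, '/' is an ordered fallback within each part, and '+' merges a video format with an audio format (video first, as the new error message enforces). A worked example using the itags from the comment above (their availability per video is an assumption):

from youtube_dl import YoutubeDL

# Download two things: best of (137 merged with 139, else 22), and format 17.
ydl = YoutubeDL({'format': '137+139/22,17'})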
@@ -895,6 +922,8 @@ class YoutubeDL(object):
if self.params.get('forcejson', False):
info_dict['_filename'] = filename
self.to_stdout(json.dumps(info_dict))
if self.params.get('dump_single_json', False):
info_dict['_filename'] = filename

# Do nothing else if in simulate mode
if self.params.get('simulate', False):
@@ -1013,7 +1042,7 @@ class YoutubeDL(object):
downloaded = []
success = True
merger = FFmpegMergerPP(self, not self.params.get('keepvideo'))
if not merger._get_executable():
if not merger._executable:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
@@ -1062,12 +1091,15 @@ class YoutubeDL(object):
for url in url_list:
try:
#It also downloads the videos
self.extract_info(url)
res = self.extract_info(url)
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))

return self._download_retcode

@@ -1191,6 +1223,8 @@ class YoutubeDL(object):
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
res += ', %sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
@@ -1241,6 +1275,26 @@ class YoutubeDL(object):

def urlopen(self, req):
""" Start an HTTP download """

# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
req_is_string = isinstance(req, basestring if sys.version_info < (3, 0) else compat_str)
url = req if req_is_string else req.get_full_url()
url_escaped = escape_url(url)

# Substitute URL if any change after escaping
if url != url_escaped:
if req_is_string:
req = url_escaped
else:
req = compat_urllib_request.Request(
url_escaped, data=req.data, headers=req.headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)

return self._opener.open(req, timeout=self._socket_timeout)

def print_debug_header(self):
@@ -1252,11 +1306,13 @@ class YoutubeDL(object):
self.report_warning(
'Your Python is broken! Update to a newer and supported version')

stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
sys.stdout.encoding,
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)

@@ -1275,8 +1331,19 @@ class YoutubeDL(object):
sys.exc_clear()
except:
pass
self._write_string('[debug] Python version %s - %s' %
(platform.python_version(), platform_name()) + '\n')
self._write_string('[debug] Python version %s - %s\n' % (
platform.python_version(), platform_name()))

exe_versions = FFmpegPostProcessor.get_versions()
exe_versions['rtmpdump'] = rtmpdump_version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)

proxy_map = {}
for handler in self._opener.handlers:
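The escape_url workaround in urlopen() above is easiest to see on a concrete URL; this is exactly the mapping asserted by the test_utils case at the top of this section (path, query and fragment are percent-encoded, the IDN host is left alone):

from youtube_dl.utils import escape_url

print(escape_url('http://тест.рф/абв?абв=абв#абв'))
# -> http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2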
@@ -1,82 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__authors__ = (
'Ricardo Garcia Gonzalez',
'Danny Colligan',
'Benjamin Johnson',
'Vasyl\' Vavrychuk',
'Witold Baryluk',
'Paweł Paprota',
'Gergely Imreh',
'Rogério Brito',
'Philipp Hagemeister',
'Sören Schulze',
'Kevin Ngo',
'Ori Avtalion',
'shizeeg',
'Filippo Valsorda',
'Christian Albrecht',
'Dave Vasilevsky',
'Jaime Marquínez Ferrándiz',
'Jeff Crouse',
'Osama Khalid',
'Michael Walter',
'M. Yasoob Ullah Khalid',
'Julien Fraichard',
'Johny Mo Swag',
'Axel Noack',
'Albert Kim',
'Pierre Rudloff',
'Huarong Huo',
'Ismael Mejía',
'Steffan \'Ruirize\' James',
'Andras Elso',
'Jelle van der Waa',
'Marcin Cieślak',
'Anton Larionov',
'Takuya Tsuchida',
'Sergey M.',
'Michael Orlitzky',
'Chris Gahan',
'Saimadhav Heblikar',
'Mike Col',
'Oleg Prutz',
'pulpe',
'Andreas Schmitz',
'Michael Kaiser',
'Niklas Laxström',
'David Triendl',
'Anthony Weems',
'David Wagner',
'Juan C. Olivares',
'Mattias Harrysson',
'phaer',
'Sainyam Kapoor',
'Nicolas Évrard',
'Jason Normore',
'Hoje Lee',
'Adam Thalhammer',
'Georg Jähnig',
'Ralf Haring',
'Koki Takahashi',
'Ariset Llerena',
'Adam Malcontenti-Wilson',
'Tobias Bell',
'Naglis Jonaitis',
'Charles Chen',
'Hassaan Ali',
'Dobrosław Żybort',
'David Fabijan',
'Sebastian Haas',
'Alexander Kirk',
'Erik Johnson',
'Keith Beckman',
'Ole Ernst',
'Aaron McDaniel (mcd1992)',
'Magnus Kolstad',
)

__license__ = 'Public Domain'

import codecs
@@ -89,9 +13,12 @@ import sys
from .options import (
parseOpts,
)
from .utils import (
from .compat import (
compat_expanduser,
compat_getpass,
compat_print,
)
from .utils import (
DateRange,
DEFAULT_OUTTMPL,
decodeOption,
@@ -251,8 +178,6 @@ def _real_main(argv=None):
date = DateRange.day(opts.date)
else:
date = DateRange(opts.dateafter, opts.datebefore)
if opts.default_search not in ('auto', 'auto_warning', 'error', 'fixup_error', None) and ':' not in opts.default_search:
parser.error(u'--default-search invalid; did you forget a colon (:) at the end?')

# Do not download videos when there are audio-only formats
if opts.extractaudio and not opts.keepvideo and opts.format is None:
@@ -280,8 +205,8 @@ def _real_main(argv=None):
u' file! Use "{0}.%(ext)s" instead of "{0}" as the output'
u' template'.format(outtmpl))

any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson
download_archive_fn = os.path.expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive
any_printing = opts.geturl or opts.gettitle or opts.getid or opts.getthumbnail or opts.getdescription or opts.getfilename or opts.getformat or opts.getduration or opts.dumpjson or opts.dump_single_json
download_archive_fn = compat_expanduser(opts.download_archive) if opts.download_archive is not None else opts.download_archive

ydl_opts = {
'usenetrc': opts.usenetrc,
@@ -300,8 +225,9 @@ def _real_main(argv=None):
'forcefilename': opts.getfilename,
'forceformat': opts.getformat,
'forcejson': opts.dumpjson,
'simulate': opts.simulate,
'skip_download': (opts.skip_download or opts.simulate or any_printing),
'dump_single_json': opts.dump_single_json,
'simulate': opts.simulate or any_printing,
'skip_download': opts.skip_download,
'format': opts.format,
'format_limit': opts.format_limit,
'listformats': opts.listformats,
@@ -365,12 +291,10 @@ def _real_main(argv=None):
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
'encoding': opts.encoding,
'exec_cmd': opts.exec_cmd,
'extract_flat': opts.extract_flat,
}

with YoutubeDL(ydl_opts) as ydl:
ydl.print_debug_header()
ydl.add_default_info_extractors()

# PostProcessors
# Add the metadata pp first, the other pps will copy it
if opts.addmetadata:
@@ -8,9 +8,8 @@ import re
import shutil
import traceback

from .utils import (
write_json_file,
)
from .compat import compat_expanduser
from .utils import write_json_file


class Cache(object):
@@ -22,7 +21,7 @@ class Cache(object):
if res is None:
cache_root = os.environ.get('XDG_CACHE_HOME', '~/.cache')
res = os.path.join(cache_root, 'youtube-dl')
return os.path.expanduser(res)
return compat_expanduser(res)

def _get_cache_fn(self, section, key, dtype):
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
317 youtube_dl/compat.py Normal file
@@ -0,0 +1,317 @@
from __future__ import unicode_literals

import getpass
import os
import subprocess
import sys


try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request

try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error

try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse

try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse

try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse

try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar

try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities

try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser

try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client

try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError

try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve


try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')

try:
from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
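A quick check of the Python 2 fallback above, with the default UTF-8 decoding:

print(compat_urllib_parse_unquote('%D0%B0%D0%B1%D0%B2'))
# -> абв   (the %XX bytes collect in pct_sequence and are decoded in one go)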

try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken

def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r

def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result

try:
compat_str = unicode # Python 2
except NameError:
compat_str = str

try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr

try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error

try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
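The fallback uses the classic shell idiom: wrap in single quotes and splice each embedded quote out of them:

print(shlex_quote("don't"))
# -> 'don'"'"'t'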


def compat_ord(c):
if type(c) is int: return c
else: return ord(c)


if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)

def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env

# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.

if os.name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif os.name == 'nt' or os.name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.

If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1

if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
elif not 'HOMEPATH' in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))

if i != 1: #~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])

return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
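The point of the backport in one line: with a non-ASCII home directory on Python 2, expansion comes back as properly decoded text (a sketch; the exact types depend on the filesystem encoding):

# With HOME=/home/тест on a UTF-8 system, under Python 2:
compat_expanduser('~/.cache')   # -> u'/home/тест/.cache'
# The stock os.path.expanduser would splice in undecoded bytes here,
# which is what breaks in the issues cited above (#3854 #3217 #2918).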


if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)


try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
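Callers need no version branching; the fallback keeps the 2.7+ call shape (a usage sketch):

out = subprocess_check_output(['echo', 'hello'])
# -> b'hello\n' on POSIX; a non-zero exit raises CalledProcessError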

if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass


__all__ = [
'compat_HTTPError',
'compat_chr',
'compat_cookiejar',
'compat_expanduser',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_parser',
'compat_http_client',
'compat_ord',
'compat_parse_qs',
'compat_print',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'shlex_quote',
'subprocess_check_output',
]
@@ -2,6 +2,7 @@ from __future__ import unicode_literals

from .common import FileDownloader
from .hls import HlsFD
from .hls import NativeHlsFD
from .http import HttpFD
from .mplayer import MplayerFD
from .rtmp import RtmpFD
@@ -19,6 +20,8 @@ def get_suitable_downloader(info_dict):

if url.startswith('rtmp'):
return RtmpFD
if protocol == 'm3u8_native':
return NativeHlsFD
if (protocol == 'm3u8') or (protocol is None and determine_ext(url) == 'm3u8'):
return HlsFD
if url.startswith('mms') or url.startswith('rtsp'):
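With the new protocol value an extractor can opt a stream into the ffmpeg-free HLS path simply by labelling it (hypothetical info dict, a sketch):

from youtube_dl.downloader import get_suitable_downloader

info_dict = {'url': 'http://example.com/stream.m3u8', 'protocol': 'm3u8_native'}
get_suitable_downloader(info_dict)  # -> NativeHlsFD; plain 'm3u8' still selects the ffmpeg-based HlsFD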

@@ -42,6 +42,7 @@ class FileDownloader(object):
Subclasses of this one must re-define the real_download method.
"""

_TEST_FILE_SIZE = 10241
params = None

def __init__(self, ydl, params):

@@ -16,6 +16,7 @@ from ..utils import (
format_bytes,
encodeFilename,
sanitize_open,
xpath_text,
)


@@ -243,14 +244,23 @@ class F4mFD(FileDownloader):
lambda f: int(f[0]) == requested_bitrate, formats))[0]

base_url = compat_urlparse.urljoin(man_url, media.attrib['url'])
bootstrap = base64.b64decode(doc.find(_add_ns('bootstrapInfo')).text)
bootstrap_node = doc.find(_add_ns('bootstrapInfo'))
if bootstrap_node.text is None:
bootstrap_url = compat_urlparse.urljoin(
base_url, bootstrap_node.attrib['url'])
bootstrap = self.ydl.urlopen(bootstrap_url).read()
else:
bootstrap = base64.b64decode(bootstrap_node.text)
metadata = base64.b64decode(media.find(_add_ns('metadata')).text)
boot_info = read_bootstrap_info(bootstrap)

fragments_list = build_fragments_list(boot_info)
if self.params.get('test', False):
# We only download the first fragment
fragments_list = fragments_list[:1]
total_frags = len(fragments_list)
# For some akamai manifests we'll need to add a query to the fragment url
akamai_pv = xpath_text(doc, _add_ns('pv-2.0'))

tmpfilename = self.temp_name(filename)
(dest_stream, tmpfilename) = sanitize_open(tmpfilename, 'wb')
@@ -290,6 +300,8 @@ class F4mFD(FileDownloader):
for (seg_i, frag_i) in fragments_list:
name = 'Seg%d-Frag%d' % (seg_i, frag_i)
url = base_url + name
if akamai_pv:
url += '?' + akamai_pv.strip(';')
frag_filename = '%s-%s' % (tmpfilename, name)
success = http_dl.download(frag_filename, {'url': url})
if not success:
@@ -1,8 +1,13 @@
from __future__ import unicode_literals

import os
import re
import subprocess

from .common import FileDownloader
from ..utils import (
compat_urlparse,
compat_urllib_request,
check_executable,
encodeFilename,
)
@@ -43,3 +48,57 @@ class HlsFD(FileDownloader):
self.to_stderr(u"\n")
self.report_error(u'%s exited with code %d' % (program, retval))
return False


class NativeHlsFD(FileDownloader):
""" A more limited implementation that does not require ffmpeg """

def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)

self.to_screen(
'[hlsnative] %s: Downloading m3u8 manifest' % info_dict['id'])
data = self.ydl.urlopen(url).read()
s = data.decode('utf-8', 'ignore')
segment_urls = []
for line in s.splitlines():
line = line.strip()
if line and not line.startswith('#'):
segment_url = (
line
if re.match(r'^https?://', line)
else compat_urlparse.urljoin(url, line))
segment_urls.append(segment_url)

is_test = self.params.get('test', False)
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
byte_counter = 0
with open(tmpfilename, 'wb') as outf:
for i, segurl in enumerate(segment_urls):
self.to_screen(
'[hlsnative] %s: Downloading segment %d / %d' %
(info_dict['id'], i + 1, len(segment_urls)))
seg_req = compat_urllib_request.Request(segurl)
if remaining_bytes is not None:
seg_req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))

segment = self.ydl.urlopen(seg_req).read()
if remaining_bytes is not None:
segment = segment[:remaining_bytes]
remaining_bytes -= len(segment)
outf.write(segment)
byte_counter += len(segment)
if remaining_bytes is not None and remaining_bytes <= 0:
break

self._hook_progress({
'downloaded_bytes': byte_counter,
'total_bytes': byte_counter,
'filename': filename,
'status': 'finished',
})
self.try_rename(tmpfilename, filename)
return True
@@ -14,8 +14,6 @@ from ..utils import (


class HttpFD(FileDownloader):
_TEST_FILE_SIZE = 10241

def real_download(self, filename, info_dict):
url = info_dict['url']
tmpfilename = self.temp_name(filename)

@@ -12,9 +12,15 @@ from ..utils import (
compat_str,
encodeFilename,
format_bytes,
get_exe_version,
)


def rtmpdump_version():
return get_exe_version(
'rtmpdump', ['--help'], r'(?i)RTMPDump\s*v?([0-9a-zA-Z._-]+)')


class RtmpFD(FileDownloader):
def real_download(self, filename, info_dict):
def run_rtmpdump(args):
@@ -20,11 +20,14 @@ from .arte import (
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .audiomack import AudiomackIE
from .auengine import AUEngineIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
@@ -59,9 +62,11 @@ from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crunchyroll import CrunchyrollIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .d8 import D8IE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
@@ -83,6 +88,7 @@ from .dropbox import DropboxIE
from .ebaumsworld import EbaumsWorldIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
@@ -132,13 +138,18 @@ from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .hark import HarkIE
from .heise import HeiseIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hornbunny import HornBunnyIE
@@ -168,7 +179,6 @@ from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .justintv import JustinTVIE
from .jpopsukitv import JpopsukiIE
from .kankan import KankanIE
from .keezmovies import KeezMoviesIE
@@ -179,6 +189,7 @@ from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
@@ -186,6 +197,7 @@ from .livestream import (
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
@@ -197,6 +209,7 @@ from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
@@ -206,6 +219,7 @@ from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
@@ -218,6 +232,7 @@ from .mtv import (
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
@@ -235,8 +250,9 @@ from .ndtv import NDTVIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import NHLIE, NHLVideocenterIE
from .niconico import NiconicoIE
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
@@ -244,7 +260,10 @@ from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import NPOIE
from .npo import (
NPOIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKTVIE,
@@ -252,6 +271,7 @@ from .nrk import (
from .ntv import NTVIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .orf import (
ORFTVthekIE,
@@ -261,7 +281,10 @@ from .orf import (
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .podomatic import PodomaticIE
@@ -272,6 +295,7 @@ from .pornoxo import PornoXOIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
@@ -300,6 +324,7 @@ from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .servingsys import ServingSysIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
@@ -330,7 +355,10 @@ from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
@@ -340,6 +368,7 @@ from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
@@ -348,15 +377,22 @@ from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
@@ -365,10 +401,12 @@ from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tvigle import TvigleIE
from .tvp import TvpIE
from .tvplay import TVPlayIE
from .twitch import TwitchIE
from .ubu import UbuIE
from .udemy import (
UdemyIE,
@@ -384,22 +422,26 @@ from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vimeo import (
VimeoIE,
VimeoChannelIE,
VimeoUserIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
@@ -411,9 +453,11 @@ from .viki import VikiIE
from .vk import VKIE
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
@@ -435,12 +479,13 @@ from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .yahoo import (
YahooIE,
YahooNewsIE,
YahooSearchIE,
)
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
@@ -458,10 +503,8 @@ from .youtube import (
YoutubeUserIE,
YoutubeWatchLaterIE,
)

from .zdf import ZDFIE


_ALL_CLASSES = [
klass
for name, klass in globals().items()
@@ -11,19 +11,18 @@ class ABCIE(InfoExtractor):
_VALID_URL = r'http://www\.abc\.net\.au/news/[^/]+/[^/]+/(?P<id>\d+)'

_TEST = {
'url': 'http://www.abc.net.au/news/2014-07-25/bringing-asylum-seekers-to-australia-would-give/5624716',
'md5': 'dad6f8ad011a70d9ddf887ce6d5d0742',
'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
'md5': 'cb3dd03b18455a661071ee1e28344d9f',
'info_dict': {
'id': '5624716',
'id': '5868334',
'ext': 'mp4',
'title': 'Bringing asylum seekers to Australia would give them right to asylum claims: professor',
'description': 'md5:ba36fa5e27e5c9251fd929d339aea4af',
'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
},
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)

urls_info_json = self._search_regex(
@@ -3,12 +3,13 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_HTTPError,
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlparse,

)
from ..utils import (
ExtractorError,
)
@@ -22,7 +22,7 @@ class AllocineIE(InfoExtractor):
'id': '19546517',
'ext': 'mp4',
'title': 'Astérix - Le Domaine des Dieux Teaser VF',
'description': 'md5:4a754271d9c6f16c72629a8a993ee884',
'description': 'md5:abcd09ce503c6560512c14ebfdb720d2',
'thumbnail': 're:http://.*\.jpg',
},
}, {
@@ -35,7 +35,7 @@ class AnySexIE(InfoExtractor):

title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
description = self._html_search_regex(
r'<div class="description">([^<]+)</div>', webpage, 'description', fatal=False)
r'<div class="description"[^>]*>([^<]+)</div>', webpage, 'description', fatal=False)
thumbnail = self._html_search_regex(
r'preview_url\s*:\s*\'(.*?)\'', webpage, 'thumbnail', fatal=False)

@@ -43,7 +43,7 @@ class AnySexIE(InfoExtractor):
r'<a href="http://anysex\.com/categories/[^"]+" title="[^"]*">([^<]+)</a>', webpage)

duration = parse_duration(self._search_regex(
r'<b>Duration:</b> (\d+:\d+)', webpage, 'duration', fatal=False))
r'<b>Duration:</b> (?:<q itemprop="duration">)?(\d+:\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<b>Views:</b> (\d+)', webpage, 'view count', fatal=False))
@@ -4,16 +4,16 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
compat_urllib_parse_urlparse,
compat_urllib_parse,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
parse_xml,
)


@@ -51,14 +51,16 @@ class ARDMediathekIE(InfoExtractor):
else:
video_id = m.group('video_id')

urlp = compat_urllib_parse_urlparse(url)
url = urlp._replace(path=compat_urllib_parse.quote(urlp.path.encode('utf-8'))).geturl()

webpage = self._download_webpage(url, video_id)

if '>Der gewünschte Beitrag ist nicht mehr verfügbar.<' in webpage:
raise ExtractorError('Video %s is no longer available' % video_id, expected=True)

if re.search(r'[\?&]rss($|[=&])', url):
doc = parse_xml(webpage)
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)

title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
@@ -10,8 +10,8 @@ from ..utils import (
unified_strdate,
determine_ext,
get_element_by_id,
compat_str,
get_element_by_attribute,
int_or_none,
)

# There are different sources of video in arte.tv, the extraction process
@@ -86,15 +86,28 @@ class ArteTVPlus7IE(InfoExtractor):
info = self._download_json(json_url, video_id)
player_info = info['videoJsonPlayer']

upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = player_info.get('VDA', '').split(' ')[0]

title = player_info['VTI'].strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle

info_dict = {
'id': player_info['VID'],
'title': player_info['VTI'],
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(player_info.get('VDA', '').split(' ')[0]),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
}

all_formats = player_info['VSR'].values()
all_formats = []
for format_id, format_dict in player_info['VSR'].items():
fmt = dict(format_dict)
fmt['format_id'] = format_id
all_formats.append(fmt)
# Some formats use the m3u8 protocol
all_formats = list(filter(lambda f: f.get('videoFormat') != 'M3U8', all_formats))
def _match_lang(f):
@@ -145,22 +158,12 @@ class ArteTVPlus7IE(InfoExtractor):
)
formats = sorted(formats, key=sort_key)
def _format(format_info):
quality = ''
height = format_info.get('height')
if height is not None:
quality = compat_str(height)
bitrate = format_info.get('bitrate')
if bitrate is not None:
quality += '-%d' % bitrate
if format_info.get('versionCode') is not None:
format_id = '%s-%s' % (quality, format_info['versionCode'])
else:
format_id = quality
info = {
'format_id': format_id,
'format_note': format_info.get('versionLibelle'),
'width': format_info.get('width'),
'height': height,
'format_id': format_info['format_id'],
'format_note': '%s, %s' % (format_info.get('versionCode'), format_info.get('versionLibelle')),
'width': int_or_none(format_info.get('width')),
'height': int_or_none(format_info.get('height')),
'tbr': int_or_none(format_info.get('bitrate')),
}
if format_info['mediaType'] == 'rtmp':
info['url'] = format_info['streamer']
69 youtube_dl/extractor/audiomack.py Normal file
@@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .soundcloud import SoundcloudIE
from ..utils import ExtractorError

import time


class AudiomackIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?audiomack\.com/song/(?P<id>[\w/-]+)'
IE_NAME = 'audiomack'
_TESTS = [
#hosted on audiomack
{
'url': 'http://www.audiomack.com/song/roosh-williams/extraordinary',
'info_dict':
{
'id' : 'roosh-williams/extraordinary',
'ext': 'mp3',
'title': 'Roosh Williams - Extraordinary'
}
},
#hosted on soundcloud via audiomack
{
'url': 'http://www.audiomack.com/song/xclusiveszone/take-kare',
'file': '172419696.mp3',
'info_dict':
{
'ext': 'mp3',
'title': 'Young Thug ft Lil Wayne - Take Kare',
"upload_date": "20141016",
"description": "New track produced by London On Da Track called “Take Kare\"\n\nhttp://instagram.com/theyoungthugworld\nhttps://www.facebook.com/ThuggerThuggerCashMoney\n",
"uploader": "Young Thug World"
}
}
]

def _real_extract(self, url):
video_id = self._match_id(url)

api_response = self._download_json(
"http://www.audiomack.com/api/music/url/song/%s?_=%d" % (
video_id, time.time()),
video_id)

if "url" not in api_response:
raise ExtractorError("Unable to deduce api url of song")
realurl = api_response["url"]

#Audiomack wraps a lot of soundcloud tracks in their branded wrapper
# - if so, pass the work off to the soundcloud extractor
if SoundcloudIE.suitable(realurl):
return {'_type': 'url', 'url': realurl, 'ie_key': 'Soundcloud'}

webpage = self._download_webpage(url, video_id)
artist = self._html_search_regex(
r'<span class="artist">(.*?)</span>', webpage, "artist")
songtitle = self._html_search_regex(
r'<h1 class="profile-title song-title"><span class="artist">.*?</span>(.*?)</h1>',
webpage, "title")
title = artist + " - " + songtitle

return {
'id': video_id,
'title': title,
'url': realurl,
}
@@ -24,8 +24,7 @@ class AUEngineIE(InfoExtractor):
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
video_id = self._match_id(url)

webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(r'<title>(?P<title>.+?)</title>', webpage, 'title')
@@ -15,13 +15,23 @@ class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>.*)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'file': '1812978515.mp3',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
"title": "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
"duration": 9.8485,
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '2b68e5851514c20efdff2afc5603b8b4',
'info_dict': {
'id': '2650410135',
'ext': 'mp3',
'title': 'Lanius (Battle)',
'uploader': 'Ben Prunty Music',
},
}]

def _real_extract(self, url):
@@ -59,9 +69,9 @@ class BandcampIE(InfoExtractor):
raise ExtractorError('No free songs found')

download_link = m_download.group(1)
video_id = re.search(
r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
webpage, re.MULTILINE | re.DOTALL).group('id')
video_id = self._search_regex(
r'var TralbumData = {.*?id: (?P<id>\d+),?$',
webpage, 'video id', flags=re.MULTILINE | re.DOTALL)

download_webpage = self._download_webpage(download_link, video_id, 'Downloading free downloads page')
# We get the dictionary of the track from some javascript code
@@ -100,20 +110,25 @@ class BandcampAlbumIE(InfoExtractor):
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'file': '1353101989.mp3',
'md5': '39bc1eded3476e927c724321ddf116cf',
'info_dict': {
'id': '1353101989',
'ext': 'mp3',
'title': 'Intro',
}
},
{
'file': '38097443.mp3',
'md5': '1a2c32e2691474643e912cc6cd4bffaa',
'info_dict': {
'id': '38097443',
'ext': 'mp3',
'title': 'Kero One - Keep It Alive (Blazo remix)',
}
},
],
'info_dict': {
'title': 'Jazz Format Mixtape vol.1',
},
'params': {
'playlistend': 2
},
53 youtube_dl/extractor/behindkink.py Normal file
@@ -0,0 +1,53 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import url_basename
|
||||
|
||||
|
||||
class BehindKinkIE(InfoExtractor):
|
||||
_VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
|
||||
_TEST = {
|
||||
'url': 'http://www.behindkink.com/2014/08/14/ab1576-performers-voice-finally-heard-the-bill-is-killed/',
|
||||
'md5': '41ad01222b8442089a55528fec43ec01',
|
||||
'info_dict': {
|
||||
'id': '36370',
|
||||
'ext': 'mp4',
|
||||
'title': 'AB1576 - PERFORMERS VOICE FINALLY HEARD - THE BILL IS KILLED!',
|
||||
'description': 'The adult industry voice was finally heard as Assembly Bill 1576 remained\xa0 in suspense today at the Senate Appropriations Hearing. AB1576 was, among other industry damaging issues, a condom mandate...',
|
||||
'upload_date': '20140814',
|
||||
'thumbnail': 'http://www.behindkink.com/wp-content/uploads/2014/08/36370_AB1576_Win.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
display_id = mobj.group('id')
|
||||
year = mobj.group('year')
|
||||
month = mobj.group('month')
|
||||
day = mobj.group('day')
|
||||
upload_date = year + month + day
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
video_url = self._search_regex(
|
||||
r"'file':\s*'([^']+)'",
|
||||
webpage, 'URL base')
|
||||
|
||||
video_id = url_basename(video_url)
|
||||
video_id = video_id.split('_')[0]
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'title': self._og_search_title(webpage),
|
||||
'display_id': display_id,
|
||||
'thumbnail': self._og_search_thumbnail(webpage),
|
||||
'description': self._og_search_description(webpage),
|
||||
'upload_date': upload_date,
|
||||
'age_limit': 18,
|
||||
}
|
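For reference, url_basename keeps only the last path component, so the two-step id extraction above works like this (the CDN URL is hypothetical):

    from youtube_dl.utils import url_basename

    video_url = 'http://cdn.example.com/videos/36370_AB1576_Win.mp4'
    video_id = url_basename(video_url).split('_')[0]
    print(video_id)  # -> '36370'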
39  youtube_dl/extractor/bild.py  Normal file
@@ -0,0 +1,39 @@
#coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import int_or_none


class BildIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bild\.de/(?:[^/]+/)+(?P<display_id>[^/]+)-(?P<id>\d+)(?:,auto=true)?\.bild\.html'
    IE_DESC = 'Bild.de'
    _TEST = {
        'url': 'http://www.bild.de/video/clip/apple-ipad-air/das-koennen-die-neuen-ipads-38184146.bild.html',
        'md5': 'dd495cbd99f2413502a1713a1156ac8a',
        'info_dict': {
            'id': '38184146',
            'ext': 'mp4',
            'title': 'BILD hat sie getestet',
            'thumbnail': 'http://bilder.bild.de/fotos/stand-das-koennen-die-neuen-ipads-38184138/Bild/1.bild.jpg',
            'duration': 196,
            'description': 'Mit dem iPad Air 2 und dem iPad Mini 3 hat Apple zwei neue Tablet-Modelle präsentiert. BILD-Reporter Sven Stein durfte die Geräte bereits testen. ',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        xml_url = url.split(".bild.html")[0] + ",view=xml.bild.xml"
        doc = self._download_xml(xml_url, video_id)

        duration = int_or_none(doc.attrib.get('duration'), scale=1000)

        return {
            'id': video_id,
            'title': doc.attrib['ueberschrift'],
            'description': doc.attrib.get('text'),
            'url': doc.attrib['src'],
            'thumbnail': doc.attrib.get('img'),
            'duration': duration,
        }
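The duration line above relies on int_or_none's scale argument, which divides after parsing (the XML reports milliseconds); a minimal illustration:

    from youtube_dl.utils import int_or_none

    print(int_or_none('196000', scale=1000))  # -> 196 seconds
    print(int_or_none(None, scale=1000))      # -> None instead of a TypeError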
@@ -1,8 +1,6 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
@@ -26,6 +24,8 @@ class BRIE(InfoExtractor):
            'title': 'Wenn das Traditions-Theater wackelt',
            'description': 'Heimatsound-Festival 2014: Wenn das Traditions-Theater wackelt',
            'duration': 34,
            'uploader': 'BR',
            'upload_date': '20140802',
        }
    },
    {
@@ -66,8 +66,7 @@ class BRIE(InfoExtractor):
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')
        display_id = self._match_id(url)
        page = self._download_webpage(url, display_id)
        xml_url = self._search_regex(
            r"return BRavFramework\.register\(BRavFramework\('avPlayer_(?:[a-f0-9-]{36})'\)\.setup\({dataURL:'(/(?:[a-z0-9\-]+/)+[a-z0-9/~_.-]+)'}\)\);", page, 'XMLURL')
@@ -4,37 +4,61 @@ import re
import json

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    parse_age_limit,
)


class BreakIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?break\.com/video/([^/]+)'
    _TEST = {
    _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
        'md5': 'a3513fb1547fba4fb6cfac1bffc6c46b',
        'md5': '33aa4ff477ecd124d18d7b5d23b87ce5',
        'info_dict': {
            'id': '2468056',
            'ext': 'mp4',
            'title': 'When Girls Act Like D-Bags',
        }
    }
    }, {
        'url': 'http://www.break.com/video/ugc/baby-flex-2773063',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1).split("-")[-1]
        embed_url = 'http://www.break.com/embed/%s' % video_id
        webpage = self._download_webpage(embed_url, video_id)
        info_json = self._search_regex(r'var embedVars = ({.*})\s*?</script>',
            webpage, 'info json', flags=re.DOTALL)
        info = json.loads(info_json)
        video_url = info['videoUri']
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.break.com/embed/%s' % video_id, video_id)
        info = json.loads(self._search_regex(
            r'var embedVars = ({.*})\s*?</script>',
            webpage, 'info json', flags=re.DOTALL))

        youtube_id = info.get('youtubeId')
        if youtube_id:
            return self.url_result(youtube_id, 'Youtube')

        final_url = video_url + '?' + info['AuthToken']
        formats = [{
            'url': media['uri'] + '?' + info['AuthToken'],
            'tbr': media['bitRate'],
            'width': media['width'],
            'height': media['height'],
        } for media in info['media']]

        if not formats:
            formats.append({
                'url': info['videoUri']
            })

        self._sort_formats(formats)

        duration = int_or_none(info.get('videoLengthInSeconds'))
        age_limit = parse_age_limit(info.get('audienceRating'))

        return {
            'id': video_id,
            'url': final_url,
            'title': info['contentName'],
            'thumbnail': info['thumbUri'],
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
@@ -14,6 +14,7 @@ from ..utils import (
    compat_str,
    compat_urllib_request,
    compat_parse_qs,
    compat_urllib_parse_urlparse,

    determine_ext,
    ExtractorError,
@@ -23,7 +24,7 @@ from ..utils import (


class BrightcoveIE(InfoExtractor):
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*\?(?P<query>.*)'
    _VALID_URL = r'https?://.*brightcove\.com/(services|viewer).*?\?(?P<query>.*)'
    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'

    _TESTS = [
@@ -87,6 +88,15 @@ class BrightcoveIE(InfoExtractor):
                'description': 'UCI MTB World Cup 2014: Fort William, UK - Downhill Finals',
            },
        },
        {
            # playlist test
            # from http://support.brightcove.com/en/video-cloud/docs/playlist-support-single-video-players
            'url': 'http://c.brightcove.com/services/viewer/htmlFederated?playerID=3550052898001&playerKey=AQ%7E%7E%2CAAABmA9XpXk%7E%2C-Kp7jNgisre1fG5OdqpAFUTcs0lP_ZoL',
            'info_dict': {
                'title': 'Sealife',
            },
            'playlist_mincount': 7,
        },
    ]

    @classmethod
@@ -251,11 +261,19 @@ class BrightcoveIE(InfoExtractor):
        formats = []
        for rend in renditions:
            url = rend['defaultURL']
            if not url:
                continue
            if rend['remote']:
                # This type of renditions are served through akamaihd.net,
                # but they don't use f4m manifests
                url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
                ext = 'flv'
            url_comp = compat_urllib_parse_urlparse(url)
            if url_comp.path.endswith('.m3u8'):
                formats.extend(
                    self._extract_m3u8_formats(url, info['id'], 'mp4'))
                continue
            elif 'akamaihd.net' in url_comp.netloc:
                # This type of renditions are served through
                # akamaihd.net, but they don't use f4m manifests
                url = url.replace('control/', '') + '?&v=3.3.0&fp=13&r=FEEFJ&g=RTSJIMBMPFPB'
                ext = 'flv'
            else:
                ext = determine_ext(url)
            size = rend.get('size')
@@ -10,12 +10,12 @@ from ..utils import ExtractorError
class BYUtvIE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?byutv.org/watch/[0-9a-f-]+/(?P<video_id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.byutv.org/watch/44e80f7b-e3ba-43ba-8c51-b1fd96c94a79/granite-flats-talking',
        'url': 'http://www.byutv.org/watch/6587b9a3-89d2-42a6-a7f7-fd2f81840a7d/studio-c-season-5-episode-5',
        'info_dict': {
            'id': 'granite-flats-talking',
            'id': 'studio-c-season-5-episode-5',
            'ext': 'mp4',
            'description': 'md5:4e9a7ce60f209a33eca0ac65b4918e1c',
            'title': 'Talking',
            'description': 'md5:5438d33774b6bdc662f9485a340401cc',
            'title': 'Season 5 Episode 5',
            'thumbnail': 're:^https?://.*promo.*'
        },
        'params': {
@@ -7,15 +7,21 @@ from .common import InfoExtractor
from ..utils import (
    unified_strdate,
    url_basename,
    qualities,
)


class CanalplusIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.canalplus\.fr/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/cplus/%s'
    IE_NAME = 'canalplus.fr'
    IE_DESC = 'canalplus.fr, piwiplus.fr and d8.tv'
    _VALID_URL = r'https?://(?:www\.(?P<site>canalplus\.fr|piwiplus\.fr|d8\.tv)/.*?/(?P<path>.*)|player\.canalplus\.fr/#/(?P<id>[0-9]+))'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/%s/%s'
    _SITE_ID_MAP = {
        'canalplus.fr': 'cplus',
        'piwiplus.fr': 'teletoon',
        'd8.tv': 'd8',
    }

    _TEST = {
    _TESTS = [{
        'url': 'http://www.canalplus.fr/c-infos-documentaires/pid1830-c-zapping.html?vid=922470',
        'md5': '3db39fb48b9685438ecf33a1078023e4',
        'info_dict': {
@@ -25,36 +31,73 @@ class CanalplusIE(InfoExtractor):
            'description': 'Le meilleur de toutes les chaînes, tous les jours.\nEmission du 26 août 2013',
            'upload_date': '20130826',
        },
    }
    }, {
        'url': 'http://www.piwiplus.fr/videos-piwi/pid1405-le-labyrinthe-boing-super-ranger.html?vid=1108190',
        'info_dict': {
            'id': '1108190',
            'ext': 'flv',
            'title': 'Le labyrinthe - Boing super ranger',
            'description': 'md5:4cea7a37153be42c1ba2c1d3064376ff',
            'upload_date': '20140724',
        },
        'skip': 'Only works from France',
    }, {
        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        'info_dict': {
            'id': '966289',
            'ext': 'flv',
            'title': 'Campagne intime - Documentaire exceptionnel',
            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
            'upload_date': '20131108',
        },
        'skip': 'videos get deleted after a while',
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.groupdict().get('id')

        site_id = self._SITE_ID_MAP[mobj.group('site') or 'canal']

        # Beware, some subclasses do not define an id group
        display_id = url_basename(mobj.group('path'))

        if video_id is None:
            webpage = self._download_webpage(url, display_id)
            video_id = self._search_regex(r'<canal:player videoId="(\d+)"', webpage, 'video id')
            video_id = self._search_regex(
                r'<canal:player[^>]+?videoId="(\d+)"', webpage, 'video id')

        info_url = self._VIDEO_INFO_TEMPLATE % video_id
        info_url = self._VIDEO_INFO_TEMPLATE % (site_id, video_id)
        doc = self._download_xml(info_url, video_id, 'Downloading video XML')

        video_info = [video for video in doc if video.find('ID').text == video_id][0]
        media = video_info.find('MEDIA')
        infos = video_info.find('INFOS')

        preferences = ['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS']
        preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])

        formats = [
            {
                'url': fmt.text + '?hdcore=2.11.3' if fmt.tag == 'HDS' else fmt.text,
                'format_id': fmt.tag,
                'ext': 'mp4' if fmt.tag == 'HLS' else 'flv',
                'preference': preferences.index(fmt.tag) if fmt.tag in preferences else -1,
            } for fmt in media.find('VIDEOS') if fmt.text
        ]
        formats = []
        for fmt in media.find('VIDEOS'):
            format_url = fmt.text
            if not format_url:
                continue
            format_id = fmt.tag
            if format_id == 'HLS':
                hls_formats = self._extract_m3u8_formats(format_url, video_id, 'flv')
                for fmt in hls_formats:
                    fmt['preference'] = preference(format_id)
                formats.extend(hls_formats)
            elif format_id == 'HDS':
                hds_formats = self._extract_f4m_formats(format_url + '?hdcore=2.11.3', video_id)
                for fmt in hds_formats:
                    fmt['preference'] = preference(format_id)
                formats.extend(hds_formats)
            else:
                formats.append({
                    'url': format_url,
                    'format_id': format_id,
                    'preference': preference(format_id),
                })
        self._sort_formats(formats)

        return {
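The qualities() helper adopted above turns an ordered list into a preference lookup, keeping the semantics of the old preferences.index() code; a rough sketch of the expected behaviour:

    from youtube_dl.utils import qualities

    preference = qualities(['MOBILE', 'BAS_DEBIT', 'HAUT_DEBIT', 'HD', 'HLS', 'HDS'])
    print(preference('MOBILE'))   # -> 0, lowest quality
    print(preference('HDS'))      # -> 5, highest quality
    print(preference('UNKNOWN'))  # -> -1, unknown ids sort below everything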
@@ -27,7 +27,7 @@ class Channel9IE(InfoExtractor):
            'title': 'Developer Kick-Off Session: Stuff We Love',
            'description': 'md5:c08d72240b7c87fcecafe2692f80e35f',
            'duration': 4576,
            'thumbnail': 'http://media.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
            'thumbnail': 'http://video.ch9.ms/ch9/9d51/03902f2d-fc97-4d3c-b195-0bfe15a19d51/KOS002_220.jpg',
            'session_code': 'KOS002',
            'session_day': 'Day 1',
            'session_room': 'Arena 1A',
@@ -43,7 +43,7 @@ class Channel9IE(InfoExtractor):
            'title': 'Self-service BI with Power BI - nuclear testing',
            'description': 'md5:d1e6ecaafa7fb52a2cacdf9599829f5b',
            'duration': 1540,
            'thumbnail': 'http://media.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
            'thumbnail': 'http://video.ch9.ms/ch9/87e1/0300391f-a455-4c72-bec3-4422f19287e1/selfservicenuk_512.jpg',
            'authors': [ 'Mike Wilmot' ],
        },
    }
@@ -94,7 +94,7 @@ class Channel9IE(InfoExtractor):

    def _extract_title(self, html):
        title = self._html_search_meta('title', html, 'title')
        if title is None:
        if title is None:
            title = self._og_search_title(html)
        TITLE_SUFFIX = ' (Channel 9)'
        if title is not None and title.endswith(TITLE_SUFFIX):
@@ -115,7 +115,7 @@ class Channel9IE(InfoExtractor):
        return self._html_search_meta('description', html, 'description')

    def _extract_duration(self, html):
        m = re.search(r'data-video_duration="(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
        m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"', html)
        return ((int(m.group('hours')) * 60 * 60) + (int(m.group('minutes')) * 60) + int(m.group('seconds'))) if m else None

    def _extract_slides(self, html):
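The new "length" regex feeds the same hh:mm:ss arithmetic as before; for instance, the KOS002 test entry's duration of 4576 seconds corresponds to a "01:16:16" value (sample string constructed to match):

    import re

    m = re.search(r'"length": *"(?P<hours>\d{2}):(?P<minutes>\d{2}):(?P<seconds>\d{2})"',
                  '"length": "01:16:16"')
    duration = (int(m.group('hours')) * 60 * 60
                + int(m.group('minutes')) * 60
                + int(m.group('seconds')))
    print(duration)  # -> 4576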
@@ -167,7 +167,7 @@ class Channel9IE(InfoExtractor):
        return re.findall(r'<a href="/Events/Speakers/[^"]+">([^<]+)</a>', html)

    def _extract_content(self, html, content_path):
        # Look for downloadable content
        # Look for downloadable content
        formats = self._formats_from_html(html)
        slides = self._extract_slides(html)
        zip_ = self._extract_zip(html)
@@ -258,16 +258,17 @@ class Channel9IE(InfoExtractor):

        webpage = self._download_webpage(url, content_path, 'Downloading web page')

        page_type_m = re.search(r'<meta name="Search.PageType" content="(?P<pagetype>[^"]+)"/>', webpage)
        if page_type_m is None:
            raise ExtractorError('Search.PageType not found, don\'t know how to process this page', expected=True)
        page_type_m = re.search(r'<meta name="WT.entryid" content="(?P<pagetype>[^:]+)[^"]+"/>', webpage)
        if page_type_m is not None:
            page_type = page_type_m.group('pagetype')
            if page_type == 'Entry':  # Any 'item'-like page, may contain downloadable content
                return self._extract_entry_item(webpage, content_path)
            elif page_type == 'Session':  # Event session page, may contain downloadable content
                return self._extract_session(webpage, content_path)
            elif page_type == 'Event':
                return self._extract_list(content_path)
            else:
                raise ExtractorError('Unexpected WT.entryid %s' % page_type, expected=True)

        page_type = page_type_m.group('pagetype')
        if page_type == 'List':  # List page, may contain list of 'item'-like objects
        else:  # Assuming list
            return self._extract_list(content_path)
        elif page_type == 'Entry.Item':  # Any 'item'-like page, may contain downloadable content
            return self._extract_entry_item(webpage, content_path)
        elif page_type == 'Session':  # Event session page, may contain downloadable content
            return self._extract_session(webpage, content_path)
        else:
            raise ExtractorError('Unexpected Search.PageType %s' % page_type, expected=True)
@@ -42,53 +42,71 @@ class CinemassacreIE(InfoExtractor):

        webpage = self._download_webpage(url, display_id)
        video_date = mobj.group('date_Y') + mobj.group('date_m') + mobj.group('date_d')
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?id=(?:Cinemassacre-)?(?P<video_id>.+?))"', webpage)
        mobj = re.search(r'src="(?P<embed_url>http://player\.screenwavemedia\.com/play/[a-zA-Z]+\.php\?[^"]*\bid=(?P<full_video_id>(?:Cinemassacre-)?(?P<video_id>.+?)))"', webpage)
        if not mobj:
            raise ExtractorError('Can\'t extract embed url and video id')
        playerdata_url = mobj.group('embed_url')
        video_id = mobj.group('video_id')
        full_video_id = mobj.group('full_video_id')

        video_title = self._html_search_regex(
            r'<title>(?P<title>.+?)\|', webpage, 'title')
        video_description = self._html_search_regex(
            r'<div class="entry-content">(?P<description>.+?)</div>',
            webpage, 'description', flags=re.DOTALL, fatal=False)
        video_thumbnail = self._og_search_thumbnail(webpage)

        playerdata = self._download_webpage(playerdata_url, video_id, 'Downloading player webpage')
        video_thumbnail = self._search_regex(
            r'image: \'(?P<thumbnail>[^\']+)\'', playerdata, 'thumbnail', fatal=False)
        sd_url = self._search_regex(r'file: \'([^\']+)\', label: \'SD\'', playerdata, 'sd_file')
        videolist_url = self._search_regex(r'file: \'([^\']+\.smil)\'}', playerdata, 'videolist_url')

        videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
        vidurl = self._search_regex(
            r'\'vidurl\'\s*:\s*"([^\']+)"', playerdata, 'vidurl').replace('\\/', '/')

        formats = []
        baseurl = sd_url[:sd_url.rfind('/')+1]
        for video in videolist.findall('.//video'):
            src = video.get('src')
            if not src:
                continue
            file_ = src.partition(':')[-1]
            width = int_or_none(video.get('width'))
            height = int_or_none(video.get('height'))
            bitrate = int_or_none(video.get('system-bitrate'))
            format = {
                'url': baseurl + file_,
                'format_id': src.rpartition('.')[0].rpartition('_')[-1],
            }
            if width or height:
                format.update({
                    'tbr': bitrate // 1000 if bitrate else None,
                    'width': width,
                    'height': height,
                })
            else:
                format.update({
                    'abr': bitrate // 1000 if bitrate else None,
                    'vcodec': 'none',
                })
            formats.append(format)
        self._sort_formats(formats)
        videolist_url = None

        mobj = re.search(r"'videoserver'\s*:\s*'(?P<videoserver>[^']+)'", playerdata)
        if mobj:
            videoserver = mobj.group('videoserver')
            mobj = re.search(r'\'vidid\'\s*:\s*"(?P<vidid>[^\']+)"', playerdata)
            vidid = mobj.group('vidid') if mobj else full_video_id
            videolist_url = 'http://%s/vod/smil:%s.smil/jwplayer.smil' % (videoserver, vidid)
        else:
            mobj = re.search(r"file\s*:\s*'(?P<smil>http.+?/jwplayer\.smil)'", playerdata)
            if mobj:
                videolist_url = mobj.group('smil')

        if videolist_url:
            videolist = self._download_xml(videolist_url, video_id, 'Downloading videolist XML')
            formats = []
            baseurl = vidurl[:vidurl.rfind('/')+1]
            for video in videolist.findall('.//video'):
                src = video.get('src')
                if not src:
                    continue
                file_ = src.partition(':')[-1]
                width = int_or_none(video.get('width'))
                height = int_or_none(video.get('height'))
                bitrate = int_or_none(video.get('system-bitrate'))
                format = {
                    'url': baseurl + file_,
                    'format_id': src.rpartition('.')[0].rpartition('_')[-1],
                }
                if width or height:
                    format.update({
                        'tbr': bitrate // 1000 if bitrate else None,
                        'width': width,
                        'height': height,
                    })
                else:
                    format.update({
                        'abr': bitrate // 1000 if bitrate else None,
                        'vcodec': 'none',
                    })
                formats.append(format)
            self._sort_formats(formats)
        else:
            formats = [{
                'url': vidurl,
            }]

        return {
            'id': video_id,
@@ -4,7 +4,6 @@ import json
import re

from .common import InfoExtractor
from ..utils import int_or_none


_translation_table = {
@@ -35,14 +34,11 @@ class CliphunterIE(InfoExtractor):
            'title': 'Fun Jynx Maze solo',
            'thumbnail': 're:^https?://.*\.jpg$',
            'age_limit': 18,
            'duration': 1317,
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        video_title = self._search_regex(
@@ -86,14 +82,11 @@ class CliphunterIE(InfoExtractor):
        thumbnail = self._search_regex(
            r"var\s+mov_thumb\s*=\s*'([^']+)';",
            webpage, 'thumbnail', fatal=False)
        duration = int_or_none(self._search_regex(
            r'pl_dur\s*=\s*([0-9]+)', webpage, 'duration', fatal=False))

        return {
            'id': video_id,
            'title': video_title,
            'formats': formats,
            'duration': duration,
            'age_limit': self._rta_search(webpage),
            'thumbnail': thumbnail,
        }
@@ -4,10 +4,14 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse,
    compat_HTTPError,
)
from ..utils import (
    ExtractorError,
    HEADRequest,
    remove_end,
)

@@ -21,6 +25,7 @@ class CloudyIE(InfoExtractor):
    '''
    _EMBED_URL = 'http://www.%s/embed.php?id=%s'
    _API_URL = 'http://www.%s/api/player.api.php?%s'
    _MAX_TRIES = 2
    _TESTS = [
        {
            'url': 'https://www.cloudy.ec/v/af511e2527aac',
@@ -42,24 +47,30 @@ class CloudyIE(InfoExtractor):
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_host = mobj.group('host')
        video_id = mobj.group('id')
    def _extract_video(self, video_host, video_id, file_key, error_url=None, try_num=0):

        url = self._EMBED_URL % (video_host, video_id)
        webpage = self._download_webpage(url, video_id)
        if try_num > self._MAX_TRIES - 1:
            raise ExtractorError('Unable to extract video URL', expected=True)

        file_key = self._search_regex(
            r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')
        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode({
        form = {
            'file': video_id,
            'key': file_key,
        }))
        }

        if error_url:
            form.update({
                'numOfErrors': try_num,
                'errorCode': '404',
                'errorUrl': error_url,
            })

        data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
        player_data = self._download_webpage(
            data_url, video_id, 'Downloading player data')
        data = compat_parse_qs(player_data)

        try_num += 1

        if 'error' in data:
            raise ExtractorError(
                '%s error: %s' % (self.IE_NAME, ' '.join(data['error_msg'])),
@@ -69,16 +80,31 @@ class CloudyIE(InfoExtractor):
        if title:
            title = remove_end(title, '&asdasdas').strip()

        formats = []
        video_url = data.get('url', [None])[0]

        if video_url:
            formats.append({
                'format_id': 'sd',
                'url': video_url,
            })
            try:
                self._request_webpage(HEADRequest(video_url), video_id, 'Checking video URL')
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code in [404, 410]:
                    self.report_warning('Invalid video URL, requesting another', video_id)
                    return self._extract_video(video_host, video_id, file_key, video_url, try_num)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'formats': formats,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_host = mobj.group('host')
        video_id = mobj.group('id')

        url = self._EMBED_URL % (video_host, video_id)
        webpage = self._download_webpage(url, video_id)

        file_key = self._search_regex(
            r'filekey\s*=\s*"([^"]+)"', webpage, 'file_key')

        return self._extract_video(video_host, video_id, file_key)
@@ -12,13 +12,14 @@ from ..utils import (

class CNNIE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://((edition|www)\.)?cnn\.com/video/(data/.+?|\?)/
        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn|(?=&)))'''
        (?P<path>.+?/(?P<title>[^/]+?)(?:\.cnn(-ap)?|(?=&)))'''

    _TESTS = [{
        'url': 'http://edition.cnn.com/video/?/video/sports/2013/06/09/nadal-1-on-1.cnn',
        'file': 'sports_2013_06_09_nadal-1-on-1.cnn.mp4',
        'md5': '3e6121ea48df7e2259fe73a0628605c4',
        'info_dict': {
            'id': 'sports_2013_06_09_nadal-1-on-1.cnn',
            'ext': 'mp4',
            'title': 'Nadal wins 8th French Open title',
            'description': 'World Sport\'s Amanda Davies chats with 2013 French Open champion Rafael Nadal.',
            'duration': 135,
@@ -27,9 +28,10 @@ class CNNIE(InfoExtractor):
    },
    {
        "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
        "file": "us_2013_08_21_sot-student-gives-epic-speech.georgia-institute-of-technology.mp4",
        "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
        "info_dict": {
            'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
            'ext': 'mp4',
            "title": "Student's epic speech stuns new freshmen",
            "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
            "upload_date": "20130821",
@@ -1,6 +1,7 @@
from __future__ import unicode_literals

import base64
import datetime
import hashlib
import json
import netrc
@@ -11,15 +12,18 @@ import sys
import time
import xml.etree.ElementTree

from ..utils import (
from ..compat import (
    compat_http_client,
    compat_urllib_error,
    compat_urllib_parse_urlparse,
    compat_urlparse,
    compat_str,

)
from ..utils import (
    clean_html,
    compiled_regex_type,
    ExtractorError,
    float_or_none,
    int_or_none,
    RegexNotFoundError,
    sanitize_filename,
@@ -69,6 +73,7 @@ class InfoExtractor(object):
                    * acodec     Name of the audio codec in use
                    * asr        Audio sampling rate in Hertz
                    * vbr        Average video bitrate in KBit/s
                    * fps        Frame rate
                    * vcodec     Name of the video codec in use
                    * container  Name of the container format
                    * filesize   The number of bytes, if known in advance
@@ -86,6 +91,10 @@ class InfoExtractor(object):
                        format, irrespective of the file format.
                        -1 for default (order by other properties),
                        -2 or smaller for less than default.
                    * source_preference  Order number for this video source
                        (quality takes higher priority)
                        -1 for default (order by other properties),
                        -2 or smaller for less than default.
                    * http_referer  HTTP Referer header value to set.
                    * http_method  HTTP method to use for the download.
                    * http_headers  A dictionary of additional HTTP headers
@@ -130,9 +139,13 @@ class InfoExtractor(object):
                    by YoutubeDL if it's missing)
    categories:     A list of categories that the video falls in, for example
                    ["Sports", "Berlin"]
    is_live:        True, False, or None (=unknown). Whether this video is a
                    live stream that goes on instead of a fixed-length video.

    Unless mentioned otherwise, the fields should be Unicode strings.

    Unless mentioned otherwise, None is equivalent to absence of information.

    Subclasses of this one should re-define the _real_initialize() and
    _real_extract() methods and define a _VALID_URL regexp.
    Probably, they should also be added to the list of extractors.
@@ -161,6 +174,14 @@ class InfoExtractor(object):
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url) is not None

    @classmethod
    def _match_id(cls, url):
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        m = cls._VALID_URL_RE.match(url)
        assert m
        return m.group('id')

    @classmethod
    def working(cls):
        """Getter method for _WORKING."""
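With _match_id available, any extractor whose _VALID_URL defines an (?P<id>...) group can drop the re.match boilerplate; a hypothetical extractor (FooIE is made up) inside an extractor module would now read:

    class FooIE(InfoExtractor):
        _VALID_URL = r'https?://foo\.example/video/(?P<id>[0-9]+)'

        def _real_extract(self, url):
            # equivalent to re.match(self._VALID_URL, url).group('id')
            video_id = self._match_id(url)
            webpage = self._download_webpage(url, video_id)
            ...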
@@ -223,7 +244,6 @@ class InfoExtractor(object):

    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns a tuple (page content as string, URL handle) """

        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]
@@ -232,6 +252,10 @@ class InfoExtractor(object):
        if urlh is False:
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal)
        return (content, urlh)

    def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True):
        content_type = urlh.headers.get('Content-Type', '')
        webpage_bytes = urlh.read()
        m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
@@ -266,6 +290,12 @@ class InfoExtractor(object):
            raw_filename = basen + '.dump'
            filename = sanitize_filename(raw_filename, restricted=True)
            self.to_screen('Saving request to ' + filename)
            # Working around MAX_PATH limitation on Windows (see
            # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
            if os.name == 'nt':
                absfilepath = os.path.abspath(filename)
                if len(absfilepath) > 259:
                    filename = '\\\\?\\' + absfilepath
            with open(filename, 'wb') as outf:
                outf.write(webpage_bytes)

@@ -284,7 +314,7 @@ class InfoExtractor(object):
                msg += ' Visit %s for more details' % blocked_iframe
            raise ExtractorError(msg, expected=True)

        return (content, urlh)
        return content

    def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
        """ Returns the data of the page as a string """
@@ -321,7 +351,11 @@ class InfoExtractor(object):
        try:
            return json.loads(json_string)
        except ValueError as ve:
            raise ExtractorError('Failed to download JSON', cause=ve)
            errmsg = '%s: Failed to parse JSON ' % video_id
            if fatal:
                raise ExtractorError(errmsg, cause=ve)
            else:
                self.report_warning(errmsg + str(ve))

    def report_warning(self, msg, video_id=None):
        idstr = '' if video_id is None else '%s: ' % video_id
@@ -370,7 +404,7 @@ class InfoExtractor(object):
            video_info['title'] = playlist_title
        return video_info

    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
    def _search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
        """
        Perform a regex search on the given string, using a single or a list of
        patterns returning the first matching group.
@@ -391,8 +425,11 @@ class InfoExtractor(object):
            _name = name

        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
            if group is None:
                # return the first matching group
                return next(g for g in mobj.groups() if g is not None)
            else:
                return mobj.group(group)
        elif default is not _NO_DEFAULT:
            return default
        elif fatal:
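The new group argument lets a pattern use extra capturing groups (e.g. for backreferences) while still returning a specific one; a standalone sketch of why that matters, with a made-up page snippet:

    import re

    webpage = '<h1 class="title">Some Title</h1>'
    mobj = re.search(r'<h1 class=(["\'])title\1>(?P<title>[^<]+)</h1>', webpage)
    # old behaviour: first non-None group is the quote captured for \1
    print(next(g for g in mobj.groups() if g is not None))  # -> '"'
    # with group='title', the intended group is returned instead
    print(mobj.group('title'))                              # -> 'Some Title'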
@ -402,11 +439,11 @@ class InfoExtractor(object):
|
||||
'please report this issue on http://yt-dl.org/bug' % _name)
|
||||
return None
|
||||
|
||||
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0):
|
||||
def _html_search_regex(self, pattern, string, name, default=_NO_DEFAULT, fatal=True, flags=0, group=None):
|
||||
"""
|
||||
Like _search_regex, but strips HTML tags and unescapes entities.
|
||||
"""
|
||||
res = self._search_regex(pattern, string, name, default, fatal, flags)
|
||||
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
|
||||
if res:
|
||||
return clean_html(res).strip()
|
||||
else:
|
||||
@ -500,9 +537,9 @@ class InfoExtractor(object):
|
||||
display_name = name
|
||||
return self._html_search_regex(
|
||||
r'''(?ix)<meta
|
||||
(?=[^>]+(?:itemprop|name|property)=["\']?%s["\']?)
|
||||
[^>]+content=["\']([^"\']+)["\']''' % re.escape(name),
|
||||
html, display_name, fatal=fatal, **kwargs)
|
||||
(?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
|
||||
[^>]+content=(["\'])(?P<content>.*?)\1''' % re.escape(name),
|
||||
html, display_name, fatal=fatal, group='content', **kwargs)
|
||||
|
||||
def _dc_search_uploader(self, html):
|
||||
return self._html_search_meta('dc.creator', html, 'uploader')
|
||||
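The rewritten meta regex pairs each quote with a backreference, so a content value containing the other quote character no longer truncates the match; the same idea in isolation (runnable sketch):

    import re

    html = '<meta property="og:title" content=\'He said "hi"\'>'
    m = re.search(
        r'''(?ix)<meta
            (?=[^>]+(?:itemprop|name|property)=(["\']?)%s\1)
            [^>]+content=(["\'])(?P<content>.*?)\2''' % re.escape('og:title'),
        html)
    print(m.group('content'))  # -> He said "hi"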
@@ -586,14 +623,16 @@ class InfoExtractor(object):
                f.get('vbr') if f.get('vbr') is not None else -1,
                f.get('abr') if f.get('abr') is not None else -1,
                audio_ext_preference,
                f.get('fps') if f.get('fps') is not None else -1,
                f.get('filesize') if f.get('filesize') is not None else -1,
                f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
                f.get('source_preference') if f.get('source_preference') is not None else -1,
                f.get('format_id'),
            )
        formats.sort(key=_formats_key)

    def http_scheme(self):
        """ Either "https:" or "https:", depending on the user's preferences """
        """ Either "http:" or "https:", depending on the user's preferences """
        return (
            'http:'
            if self._downloader.params.get('prefer_insecure', False)
@@ -638,7 +677,9 @@ class InfoExtractor(object):

        return formats

    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None):
    def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
                              entry_protocol='m3u8', preference=None):

        formats = [{
            'format_id': 'm3u8-meta',
            'url': m3u8_url,
@@ -649,7 +690,15 @@ class InfoExtractor(object):
            'format_note': 'Quality selection URL',
        }]

        m3u8_doc = self._download_webpage(m3u8_url, video_id)
        format_url = lambda u: (
            u
            if re.match(r'^https?://', u)
            else compat_urlparse.urljoin(m3u8_url, u))

        m3u8_doc = self._download_webpage(
            m3u8_url, video_id,
            note='Downloading m3u8 information',
            errnote='Failed to download m3u8 information')
        last_info = None
        kv_rex = re.compile(
            r'(?P<key>[a-zA-Z_-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)')
@@ -665,15 +714,17 @@ class InfoExtractor(object):
                continue
            else:
                if last_info is None:
                    formats.append({'url': line})
                    formats.append({'url': format_url(line)})
                    continue
                tbr = int_or_none(last_info.get('BANDWIDTH'), scale=1000)

                f = {
                    'format_id': 'm3u8-%d' % (tbr if tbr else len(formats)),
                    'url': line.strip(),
                    'url': format_url(line.strip()),
                    'tbr': tbr,
                    'ext': ext,
                    'protocol': entry_protocol,
                    'preference': preference,
                }
                codecs = last_info.get('CODECS')
                if codecs:
@@ -693,6 +744,34 @@ class InfoExtractor(object):
        self._sort_formats(formats)
        return formats
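The format_url lambda above only rewrites relative playlist entries; compat_urlparse.urljoin is the standard library's urljoin, so resolution behaves as follows:

    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin      # Python 2

    m3u8_url = 'http://example.com/hls/master.m3u8'
    print(urljoin(m3u8_url, '360p/index.m3u8'))
    # -> http://example.com/hls/360p/index.m3u8
    # absolute entries match ^https?:// and skip urljoin entirely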
    def _live_title(self, name):
        """ Generate the title for a live video """
        now = datetime.datetime.now()
        now_str = now.strftime("%Y-%m-%d %H:%M")
        return name + ' ' + now_str

    def _int(self, v, name, fatal=False, **kwargs):
        res = int_or_none(v, **kwargs)
        if 'get_attr' in kwargs:
            print(getattr(v, kwargs['get_attr']))
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res

    def _float(self, v, name, fatal=False, **kwargs):
        res = float_or_none(v, **kwargs)
        if res is None:
            msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
            if fatal:
                raise ExtractorError(msg)
            else:
                self._downloader.report_warning(msg)
        return res


class SearchInfoExtractor(InfoExtractor):
    """
@@ -34,6 +34,8 @@ class CondeNastIE(InfoExtractor):
    _VALID_URL = r'http://(video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
    IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

    EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed)/.+?' % '|'.join(_SITES.keys())

    _TEST = {
        'url': 'http://video.wired.com/watch/3d-printed-speakers-lit-with-led',
        'md5': '1921f713ed48aabd715691f774c451f7',
@@ -9,7 +9,7 @@ import xml.etree.ElementTree

from hashlib import sha1
from math import pow, sqrt, floor
from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
    ExtractorError,
    compat_urllib_parse,
@@ -17,16 +17,16 @@ from ..utils import (
    bytes_to_intlist,
    intlist_to_bytes,
    unified_strdate,
    clean_html,
    urlencode_postdata,
)
from ..aes import (
    aes_cbc_decrypt,
    inc,
)
from .common import InfoExtractor


class CrunchyrollIE(InfoExtractor):
class CrunchyrollIE(SubtitlesInfoExtractor):
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?:[^/]*/[^/?&]*?|media/\?id=)(?P<video_id>[0-9]+))(?:[/?&]|$)'
    _TEST = {
        'url': 'http://www.crunchyroll.com/wanna-be-the-strongest-in-the-world/episode-1-an-idol-wrestler-is-born-645513',
@@ -39,6 +39,7 @@ class CrunchyrollIE(InfoExtractor):
            'thumbnail': 'http://img1.ak.crunchyroll.com/i/spire1-tmb/20c6b5e10f1a47b10516877d3c039cae1380951166_full.jpg',
            'uploader': 'Yomiuri Telecasting Corporation (YTV)',
            'upload_date': '20131013',
            'url': 're:(?!.*&)',
        },
        'params': {
            # rtmp
@@ -107,19 +108,17 @@ class CrunchyrollIE(InfoExtractor):
        decrypted_data = intlist_to_bytes(aes_cbc_decrypt(data, key, iv))
        return zlib.decompress(decrypted_data)

    def _convert_subtitles_to_srt(self, subtitles):
    def _convert_subtitles_to_srt(self, sub_root):
        output = ''
        for i, (start, end, text) in enumerate(re.findall(r'<event [^>]*?start="([^"]+)" [^>]*?end="([^"]+)" [^>]*?text="([^"]+)"[^>]*?>', subtitles), 1):
            start = start.replace('.', ',')
            end = end.replace('.', ',')
            text = clean_html(text)
            text = text.replace('\\N', '\n')
            if not text:
                continue

        for i, event in enumerate(sub_root.findall('./events/event'), 1):
            start = event.attrib['start'].replace('.', ',')
            end = event.attrib['end'].replace('.', ',')
            text = event.attrib['text'].replace('\\N', '\n')
            output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
        return output
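Since the SRT converter now receives a parsed XML root instead of raw markup, it can be exercised standalone; a minimal runnable sketch with a made-up subtitle document:

    import xml.etree.ElementTree as ET

    sub_root = ET.fromstring(
        '<subtitle title="demo"><events>'
        '<event start="0:00:01.50" end="0:00:03.00" text="Hello\\NWorld"/>'
        '</events></subtitle>')

    output = ''
    for i, event in enumerate(sub_root.findall('./events/event'), 1):
        start = event.attrib['start'].replace('.', ',')
        end = event.attrib['end'].replace('.', ',')
        text = event.attrib['text'].replace('\\N', '\n')
        output += '%d\n%s --> %s\n%s\n\n' % (i, start, end, text)
    print(output)
    # 1
    # 0:00:01,50 --> 0:00:03,00
    # Hello
    # World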
    def _convert_subtitles_to_ass(self, subtitles):
    def _convert_subtitles_to_ass(self, sub_root):
        output = ''

        def ass_bool(strvalue):
@@ -128,10 +127,6 @@ class CrunchyrollIE(InfoExtractor):
            assvalue = '-1'
            return assvalue

        sub_root = xml.etree.ElementTree.fromstring(subtitles)
        if not sub_root:
            return output

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib["title"]
        output += 'ScriptType: v4.00+\n'
@@ -237,12 +232,14 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
        streamdata_req.data = 'req=RpcApiVideoEncode%5FGetStreamInfo&video%5Fencode%5Fquality='+stream_quality+'&media%5Fid='+stream_id+'&video%5Fformat='+stream_format
        streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
        streamdata_req.add_header('Content-Length', str(len(streamdata_req.data)))
        streamdata = self._download_webpage(streamdata_req, video_id, note='Downloading media info for '+video_format)
        video_url = self._search_regex(r'<host>([^<]+)', streamdata, 'video_url')
        video_play_path = self._search_regex(r'<file>([^<]+)', streamdata, 'video_play_path')
        streamdata = self._download_xml(
            streamdata_req, video_id,
            note='Downloading media info for %s' % video_format)
        video_url = streamdata.find('.//host').text
        video_play_path = streamdata.find('.//file').text
        formats.append({
            'url': video_url,
            'play_path': video_play_path,
            'play_path': video_play_path,
            'ext': 'flv',
            'format': video_format,
            'format_id': video_format,
@@ -266,10 +263,17 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
            lang_code = self._search_regex(r'lang_code=["\']([^"\']+)', subtitle, 'subtitle_lang_code', fatal=False)
            if not lang_code:
                continue
            sub_root = xml.etree.ElementTree.fromstring(subtitle)
            if not sub_root:
                subtitles[lang_code] = ''
            if sub_format == 'ass':
                subtitles[lang_code] = self._convert_subtitles_to_ass(subtitle)
                subtitles[lang_code] = self._convert_subtitles_to_ass(sub_root)
            else:
                subtitles[lang_code] = self._convert_subtitles_to_srt(subtitle)
                subtitles[lang_code] = self._convert_subtitles_to_srt(sub_root)

        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
@@ -281,3 +285,40 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
            'subtitles': subtitles,
            'formats': formats,
        }


class CrunchyrollShowPlaylistIE(InfoExtractor):
    IE_NAME = "crunchyroll:playlist"
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?$'

    _TESTS = [{
        'url': 'http://www.crunchyroll.com/a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
        'info_dict': {
            'id': 'a-bridge-to-the-starry-skies-hoshizora-e-kakaru-hashi',
            'title': 'A Bridge to the Starry Skies - Hoshizora e Kakaru Hashi'
        },
        'playlist_count': 13,
    }]

    def _real_extract(self, url):
        show_id = self._match_id(url)

        webpage = self._download_webpage(url, show_id)
        title = self._html_search_regex(
            r'(?s)<h1[^>]*>\s*<span itemprop="name">(.*?)</span>',
            webpage, 'title')
        episode_paths = re.findall(
            r'(?s)<li id="showview_videos_media_[0-9]+"[^>]+>.*?<a href="([^"]+)"',
            webpage)
        entries = [
            self.url_result('http://www.crunchyroll.com' + ep, 'Crunchyroll')
            for ep in episode_paths
        ]
        entries.reverse()

        return {
            '_type': 'playlist',
            'id': show_id,
            'title': title,
            'entries': entries,
        }
@@ -1,25 +0,0 @@
# encoding: utf-8
from __future__ import unicode_literals

from .canalplus import CanalplusIE


class D8IE(CanalplusIE):
    _VALID_URL = r'https?://www\.d8\.tv/.*?/(?P<path>.*)'
    _VIDEO_INFO_TEMPLATE = 'http://service.canal-plus.com/video/rest/getVideosLiees/d8/%s'
    IE_NAME = 'd8.tv'

    _TEST = {
        'url': 'http://www.d8.tv/d8-docs-mags/pid6589-d8-campagne-intime.html',
        'file': '966289.flv',
        'info_dict': {
            'title': 'Campagne intime - Documentaire exceptionnel',
            'description': 'md5:d2643b799fb190846ae09c61e59a859f',
            'upload_date': '20131108',
        },
        'params': {
            # rtmp
            'skip_download': True,
        },
        'skip': 'videos get deleted after a while',
    }
@@ -82,11 +82,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
    ]

    def _real_extract(self, url):
        # Extract id and simplified title from URL
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')

        video_id = self._match_id(url)
        url = 'http://www.dailymotion.com/video/%s' % video_id

        # Retrieve video webpage to extract further information
@@ -98,7 +94,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):

        # It may just embed a vevo video:
        m_vevo = re.search(
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?videoId=(?P<id>[\w]*)',
            r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
            webpage)
        if m_vevo is not None:
            vevo_id = m_vevo.group('id')
@@ -147,18 +143,23 @@ class DailymotionIE(DailymotionBaseInfoExtractor, SubtitlesInfoExtractor):
            self._list_available_subtitles(video_id, webpage)
            return

        view_count = self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)', webpage, 'view count', fatal=False)
        if view_count is not None:
            view_count = str_to_int(view_count)
        view_count = str_to_int(self._search_regex(
            r'video_views_count[^>]+>\s+([\d\.,]+)',
            webpage, 'view count', fatal=False))

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
                'title')

        return {
            'id': video_id,
            'id': video_id,
            'formats': formats,
            'uploader': info['owner.screenname'],
            'upload_date': video_upload_date,
            'title': self._og_search_title(webpage),
            'subtitles': video_subtitles,
            'upload_date': video_upload_date,
            'title': title,
            'subtitles': video_subtitles,
            'thumbnail': info['thumbnail_url'],
            'age_limit': age_limit,
            'view_count': view_count,
@@ -11,10 +11,10 @@ from ..utils import (


class DaumIE(InfoExtractor):
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/.*?clipid=(?P<id>\d+)'
    _VALID_URL = r'https?://(?:m\.)?tvpot\.daum\.net/(?:v/|.*?clipid=)(?P<id>[^?#&]+)'
    IE_NAME = 'daum.net'

    _TEST = {
    _TESTS = [{
        'url': 'http://tvpot.daum.net/clip/ClipView.do?clipid=52554690',
        'info_dict': {
            'id': '52554690',
@@ -24,11 +24,17 @@ class DaumIE(InfoExtractor):
            'upload_date': '20130831',
            'duration': 3868,
        },
    }
    }, {
        'url': 'http://tvpot.daum.net/v/vab4dyeDBysyBssyukBUjBz',
        'only_matching': True,
    }, {
        'url': 'http://tvpot.daum.net/v/07dXWRka62Y%24',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        video_id = mobj.group('id')
        canonical_url = 'http://tvpot.daum.net/v/%s' % video_id
        webpage = self._download_webpage(canonical_url, video_id)
        full_id = self._search_regex(
@@ -42,7 +48,6 @@ class DaumIE(InfoExtractor):
            'http://videofarm.daum.net/controller/api/open/v1_2/MovieData.apixml?' + query,
            video_id, 'Downloading video formats info')

        self.to_screen(u'%s: Getting video urls' % video_id)
        formats = []
        for format_el in urls.findall('result/output_list/output_list'):
            profile = format_el.attrib['profile']
@@ -52,7 +57,7 @@ class DaumIE(InfoExtractor):
            })
            url_doc = self._download_xml(
                'http://videofarm.daum.net/controller/api/open/v1_2/MovieLocation.apixml?' + format_query,
                video_id, note=False)
                video_id, note='Downloading video data for %s format' % profile)
            format_url = url_doc.find('result/url').text
            formats.append({
                'url': format_url,
@@ -7,7 +7,7 @@ class DivxStageIE(NovaMovIE):
    IE_NAME = 'divxstage'
    IE_DESC = 'DivxStage'

    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag)'}
    _VALID_URL = NovaMovIE._VALID_URL_TEMPLATE % {'host': 'divxstage\.(?:eu|net|ch|co|at|ag|to)'}

    _HOST = 'www.divxstage.eu'

@@ -24,4 +24,4 @@ class DivxStageIE(NovaMovIE):
        'title': 'youtubedl test video',
        'description': 'This is a test video for youtubedl.',
        }
    }
    }
@@ -5,28 +5,33 @@ import os.path
import re

from .common import InfoExtractor
from ..utils import compat_urllib_parse_unquote
from ..compat import compat_urllib_parse_unquote
from ..utils import url_basename


class DropboxIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/s/(?P<id>[a-zA-Z0-9]{15})/(?P<title>[^?#]*)'
    _TEST = {
    _VALID_URL = r'https?://(?:www\.)?dropbox[.]com/sh?/(?P<id>[a-zA-Z0-9]{15})/.*'
    _TESTS = [{
        'url': 'https://www.dropbox.com/s/nelirfsxnmcfbfh/youtube-dl%20test%20video%20%27%C3%A4%22BaW_jenozKc.mp4?dl=0',
        'info_dict': {
            'id': 'nelirfsxnmcfbfh',
            'ext': 'mp4',
            'title': 'youtube-dl test video \'ä"BaW_jenozKc'
        }
    }
    },
    {
        'url': 'https://www.dropbox.com/sh/662glsejgzoj9sr/AAByil3FGH9KFNZ13e08eSa1a/Pregame%20Ceremony%20Program%20PA%2020140518.m4v',
        'only_matching': True,
    },
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        fn = compat_urllib_parse_unquote(mobj.group('title'))
        fn = compat_urllib_parse_unquote(url_basename(url))
        title = os.path.splitext(fn)[0]
        video_url = (
            re.sub(r'[?&]dl=0', '', url) +
            ('?' if '?' in url else '&') + 'dl=1')
        video_url = re.sub(r'[?&]dl=0', '', url)
        video_url += ('?' if '?' not in video_url else '&') + 'dl=1'

        return {
            'id': video_id,
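The reworked URL munging above appends the query separator based on the stripped URL rather than the original, avoiding an output like 'video.mp4&dl=1' when dl=0 was the only parameter; step by step:

    import re

    url = 'https://www.dropbox.com/s/nelirfsxnmcfbfh/video.mp4?dl=0'
    video_url = re.sub(r'[?&]dl=0', '', url)
    # -> .../video.mp4 with no query string left
    video_url += ('?' if '?' not in video_url else '&') + 'dl=1'
    print(video_url)
    # -> https://www.dropbox.com/s/nelirfsxnmcfbfh/video.mp4?dl=1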
@@ -19,7 +19,7 @@ class DrTuberIE(InfoExtractor):
            'like_count': int,
            'dislike_count': int,
            'comment_count': int,
            'categories': list,  # NSFW
            'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
@@ -52,9 +52,9 @@ class DrTuberIE(InfoExtractor):
            r'<span class="comments_count">([\d,\.]+)</span>',
            webpage, 'comment count', fatal=False))

        cats_str = self._html_search_regex(
            r'<meta name="keywords" content="([^"]+)"', webpage, 'categories', fatal=False)
        categories = None if cats_str is None else cats_str.split(' ')
        cats_str = self._search_regex(
            r'<span>Categories:</span><div>(.+?)</div>', webpage, 'categories', fatal=False)
        categories = [] if not cats_str else re.findall(r'<a title="([^"]+)"', cats_str)

        return {
            'id': video_id,
@@ -1,7 +1,5 @@
from __future__ import unicode_literals

import re

from .subtitles import SubtitlesInfoExtractor
from .common import ExtractorError
from ..utils import parse_iso8601
@@ -25,8 +23,7 @@ class DRTVIE(SubtitlesInfoExtractor):
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        video_id = self._match_id(url)

        programcard = self._download_json(
            'http://www.dr.dk/mu/programcard/expanded/%s' % video_id, video_id, 'Downloading video JSON')
@@ -35,7 +32,7 @@ class DRTVIE(SubtitlesInfoExtractor):

        title = data['Title']
        description = data['Description']
        timestamp = parse_iso8601(data['CreatedTime'][:-5])
        timestamp = parse_iso8601(data['CreatedTime'])

        thumbnail = None
        duration = None
youtube_dl/extractor/einthusan.py (new file, 61 lines)
@@ -0,0 +1,61 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class EinthusanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?einthusan\.com/movies/watch.php\?([^#]*?)id=(?P<id>[0-9]+)'
_TESTS = [
{
'url': 'http://www.einthusan.com/movies/watch.php?id=2447',
'md5': 'af244f4458cd667205e513d75da5b8b1',
'info_dict': {
'id': '2447',
'ext': 'mp4',
'title': 'Ek Villain',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:9d29fc91a7abadd4591fb862fa560d93',
}
},
{
'url': 'http://www.einthusan.com/movies/watch.php?id=1671',
'md5': 'ef63c7a803e22315880ed182c10d1c5c',
'info_dict': {
'id': '1671',
'ext': 'mp4',
'title': 'Soodhu Kavvuum',
'thumbnail': 're:^https?://.*\.jpg$',
'description': 'md5:05d8a0c0281a4240d86d76e14f2f4d51',
}
},
]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)

video_title = self._html_search_regex(
r'<h1><a class="movie-title".*?>(.*?)</a></h1>', webpage, 'title')

video_url = self._html_search_regex(
r'''(?s)jwplayer\("mediaplayer"\)\.setup\({.*?'file': '([^']+)'.*?}\);''',
webpage, 'video url')

description = self._html_search_meta('description', webpage)
thumbnail = self._html_search_regex(
r'''<a class="movie-cover-wrapper".*?><img src=["'](.*?)["'].*?/></a>''',
webpage, "thumbnail url", fatal=False)
if thumbnail is not None:
thumbnail = thumbnail.replace('..', 'http://www.einthusan.com')

return {
'id': video_id,
'title': video_title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
}
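The new Einthusan extractor resolves the site's relative thumbnail paths with a literal string replace of the leading '..'. A more general (hypothetical) alternative would be to join the scraped path against the page URL; a minimal standard-library sketch, with an assumed thumbnail path for illustration:

    try:
        from urllib.parse import urljoin  # Python 3
    except ImportError:
        from urlparse import urljoin      # Python 2

    page_url = 'http://www.einthusan.com/movies/watch.php?id=2447'
    thumbnail = '../images/cover.jpg'  # hypothetical relative src from the page
    # urljoin resolves '..' against the /movies/ path of the page URL
    assert urljoin(page_url, thumbnail) == 'http://www.einthusan.com/images/cover.jpg'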
@@ -1,4 +1,6 @@
# encoding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
@@ -7,20 +9,20 @@ from ..utils import ExtractorError


class EitbIE(InfoExtractor):
IE_NAME = u'eitb.tv'
IE_NAME = 'eitb.tv'
_VALID_URL = r'https?://www\.eitb\.tv/(eu/bideoa|es/video)/[^/]+/(?P<playlist_id>\d+)/(?P<chapter_id>\d+)'

_TEST = {
u'add_ie': ['Brightcove'],
u'url': u'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
u'md5': u'edf4436247185adee3ea18ce64c47998',
u'info_dict': {
u'id': u'2743577154001',
u'ext': u'mp4',
u'title': u'60 minutos (Lasa y Zabala, 30 años)',
'add_ie': ['Brightcove'],
'url': 'http://www.eitb.tv/es/video/60-minutos-60-minutos-2013-2014/2677100210001/2743577154001/lasa-y-zabala-30-anos/',
'md5': 'edf4436247185adee3ea18ce64c47998',
'info_dict': {
'id': '2743577154001',
'ext': 'mp4',
'title': '60 minutos (Lasa y Zabala, 30 años)',
# All videos from eitb have this description in the brightcove info
u'description': u'.',
u'uploader': u'Euskal Telebista',
'description': '.',
'uploader': 'Euskal Telebista',
},
}

@@ -30,7 +32,7 @@ class EitbIE(InfoExtractor):
webpage = self._download_webpage(url, chapter_id)
bc_url = BrightcoveIE._extract_brightcove_url(webpage)
if bc_url is None:
raise ExtractorError(u'Could not extract the Brightcove url')
raise ExtractorError('Could not extract the Brightcove url')
# The BrightcoveExperience object doesn't contain the video id, so we
# set it manually
bc_url += '&%40videoPlayer={0}'.format(chapter_id)
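The `%40` in the appended query parameter is simply the percent-encoded `@` of Brightcove's `@videoPlayer` parameter, so the string built above is `&@videoPlayer=<chapter_id>` on the wire. A quick standard-library sanity check (not youtube-dl code):

    try:
        from urllib.parse import quote  # Python 3
    except ImportError:
        from urllib import quote        # Python 2

    # '@' percent-encodes to '%40'
    assert quote('@videoPlayer', safe='') == '%40videoPlayer'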
@@ -14,11 +14,11 @@ class EpornerIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
_TEST = {
'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
'md5': '3b427ae4b9d60619106de3185c2987cd',
'md5': '39d486f046212d8e1b911c52ab4691f8',
'info_dict': {
'id': '95008',
'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
'ext': 'flv',
'ext': 'mp4',
'title': 'Infamous Tiffany Teen Strip Tease Video',
'duration': 194,
'view_count': int,
@@ -7,6 +7,7 @@ from ..utils import (
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urllib_parse,
str_to_int,
)


@@ -20,6 +21,7 @@ class ExtremeTubeIE(InfoExtractor):
'ext': 'mp4',
'title': 'Music Video 14 british euro brit european cumshots swallow',
'uploader': 'unknown',
'view_count': int,
'age_limit': 18,
}
}, {
@@ -39,8 +41,12 @@ class ExtremeTubeIE(InfoExtractor):
video_title = self._html_search_regex(
r'<h1 [^>]*?title="([^"]+)"[^>]*>', webpage, 'title')
uploader = self._html_search_regex(
r'>Posted by:(?=<)(?:\s|<[^>]*>)*(.+?)\|', webpage, 'uploader',
fatal=False)
r'Uploaded by:\s*</strong>\s*(.+?)\s*</div>',
webpage, 'uploader', fatal=False)
view_count = str_to_int(self._html_search_regex(
r'Views:\s*</strong>\s*<span>([\d,\.]+)</span>',
webpage, 'view count', fatal=False))

video_url = compat_urllib_parse.unquote(self._html_search_regex(
r'video_url=(.+?)&', webpage, 'video_url'))
path = compat_urllib_parse_urlparse(video_url).path
@@ -51,6 +57,7 @@ class ExtremeTubeIE(InfoExtractor):
'id': video_id,
'title': video_title,
'uploader': uploader,
'view_count': view_count,
'url': video_url,
'format': format,
'format_id': format,
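The new view count is scraped as a human-formatted string like '1,234,567' and normalised with `str_to_int` before being stored. Roughly, a sketch of the utils helper as of this era (hedged; not guaranteed verbatim):

    import re

    def str_to_int(int_str):
        # Strip thousands separators ('1,234,567' / '1.234.567' -> 1234567)
        if int_str is None:
            return None
        int_str = re.sub(r'[,\.\+]', '', int_str)
        return int(int_str)

    assert str_to_int('1,234,567') == 1234567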
@@ -5,15 +5,17 @@
import socket

from .common import InfoExtractor
from ..utils import (
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
urlencode_postdata,

ExtractorError,
limit_length,
)


@@ -35,7 +37,15 @@ class FacebookIE(InfoExtractor):
'id': '637842556329505',
'ext': 'mp4',
'duration': 38,
'title': 'Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam fin...',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
}
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
@@ -131,8 +141,7 @@ class FacebookIE(InfoExtractor):
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', default=None)
if len(video_title) > 80 + 3:
video_title = video_title[:80] + '...'
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id

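The manual `len(...) > 80 + 3` truncation is replaced by the `limit_length` utility, which also tolerates `None` (the caption regex above uses `default=None`, and the old code would have crashed on a missing caption). A rough sketch of the helper (hedged; may not match the utils source exactly):

    def limit_length(s, length):
        # Truncate s to at most `length` characters, ending in an ellipsis;
        # pass None through unchanged so optional titles stay optional.
        ellipsis = '...'
        if s is None:
            return None
        if len(s) > length:
            return s[:length - len(ellipsis)] + ellipsis
        return s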
@@ -1,49 +1,48 @@
# encoding: utf-8
import re
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
determine_ext,
)


class FazIE(InfoExtractor):
IE_NAME = u'faz.net'
IE_NAME = 'faz.net'
_VALID_URL = r'https?://www\.faz\.net/multimedia/videos/.*?-(?P<id>\d+)\.html'

_TEST = {
u'url': u'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
u'file': u'12610585.mp4',
u'info_dict': {
u'title': u'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
u'description': u'md5:1453fbf9a0d041d985a47306192ea253',
'url': 'http://www.faz.net/multimedia/videos/stockholm-chemie-nobelpreis-fuer-drei-amerikanische-forscher-12610585.html',
'info_dict': {
'id': '12610585',
'ext': 'mp4',
'title': 'Stockholm: Chemie-Nobelpreis für drei amerikanische Forscher',
'description': 'md5:1453fbf9a0d041d985a47306192ea253',
},
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
self.to_screen(video_id)
video_id = self._match_id(url)

webpage = self._download_webpage(url, video_id)
config_xml_url = self._search_regex(r'writeFLV\(\'(.+?)\',', webpage,
u'config xml url')
config = self._download_xml(config_xml_url, video_id,
u'Downloading config xml')
config_xml_url = self._search_regex(
r'writeFLV\(\'(.+?)\',', webpage, 'config xml url')
config = self._download_xml(
config_xml_url, video_id, 'Downloading config xml')

encodings = config.find('ENCODINGS')
formats = []
for code in ['LOW', 'HIGH', 'HQ']:
for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
encoding = encodings.find(code)
if encoding is None:
continue
encoding_url = encoding.find('FILENAME').text
formats.append({
'url': encoding_url,
'ext': determine_ext(encoding_url),
'format_id': code.lower(),
'quality': pref,
})
self._sort_formats(formats)

descr = self._html_search_regex(r'<p class="Content Copy">(.*?)</p>', webpage, u'description')
descr = self._html_search_regex(
r'<p class="Content Copy">(.*?)</p>', webpage, 'description', fatal=False)
return {
'id': video_id,
'title': self._og_search_title(webpage),
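The enumerate() change gives each encoding an ascending 'quality' value (LOW=0, HIGH=1, HQ=2), which `_sort_formats` uses to order the list worst-to-best so the HQ variant ends up as the default pick. A minimal illustration of the idea, with hypothetical format dicts:

    formats = [
        {'format_id': 'hq', 'quality': 2},
        {'format_id': 'low', 'quality': 0},
        {'format_id': 'high', 'quality': 1},
    ]
    # _sort_formats sorts worst-to-best; for this simple case sorting by
    # 'quality' alone gives the same order
    formats.sort(key=lambda f: f['quality'])
    assert [f['format_id'] for f in formats] == ['low', 'high', 'hq']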
@@ -1,25 +1,27 @@
from __future__ import unicode_literals

import re
import random
import json

from .common import InfoExtractor
from ..utils import (
determine_ext,
get_element_by_id,
clean_html,
)


class FKTVIE(InfoExtractor):
IE_NAME = u'fernsehkritik.tv'
_VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'
IE_NAME = 'fernsehkritik.tv'
_VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/folge-(?P<ep>[0-9]+)(?:/.*)?'

_TEST = {
u'url': u'http://fernsehkritik.tv/folge-1',
u'file': u'00011.flv',
u'info_dict': {
u'title': u'Folge 1 vom 10. April 2007',
u'description': u'md5:fb4818139c7cfe6907d4b83412a6864f',
'url': 'http://fernsehkritik.tv/folge-1',
'info_dict': {
'id': '00011',
'ext': 'flv',
'title': 'Folge 1 vom 10. April 2007',
'description': 'md5:fb4818139c7cfe6907d4b83412a6864f',
},
}

@@ -32,7 +34,7 @@ class FKTVIE(InfoExtractor):
start_webpage = self._download_webpage('http://fernsehkritik.tv/folge-%d/Start' % episode,
episode)
playlist = self._search_regex(r'playlist = (\[.*?\]);', start_webpage,
u'playlist', flags=re.DOTALL)
'playlist', flags=re.DOTALL)
files = json.loads(re.sub('{[^{}]*?}', '{}', playlist))
# TODO: return a single multipart video
videos = []
@@ -42,7 +44,6 @@ class FKTVIE(InfoExtractor):
videos.append({
'id': video_id,
'url': video_url,
'ext': determine_ext(video_url),
'title': clean_html(get_element_by_id('eptitle', start_webpage)),
'description': clean_html(get_element_by_id('contentlist', start_webpage)),
'thumbnail': video_thumbnail
@@ -51,14 +52,15 @@ class FKTVIE(InfoExtractor):


class FKTVPosteckeIE(InfoExtractor):
IE_NAME = u'fernsehkritik.tv:postecke'
_VALID_URL = r'(?:http://)?(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
IE_NAME = 'fernsehkritik.tv:postecke'
_VALID_URL = r'http://(?:www\.)?fernsehkritik\.tv/inline-video/postecke\.php\?(.*&)?ep=(?P<ep>[0-9]+)(&|$)'
_TEST = {
u'url': u'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
u'file': u'0120.flv',
u'md5': u'262f0adbac80317412f7e57b4808e5c4',
u'info_dict': {
u"title": u"Postecke 120"
'url': 'http://fernsehkritik.tv/inline-video/postecke.php?iframe=true&width=625&height=440&ep=120',
'md5': '262f0adbac80317412f7e57b4808e5c4',
'info_dict': {
'id': '0120',
'ext': 'flv',
'title': 'Postecke 120',
}
}

@@ -71,8 +73,7 @@ class FKTVPosteckeIE(InfoExtractor):
video_url = 'http://dl%d.fernsehkritik.tv/postecke/postecke%d.flv' % (server, episode)
video_title = 'Postecke %d' % episode
return {
'id': video_id,
'url': video_url,
'ext': determine_ext(video_url),
'title': video_title,
'id': video_id,
'url': video_url,
'title': video_title,
}
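FKTVIE still returns a plain list of per-part info dicts, hence the TODO about returning a single multipart video. The usual youtube-dl way to express such a collection explicitly is a playlist result; a hedged sketch of what the TODO might eventually become, reusing the `videos` list and `episode` number from the code above:

    # inside FKTVIE._real_extract, after collecting `videos`:
    return self.playlist_result(
        videos,                      # list of per-part info dicts
        playlist_id=str(episode),    # episode number as the playlist id
        playlist_title='Folge %d' % episode)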
@@ -10,13 +10,13 @@ from ..utils import (


class FlickrIE(InfoExtractor):
"""Information Extractor for Flickr videos"""
_VALID_URL = r'(?:https?://)?(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
_VALID_URL = r'https?://(?:www\.|secure\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'
_TEST = {
'url': 'http://www.flickr.com/photos/forestwander-nature-pictures/5645318632/in/photostream/',
'file': '5645318632.mp4',
'md5': '6fdc01adbc89d72fc9c4f15b4a4ba87b',
'info_dict': {
'id': '5645318632',
'ext': 'mp4',
"description": "Waterfalls in the Springtime at Dark Hollow Waterfalls. These are located just off of Skyline Drive in Virginia. They are only about 6/10 of a mile hike but it is a pretty steep hill and a good climb back up.",
"uploader_id": "forestwander-nature-pictures",
"title": "Dark Hollow Waterfalls"
@@ -49,12 +49,12 @@ class FlickrIE(InfoExtractor):
raise ExtractorError('Unable to extract video url')
video_url = mobj.group(1) + unescapeHTML(mobj.group(2))

return [{
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': self._og_search_title(webpage),
return {
'id': video_id,
'url': video_url,
'ext': 'mp4',
'title': self._og_search_title(webpage),
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'uploader_id': video_uploader_id,
}]
}
@@ -4,16 +4,21 @@ from __future__ import unicode_literals
import re

from .common import InfoExtractor
from ..utils import int_or_none


class FranceInterIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]{6})'
_VALID_URL = r'http://(?:www\.)?franceinter\.fr/player/reecouter\?play=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'file': '793962.mp3',
'md5': '4764932e466e6f6c79c317d2e74f6884',
"info_dict": {
"title": "L’Histoire dans les jeux vidéo",
'id': '793962',
'ext': 'mp3',
'title': 'L’Histoire dans les jeux vidéo',
'description': 'md5:7e93ddb4451e7530022792240a3049c7',
'timestamp': 1387369800,
'upload_date': '20131218',
},
}

@@ -22,17 +27,26 @@ class FranceInterIE(InfoExtractor):
video_id = mobj.group('id')

webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<span class="roll_overflow">(.*?)</span></h1>', webpage, 'title')

path = self._search_regex(
r'&urlAOD=(.*?)&startTime', webpage, 'video url')
r'<a id="player".+?href="([^"]+)"', webpage, 'video url')
video_url = 'http://www.franceinter.fr/' + path

title = self._html_search_regex(
r'<span class="title">(.+?)</span>', webpage, 'title')
description = self._html_search_regex(
r'<span class="description">(.*?)</span>',
webpage, 'description', fatal=False)
timestamp = int_or_none(self._search_regex(
r'data-date="(\d+)"', webpage, 'upload date', fatal=False))

return {
'id': video_id,
'title': title,
'description': description,
'timestamp': timestamp,
'formats': [{
'url': video_url,
'vcodec': 'none',
}],
'title': title,
}
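Two small conventions in the new FranceInter code are worth calling out: `'vcodec': 'none'` marks the single format as audio-only so format selection treats the MP3 correctly, and `int_or_none` tolerates the regex returning None when `data-date` is absent. A rough sketch of the helper (hedged; the real utils version also takes scale/default arguments):

    def int_or_none(v):
        # Convert to int, passing None through instead of raising TypeError
        return int(v) if v is not None else None

    assert int_or_none('1387369800') == 1387369800
    assert int_or_none(None) is None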
@@ -8,45 +8,68 @@ import json
from .common import InfoExtractor
from ..utils import (
compat_urlparse,
ExtractorError,
clean_html,
parse_duration,
compat_urllib_parse_urlparse,
int_or_none,
)


class FranceTVBaseInfoExtractor(InfoExtractor):
def _extract_video(self, video_id):
info = self._download_xml(
'http://www.francetvinfo.fr/appftv/webservices/video/'
'getInfosOeuvre.php?id-diffusion='
+ video_id, video_id, 'Downloading XML config')
def _extract_video(self, video_id, catalogue):
info = self._download_json(
'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s'
% (video_id, catalogue),
video_id, 'Downloading video JSON')

manifest_url = info.find('videos/video/url').text
manifest_url = manifest_url.replace('/z/', '/i/')

if manifest_url.startswith('rtmp'):
formats = [{'url': manifest_url, 'ext': 'flv'}]
else:
formats = []
available_formats = self._search_regex(r'/[^,]*,(.*?),k\.mp4', manifest_url, 'available formats')
for index, format_descr in enumerate(available_formats.split(',')):
format_info = {
'url': manifest_url.replace('manifest.f4m', 'index_%d_av.m3u8' % index),
'ext': 'mp4',
}
m_resolution = re.search(r'(?P<width>\d+)x(?P<height>\d+)', format_descr)
if m_resolution is not None:
format_info.update({
'width': int(m_resolution.group('width')),
'height': int(m_resolution.group('height')),
})
formats.append(format_info)
if info.get('status') == 'NOK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, info['message']), expected=True)

thumbnail_path = info.find('image').text
formats = []
for video in info['videos']:
if video['statut'] != 'ONLINE':
continue
video_url = video['url']
if not video_url:
continue
format_id = video['format']
if video_url.endswith('.f4m'):
video_url_parsed = compat_urllib_parse_urlparse(video_url)
f4m_url = self._download_webpage(
'http://hdfauth.francetv.fr/esi/urltokengen2.html?url=%s' % video_url_parsed.path,
video_id, 'Downloading f4m manifest token', fatal=False)
if f4m_url:
f4m_formats = self._extract_f4m_formats(f4m_url, video_id)
for f4m_format in f4m_formats:
f4m_format['preference'] = 1
formats.extend(f4m_formats)
elif video_url.endswith('.m3u8'):
formats.extend(self._extract_m3u8_formats(video_url, video_id, 'mp4'))
elif video_url.startswith('rtmp'):
formats.append({
'url': video_url,
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'preference': 1,
})
else:
formats.append({
'url': video_url,
'format_id': format_id,
'preference': -1,
})
self._sort_formats(formats)

return {
'id': video_id,
'title': info.find('titre').text,
'title': info['titre'],
'description': clean_html(info['synopsis']),
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', info['image']),
'duration': parse_duration(info['duree']),
'timestamp': int_or_none(info['diffusion']['timestamp']),
'formats': formats,
'thumbnail': compat_urlparse.urljoin('http://pluzz.francetv.fr', thumbnail_path),
'description': info.find('synopsis').text,
}


@@ -61,7 +84,7 @@ class PluzzIE(FranceTVBaseInfoExtractor):
webpage = self._download_webpage(url, title)
video_id = self._search_regex(
r'data-diffusion="(\d+)"', webpage, 'ID')
return self._extract_video(video_id)
return self._extract_video(video_id, 'Pluzz')


class FranceTvInfoIE(FranceTVBaseInfoExtractor):
@@ -72,11 +95,10 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
'url': 'http://www.francetvinfo.fr/replay-jt/france-3/soir-3/jt-grand-soir-3-lundi-26-aout-2013_393427.html',
'info_dict': {
'id': '84981923',
'ext': 'mp4',
'ext': 'flv',
'title': 'Soir 3',
},
'params': {
'skip_download': True,
'upload_date': '20130826',
'timestamp': 1377548400,
},
}, {
'url': 'http://www.francetvinfo.fr/elections/europeennes/direct-europeennes-regardez-le-debat-entre-les-candidats-a-la-presidence-de-la-commission_600639.html',
@@ -88,15 +110,17 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor):
},
'params': {
'skip_download': 'HLS (requires ffmpeg)'
}
},
'skip': 'Ce direct est terminé et sera disponible en rattrapage dans quelques minutes.',
}]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_title = mobj.group('title')
webpage = self._download_webpage(url, page_title)
video_id = self._search_regex(r'id-video=((?:[^0-9]*?_)?[0-9]+)[@"]', webpage, 'video id')
return self._extract_video(video_id)
video_id, catalogue = self._search_regex(
r'id-video=([^@]+@[^"]+)', webpage, 'video id').split('@')
return self._extract_video(video_id, catalogue)


class FranceTVIE(FranceTVBaseInfoExtractor):
@@ -112,91 +136,77 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
# france2
{
'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104',
'file': '75540104.mp4',
'md5': 'c03fc87cb85429ffd55df32b9fc05523',
'info_dict': {
'title': '13h15, le samedi...',
'description': 'md5:2e5b58ba7a2d3692b35c792be081a03d',
},
'params': {
# m3u8 download
'skip_download': True,
'id': '109169362',
'ext': 'flv',
'title': '13h15, le dimanche...',
'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7',
'upload_date': '20140914',
'timestamp': 1410693600,
},
},
# france3
{
'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575',
'md5': '679bb8f8921f8623bd658fa2f8364da0',
'info_dict': {
'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au',
'ext': 'flv',
'ext': 'mp4',
'title': 'Le scandale du prix des médicaments',
'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce',
},
'params': {
# rtmp download
'skip_download': True,
'upload_date': '20131113',
'timestamp': 1384380000,
},
},
# france4
{
'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c',
'info_dict': {
'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4',
'ext': 'flv',
'ext': 'mp4',
'title': 'Hero Corp Making of - Extrait 1',
'description': 'md5:c87d54871b1790679aec1197e73d650a',
},
'params': {
# rtmp download
'skip_download': True,
'upload_date': '20131106',
'timestamp': 1383766500,
},
},
# france5
{
'url': 'http://www.france5.fr/emissions/c-a-dire/videos/92837968',
'md5': '78f0f4064f9074438e660785bbf2c5d9',
'info_dict': {
'id': '92837968',
'ext': 'mp4',
'id': '108961659',
'ext': 'flv',
'title': 'C à dire ?!',
'description': 'md5:fb1db1cbad784dcce7c7a7bd177c8e2f',
},
'params': {
# m3u8 download
'skip_download': True,
'description': 'md5:1a4aeab476eb657bf57c4ff122129f81',
'upload_date': '20140915',
'timestamp': 1410795000,
},
},
# franceo
{
'url': 'http://www.franceo.fr/jt/info-afrique/04-12-2013',
'md5': '52f0bfe202848b15915a2f39aaa8981b',
'info_dict': {
'id': '92327925',
'ext': 'mp4',
'title': 'Infô-Afrique',
'id': '108634970',
'ext': 'flv',
'title': 'Infô Afrique',
'description': 'md5:ebf346da789428841bee0fd2a935ea55',
'upload_date': '20140915',
'timestamp': 1410822000,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'The id changes frequently',
},
]

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
if mobj.group('key'):
webpage = self._download_webpage(url, mobj.group('key'))
id_res = [
(r'''(?x)<div\s+class="video-player">\s*
<a\s+href="http://videos.francetv.fr/video/([0-9]+)"\s+
class="francetv-video-player">'''),
(r'<a id="player_direct" href="http://info\.francetelevisions'
'\.fr/\?id-video=([^"/&]+)'),
(r'<a class="video" id="ftv_player_(.+?)"'),
]
video_id = self._html_search_regex(id_res, webpage, 'video ID')
else:
video_id = mobj.group('id')
return self._extract_video(video_id)
webpage = self._download_webpage(url, mobj.group('key') or mobj.group('id'))
video_id, catalogue = self._html_search_regex(
r'href="http://videos\.francetv\.fr/video/([^@]+@[^"]+)"',
webpage, 'video ID').split('@')
return self._extract_video(video_id, catalogue)


class GenerationQuoiIE(InfoExtractor):
@@ -232,16 +242,15 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
_VALID_URL = r'https?://(?:m\.)?culturebox\.francetvinfo\.fr/(?P<name>.*?)(\?|$)'

_TEST = {
'url': 'http://culturebox.francetvinfo.fr/einstein-on-the-beach-au-theatre-du-chatelet-146813',
'url': 'http://culturebox.francetvinfo.fr/festivals/dans-les-jardins-de-william-christie/dans-les-jardins-de-william-christie-le-camus-162553',
'md5': '5ad6dec1ffb2a3fbcb20cc4b744be8d6',
'info_dict': {
'id': 'EV_6785',
'ext': 'mp4',
'title': 'Einstein on the beach au Théâtre du Châtelet',
'description': 'md5:9ce2888b1efefc617b5e58b3f6200eeb',
},
'params': {
# m3u8 download
'skip_download': True,
'id': 'EV_22853',
'ext': 'flv',
'title': 'Dans les jardins de William Christie - Le Camus',
'description': 'md5:4710c82315c40f0c865ca8b9a68b5299',
'upload_date': '20140829',
'timestamp': 1409317200,
},
}

@@ -249,5 +258,7 @@ class CultureboxIE(FranceTVBaseInfoExtractor):
mobj = re.match(self._VALID_URL, url)
name = mobj.group('name')
webpage = self._download_webpage(url, name)
video_id = self._search_regex(r'"http://videos\.francetv\.fr/video/(.*?)"', webpage, 'video id')
return self._extract_video(video_id)
video_id, catalogue = self._search_regex(
r'"http://videos\.francetv\.fr/video/([^@]+@[^"]+)"', webpage, 'video id').split('@')

return self._extract_video(video_id, catalogue)
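Across Pluzz, FranceTvInfo, FranceTV and Culturebox, the page now embeds a combined 'id@catalogue' token, which each extractor splits once and forwards to `_extract_video`. The pattern, isolated (the token value here is hypothetical, built from the Culturebox test above):

    token = 'EV_22853@Culturebox'  # value scraped from the page
    video_id, catalogue = token.split('@')
    assert (video_id, catalogue) == ('EV_22853', 'Culturebox')
    # both parts feed the getInfosOeuvre/v2 endpoint:
    api_url = ('http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/'
               '?idDiffusion=%s&catalogue=%s' % (video_id, catalogue))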
@@ -8,7 +8,7 @@ from ..utils import ExtractorError


class FunnyOrDieIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
_VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
_TESTS = [{
'url': 'http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version',
'md5': 'bcd81e0c4f26189ee09be362ad6e6ba9',
@@ -21,7 +21,6 @@ class FunnyOrDieIE(InfoExtractor):
},
}, {
'url': 'http://www.funnyordie.com/embed/e402820827',
'md5': 'ff4d83318f89776ed0250634cfaa8d36',
'info_dict': {
'id': 'e402820827',
'ext': 'mp4',
@@ -29,6 +28,9 @@ class FunnyOrDieIE(InfoExtractor):
'description': 'Please use this to sell something. www.jonlajoie.com',
'thumbnail': 're:^http:.*\.jpg$',
},
}, {
'url': 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man',
'only_matching': True,
}]

def _real_extract(self, url):
@@ -37,7 +39,7 @@ class FunnyOrDieIE(InfoExtractor):
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)

links = re.findall(r'<source src="([^"]+/v)\d+\.([^"]+)" type=\'video', webpage)
links = re.findall(r'<source src="([^"]+/v)[^"]+\.([^"]+)" type=\'video', webpage)
if not links:
raise ExtractorError('No media links available for %s' % video_id)
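The new articles URL is covered by an `'only_matching': True` test, which only asserts that `_VALID_URL` matches the URL and skips extraction entirely. The check it performs amounts to:

    import re

    _VALID_URL = r'https?://(?:www\.)?funnyordie\.com/(?P<type>embed|articles|videos)/(?P<id>[0-9a-f]+)(?:$|[?#/])'
    url = 'http://www.funnyordie.com/articles/ebf5e34fc8/10-hours-of-walking-in-nyc-as-a-man'
    assert re.match(_VALID_URL, url)  # no download is attempted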
@@ -8,12 +8,11 @@ from ..utils import (
compat_urllib_parse,
compat_urlparse,
unescapeHTML,
get_meta_content,
)


class GameSpotIE(InfoExtractor):
_VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<page_id>\d+)/?'
_VALID_URL = r'(?:http://)?(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
_TEST = {
'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
@@ -26,10 +25,10 @@ class GameSpotIE(InfoExtractor):
}

def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
page_id = mobj.group('page_id')
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
data_video_json = self._search_regex(r'data-video=["\'](.*?)["\']', webpage, 'data video')
data_video_json = self._search_regex(
r'data-video=["\'](.*?)["\']', webpage, 'data video')
data_video = json.loads(unescapeHTML(data_video_json))

# Transform the manifest url to a link to the mp4 files
@@ -41,7 +40,8 @@ class GameSpotIE(InfoExtractor):
http_path = f4m_path[1:].split('/', 1)[1]
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')
http_template = compat_urlparse.urljoin('http://video.gamespotcdn.com/', http_template)
http_template = compat_urlparse.urljoin(
'http://video.gamespotcdn.com/', http_template)
formats = []
for q in qualities:
formats.append({
@@ -52,8 +52,9 @@ class GameSpotIE(InfoExtractor):

return {
'id': data_video['guid'],
'display_id': page_id,
'title': compat_urllib_parse.unquote(data_video['title']),
'formats': formats,
'description': get_meta_content('description', webpage),
'description': self._html_search_meta('description', webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
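`get_meta_content` is dropped from the utils import in favour of the extractor method `_html_search_meta`, which performs the same `<meta name=... content=...>` lookup but plugs into the usual missing-field handling. A hedged sketch of roughly what the call does (the real helper builds a more permissive regex and supports display_name/fatal arguments):

    import re

    def _html_search_meta(name, html):
        # Roughly: find <meta ... name="description" ... content="...">
        # and return the content attribute, or None when absent
        m = re.search(
            r'<meta[^>]+name=["\']%s["\'][^>]+content=["\']([^"\']*)["\']'
            % re.escape(name), html)
        return m.group(1) if m else None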
@@ -7,11 +7,12 @@ import re

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
from ..compat import (
compat_urllib_parse,
compat_urlparse,
compat_xml_parse_error,

)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
@@ -28,6 +29,7 @@ from .brightcove import BrightcoveIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .smotri import SmotriIE
from .condenast import CondeNastIE


class GenericIE(InfoExtractor):
@@ -98,6 +100,22 @@ class GenericIE(InfoExtractor):
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['Brightcove'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
@@ -155,7 +173,6 @@ class GenericIE(InfoExtractor):
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'md5': '7cf780be104d40fea7bae52eed4a470e',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
@@ -180,13 +197,13 @@ class GenericIE(InfoExtractor):
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': 'deeeabcc1085eb2ba205474e7235a3d5',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '981',
'id': '1969',
'ext': 'mp4',
'title': 'My web playroom',
'uploader': 'Ze Frank',
'description': 'md5:ddb2a40ecd6b6a147e400e535874947b',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embedded Ustream video
@@ -226,21 +243,6 @@ class GenericIE(InfoExtractor):
'skip_download': 'Requires rtmpdump'
}
},
# smotri embed
{
'url': 'http://rbctv.rbc.ru/archive/news/562949990879132.shtml',
'md5': 'ec40048448e9284c9a1de77bb188108b',
'info_dict': {
'id': 'v27008541fad',
'ext': 'mp4',
'title': 'Крым и Севастополь вошли в состав России',
'description': 'md5:fae01b61f68984c7bd2fa741e11c3175',
'duration': 900,
'upload_date': '20140318',
'uploader': 'rbctv_2012_4',
'uploader_id': 'rbctv_2012_4',
},
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
@@ -295,13 +297,13 @@ class GenericIE(InfoExtractor):
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': 'jpSGZsgga_I',
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Launch Trailer',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20130821',
'description': 'md5:87bd95f13d8be3e7da87a5f2c443106a',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
@@ -340,7 +342,7 @@ class GenericIE(InfoExtractor):
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at Handjob Hub',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# RSS feed
@@ -382,14 +384,59 @@ class GenericIE(InfoExtractor):
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://education-portal.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '8788b683c777a5cf25621eaf286d0c23',
'info_dict': {
'id': '1cfaf6b7ea',
'ext': 'mov',
'title': 'md5:51364a8d3d009997ba99656004b5e20d',
'duration': 643.0,
'filesize': 182808282,
'uploader': 'education-portal.com',
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1 - ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
},
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented'
],
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
}
]

def report_download_webpage(self, video_id):
"""Report webpage download."""
if not self._downloader.params.get('test', False):
self._downloader.report_warning('Falling back on generic information extractor.')
super(GenericIE, self).report_download_webpage(video_id)

def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
@@ -484,11 +531,13 @@ class GenericIE(InfoExtractor):
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
) % (url, url), expected=True)
else:
assert ':' in default_search
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)

url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
@@ -498,14 +547,14 @@ class GenericIE(InfoExtractor):
self.to_screen('%s: Requesting header' % video_id)

head_req = HEADRequest(url)
response = self._request_webpage(
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)

if response is not False:
if head_response is not False:
# Check for redirect
new_url = response.geturl()
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
@@ -513,31 +562,36 @@ class GenericIE(InfoExtractor):
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)

# Check for direct link to a video
content_type = response.headers.get('Content-Type', '')
m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
if m:
upload_date = response.headers.get('Last-Modified')
if upload_date:
upload_date = unified_strdate(upload_date)
return {
'id': video_id,
'title': os.path.splitext(url_basename(url))[0],
'formats': [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}],
'upload_date': upload_date,
}
full_response = None
if head_response is False:
full_response = self._request_webpage(url, video_id)
head_response = full_response

try:
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '')
m = re.match(r'^(?P<type>audio|video|application(?=/ogg$))/(?P<format_id>.+)$', content_type)
if m:
upload_date = unified_strdate(
head_response.headers.get('Last-Modified'))
return {
'id': video_id,
'title': os.path.splitext(url_basename(url))[0],
'direct': True,
'formats': [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}],
'upload_date': upload_date,
}

if not self._downloader.params.get('test', False) and not is_intentional:
self._downloader.report_warning('Falling back on generic information extractor.')

if full_response:
webpage = self._webpage_read_content(full_response, url, video_id)
else:
webpage = self._download_webpage(url, video_id)
except ValueError:
# since this is the last-resort InfoExtractor, if
# this error is thrown, it'll be thrown here
raise ExtractorError('Failed to download URL: %s' % url)

self.report_extraction(video_id)

# Is it an RSS feed?
@@ -584,7 +638,9 @@ class GenericIE(InfoExtractor):

# Helper method
def _playlist_from_matches(matches, getter, ie=None):
urlrs = orderedSet(self.url_result(getter(m), ie) for m in matches)
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m)), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)

@@ -611,13 +667,13 @@ class GenericIE(InfoExtractor):
if mobj:
player_url = unescapeHTML(mobj.group('url'))
surl = smuggle_url(player_url, {'Referer': url})
return self.url_result(surl, 'Vimeo')
return self.url_result(surl)

# Look for embedded (swf embed) Vimeo player
mobj = re.search(
r'<embed[^>]+?src="(https?://(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
r'<embed[^>]+?src="((?:https?:)?//(?:www\.)?vimeo\.com/moogaloop\.swf.+?)"', webpage)
if mobj:
return self.url_result(mobj.group(1), 'Vimeo')
return self.url_result(mobj.group(1))

# Look for embedded YouTube player
matches = re.findall(r'''(?x)
@@ -625,15 +681,16 @@ class GenericIE(InfoExtractor):
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v)/.+?)
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]), ie='Youtube')
matches, lambda m: unescapeHTML(m[1]))

# Look for embedded Dailymotion player
matches = re.findall(
@@ -642,18 +699,41 @@ class GenericIE(InfoExtractor):
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))

# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)

# Look for embedded Wistia player
match = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
embed_url = self._proto_relative_url(
unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
'url': unescapeHTML(match.group('url')),
'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': video_id,
}

match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'http://fast.wistia.net/embed/iframe/{0:}'.format(match.group('id')),
'ie_key': 'Wistia',
'uploader': video_uploader,
'title': video_title,
'id': match.group('id')
}

# Look for embedded blip.tv player
mobj = re.search(r'<meta\s[^>]*https?://api\.blip\.tv/\w+/redirect/\w+/(\d+)', webpage)
@@ -788,7 +868,7 @@ class GenericIE(InfoExtractor):

# Look for embedded soundcloud player
mobj = re.search(
r'<iframe src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
@@ -825,59 +905,69 @@ class GenericIE(InfoExtractor):
return self.url_result(mobj.group('url'), 'SBS')

mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')

mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')

def check_video(vurl):
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')

def filter_video(urls):
return list(filter(check_video, urls))

# Start with something easy: JW Player in SWFObject
found = re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage)
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = re.findall(r'''(?sx)
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?file\s*:\s*["\'](.*?)["\']''', webpage)
.*?file\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage)
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = re.findall(r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage)
found = filter_video(re.findall(
r'[^A-Za-z0-9]?file["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = re.findall(r'''(?xs)
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage)
''', webpage))
if not found:
# Try to find twitter cards info
found = re.findall(r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number of spaces between elements, as some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
if m_video_type is not None:
def check_video(vurl):
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg')
found = list(filter(
check_video,
re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage)))
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]+)? src="([^"]+)"', webpage)
found = re.findall(r'(?s)<video[^<]*(?:>.*?<source[^>]*)?\s+src="([^"]+)"', webpage)
if not found:
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'([^\']+)\'"',
r'(?:[a-z-]+="[^"]+"\s+)*?content="[0-9]{,2};url=\'?([^\'"]+)',
webpage)
if found:
new_url = found.group(1)
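The repeated `list(filter(check_video, ...))` dance that used to live only in the Open Graph branch is hoisted into a shared `filter_video` helper, so every candidate list is now screened for non-media extensions in one place. The behaviour, isolated from the extractor (the `determine_ext` here is a simplified stand-in for the utils function):

    try:
        from urllib.parse import urlparse  # Python 3
    except ImportError:
        from urlparse import urlparse      # Python 2

    def determine_ext(path):
        # simplified stand-in for youtube-dl's determine_ext
        return path.rpartition('.')[2] if '.' in path else ''

    def check_video(vurl):
        vpath = urlparse(vurl).path
        vext = determine_ext(vpath)
        return '.' in vpath and vext not in (
            'swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')

    def filter_video(urls):
        return list(filter(check_video, urls))

    assert filter_video(['http://x/video.mp4', 'http://x/thumb.jpg',
                         'http://x/subs.srt']) == ['http://x/video.mp4']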
youtube_dl/extractor/glide.py (new file, 40 lines)
@@ -0,0 +1,40 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class GlideIE(InfoExtractor):
IE_DESC = 'Glide mobile video messages (glide.me)'
_VALID_URL = r'https?://share\.glide\.me/(?P<id>[A-Za-z0-9\-=_+]+)'
_TEST = {
'url': 'http://share.glide.me/UZF8zlmuQbe4mr+7dCiQ0w==',
'md5': '4466372687352851af2d131cfaa8a4c7',
'info_dict': {
'id': 'UZF8zlmuQbe4mr+7dCiQ0w==',
'ext': 'mp4',
'title': 'Damon Timm\'s Glide message',
'thumbnail': 're:^https?://.*?\.cloudfront\.net/.*\.jpg$',
}
}

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<title>(.*?)</title>', webpage, 'title')
video_url = self.http_scheme() + self._search_regex(
r'<source src="(.*?)" type="video/mp4">', webpage, 'video URL')
thumbnail_url = self._search_regex(
r'<img id="video-thumbnail" src="(.*?)"',
webpage, 'thumbnail url', fatal=False)
thumbnail = (
thumbnail_url if thumbnail_url is None
else self.http_scheme() + thumbnail_url)

return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
}
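Glide serves its media behind scheme-relative `//...` URLs, so the extractor prefixes `self.http_scheme()`. As far as the common-extractor helper goes, it yields 'http:' when the user passed --prefer-insecure and 'https:' otherwise; a hedged sketch:

    def http_scheme(self):
        # 'http:' if the user prefers insecure transfers, else 'https:'
        return ('http:'
                if self._downloader.params.get('prefer_insecure', False)
                else 'https:')

    # usage pattern from the extractor above (hypothetical host):
    # video_url = self.http_scheme() + '//d1234.cloudfront.net/clip.mp4'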
400
youtube_dl/extractor/globo.py
Normal file
400
youtube_dl/extractor/globo.py
Normal file
@ -0,0 +1,400 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import random
|
||||
import math
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_str,
|
||||
compat_chr,
|
||||
compat_ord,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
float_or_none,
|
||||
)
|
||||
|
||||
|
||||
class GloboIE(InfoExtractor):
|
||||
_VALID_URL = 'https?://.+?\.globo\.com/(?P<id>.+)'
|
||||
|
||||
_API_URL_TEMPLATE = 'http://api.globovideos.com/videos/%s/playlist'
|
||||
_SECURITY_URL_TEMPLATE = 'http://security.video.globo.com/videos/%s/hash?player=flash&version=2.9.9.50&resource_id=%s'
|
||||
|
||||
_VIDEOID_REGEXES = [
|
||||
r'\bdata-video-id="(\d+)"',
|
||||
r'\bdata-player-videosids="(\d+)"',
|
||||
r'<div[^>]+\bid="(\d+)"',
|
||||
]
|
||||
|
||||
_RESIGN_EXPIRATION = 86400
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'http://globotv.globo.com/sportv/futebol-nacional/v/os-gols-de-atletico-mg-3-x-2-santos-pela-24a-rodada-do-brasileirao/3654973/',
|
||||
'md5': '03ebf41cb7ade43581608b7d9b71fab0',
|
||||
'info_dict': {
|
||||
'id': '3654973',
|
||||
'ext': 'mp4',
|
||||
'title': 'Os gols de Atlético-MG 3 x 2 Santos pela 24ª rodada do Brasileirão',
|
||||
'duration': 251.585,
|
||||
'uploader': 'SporTV',
|
||||
'uploader_id': 698,
|
||||
'like_count': int,
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://g1.globo.com/carros/autoesporte/videos/t/exclusivos-do-g1/v/mercedes-benz-gla-passa-por-teste-de-colisao-na-europa/3607726/',
|
||||
'md5': 'b3ccc801f75cd04a914d51dadb83a78d',
|
||||
'info_dict': {
|
||||
'id': '3607726',
|
||||
'ext': 'mp4',
|
||||
'title': 'Mercedes-Benz GLA passa por teste de colisão na Europa',
|
||||
'duration': 103.204,
|
||||
'uploader': 'Globo.com',
|
||||
'uploader_id': 265,
|
||||
'like_count': int,
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://g1.globo.com/jornal-nacional/noticia/2014/09/novidade-na-fiscalizacao-de-bagagem-pela-receita-provoca-discussoes.html',
|
||||
'md5': '307fdeae4390ccfe6ba1aa198cf6e72b',
|
||||
'info_dict': {
|
||||
'id': '3652183',
|
||||
'ext': 'mp4',
|
||||
'title': 'Receita Federal explica como vai fiscalizar bagagens de quem retorna ao Brasil de avião',
|
||||
'duration': 110.711,
|
||||
'uploader': 'Rede Globo',
|
||||
'uploader_id': 196,
|
||||
'like_count': int,
|
||||
}
|
||||
},
|
||||
]
|
||||
|
||||
class MD5():
|
||||
HEX_FORMAT_LOWERCASE = 0
|
||||
HEX_FORMAT_UPPERCASE = 1
|
||||
BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
|
||||
BASE64_PAD_CHARACTER_RFC_COMPLIANCE = '='
|
||||
PADDING = '=0xFF01DD'
|
||||
hexcase = 0
|
||||
b64pad = ''
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
class JSArray(list):
|
||||
def __getitem__(self, y):
|
||||
try:
|
||||
return list.__getitem__(self, y)
|
||||
except IndexError:
|
||||
return 0
|
||||
|
||||
def __setitem__(self, i, y):
|
||||
try:
|
||||
return list.__setitem__(self, i, y)
|
||||
except IndexError:
|
||||
self.extend([0] * (i - len(self) + 1))
|
||||
self[-1] = y
|
||||
|
||||
@classmethod
|
||||
def hex_md5(cls, param1):
|
||||
return cls.rstr2hex(cls.rstr_md5(cls.str2rstr_utf8(param1)))
|
||||
|
||||
@classmethod
|
||||
def b64_md5(cls, param1, param2=None):
|
||||
return cls.rstr2b64(cls.rstr_md5(cls.str2rstr_utf8(param1, param2)))
|
||||
|
||||
@classmethod
|
||||
def any_md5(cls, param1, param2):
|
||||
return cls.rstr2any(cls.rstr_md5(cls.str2rstr_utf8(param1)), param2)
|
||||
|
||||
@classmethod
|
||||
def rstr_md5(cls, param1):
|
||||
return cls.binl2rstr(cls.binl_md5(cls.rstr2binl(param1), len(param1) * 8))
|
||||
|
||||
@classmethod
|
||||
def rstr2hex(cls, param1):
|
||||
_loc_2 = '0123456789ABCDEF' if cls.hexcase else '0123456789abcdef'
|
||||
_loc_3 = ''
|
||||
for _loc_5 in range(0, len(param1)):
|
||||
_loc_4 = compat_ord(param1[_loc_5])
|
||||
_loc_3 += _loc_2[_loc_4 >> 4 & 15] + _loc_2[_loc_4 & 15]
|
||||
return _loc_3
|
||||
|
||||
@classmethod
|
||||
def rstr2b64(cls, param1):
|
||||
_loc_2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_'
|
||||
_loc_3 = ''
|
||||
_loc_4 = len(param1)
|
||||
for _loc_5 in range(0, _loc_4, 3):
|
||||
_loc_6_1 = compat_ord(param1[_loc_5]) << 16
|
||||
_loc_6_2 = compat_ord(param1[_loc_5 + 1]) << 8 if _loc_5 + 1 < _loc_4 else 0
|
||||
_loc_6_3 = compat_ord(param1[_loc_5 + 2]) if _loc_5 + 2 < _loc_4 else 0
|
||||
_loc_6 = _loc_6_1 | _loc_6_2 | _loc_6_3
|
||||
for _loc_7 in range(0, 4):
|
||||
if _loc_5 * 8 + _loc_7 * 6 > len(param1) * 8:
|
||||
_loc_3 += cls.b64pad
|
||||
else:
|
||||
_loc_3 += _loc_2[_loc_6 >> 6 * (3 - _loc_7) & 63]
|
||||
return _loc_3
|
||||
|
||||
@staticmethod
|
||||
def rstr2any(param1, param2):
|
||||
_loc_3 = len(param2)
|
||||
_loc_4 = []
|
||||
_loc_9 = [0] * ((len(param1) >> 2) + 1)
|
||||
for _loc_5 in range(0, len(_loc_9)):
|
||||
_loc_9[_loc_5] = compat_ord(param1[_loc_5 * 2]) << 8 | compat_ord(param1[_loc_5 * 2 + 1])
|
||||
|
||||
while len(_loc_9) > 0:
|
||||
_loc_8 = []
|
||||
_loc_7 = 0
|
||||
for _loc_5 in range(0, len(_loc_9)):
|
||||
_loc_7 = (_loc_7 << 16) + _loc_9[_loc_5]
|
||||
_loc_6 = math.floor(_loc_7 / _loc_3)
|
||||
_loc_7 -= _loc_6 * _loc_3
|
||||
if len(_loc_8) > 0 or _loc_6 > 0:
|
||||
_loc_8[len(_loc_8)] = _loc_6
|
||||
|
||||
_loc_4[len(_loc_4)] = _loc_7
|
||||
_loc_9 = _loc_8
|
||||
|
||||
_loc_10 = ''
|
||||
_loc_5 = len(_loc_4) - 1
|
||||
while _loc_5 >= 0:
|
||||
_loc_10 += param2[_loc_4[_loc_5]]
|
||||
_loc_5 -= 1
|
||||
|
||||
return _loc_10
        @classmethod
        def str2rstr_utf8(cls, param1, param2=None):
            _loc_3 = ''
            _loc_4 = -1
            if not param2:
                param2 = cls.PADDING
            param1 = param1 + param2[1:9]
            while True:
                _loc_4 += 1
                if _loc_4 >= len(param1):
                    break
                _loc_5 = compat_ord(param1[_loc_4])
                _loc_6 = compat_ord(param1[_loc_4 + 1]) if _loc_4 + 1 < len(param1) else 0
                if 55296 <= _loc_5 <= 56319 and 56320 <= _loc_6 <= 57343:
                    _loc_5 = 65536 + ((_loc_5 & 1023) << 10) + (_loc_6 & 1023)
                    _loc_4 += 1
                if _loc_5 <= 127:
                    _loc_3 += compat_chr(_loc_5)
                    continue
                if _loc_5 <= 2047:
                    _loc_3 += compat_chr(192 | _loc_5 >> 6 & 31) + compat_chr(128 | _loc_5 & 63)
                    continue
                if _loc_5 <= 65535:
                    _loc_3 += compat_chr(224 | _loc_5 >> 12 & 15) + compat_chr(128 | _loc_5 >> 6 & 63) + compat_chr(
                        128 | _loc_5 & 63)
                    continue
                if _loc_5 <= 2097151:
                    _loc_3 += compat_chr(240 | _loc_5 >> 18 & 7) + compat_chr(128 | _loc_5 >> 12 & 63) + compat_chr(
                        128 | _loc_5 >> 6 & 63) + compat_chr(128 | _loc_5 & 63)
            return _loc_3

        @staticmethod
        def rstr2binl(param1):
            _loc_2 = [0] * ((len(param1) >> 2) + 1)
            for _loc_3 in range(0, len(_loc_2)):
                _loc_2[_loc_3] = 0
            for _loc_3 in range(0, len(param1) * 8, 8):
                _loc_2[_loc_3 >> 5] |= (compat_ord(param1[_loc_3 // 8]) & 255) << _loc_3 % 32
            return _loc_2

        @staticmethod
        def binl2rstr(param1):
            _loc_2 = ''
            for _loc_3 in range(0, len(param1) * 32, 8):
                _loc_2 += compat_chr(param1[_loc_3 >> 5] >> _loc_3 % 32 & 255)
            return _loc_2

        @classmethod
        def binl_md5(cls, param1, param2):
            param1 = cls.JSArray(param1)
            param1[param2 >> 5] |= 128 << param2 % 32
            param1[(param2 + 64 >> 9 << 4) + 14] = param2
            _loc_3 = 1732584193
            _loc_4 = -271733879
            _loc_5 = -1732584194
            _loc_6 = 271733878
            for _loc_7 in range(0, len(param1), 16):
                _loc_8 = _loc_3
                _loc_9 = _loc_4
                _loc_10 = _loc_5
                _loc_11 = _loc_6
                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 7, -680876936)
                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 1], 12, -389564586)
                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 17, 606105819)
                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 3], 22, -1044525330)
                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 7, -176418897)
                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 5], 12, 1200080426)
                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 17, -1473231341)
                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 7], 22, -45705983)
                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 7, 1770035416)
                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 9], 12, -1958414417)
                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 17, -42063)
                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 11], 22, -1990404162)
                _loc_3 = cls.md5_ff(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 7, 1804603682)
                _loc_6 = cls.md5_ff(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 13], 12, -40341101)
                _loc_5 = cls.md5_ff(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 17, -1502002290)
                _loc_4 = cls.md5_ff(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 15], 22, 1236535329)
                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 5, -165796510)
                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 6], 9, -1069501632)
                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 14, 643717713)
                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 0], 20, -373897302)
                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 5, -701558691)
                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 10], 9, 38016083)
                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 14, -660478335)
                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 4], 20, -405537848)
                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 5, 568446438)
                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 14], 9, -1019803690)
                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 14, -187363961)
                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 8], 20, 1163531501)
                _loc_3 = cls.md5_gg(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 5, -1444681467)
                _loc_6 = cls.md5_gg(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 2], 9, -51403784)
                _loc_5 = cls.md5_gg(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 14, 1735328473)
                _loc_4 = cls.md5_gg(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 12], 20, -1926607734)
                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 5], 4, -378558)
                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 8], 11, -2022574463)
                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 11], 16, 1839030562)
                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 14], 23, -35309556)
                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 1], 4, -1530992060)
                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 4], 11, 1272893353)
                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 7], 16, -155497632)
                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 10], 23, -1094730640)
                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 13], 4, 681279174)
                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 0], 11, -358537222)
                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 3], 16, -722521979)
                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 6], 23, 76029189)
                _loc_3 = cls.md5_hh(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 9], 4, -640364487)
                _loc_6 = cls.md5_hh(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 12], 11, -421815835)
                _loc_5 = cls.md5_hh(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 15], 16, 530742520)
                _loc_4 = cls.md5_hh(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 2], 23, -995338651)
                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 0], 6, -198630844)
                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 7], 10, 1126891415)
                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 14], 15, -1416354905)
                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 5], 21, -57434055)
                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 12], 6, 1700485571)
                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 3], 10, -1894986606)
                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 10], 15, -1051523)
                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 1], 21, -2054922799)
                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 8], 6, 1873313359)
                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 15], 10, -30611744)
                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 6], 15, -1560198380)
                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 13], 21, 1309151649)
                _loc_3 = cls.md5_ii(_loc_3, _loc_4, _loc_5, _loc_6, param1[_loc_7 + 4], 6, -145523070)
                _loc_6 = cls.md5_ii(_loc_6, _loc_3, _loc_4, _loc_5, param1[_loc_7 + 11], 10, -1120210379)
                _loc_5 = cls.md5_ii(_loc_5, _loc_6, _loc_3, _loc_4, param1[_loc_7 + 2], 15, 718787259)
                _loc_4 = cls.md5_ii(_loc_4, _loc_5, _loc_6, _loc_3, param1[_loc_7 + 9], 21, -343485551)
                _loc_3 = cls.safe_add(_loc_3, _loc_8)
                _loc_4 = cls.safe_add(_loc_4, _loc_9)
                _loc_5 = cls.safe_add(_loc_5, _loc_10)
                _loc_6 = cls.safe_add(_loc_6, _loc_11)
            return [_loc_3, _loc_4, _loc_5, _loc_6]

        @classmethod
        def md5_cmn(cls, param1, param2, param3, param4, param5, param6):
            return cls.safe_add(
                cls.bit_rol(cls.safe_add(cls.safe_add(param2, param1), cls.safe_add(param4, param6)), param5), param3)

        @classmethod
        def md5_ff(cls, param1, param2, param3, param4, param5, param6, param7):
            return cls.md5_cmn(param2 & param3 | ~param2 & param4, param1, param2, param5, param6, param7)

        @classmethod
        def md5_gg(cls, param1, param2, param3, param4, param5, param6, param7):
            return cls.md5_cmn(param2 & param4 | param3 & ~param4, param1, param2, param5, param6, param7)

        @classmethod
        def md5_hh(cls, param1, param2, param3, param4, param5, param6, param7):
            return cls.md5_cmn(param2 ^ param3 ^ param4, param1, param2, param5, param6, param7)

        @classmethod
        def md5_ii(cls, param1, param2, param3, param4, param5, param6, param7):
            return cls.md5_cmn(param3 ^ (param2 | ~param4), param1, param2, param5, param6, param7)

        @classmethod
        def safe_add(cls, param1, param2):
            _loc_3 = (param1 & 65535) + (param2 & 65535)
            _loc_4 = (param1 >> 16) + (param2 >> 16) + (_loc_3 >> 16)
            return cls.lshift(_loc_4, 16) | _loc_3 & 65535

        @classmethod
        def bit_rol(cls, param1, param2):
            return cls.lshift(param1, param2) | (param1 & 0xFFFFFFFF) >> (32 - param2)

        @staticmethod
        def lshift(value, count):
            r = (0xFFFFFFFF & value) << count
            return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r
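`safe_add` and `lshift` exist because Python integers are arbitrary-precision while the decompiled player code assumes 32-bit signed wraparound. A standalone check of that behaviour; the functions are adapted from the methods above (minus `cls`) and are a sketch, not part of the diff:

def lshift(value, count):
    # Mask to 32 bits, then reinterpret values above 0x7FFFFFFF as
    # negative, the way a JS/ActionScript signed shift would.
    r = (0xFFFFFFFF & value) << count
    return -(~(r - 1) & 0xFFFFFFFF) if r > 0x7FFFFFFF else r


def safe_add(a, b):
    # 32-bit addition done in two 16-bit halves so the carry survives
    # but anything past bit 31 is discarded.
    lo = (a & 0xFFFF) + (b & 0xFFFF)
    hi = (a >> 16) + (b >> 16) + (lo >> 16)
    return lshift(hi, 16) | lo & 0xFFFF


assert safe_add(0x7FFFFFFF, 1) == -0x80000000  # wraps like an int32
assert safe_add(-1, 1) == 0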
    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)
        video_id = self._search_regex(self._VIDEOID_REGEXES, webpage, 'video id')

        video = self._download_json(
            self._API_URL_TEMPLATE % video_id, video_id)['videos'][0]

        title = video['title']
        duration = float_or_none(video['duration'], 1000)
        like_count = video['likes']
        uploader = video['channel']
        uploader_id = video['channel_id']

        formats = []

        for resource in video['resources']:
            resource_id = resource.get('_id')
            if not resource_id:
                continue

            security = self._download_json(
                self._SECURITY_URL_TEMPLATE % (video_id, resource_id),
                video_id, 'Downloading security hash for %s' % resource_id)

            security_hash = security.get('hash')
            if not security_hash:
                message = security.get('message')
                if message:
                    raise ExtractorError(
                        '%s returned error: %s' % (self.IE_NAME, message), expected=True)
                continue

            hash_code = security_hash[:2]
            received_time = int(security_hash[2:12])
            received_random = security_hash[12:22]
            received_md5 = security_hash[22:]

            sign_time = received_time + self._RESIGN_EXPIRATION
            padding = '%010d' % random.randint(1, 10000000000)

            signed_md5 = self.MD5.b64_md5(received_md5 + compat_str(sign_time) + padding)
            signed_hash = hash_code + compat_str(received_time) + received_random + compat_str(sign_time) + padding + signed_md5

            formats.append({
                'url': '%s?h=%s&k=%s' % (resource['url'], signed_hash, 'flash'),
                'format_id': resource_id,
                'height': resource['height']
            })

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': like_count,
            'formats': formats
        }
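For orientation, the security hash handled above packs four fields into one string, and the extractor re-signs it with a pushed-out expiry. A worked example as a sketch (the hash value is invented; `_RESIGN_EXPIRATION` is defined outside this excerpt and assumed here to be 86400 seconds):

security_hash = 'fb' + '1415887200' + 'a1b2c3d4e5' + '0123456789abcdef'

hash_code = security_hash[:2]             # 'fb'
received_time = int(security_hash[2:12])  # 1415887200
received_random = security_hash[12:22]    # 'a1b2c3d4e5'
received_md5 = security_hash[22:]         # server-issued MD5 part

sign_time = received_time + 86400         # assumed _RESIGN_EXPIRATION
padding = '%010d' % 1234567890            # ten-digit random pad

# signed_md5 = MD5.b64_md5(received_md5 + str(sign_time) + padding)
# signed_hash = hash_code + str(received_time) + received_random \
#     + str(sign_time) + padding + signed_md5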
@@ -36,16 +36,16 @@ class GodTubeIE(InfoExtractor):
             'http://www.godtube.com/resource/mediaplayer/%s.xml' % video_id.lower(),
             video_id, 'Downloading player config XML')
 
-        video_url = config.find('.//file').text
-        uploader = config.find('.//author').text
-        timestamp = parse_iso8601(config.find('.//date').text)
-        duration = parse_duration(config.find('.//duration').text)
-        thumbnail = config.find('.//image').text
+        video_url = config.find('file').text
+        uploader = config.find('author').text
+        timestamp = parse_iso8601(config.find('date').text)
+        duration = parse_duration(config.find('duration').text)
+        thumbnail = config.find('image').text
 
         media = self._download_xml(
             'http://www.godtube.com/media/xml/?v=%s' % video_id, video_id, 'Downloading media XML')
 
-        title = media.find('.//title').text
+        title = media.find('title').text
 
         return {
             'id': video_id,
youtube_dl/extractor/goldenmoustache.py (new file, 48 lines)
@@ -0,0 +1,48 @@
from __future__ import unicode_literals

import re
from .common import InfoExtractor
from ..utils import (
    parse_duration,
    int_or_none,
)


class GoldenMoustacheIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?goldenmoustache\.com/(?P<display_id>[\w-]+)-(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.goldenmoustache.com/suricate-le-poker-3700/',
        'md5': '0f904432fa07da5054d6c8beb5efb51a',
        'info_dict': {
            'id': '3700',
            'ext': 'mp4',
            'title': 'Suricate - Le Poker',
            'description': 'md5:3d1f242f44f8c8cb0a106f1fd08e5dc9',
            'thumbnail': 're:^https?://.*\.jpg$',
            'view_count': int,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(
            r'data-src-type="mp4" data-src="([^"]+)"', webpage, 'video URL')
        title = self._html_search_regex(
            r'<title>(.*?) - Golden Moustache</title>', webpage, 'title')
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage)
        view_count = int_or_none(self._html_search_regex(
            r'<strong>([0-9]+)</strong>\s*VUES</span>',
            webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'view_count': view_count,
        }
youtube_dl/extractor/golem.py (new file, 69 lines)
@@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    compat_urlparse,
    determine_ext,
)


class GolemIE(InfoExtractor):
    _VALID_URL = r'^https?://video\.golem\.de/.+?/(?P<id>.+?)/'
    _TEST = {
        'url': 'http://video.golem.de/handy/14095/iphone-6-und-6-plus-test.html',
        'md5': 'c1a2c0a3c863319651c7c992c5ee29bf',
        'info_dict': {
            'id': '14095',
            'format_id': 'high',
            'ext': 'mp4',
            'title': 'iPhone 6 und 6 Plus - Test',
            'duration': 300.44,
            'filesize': 65309548,
        }
    }

    _PREFIX = 'http://video.golem.de'

    def _real_extract(self, url):
        video_id = self._match_id(url)

        config = self._download_xml(
            'https://video.golem.de/xml/{0}.xml'.format(video_id), video_id)

        info = {
            'id': video_id,
            'title': config.findtext('./title', 'golem'),
            'duration': self._float(config.findtext('./playtime'), 'duration'),
        }

        formats = []
        for e in config:
            url = e.findtext('./url')
            if not url:
                continue

            formats.append({
                'format_id': e.tag,
                'url': compat_urlparse.urljoin(self._PREFIX, url),
                'height': self._int(e.get('height'), 'height'),
                'width': self._int(e.get('width'), 'width'),
                'filesize': self._int(e.findtext('filesize'), 'filesize'),
                'ext': determine_ext(e.findtext('./filename')),
            })
        self._sort_formats(formats)
        info['formats'] = formats

        thumbnails = []
        for e in config.findall('.//teaser'):
            url = e.findtext('./url')
            if not url:
                continue
            thumbnails.append({
                'url': compat_urlparse.urljoin(self._PREFIX, url),
                'width': self._int(e.get('width'), 'thumbnail width'),
                'height': self._int(e.get('height'), 'thumbnail height'),
            })
        info['thumbnails'] = thumbnails

        return info
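The loop over `config` implies an XML document shaped roughly like the following sketch; tag and attribute names are taken from the parsing code above, while the values and the placement of `<teaser>` are invented:

import xml.etree.ElementTree as ET

config = ET.fromstring('''
<video>
  <title>iPhone 6 und 6 Plus - Test</title>
  <playtime>300.44</playtime>
  <high width="1280" height="720">
    <url>/download/14095/high.mp4</url>
    <filename>high.mp4</filename>
    <filesize>65309548</filesize>
    <teaser width="640" height="360">
      <url>/download/14095/teaser.jpg</url>
    </teaser>
  </high>
</video>
''')

for e in config:
    # every direct child carrying a <url> becomes one format;
    # its tag (here 'high') doubles as the format_id
    url = e.findtext('./url')
    if not url:
        continue
    print(e.tag, url)  # high /download/14095/high.mp4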
@@ -1,13 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals
 
-import datetime
 import re
+import codecs
 
 from .common import InfoExtractor
-from ..utils import (
-    ExtractorError,
-)
+from ..utils import unified_strdate
 
 
 class GooglePlusIE(InfoExtractor):

@@ -19,74 +17,57 @@ class GooglePlusIE(InfoExtractor):
         'info_dict': {
             'id': 'ZButuJc6CtH',
             'ext': 'flv',
+            'title': '嘆きの天使 降臨',
             'upload_date': '20120613',
             'uploader': '井上ヨシマサ',
-            'title': '嘆きの天使 降臨',
         }
     }
 
     def _real_extract(self, url):
-        # Extract id from URL
-        mobj = re.match(self._VALID_URL, url)
-
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         # Step 1, Retrieve post webpage to extract further information
         webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
 
-        self.report_extraction(video_id)
-
-        # Extract update date
-        upload_date = self._html_search_regex(
+        title = self._og_search_description(webpage).splitlines()[0]
+        upload_date = unified_strdate(self._html_search_regex(
             r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
                 ([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
-            webpage, 'upload date', fatal=False, flags=re.VERBOSE)
-        if upload_date:
-            # Convert timestring to a format suitable for filename
-            upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
-            upload_date = upload_date.strftime('%Y%m%d')
-
-        # Extract uploader
-        uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
-            webpage, 'uploader', fatal=False)
-
-        # Extract title
-        # Get the first line for title
-        video_title = self._og_search_description(webpage).splitlines()[0]
+            webpage, 'upload date', fatal=False, flags=re.VERBOSE))
+        uploader = self._html_search_regex(
+            r'rel="author".*?>(.*?)</a>', webpage, 'uploader', fatal=False)
 
         # Step 2, Simulate clicking the image box to launch video
         DOMAIN = 'https://plus.google.com/'
-        video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
+        video_page = self._search_regex(
+            r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
             webpage, 'video page URL')
         if not video_page.startswith(DOMAIN):
             video_page = DOMAIN + video_page
 
         webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
 
+        def unicode_escape(s):
+            decoder = codecs.getdecoder('unicode_escape')
+            return re.sub(
+                r'\\u[0-9a-fA-F]{4,}',
+                lambda m: decoder(m.group(0))[0],
+                s)
+
-        # Extract video links all sizes
-        pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
-        mobj = re.findall(pattern, webpage)
-        if len(mobj) == 0:
-            raise ExtractorError('Unable to extract video links')
-
-        # Sort in resolution
-        links = sorted(mobj)
-
-        # Choose the lowest of the sort, i.e. highest resolution
-        video_url = links[-1]
-        # Only get the url. The resolution part in the tuple has no use anymore
-        video_url = video_url[-1]
-        # Treat escaped \u0026 style hex
-        try:
-            video_url = video_url.decode("unicode_escape")
-        except AttributeError: # Python 3
-            video_url = bytes(video_url, 'ascii').decode('unicode-escape')
+        formats = [{
+            'url': unicode_escape(video_url),
+            'ext': 'flv',
+            'width': int(width),
+            'height': int(height),
+        } for width, height, video_url in re.findall(
+            r'\d+,(\d+),(\d+),"(https?://redirector\.googlevideo\.com.*?)"', webpage)]
+        self._sort_formats(formats)
 
         return {
             'id': video_id,
-            'url': video_url,
+            'title': title,
             'uploader': uploader,
             'upload_date': upload_date,
-            'title': video_title,
-            'ext': 'flv',
+            'formats': formats,
         }
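The new `unicode_escape` helper decodes only literal `\uXXXX` escapes embedded in an already-decoded string, which is what the `redirector.googlevideo.com` URLs contain. A quick standalone run of the same function as a sketch:

import codecs
import re


def unicode_escape(s):
    # Decode each \uXXXX escape in place, leaving the rest of the
    # string untouched.
    decoder = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4,}',
        lambda m: decoder(m.group(0))[0],
        s)


print(unicode_escape('http://example.com/?a\\u0026b=1'))
# http://example.com/?a&b=1  (\u0026 is an escaped '&')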
@@ -5,6 +5,7 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
+    ExtractorError,
     determine_ext,
     compat_urllib_parse,
     compat_urllib_request,

@@ -12,20 +13,22 @@ from ..utils import (
 
 
 class GorillaVidIE(InfoExtractor):
-    IE_DESC = 'GorillaVid.in and daclips.in'
+    IE_DESC = 'GorillaVid.in, daclips.in and movpod.in'
     _VALID_URL = r'''(?x)
         https?://(?P<host>(?:www\.)?
-            (?:daclips\.in|gorillavid\.in))/
+            (?:daclips\.in|gorillavid\.in|movpod\.in))/
         (?:embed-)?(?P<id>[0-9a-zA-Z]+)(?:-[0-9]+x[0-9]+\.html)?
     '''
 
+    _FILE_NOT_FOUND_REGEX = r'>(?:404 - )?File Not Found<'
+
     _TESTS = [{
         'url': 'http://gorillavid.in/06y9juieqpmi',
         'md5': '5ae4a3580620380619678ee4875893ba',
         'info_dict': {
             'id': '06y9juieqpmi',
             'ext': 'flv',
-            'title': 'Rebecca Black My Moment Official Music Video Reaction',
+            'title': 'Rebecca Black My Moment Official Music Video Reaction-6GK87Rc8bzQ',
             'thumbnail': 're:http://.*\.jpg',
         },
     }, {

@@ -43,9 +46,12 @@ class GorillaVidIE(InfoExtractor):
         'info_dict': {
             'id': '3rso4kdn6f9m',
             'ext': 'mp4',
-            'title': 'Micro Pig piglets ready on 16th July 2009',
+            'title': 'Micro Pig piglets ready on 16th July 2009-bG0PdrCdxUc',
             'thumbnail': 're:http://.*\.jpg',
         },
-    }
+    }, {
+        'url': 'http://movpod.in/0wguyyxi1yca',
+        'only_matching': True,
     }]
 
     def _real_extract(self, url):

@@ -54,6 +60,9 @@ class GorillaVidIE(InfoExtractor):
 
         webpage = self._download_webpage('http://%s/%s' % (mobj.group('host'), video_id), video_id)
 
+        if re.search(self._FILE_NOT_FOUND_REGEX, webpage) is not None:
+            raise ExtractorError('Video %s does not exist' % video_id, expected=True)
+
         fields = dict(re.findall(r'''(?x)<input\s+
             type="hidden"\s+
             name="([^"]+)"\s+

@@ -69,14 +78,14 @@ class GorillaVidIE(InfoExtractor):
 
         webpage = self._download_webpage(req, video_id, 'Downloading video page')
 
-        title = self._search_regex(r'style="z-index: [0-9]+;">([0-9a-zA-Z ]+)(?:-.+)?</span>', webpage, 'title')
-        thumbnail = self._search_regex(r'image:\'(http[^\']+)\',', webpage, 'thumbnail')
-        url = self._search_regex(r'file: \'(http[^\']+)\',', webpage, 'file url')
+        title = self._search_regex(r'style="z-index: [0-9]+;">([^<]+)</span>', webpage, 'title')
+        video_url = self._search_regex(r'file\s*:\s*\'(http[^\']+)\',', webpage, 'file url')
+        thumbnail = self._search_regex(r'image\s*:\s*\'(http[^\']+)\',', webpage, 'thumbnail', fatal=False)
 
         formats = [{
             'format_id': 'sd',
-            'url': url,
-            'ext': determine_ext(url),
+            'url': video_url,
+            'ext': determine_ext(video_url),
             'quality': 1,
         }]
 
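For context, GorillaVid pages gate the player behind a form of hidden inputs that must be POSTed back. The hunk above only shows the head of that regex, so the value-capturing tail in this sketch is an assumption, and the page fragment is invented:

import re

webpage = '''
<input type="hidden" name="op" value="download1">
<input type="hidden" name="id" value="06y9juieqpmi">
<input type="hidden" name="rand" value="abc123">
'''

fields = dict(re.findall(r'''(?x)<input\s+
    type="hidden"\s+
    name="([^"]+)"\s+
    value="([^"]*)"
    ''', webpage))

print(fields)  # {'op': 'download1', 'id': '06y9juieqpmi', 'rand': 'abc123'}
# The extractor then urlencodes these fields and POSTs them back via
# compat_urllib_request before parsing the real player page.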
@@ -1,15 +1,11 @@
 # -*- coding: utf-8 -*-
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     compat_urlparse,
     str_to_int,
-    ExtractorError,
 )
-import json
 
 
 class GoshgayIE(InfoExtractor):

@@ -27,36 +23,27 @@ class GoshgayIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'class="video-title"><h1>(.+?)<', webpage, 'title')
+        title = self._og_search_title(webpage)
+        thumbnail = self._og_search_thumbnail(webpage)
+        family_friendly = self._html_search_meta(
+            'isFamilyFriendly', webpage, default='false')
+        config_url = self._search_regex(
+            r"'config'\s*:\s*'([^']+)'", webpage, 'config URL')
 
-        player_config = self._search_regex(
-            r'(?s)jwplayer\("player"\)\.setup\(({.+?})\)', webpage, 'config settings')
-        player_vars = json.loads(player_config.replace("'", '"'))
-        width = str_to_int(player_vars.get('width'))
-        height = str_to_int(player_vars.get('height'))
-        config_uri = player_vars.get('config')
+        config = self._download_xml(
+            config_url, video_id, 'Downloading player config XML')
 
-        if config_uri is None:
-            raise ExtractorError('Missing config URI')
-        node = self._download_xml(config_uri, video_id, 'Downloading player config XML',
-            errnote='Unable to download XML')
-        if node is None:
+        if config is None:
             raise ExtractorError('Missing config XML')
-        if node.tag != 'config':
+        if config.tag != 'config':
             raise ExtractorError('Missing config attribute')
-        fns = node.findall('file')
-        imgs = node.findall('image')
-        if len(fns) != 1:
+        fns = config.findall('file')
+        if len(fns) < 1:
             raise ExtractorError('Missing media URI')
         video_url = fns[0].text
-        if len(imgs) < 1:
-            thumbnail = None
-        else:
-            thumbnail = imgs[0].text
 
         url_comp = compat_urlparse.urlparse(url)
         ref = "%s://%s%s" % (url_comp[0], url_comp[1], url_comp[2])

@@ -65,9 +52,7 @@ class GoshgayIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'title': title,
-            'width': width,
-            'height': height,
             'thumbnail': thumbnail,
             'http_referer': ref,
-            'age_limit': 18,
+            'age_limit': 0 if family_friendly == 'true' else 18,
         }
@@ -8,12 +8,13 @@ import re
 
 
 from .common import InfoExtractor
-from ..utils import ExtractorError, compat_urllib_request, compat_html_parser
-
-from ..utils import (
+from ..compat import (
+    compat_html_parser,
     compat_urllib_parse,
+    compat_urllib_request,
     compat_urlparse,
 )
+from ..utils import ExtractorError
 
 
 class GroovesharkHtmlParser(compat_html_parser.HTMLParser):
@@ -1,37 +1,33 @@
 # -*- coding: utf-8 -*-
 
-import re
-import json
+from __future__ import unicode_literals
 
 from .common import InfoExtractor
-from ..utils import determine_ext
 
 
 class HarkIE(InfoExtractor):
-    _VALID_URL = r'https?://www\.hark\.com/clips/(.+?)-.+'
+    _VALID_URL = r'https?://www\.hark\.com/clips/(?P<id>.+?)-.+'
     _TEST = {
-        u'url': u'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
-        u'file': u'mmbzyhkgny.mp3',
-        u'md5': u'6783a58491b47b92c7c1af5a77d4cbee',
-        u'info_dict': {
-            u'title': u"Obama: 'Beyond The Afghan Theater, We Only Target Al Qaeda' on May 23, 2013",
-            u'description': u'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
-            u'duration': 11,
+        'url': 'http://www.hark.com/clips/mmbzyhkgny-obama-beyond-the-afghan-theater-we-only-target-al-qaeda-on-may-23-2013',
+        'md5': '6783a58491b47b92c7c1af5a77d4cbee',
+        'info_dict': {
+            'id': 'mmbzyhkgny',
+            'ext': 'mp3',
+            'title': 'Obama: \'Beyond The Afghan Theater, We Only Target Al Qaeda\' on May 23, 2013',
+            'description': 'President Barack Obama addressed the nation live on May 23, 2013 in a speech aimed at addressing counter-terrorism policies including the use of drone strikes, detainees at Guantanamo Bay prison facility, and American citizens who are terrorists.',
+            'duration': 11,
         }
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group(1)
-        json_url = "http://www.hark.com/clips/%s.json" %(video_id)
-        info_json = self._download_webpage(json_url, video_id)
-        info = json.loads(info_json)
-        final_url = info['url']
+        video_id = self._match_id(url)
+        data = self._download_json(
+            'http://www.hark.com/clips/%s.json' % video_id, video_id)
 
-        return {'id': video_id,
-                'url' : final_url,
-                'title': info['name'],
-                'ext': determine_ext(final_url),
-                'description': info['description'],
-                'thumbnail': info['image_original'],
-                'duration': info['duration'],
-                }
+        return {
+            'id': video_id,
+            'url': data['url'],
+            'title': data['name'],
+            'description': data.get('description'),
+            'thumbnail': data.get('image_original'),
+            'duration': data.get('duration'),
+        }
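The rewrite swaps a manual `_download_webpage` plus `json.loads` pair for `_download_json`, and indexing for `.get()` on optional fields. The same distinction in plain stdlib terms, as a sketch (the URL is the one from the test above):

import json
from urllib.request import urlopen

raw = urlopen('http://www.hark.com/clips/mmbzyhkgny.json').read()
info = json.loads(raw.decode('utf-8'))

info['name']             # required field: raises KeyError if absent
info.get('description')  # optional field: yields None if absent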
youtube_dl/extractor/heise.py (new file, 79 lines)
@@ -0,0 +1,79 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    parse_iso8601,
)


class HeiseIE(InfoExtractor):
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?heise\.de/video/artikel/
        .+?(?P<id>[0-9]+)\.html(?:$|[?#])
    '''
    _TEST = {
        'url': (
            'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
        ),
        'md5': 'ffed432483e922e88545ad9f2f15d30e',
        'info_dict': {
            'id': '2404147',
            'ext': 'mp4',
            'title': (
                "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
            ),
            'format_id': 'mp4_720p',
            'timestamp': 1411812600,
            'upload_date': '20140927',
            'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
            'thumbnail': 're:^https?://.*\.jpe?g$',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        container_id = self._search_regex(
            r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
            webpage, 'container ID')
        sequenz_id = self._search_regex(
            r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
            webpage, 'sequenz ID')
        data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
        doc = self._download_xml(data_url, video_id)

        info = {
            'id': video_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'timestamp': parse_iso8601(
                self._html_search_meta('date', webpage)),
            'description': self._og_search_description(webpage),
        }

        title = self._html_search_meta('fulltitle', webpage)
        if title:
            info['title'] = title
        else:
            info['title'] = self._og_search_title(webpage)

        formats = []
        for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
            label = source_node.attrib['label']
            height = int_or_none(self._search_regex(
                r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
            video_url = source_node.attrib['file']
            ext = determine_ext(video_url, '')
            formats.append({
                'url': video_url,
                'format_note': label,
                'format_id': '%s_%s' % (ext, label),
                'height': height,
            })
        self._sort_formats(formats)
        info['formats'] = formats

        return info
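The height is recovered from JWPlayer's quality label. What that pattern captures, sketched with plain `re` and invented labels:

import re

for label in ('720p', 'mp4_720p', 'audio'):
    m = re.match(r'^(.*?_)?([0-9]+)p$', label)
    print(label, '->', m.group(2) if m else None)
# 720p -> 720
# mp4_720p -> 720
# audio -> None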
@@ -28,13 +28,13 @@ class HowStuffWorksIE(InfoExtractor):
         }
     },
     {
-        'url': 'http://adventure.howstuffworks.com/39516-deadliest-catch-jakes-farewell-pots-video.htm',
+        'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
         'info_dict': {
-            'id': '553470',
-            'display_id': 'deadliest-catch-jakes-farewell-pots',
+            'id': '453464',
+            'display_id': 'survival-zone-food-and-water-in-the-savanna',
             'ext': 'mp4',
-            'title': 'Deadliest Catch: Jake\'s Farewell Pots',
-            'description': 'md5:9632c346d5e43ee238028c9cefd8dbbc',
+            'title': 'Survival Zone: Food and Water In the Savanna',
+            'description': 'md5:7e1c89f6411434970c15fa094170c371',
             'thumbnail': 're:^https?://.*\.jpg$',
         },
         'params': {
@@ -33,8 +33,7 @@ class HuffPostIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         api_url = 'http://embed.live.huffingtonpost.com/api/segments/%s.json' % video_id
         data = self._download_json(api_url, video_id)['data']
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 

@@ -20,13 +18,11 @@ class IconosquareIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        html_title = self._html_search_regex(
-            r'<title>(.+?)</title>',
+        title = self._html_search_regex(
+            r'<title>(.+?)(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)</title>',
             webpage, 'title')
-        title = re.sub(r'(?: *\(Videos?\))? \| (?:Iconosquare|Statigram)$', '', html_title)
         uploader_id = self._html_search_regex(
             r'@([^ ]+)', title, 'uploader name', fatal=False)
@@ -71,6 +71,7 @@ class IGNIE(InfoExtractor):
 
     def _find_video_id(self, webpage):
         res_id = [
+            r'"video_id"\s*:\s*"(.*?)"',
            r'data-video-id="(.+?)"',
            r'<object id="vid_(.+?)"',
            r'<meta name="og:image" content=".*/(.+?)-(.+?)/.+.jpg"',

@@ -85,10 +86,15 @@ class IGNIE(InfoExtractor):
         webpage = self._download_webpage(url, name_or_id)
         if page_type != 'video':
             multiple_urls = re.findall(
-                '<param name="flashvars" value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
+                '<param name="flashvars"[^>]*value="[^"]*?url=(https?://www\.ign\.com/videos/.*?)["&]',
                 webpage)
             if multiple_urls:
-                return [self.url_result(u, ie='IGN') for u in multiple_urls]
+                entries = [self.url_result(u, ie='IGN') for u in multiple_urls]
+                return {
+                    '_type': 'playlist',
+                    'id': name_or_id,
+                    'entries': entries,
+                }
 
         video_id = self._find_video_id(webpage)
         result = self._get_video_info(video_id)

@@ -111,13 +117,13 @@ class IGNIE(InfoExtractor):
 
 
 class OneUPIE(IGNIE):
-    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)'
+    _VALID_URL = r'https?://gamevideos\.1up\.com/(?P<type>video)/id/(?P<name_or_id>.+)\.html'
     IE_NAME = '1up.com'
 
     _DESCRIPTION_RE = r'<div id="vid_summary">(.+?)</div>'
 
     _TESTS = [{
-        'url': 'http://gamevideos.1up.com/video/id/34976',
+        'url': 'http://gamevideos.1up.com/video/id/34976.html',
         'md5': '68a54ce4ebc772e4b71e3123d413163d',
         'info_dict': {
             'id': '34976',
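Instead of returning a bare list, the IGN change wraps the entries in a playlist result dict. A sketch of the expected shape (IDs and URLs invented; each entry is what `url_result()` produces):

entries = [
    {'_type': 'url', 'url': 'http://www.ign.com/videos/a', 'ie_key': 'IGN'},
    {'_type': 'url', 'url': 'http://www.ign.com/videos/b', 'ie_key': 'IGN'},
]

playlist = {
    '_type': 'playlist',
    'id': 'some-article-slug',  # name_or_id in the code above
    'entries': entries,
}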
@@ -6,7 +6,6 @@ import json
 
 from .common import InfoExtractor
 from ..utils import (
-    compat_urlparse,
     get_element_by_attribute,
 )
 

@@ -27,10 +26,11 @@ class ImdbIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage('http://www.imdb.com/video/imdb/vi%s' % video_id, video_id)
-        descr = get_element_by_attribute('itemprop', 'description', webpage)
+        descr = self._html_search_regex(
+            r'(?s)<span itemprop="description">(.*?)</span>',
+            webpage, 'description', fatal=False)
         available_formats = re.findall(
             r'case \'(?P<f_id>.*?)\' :$\s+url = \'(?P<path>.*?)\'', webpage,
             flags=re.MULTILINE)

@@ -73,9 +73,7 @@ class ImdbListIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        list_id = mobj.group('id')
-
+        list_id = self._match_id(url)
         webpage = self._download_webpage(url, list_id)
         entries = [
             self.url_result('http://www.imdb.com' + m, 'Imdb')
@@ -1,3 +1,5 @@
+from __future__ import unicode_literals
+
 import re
 
 from .common import InfoExtractor

@@ -12,12 +14,13 @@ class InternetVideoArchiveIE(InfoExtractor):
     _VALID_URL = r'https?://video\.internetvideoarchive\.net/flash/players/.*?\?.*?publishedid.*?'
 
     _TEST = {
-        u'url': u'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
-        u'file': u'452693.mp4',
-        u'info_dict': {
-            u'title': u'SKYFALL',
-            u'description': u'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
-            u'duration': 153,
+        'url': 'http://video.internetvideoarchive.net/flash/players/flashconfiguration.aspx?customerid=69249&publishedid=452693&playerid=247',
+        'info_dict': {
+            'id': '452693',
+            'ext': 'mp4',
+            'title': 'SKYFALL',
+            'description': 'In SKYFALL, Bond\'s loyalty to M is tested as her past comes back to haunt her. As MI6 comes under attack, 007 must track down and destroy the threat, no matter how personal the cost.',
+            'duration': 149,
         },
     }
 

@@ -42,7 +45,7 @@ class InternetVideoArchiveIE(InfoExtractor):
         url = self._build_url(query)
 
         flashconfiguration = self._download_xml(url, video_id,
-            u'Downloading flash configuration')
+            'Downloading flash configuration')
         file_url = flashconfiguration.find('file').text
         file_url = file_url.replace('/playlist.aspx', '/mrssplaylist.aspx')
         # Replace some of the parameters in the query to get the best quality

@@ -51,7 +54,7 @@ class InternetVideoArchiveIE(InfoExtractor):
             lambda m: self._clean_query(m.group()),
             file_url)
         info = self._download_xml(file_url, video_id,
-            u'Downloading video info')
+            'Downloading video info')
         item = info.find('channel/item')
 
         def _bp(p):
@@ -5,11 +5,11 @@ import re
 
 from .common import InfoExtractor
 from ..utils import (
-    get_element_by_id,
-    parse_iso8601,
     determine_ext,
-    int_or_none,
     float_or_none,
+    get_element_by_id,
+    int_or_none,
+    parse_iso8601,
     str_to_int,
 )
 

@@ -30,7 +30,7 @@ class IzleseneIE(InfoExtractor):
             'description': 'md5:253753e2655dde93f59f74b572454f6d',
             'thumbnail': 're:^http://.*\.jpg',
             'uploader_id': 'pelikzzle',
-            'timestamp': 1404298698,
+            'timestamp': 1404302298,
             'upload_date': '20140702',
             'duration': 95.395,
             'age_limit': 0,

@@ -46,7 +46,7 @@ class IzleseneIE(InfoExtractor):
             'description': 'Tarkan Dortmund 2006 Konseri',
             'thumbnail': 're:^http://.*\.jpg',
             'uploader_id': 'parlayankiz',
-            'timestamp': 1163318593,
+            'timestamp': 1163322193,
             'upload_date': '20061112',
             'duration': 253.666,
             'age_limit': 0,

@@ -55,15 +55,15 @@ class IzleseneIE(InfoExtractor):
     ]
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        url = 'http://www.izlesene.com/video/%s' % video_id
+        video_id = self._match_id(url)
+
+        url = 'http://www.izlesene.com/video/%s' % video_id
         webpage = self._download_webpage(url, video_id)
 
         title = self._og_search_title(webpage)
         description = self._og_search_description(webpage)
-        thumbnail = self._og_search_thumbnail(webpage)
+        thumbnail = self._proto_relative_url(
+            self._og_search_thumbnail(webpage), scheme='http:')
 
         uploader = self._html_search_regex(
             r"adduserUsername\s*=\s*'([^']+)';",
@@ -1,8 +1,6 @@
 # coding=utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 from ..utils import (
     int_or_none,

@@ -12,14 +10,14 @@ from ..utils import (
 
 class JpopsukiIE(InfoExtractor):
     IE_NAME = 'jpopsuki.tv'
-    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/video/(.*?)/(?P<id>\S+)'
+    _VALID_URL = r'https?://(?:www\.)?jpopsuki\.tv/(?:category/)?video/[^/]+/(?P<id>\S+)'
 
     _TEST = {
         'url': 'http://www.jpopsuki.tv/video/ayumi-hamasaki---evolution/00be659d23b0b40508169cdee4545771',
         'md5': '88018c0c1a9b1387940e90ec9e7e198e',
-        'file': '00be659d23b0b40508169cdee4545771.mp4',
         'info_dict': {
+            'id': '00be659d23b0b40508169cdee4545771',
+            'ext': 'mp4',
             'title': 'ayumi hamasaki - evolution',
             'description': 'Release date: 2001.01.31\r\n浜崎あゆみ - evolution',
             'thumbnail': 'http://www.jpopsuki.tv/cache/89722c74d2a2ebe58bcac65321c115b2.jpg',

@@ -30,8 +28,7 @@ class JpopsukiIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
+        video_id = self._match_id(url)
 
         webpage = self._download_webpage(url, video_id)
 

@@ -47,11 +44,9 @@ class JpopsukiIE(InfoExtractor):
         uploader_id = self._html_search_regex(
             r'<li>from: <a href="/user/view/user/\S*?/uid/(\d*)',
             webpage, 'video uploader_id', fatal=False)
-        upload_date = self._html_search_regex(
+        upload_date = unified_strdate(self._html_search_regex(
             r'<li>uploaded: (.*?)</li>', webpage, 'video upload_date',
-            fatal=False)
-        if upload_date is not None:
-            upload_date = unified_strdate(upload_date)
+            fatal=False))
         view_count_str = self._html_search_regex(
             r'<li>Hits: ([0-9]+?)</li>', webpage, 'video view_count',
             fatal=False)
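The `if upload_date is not None` dance could be dropped because `unified_strdate` is assumed to tolerate `None` and unparseable input itself, returning `None`, which is exactly what this call site now relies on. A sketch:

from youtube_dl.utils import unified_strdate

print(unified_strdate('2014-11-13'))  # '20141113'
print(unified_strdate(None))          # None (assumed guard inside the helper)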
@@ -11,10 +11,9 @@ from ..utils import (
 
 
 class JukeboxIE(InfoExtractor):
-    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<video_id>[a-z0-9\-]+)\.html'
+    _VALID_URL = r'^http://www\.jukebox?\..+?\/.+[,](?P<id>[a-z0-9\-]+)\.html'
     _TEST = {
         'url': 'http://www.jukebox.es/kosheen/videoclip,pride,r303r.html',
-        'md5': '1574e9b4d6438446d5b7dbcdf2786276',
         'info_dict': {
             'id': 'r303r',
             'ext': 'flv',

@@ -24,8 +23,7 @@ class JukeboxIE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('video_id')
+        video_id = self._match_id(url)
 
         html = self._download_webpage(url, video_id)
         iframe_url = unescapeHTML(self._search_regex(r'<iframe .*src="([^"]*)"', html, 'iframe url'))
@@ -1,155 +0,0 @@
from __future__ import unicode_literals

import itertools
import json
import os
import re

from .common import InfoExtractor
from ..utils import (
    compat_str,
    ExtractorError,
    formatSeconds,
)


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = 'justin.tv'
    IE_DESC = 'justin.tv and twitch.tv'
    _TEST = {
        'url': 'http://www.twitch.tv/thegamedevhub/b/296128360',
        'md5': 'ecaa8a790c22a40770901460af191c9a',
        'info_dict': {
            'id': '296128360',
            'ext': 'flv',
            'upload_date': '20110927',
            'uploader_id': 25114803,
            'uploader': 'thegamedevhub',
            'title': 'Beginner Series - Scripting With Python Pt.1'
        }
    }

    # Return count of items, list of *valid* items
    def _parse_page(self, url, video_id, counter):
        info_json = self._download_webpage(
            url, video_id,
            'Downloading video info JSON on page %d' % counter,
            'Unable to download video info JSON %d' % counter)

        response = json.loads(info_json)
        if type(response) != list:
            error_text = response.get('error', 'unknown error')
            raise ExtractorError('Justin.tv API: %s' % error_text)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                video_id = clip['id']
                video_title = clip.get('title', video_id)
                info.append({
                    'id': compat_str(video_id),
                    'url': video_url,
                    'title': video_title,
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        api_base = 'http://api.justin.tv'
        paged = False
        if mobj.group('channelid'):
            paged = True
            video_id = mobj.group('channelid')
            api = api_base + '/channel/archives/%s.json' % video_id
        elif mobj.group('chapterid'):
            chapter_id = mobj.group('chapterid')

            webpage = self._download_webpage(url, chapter_id)
            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
            if not m:
                raise ExtractorError('Cannot find archive of a chapter')
            archive_id = m.group(1)

            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            doc = self._download_xml(
                api, chapter_id,
                note='Downloading chapter information',
                errnote='Chapter information download failed')
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError('Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or 'flv'

            chapter_api_url = 'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info = self._download_json(
                chapter_api_url, 'c' + chapter_id,
                note='Downloading chapter metadata',
                errnote='Download of chapter metadata failed')

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            #   youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            # video_url += '?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning('Chapter detected, but we can just download the whole file. '
                                            'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': 'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return info
        else:
            video_id = mobj.group('videoid')
            api = api_base + '/broadcast/by_archive/%s.json' % video_id

        entries = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        for counter in itertools.count(1):
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(
                page_url, video_id, counter)
            entries.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return {
            '_type': 'playlist',
            'id': video_id,
            'entries': entries,
        }
@@ -1,8 +1,6 @@
 # encoding: utf-8
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 

@@ -21,22 +19,17 @@ class KickStarterIE(InfoExtractor):
     }, {
         'note': 'Embedded video (not using the native kickstarter video service)',
         'url': 'https://www.kickstarter.com/projects/597507018/pebble-e-paper-watch-for-iphone-and-android/posts/659178',
-        'playlist': [
-            {
-                'info_dict': {
-                    'id': '78704821',
-                    'ext': 'mp4',
-                    'uploader_id': 'pebble',
-                    'uploader': 'Pebble Technology',
-                    'title': 'Pebble iOS Notifications',
-                }
-            }
-        ],
+        'info_dict': {
+            'id': '78704821',
+            'ext': 'mp4',
+            'uploader_id': 'pebble',
+            'uploader': 'Pebble Technology',
+            'title': 'Pebble iOS Notifications',
+        }
     }]
 
     def _real_extract(self, url):
-        m = re.match(self._VALID_URL, url)
-        video_id = m.group('id')
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
         title = self._html_search_regex(
@@ -34,7 +34,7 @@ class KontrTubeIE(InfoExtractor):
         video_url = self._html_search_regex(r"video_url: '(.+?)/?',", webpage, 'video URL')
         thumbnail = self._html_search_regex(r"preview_url: '(.+?)/?',", webpage, 'video thumbnail', fatal=False)
         title = self._html_search_regex(
-            r'<title>(.+?) - Труба зовёт - Интересный видеохостинг</title>', webpage, 'video title')
+            r'<title>(.+?)</title>', webpage, 'video title')
         description = self._html_search_meta('description', webpage, 'video description')
 
         mobj = re.search(
@@ -1,7 +1,5 @@
 from __future__ import unicode_literals
 
-import re
-
 from .common import InfoExtractor
 
 

@@ -18,11 +16,11 @@ class Ku6IE(InfoExtractor):
     }
 
     def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-
+        video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
-        title = self._search_regex(r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
 
+        title = self._html_search_regex(
+            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')
         dataUrl = 'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id
         jsonData = self._download_json(dataUrl, video_id)
         downloadUrl = jsonData['data']['f']
youtube_dl/extractor/laola1tv.py (new file, 78 lines)
@@ -0,0 +1,78 @@
from __future__ import unicode_literals

import random
import re

from .common import InfoExtractor
from ..utils import ExtractorError


class Laola1TvIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
    _TEST = {
        'url': 'http://www.laola1.tv/de-de/live/bwf-bitburger-open-grand-prix-gold-court-1/250019.html',
        'info_dict': {
            'id': '250019',
            'ext': 'mp4',
            'title': 'Bitburger Open Grand Prix Gold - Court 1',
            'categories': ['Badminton'],
            'uploader': 'BWF - Badminton World Federation',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    _BROKEN = True  # Not really - extractor works fine, but f4m downloader does not support live streams yet.

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        lang = mobj.group('lang')
        portal = mobj.group('portal')

        webpage = self._download_webpage(url, video_id)
        iframe_url = self._search_regex(
            r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
            webpage, 'iframe URL')

        iframe = self._download_webpage(
            iframe_url, video_id, note='Downloading iframe')
        flashvars_m = re.findall(
            r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
        flashvars = dict((m[0], m[1]) for m in flashvars_m)

        xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
                   'play=%s&partner=1&portal=%s&v5ident=&lang=%s' % (
                       video_id, portal, lang))
        hd_doc = self._download_xml(xml_url, video_id)

        title = hd_doc.find('.//video/title').text
        flash_url = hd_doc.find('.//video/url').text
        categories = hd_doc.find('.//video/meta_sports').text.split(',')
        uploader = hd_doc.find('.//video/meta_organistation').text

        ident = random.randint(10000000, 99999999)
        token_url = '%s&ident=%s&klub=0&unikey=0&timestamp=%s&auth=%s' % (
            flash_url, ident, flashvars['timestamp'], flashvars['auth'])

        token_doc = self._download_xml(
            token_url, video_id, note='Downloading token')
        token_attrib = token_doc.find('.//token').attrib
        if token_attrib.get('auth') == 'blocked':
            raise ExtractorError('Token error: %s' % token_attrib.get('comment'))

        video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
            token_attrib['url'], token_attrib['auth'])

        return {
            'id': video_id,
            'is_live': True,
            'title': title,
            'url': video_url,
            'uploader': uploader,
            'categories': categories,
            'ext': 'mp4',
        }
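The iframe scraping turns `flashvars.name = "value";` assignments into a dict. A standalone run of that regex, sketched with invented input:

import re

iframe = '''
flashvars.timestamp = "1415887200";
flashvars.auth = "deadbeef";
'''

flashvars = dict(re.findall(
    r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe))

print(flashvars['timestamp'], flashvars['auth'])  # 1415887200 deadbeef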
Some files were not shown because too many files have changed in this diff.