Compare commits
2016.02.13...2016.03.27 (475 commits)
Commit list: 475 commits between the two tags, from a122e7080b through c43fe0268c; the author, date, and message columns of the commit table were not captured.
.gitignore (vendored, 3 changed lines)

@@ -1,5 +1,6 @@
 *.pyc
 *.pyo
+*.class
 *~
 *.DS_Store
 wine-py2exe/
@@ -32,4 +33,4 @@ test/testdata
 .tox
 youtube-dl.zsh
-.idea
+.idea/*
 .idea/*
AUTHORS (10 changed lines)

@@ -157,3 +157,13 @@ Founder Fang
 Andrew Alexeyew
 Saso Bezlaj
 Erwin de Haan
+Jens Wille
+Robin Houtevelts
+Patrick Griffis
+Aidan Rowe
+mutantmonkey
+Ben Congdon
+Kacper Michajłow
+José Joaquín Atria
+Viťas Strádal
+Kagami Hiiragi
CONTRIBUTING.md

@@ -1,6 +1,6 @@
-**Please include the full output of youtube-dl when run with `-v`**, i.e. add `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
 ```
-$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
+$ youtube-dl -v <your command line>
 [debug] System config: []
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
@@ -85,14 +85,16 @@ To run the test, simply invoke your favorite test runner, or execute a test file
 If you want to create a build of youtube-dl yourself, you'll need
 
 * python
-* make
+* make (both GNU make and BSD make are supported)
 * pandoc
 * zip
 * nosetests
 
 ### Adding support for a new site
 
-If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites thus pull requests adding support for them **will be rejected**.
+
+After you have ensured this site is distributing it's content legally, you can follow this quick list (assuming your service is called `yourextractor`):
 
 1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
 2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
@@ -140,16 +142,17 @@ If you want to add support for a new site, you can follow this quick list (assum
 ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
-8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
-9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
+8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
+9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
+10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
         $ git commit -m '[yourextractor] Add new extractor'
         $ git push origin yourextractor
 
-10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
 
 In any case, thank you very much for your contributions!
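As a hedged illustration of the new item 8 above, a tolerant extractor fragment might look like the following; the site, API URL, and field names are made up for the example, and only `id`, `title`, and `url` are allowed to fail hard:

```python
# Hypothetical extractor fragment: 'id', 'title' and 'url'/'formats' are the
# only fields extraction cannot do without; everything else is pulled
# tolerantly so a missing source does not break the whole extraction.
def _real_extract(self, url):
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)
    meta = self._download_json(  # 'meta' is an illustrative intermediate dict
        'https://yourextractor.com/api/%s' % video_id, video_id)

    return {
        'id': video_id,
        'title': meta['title'],  # mandatory: let extraction fail loudly if absent
        'url': meta['video_url'],
        # optional: .get() tolerates a missing key; fatal=False tolerates a
        # regex that stops matching when the page layout changes
        'description': meta.get('summary'),
        'uploader': self._search_regex(
            r'uploaded by:\s*<span>([^<]+)', webpage, 'uploader', fatal=False),
    }
```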
Makefile (13 changed lines)

@@ -3,6 +3,7 @@ all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bas
 clean:
 	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
 	find . -name "*.pyc" -delete
+	find . -name "*.class" -delete
 
 PREFIX ?= /usr/local
 BINDIR ?= $(PREFIX)/bin
@@ -11,15 +12,7 @@ SHAREDIR ?= $(PREFIX)/share
 PYTHON ?= /usr/bin/env python
 
 # set SYSCONFDIR to /etc if PREFIX=/usr or PREFIX=/usr/local
-ifeq ($(PREFIX),/usr)
-	SYSCONFDIR=/etc
-else
-	ifeq ($(PREFIX),/usr/local)
-		SYSCONFDIR=/etc
-	else
-		SYSCONFDIR=$(PREFIX)/etc
-	endif
-endif
+SYSCONFDIR != if [ $(PREFIX) = /usr -o $(PREFIX) = /usr/local ]; then echo /etc; else echo $(PREFIX)/etc; fi
 
 install: youtube-dl youtube-dl.1 youtube-dl.bash-completion youtube-dl.zsh youtube-dl.fish
 	install -d $(DESTDIR)$(BINDIR)
@@ -44,7 +37,7 @@ test:
 ot: offlinetest
 
 offlinetest: codetest
-	nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py
+	$(PYTHON) -m nose --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py
 
 tar: youtube-dl.tar.gz
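The SYSCONFDIR hunk swaps GNU-make-only nested `ifeq` blocks for a `!=` shell assignment that BSD make also understands. The decision it computes is equivalent to this small Python sketch (illustrative only, not part of the build):

```python
def sysconfdir(prefix):
    # Same decision the Makefile's shell one-liner makes: the system-wide
    # prefixes get /etc, anything else gets an etc/ under the prefix.
    return '/etc' if prefix in ('/usr', '/usr/local') else prefix + '/etc'

assert sysconfdir('/usr') == '/etc'
assert sysconfdir('/usr/local') == '/etc'
assert sysconfdir('/opt/youtube-dl') == '/opt/youtube-dl/etc'
```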
README.md (63 changed lines)

@@ -80,6 +80,8 @@ which means you can modify it, redistribute it or use it however you like.
 on Windows)
 --flat-playlist Do not extract the videos of a playlist,
 only list them.
+--mark-watched Mark videos watched (YouTube only)
+--no-mark-watched Do not mark videos watched (YouTube only)
 --no-color Do not emit color codes in output
 
 ## Network Options:
@@ -162,6 +164,8 @@ which means you can modify it, redistribute it or use it however you like.
 (e.g. 50K or 4.2M)
 -R, --retries RETRIES Number of retries (default is 10), or
 "infinite".
+--fragment-retries RETRIES Number of retries for a fragment (default
+is 10), or "infinite" (DASH only)
 --buffer-size SIZE Size of download buffer (e.g. 1024 or 16K)
 (default is 1024)
 --no-resize-buffer Do not automatically adjust the buffer
@@ -179,7 +183,7 @@ which means you can modify it, redistribute it or use it however you like.
 to play it)
 --external-downloader COMMAND Use the specified external downloader.
 Currently supports
-aria2c,axel,curl,httpie,wget
+aria2c,avconv,axel,curl,ffmpeg,httpie,wget
 --external-downloader-args ARGS Give these arguments to the external
 downloader
@@ -374,8 +378,8 @@ which means you can modify it, redistribute it or use it however you like.
 --no-post-overwrites Do not overwrite post-processed files; the
 post-processed files are overwritten by
 default
---embed-subs Embed subtitles in the video (only for mkv
-and mp4 videos)
+--embed-subs Embed subtitles in the video (only for mp4,
+webm and mkv videos)
 --embed-thumbnail Embed thumbnail in the audio as cover art
 --add-metadata Write metadata to the video file
 --metadata-from-title FORMAT Parse additional metadata like song title /
@@ -409,13 +413,18 @@ which means you can modify it, redistribute it or use it however you like.
 
 # CONFIGURATION
 
-You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use a proxy:
+You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`.
+
+For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime, use a proxy and save all videos under `Movies` directory in your home directory:
 ```
---extract-audio
+-x
 --no-mtime
 --proxy 127.0.0.1:3128
+-o ~/Movies/%(title)s.%(ext)s
 ```
 
+Note that options in configuration file are just the same options aka switches used in regular command line calls thus there **must be no whitespace** after `-` or `--`, e.g. `-o` or `--proxy` but not `- o` or `-- proxy`.
+
 You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
 
 ### Authentication with `.netrc` file
@@ -440,7 +449,11 @@ On Windows you may also need to setup the `%HOME%` environment variable manually
 
 # OUTPUT TEMPLATE
 
-The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
+The `-o` option allows users to indicate a template for the output file names.
+
+**tl;dr:** [navigate me to examples](#output-template-examples).
+
+The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
 
 - `id`: Video identifier
 - `title`: Video title
@@ -449,6 +462,7 @@ The `-o` option allows users to indicate a template for the output file names. T
 - `alt_title`: A secondary title of the video
 - `display_id`: An alternative identifier for the video
 - `uploader`: Full name of the video uploader
+- `license`: License name the video is licensed under
 - `creator`: The main artist who created the video
 - `release_date`: The date (YYYYMMDD) when the video was released
 - `timestamp`: UNIX timestamp of the moment the video became available
@@ -513,7 +527,9 @@ The current default template is `%(title)s-%(id)s.%(ext)s`.
 
 In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
 
-Examples (note on Windows you may need to use double quotes instead of single):
+#### Output template examples
+
+Note on Windows you may need to use double quotes instead of single.
 
 ```bash
 $ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc
@@ -525,6 +541,9 @@ youtube-dl_test_video_.mp4 # A simple file name
 # Download YouTube playlist videos in separate directory indexed by video order in a playlist
 $ youtube-dl -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
 
+# Download all playlists of YouTube channel/user keeping each playlist in separate directory:
+$ youtube-dl -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/user/TheLinuxFoundation/playlists
+
 # Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home
 $ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
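The `%(NAME)s` sequences are ordinary Python string interpolation against the video's info dict, so a template's behavior can be previewed with plain `%` formatting (a sketch; the field values below are made up):

```python
template = '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s'
info = {  # illustrative values for one video of a playlist
    'uploader': 'TheLinuxFoundation',
    'playlist': 'LinuxCon 2015',
    'playlist_index': 7,
    'title': 'Keynote',
    'ext': 'mp4',
}
print(template % info)  # TheLinuxFoundation/LinuxCon 2015/7 - Keynote.mp4
```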
@@ -543,6 +562,8 @@ But sometimes you may want to download in a different format, for example when y
 
 The general syntax for format selection is `--format FORMAT` or shorter `-f FORMAT` where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
 
+**tl;dr:** [navigate me to examples](#format-selection-examples).
+
 The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
 
 You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download best quality format of particular file extension served as a single file, e.g. `-f webm` will download best quality format with `webm` extension served as a single file.
@@ -588,11 +609,14 @@ You can merge the video and audio of two formats into a single file using `-f <v
 
 Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
 
-Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
+Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see [#5447](https://github.com/rg3/youtube-dl/issues/5447), [#5456](https://github.com/rg3/youtube-dl/issues/5456)). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
 
 If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
 
-Examples (note on Windows you may need to use double quotes instead of single):
+#### Format selection examples
+
+Note on Windows you may need to use double quotes instead of single.
 
 ```bash
 # Download best mp4 format available or any other best if no mp4 available
 $ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
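Conceptually, a filter such as `best[height<=480]` boils down to filtering the extractor's formats list and taking the best surviving entry. The following is a rough sketch of that idea only, not youtube-dl's actual selector parser:

```python
# Illustrative subset of what an extractor might return, listed worst to best
# (youtube-dl sorts formats ascending, so the best candidate comes last).
formats = [
    {'format_id': '243', 'ext': 'webm', 'height': 360},
    {'format_id': '135', 'ext': 'mp4', 'height': 480},
    {'format_id': '136', 'ext': 'mp4', 'height': 720},
]

# Roughly what "-f 'best[height<=480]'" asks for: keep matching formats,
# then pick the last (best) one.
candidates = [f for f in formats if f.get('height') and f['height'] <= 480]
print(candidates[-1]['format_id'])  # 135
```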
@@ -733,7 +757,7 @@ means you're using an outdated version of Python. Please update to Python 2.6 or
 
 ### What is this binary file? Where has the code gone?
 
-Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
+Since June 2012 ([#342](https://github.com/rg3/youtube-dl/issues/342)) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
 
 ### The exe throws a *Runtime error from Visual C++*
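The "executable zipfile" mechanism mentioned above is plain Python: the interpreter can run any zip archive that contains a top-level `__main__.py`, and a shebang line can be prepended to the archive. A minimal sketch of the general technique (not the project's actual `make youtube-dl` recipe):

```python
import os
import zipfile

# Build a tiny executable zip. The shebang bytes written before the archive
# survive because zip readers locate the central directory from the end.
with open('hello', 'wb') as out:
    out.write(b'#!/usr/bin/env python\n')
    with zipfile.ZipFile(out, 'w') as zf:
        zf.writestr('__main__.py', "print('hello from a zipfile')\n")
os.chmod('hello', 0o755)
# ./hello now executes __main__.py from inside the archive
```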
@@ -809,14 +833,16 @@ To run the test, simply invoke your favorite test runner, or execute a test file
 If you want to create a build of youtube-dl yourself, you'll need
 
 * python
-* make
+* make (both GNU make and BSD make are supported)
 * pandoc
 * zip
 * nosetests
 
 ### Adding support for a new site
 
-If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
+If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites thus pull requests adding support for them **will be rejected**.
+
+After you have ensured this site is distributing it's content legally, you can follow this quick list (assuming your service is called `yourextractor`):
 
 1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
 2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
@@ -864,16 +890,17 @@ If you want to add support for a new site, you can follow this quick list (assum
 ```
 5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
 6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
-7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
-8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
-9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
+7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
+8. Keep in mind that the only mandatory fields in info dict for successful extraction process are `id`, `title` and either `url` or `formats`, i.e. these are the critical data the extraction does not make any sense without. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from aforementioned mandatory ones should be treated **as optional** and extraction should be **tolerate** to situations when sources for these fields can potentially be unavailable (even if they always available at the moment) and **future-proof** in order not to break the extraction of general purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into resulting info dict as `description`, you should be ready that this key may be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting data from a webpage with `_search_regex/_html_search_regex`.
+9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
+10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
 
         $ git add youtube_dl/extractor/__init__.py
         $ git add youtube_dl/extractor/yourextractor.py
         $ git commit -m '[yourextractor] Add new extractor'
         $ git push origin yourextractor
 
-10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
+11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
 
 In any case, thank you very much for your contributions!
@@ -935,9 +962,9 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
 
 Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
 
-**Please include the full output of youtube-dl when run with `-v`**, i.e. add `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
+**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
 ```
-$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
+$ youtube-dl -v <your command line>
 [debug] System config: []
 [debug] User config: []
 [debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
docs/supportedsites.md

@@ -30,6 +30,7 @@
 - **AlJazeera**
 - **Allocine**
 - **AlphaPorno**
+- **AnimeOnDemand**
 - **anitube.se**
 - **AnySex**
 - **Aparat**
@@ -49,9 +50,11 @@
 - **arte.tv:ddc**
 - **arte.tv:embed**
 - **arte.tv:future**
+- **arte.tv:magazine**
 - **AtresPlayer**
 - **ATTTechChannel**
 - **AudiMedia**
+- **AudioBoom**
 - **audiomack**
 - **audiomack:album**
 - **Azubu**
@@ -71,12 +74,15 @@
 - **Bigflix**
 - **Bild**: Bild.de
 - **BiliBili**
+- **BioBioChileTV**
 - **BleacherReport**
 - **BleacherReportCMS**
 - **blinkx**
 - **Bloomberg**
+- **BokeCC**
 - **Bpb**: Bundeszentrale für politische Bildung
 - **BR**: Bayerischer Rundfunk Mediathek
+- **BravoTV**
 - **Break**
 - **brightcove:legacy**
 - **brightcove:new**
@@ -95,6 +101,7 @@
 - **CBSNews**: CBS News
 - **CBSNewsLiveVideo**: CBS News Live Videos
 - **CBSSports**
+- **CDA**
 - **CeskaTelevize**
 - **channel9**: Channel 9
 - **Chaturbate**
@@ -164,6 +171,8 @@
 - **Dump**
 - **Dumpert**
 - **dvtv**: http://video.aktualne.cz/
+- **dw**
+- **dw:article**
 - **EaglePlatform**
 - **EbaumsWorld**
 - **EchoMsk**
@@ -187,10 +196,10 @@
 - **ExpoTV**
 - **ExtremeTube**
 - **facebook**
+- **facebook:post**
 - **faz.net**
 - **fc2**
 - **Fczenit**
 - **features.aol.com**
-- **fernsehkritik.tv**
 - **Firstpost**
 - **FiveTV**
@@ -237,6 +246,7 @@
 - **GPUTechConf**
 - **Groupon**
 - **Hark**
+- **HBO**
 - **HearThisAt**
 - **Heise**
 - **HellPorno**
@@ -290,6 +300,7 @@
 - **kontrtube**: KontrTube.ru - Труба зовёт
 - **KrasView**: Красвью
 - **Ku6**
+- **KUSI**
 - **kuwo:album**: 酷我音乐 - 专辑
 - **kuwo:category**: 酷我音乐 - 分类
 - **kuwo:chart**: 酷我音乐 - 排行榜
@@ -298,12 +309,11 @@
 - **kuwo:song**: 酷我音乐
 - **la7.tv**
 - **Laola1Tv**
+- **Le**: 乐视网
 - **Lecture2Go**
 - **Lemonde**
-- **Letv**: 乐视网
+- **LePlaylist**
 - **LetvCloud**: 乐视云
-- **LetvPlaylist**
-- **LetvTv**
 - **Libsyn**
 - **life:embed**
 - **lifenews**: LIFE | NEWS
@@ -321,6 +331,7 @@
 - **m6**
 - **macgamestore**: MacGameStore trailers
 - **mailru**: Видео@Mail.Ru
+- **MakersChannel**
 - **MakerTV**
 - **Malemotion**
 - **MatchTV**
@@ -331,10 +342,12 @@
 - **Mgoon**
 - **Minhateca**
 - **MinistryGrid**
+- **Minoto**
 - **miomio.tv**
 - **MiTele**: mitele.es
 - **mixcloud**
 - **MLB**
+- **Mnet**
 - **MoeVideo**: LetitBit video services: moevideo.net, playreplay.net and videochart.net
 - **Mofosex**
 - **Mojvideo**
@@ -360,7 +373,7 @@
 - **MySpace:album**
 - **MySpass**
 - **Myvi**
-- **myvideo**
+- **myvideo** (Currently broken)
 - **MyVidster**
 - **n-tv.de**
 - **NationalGeographic**
@@ -410,6 +423,7 @@
 - **NowTV** (Currently broken)
 - **NowTVList**
 - **nowvideo**: NowVideo
+- **Noz**
 - **npo**: npo.nl and ntr.nl
 - **npo.nl:live**
 - **npo.nl:radio**
@@ -417,6 +431,7 @@
 - **Npr**
 - **NRK**
 - **NRKPlaylist**
+- **NRKSkole**: NRK Skole
 - **NRKTV**: NRK TV and NRK Radio
 - **ntv.ru**
 - **Nuvid**
@@ -429,6 +444,7 @@
 - **OnionStudios**
 - **Ooyala**
 - **OoyalaExternal**
+- **Openload**
 - **OraTV**
 - **orf:fm4**: radio FM4
 - **orf:iptv**: iptv.ORF.at
@@ -460,6 +476,7 @@
 - **PornHd**
 - **PornHub**
 - **PornHubPlaylist**
+- **PornHubUserVideos**
 - **Pornotube**
 - **PornoVoisines**
 - **PornoXO**
@@ -488,6 +505,7 @@
 - **Restudy**
 - **ReverbNation**
 - **Revision3**
+- **RICE**
 - **RingTV**
 - **RottenTomatoes**
 - **Roxwel**
@@ -512,6 +530,7 @@
 - **RUTV**: RUTV.RU
 - **Ruutu**
 - **safari**: safaribooksonline.com online video
+- **safari:api**
 - **safari:course**: safaribooksonline.com online courses
 - **Sandia**: Sandia National Laboratories
 - **Sapo**: SAPO Vídeos
@@ -522,6 +541,7 @@
 - **screen.yahoo:search**: Yahoo screen search
 - **Screencast**
 - **ScreencastOMatic**
+- **ScreenJunkies**
 - **ScreenwaveMedia**
 - **SenateISVP**
 - **ServingSys**
@@ -555,7 +575,6 @@
 - **southpark.de**
 - **southpark.nl**
 - **southparkstudios.dk**
-- **Space**
 - **SpankBang**
 - **Spankwire**
 - **Spiegel**
@@ -605,7 +624,9 @@
 - **TheOnion**
 - **ThePlatform**
 - **ThePlatformFeed**
+- **TheScene**
 - **TheSixtyOne**
+- **TheStar**
 - **ThisAmericanLife**
 - **ThisAV**
 - **THVideo**
@@ -615,6 +636,7 @@
 - **TMZ**
 - **TMZArticle**
 - **TNAFlix**
+- **TNAFlixNetworkEmbed**
 - **toggle**
 - **tou.tv**
 - **Toypics**: Toypics user profile
@@ -638,6 +660,7 @@
 - **tv.dfb.de**
 - **TV2**
 - **TV2Article**
+- **TV3**
 - **TV4**: tv4.se and tv4play.se
 - **TVC**
 - **TVCArticle**
@@ -655,6 +678,7 @@
 - **twitch:video**
 - **twitch:vod**
 - **twitter**
+- **twitter:amplify**
 - **twitter:card**
 - **Ubu**
 - **udemy**
@@ -662,8 +686,10 @@
 - **UDNEmbed**: 聯合影音
 - **Unistra**
 - **Urort**: NRK P3 Urørt
+- **USAToday**
 - **ustream**
 - **ustream:channel**
+- **Ustudio**
 - **Varzesh3**
 - **Vbox7**
 - **VeeHD**
@@ -674,12 +700,13 @@
 - **VGTV**: VGTV, BTTV, FTV, Aftenposten and Aftonbladet
 - **vh1.com**
 - **Vice**
+- **ViceShow**
 - **Viddler**
 - **video.google:search**: Google Video search
 - **video.mit.edu**
 - **VideoDetective**
 - **videofy.me**
-- **VideoMega** (Currently broken)
+- **VideoMega**
 - **videomore**
 - **videomore:season**
 - **videomore:video**
@@ -701,6 +728,7 @@
 - **vimeo:channel**
 - **vimeo:group**
 - **vimeo:likes**: Vimeo user likes
+- **vimeo:ondemand**
 - **vimeo:review**: Review pages on vimeo
 - **vimeo:user**
 - **vimeo:watchlater**: Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)
@@ -765,6 +793,7 @@
 - **youtube:channel**: YouTube.com channels
 - **youtube:favorites**: YouTube.com favourite videos, ":ytfav" for short (requires authentication)
 - **youtube:history**: Youtube watch history, ":ythistory" for short (requires authentication)
+- **youtube:live**: YouTube.com live streams
 - **youtube:playlist**: YouTube.com playlists
 - **youtube:playlists**: YouTube.com user/channel playlists
 - **youtube:recommended**: YouTube.com recommended videos, ":ytrec" for short (requires authentication)
test/helper.py

@@ -11,8 +11,11 @@ import sys
 
 import youtube_dl.extractor
 from youtube_dl import YoutubeDL
-from youtube_dl.utils import (
+from youtube_dl.compat import (
+    compat_os_name,
     compat_str,
+)
+from youtube_dl.utils import (
     preferredencoding,
     write_string,
 )
@@ -42,7 +45,7 @@ def report_warning(message):
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
-    if sys.stderr.isatty() and os.name != 'nt':
+    if sys.stderr.isatty() and compat_os_name != 'nt':
        _msg_header = '\033[0;33mWARNING:\033[0m'
    else:
        _msg_header = 'WARNING:'
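The idea behind the changed condition is unchanged: emit an ANSI-colored prefix only when stderr is an interactive terminal and the platform is not Windows, with `compat_os_name` standing in for `os.name` across Python variants. A standalone sketch of the same pattern:

```python
import os
import sys

def report_warning(message):
    # Color the prefix only when stderr is a tty and ANSI escapes are safe;
    # plain os.name stands in here for youtube-dl's compat_os_name.
    if sys.stderr.isatty() and os.name != 'nt':
        header = '\033[0;33mWARNING:\033[0m'
    else:
        header = 'WARNING:'
    sys.stderr.write('%s %s\n' % (header, message))

report_warning('this is what the helper prints')
```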
test/test_YoutubeDL.py

@@ -222,6 +222,11 @@ class TestFormatSelection(unittest.TestCase):
         downloaded = ydl.downloaded_info_dicts[0]
         self.assertEqual(downloaded['format_id'], 'dash-video-low')
 
+        ydl = YDL({'format': 'bestvideo[format_id^=dash][format_id$=low]'})
+        ydl.process_ie_result(info_dict.copy())
+        downloaded = ydl.downloaded_info_dicts[0]
+        self.assertEqual(downloaded['format_id'], 'dash-video-low')
+
         formats = [
             {'format_id': 'vid-vcodec-dot', 'ext': 'mp4', 'preference': 1, 'vcodec': 'avc1.123456', 'acodec': 'none', 'url': TEST_URL},
         ]
@@ -234,7 +239,7 @@ class TestFormatSelection(unittest.TestCase):
 
     def test_youtube_format_selection(self):
         order = [
-            '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
+            '38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
             # Apple HTTP Live Streaming
             '96', '95', '94', '93', '92', '132', '151',
             # 3D
@@ -502,6 +507,9 @@ class TestYoutubeDL(unittest.TestCase):
         assertRegexpMatches(self, ydl._format_note({
             'vbr': 10,
         }), '^\s*10k$')
+        assertRegexpMatches(self, ydl._format_note({
+            'fps': 30,
+        }), '^30fps$')
 
     def test_postprocessors(self):
         filename = 'post-processor-testfile.mp4'
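The new test exercises string operators in format filters: `format_id^=dash` is a prefix match and `format_id$=low` a suffix match (there is also `*=` for containment). Conceptually they reduce to plain string predicates, sketched here outside youtube-dl's actual selector machinery:

```python
format_ids = ['dash-video-low', 'dash-video-high', 'hls-video-low']

# format_id^=dash AND format_id$=low, as plain string predicates
matches = [f for f in format_ids if f.startswith('dash') and f.endswith('low')]
assert matches == ['dash-video-low']
```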
test/test_compat.py

@@ -19,6 +19,7 @@ from youtube_dl.compat import (
     compat_str,
     compat_urllib_parse_unquote,
     compat_urllib_parse_unquote_plus,
+    compat_urllib_parse_urlencode,
 )
 
 
@@ -70,6 +71,12 @@ class TestCompat(unittest.TestCase):
         self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
         self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
 
+    def test_compat_urllib_parse_urlencode(self):
+        self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def')
+        self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def')
+
     def test_compat_shlex_split(self):
         self.assertEqual(compat_shlex_split('-option "one two"'), ['-option', 'one two'])
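On Python 3 the stdlib already satisfies what this test demands of the compat wrapper, since `urllib.parse.urlencode` special-cases bytes keys and values; the wrapper exists to give Python 2 the same behavior. A quick check on Python 3:

```python
from urllib.parse import urlencode

# Python 3's urlencode accepts str and bytes interchangeably, which is the
# behavior compat_urllib_parse_urlencode guarantees on both Pythons.
assert urlencode({'abc': 'def'}) == 'abc=def'
assert urlencode({'abc': b'def'}) == 'abc=def'
assert urlencode({b'abc': 'def'}) == 'abc=def'
assert urlencode({b'abc': b'def'}) == 'abc=def'
```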
test/test_http.py

@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+# coding: utf-8
 from __future__ import unicode_literals
 
 # Allow direct execution
@@ -52,7 +53,12 @@ class TestHTTP(unittest.TestCase):
             ('localhost', 0), HTTPTestRequestHandler)
         self.httpd.socket = ssl.wrap_socket(
             self.httpd.socket, certfile=certfn, server_side=True)
-        self.port = self.httpd.socket.getsockname()[1]
+        if os.name == 'java':
+            # In Jython SSLSocket is not a subclass of socket.socket
+            sock = self.httpd.socket.sock
+        else:
+            sock = self.httpd.socket
+        self.port = sock.getsockname()[1]
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)
         self.server_thread.daemon = True
         self.server_thread.start()
@@ -115,5 +121,14 @@ class TestProxy(unittest.TestCase):
         response = ydl.urlopen(req).read().decode('utf-8')
         self.assertEqual(response, 'cn: {0}'.format(url))
 
+    def test_proxy_with_idn(self):
+        ydl = YoutubeDL({
+            'proxy': 'localhost:{0}'.format(self.port),
+        })
+        url = 'http://中文.tw/'
+        response = ydl.urlopen(url).read().decode('utf-8')
+        # b'xn--fiq228c' is '中文'.encode('idna')
+        self.assertEqual(response, 'normal: http://xn--fiq228c.tw/')
+
 if __name__ == '__main__':
     unittest.main()
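The new proxy test hinges on IDNA encoding: a non-ASCII hostname is converted to its punycode form before it goes on the wire. The claim in the test's comment can be checked directly, and the second assertion below matches the `xn--e1aybc.xn--p1ai` values that appear in the `escape_url` tests further down:

```python
# IDNA ("punycode") is how non-ASCII hostnames travel over HTTP
assert '中文'.encode('idna') == b'xn--fiq228c'
assert 'тест.рф'.encode('idna') == b'xn--e1aybc.xn--p1ai'
```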
test/test_iqiyi_sdk_interpreter.py (new file, 47 lines)

@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+from __future__ import unicode_literals
+
+# Allow direct execution
+import os
+import sys
+import unittest
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from test.helper import FakeYDL
+from youtube_dl.extractor import IqiyiIE
+
+
+class IqiyiIEWithCredentials(IqiyiIE):
+    def _get_login_info(self):
+        return 'foo', 'bar'
+
+
+class WarningLogger(object):
+    def __init__(self):
+        self.messages = []
+
+    def warning(self, msg):
+        self.messages.append(msg)
+
+    def debug(self, msg):
+        pass
+
+    def error(self, msg):
+        pass
+
+
+class TestIqiyiSDKInterpreter(unittest.TestCase):
+    def test_iqiyi_sdk_interpreter(self):
+        '''
+        Test the functionality of IqiyiSDKInterpreter by trying to log in
+
+        If `sign` is incorrect, /validate call throws an HTTP 556 error
+        '''
+        logger = WarningLogger()
+        ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
+        ie._login()
+        self.assertTrue('unable to log in:' in logger.messages[0])
+
+if __name__ == '__main__':
+    unittest.main()
test/test_utils.py

@@ -18,6 +18,7 @@ import xml.etree.ElementTree
 from youtube_dl.utils import (
     age_restricted,
     args_to_str,
+    encode_base_n,
     clean_html,
     DateRange,
     detect_exe_version,
@@ -27,6 +28,7 @@ from youtube_dl.utils import (
     encodeFilename,
     escape_rfc3986,
     escape_url,
+    extract_attributes,
     ExtractorError,
     find_xpath_attr,
     fix_xml_ampersands,
@@ -35,10 +37,12 @@ from youtube_dl.utils import (
     is_html,
     js_to_json,
     limit_length,
+    ohdave_rsa_encrypt,
     OnDemandPagedList,
     orderedSet,
     parse_duration,
     parse_filesize,
+    parse_count,
     parse_iso8601,
     read_batch_urls,
     sanitize_filename,
@@ -59,6 +63,7 @@ from youtube_dl.utils import (
     lowercase_escape,
     url_basename,
     urlencode_postdata,
+    update_url_query,
     version_tuple,
     xpath_with_ns,
     xpath_element,
@@ -73,7 +78,10 @@ from youtube_dl.utils import (
     cli_bool_option,
 )
 from youtube_dl.compat import (
+    compat_chr,
     compat_etree_fromstring,
+    compat_urlparse,
+    compat_parse_qs,
 )
 
 
@@ -248,6 +256,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(
             unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
             '20150202')
+        self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
         self.assertEqual(unified_strdate('25-09-2014'), '20140925')
         self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
@@ -451,6 +460,40 @@
         data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
         self.assertTrue(isinstance(data, bytes))
 
+    def test_update_url_query(self):
+        def query_dict(url):
+            return compat_parse_qs(compat_urlparse.urlparse(url).query)
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
+            query_dict('http://example.com/path?quality=HD&format=mp4'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
+            query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'fields': 'id,formats,subtitles'})),
+            query_dict('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
+            query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path?manifest=f4m', {'manifest': []})),
+            query_dict('http://example.com/path'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
+            query_dict('http://example.com/path?system=LINUX'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'fields': b'id,formats,subtitles'})),
+            query_dict('http://example.com/path?fields=id,formats,subtitles'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'width': 1080, 'height': 720})),
+            query_dict('http://example.com/path?width=1080&height=720'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'bitrate': 5020.43})),
+            query_dict('http://example.com/path?bitrate=5020.43'))
+        self.assertEqual(query_dict(update_url_query(
+            'http://example.com/path', {'test': '第二行тест'})),
+            query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
+
     def test_dict_get(self):
         FALSE_VALUES = {
             'none': None,
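The tests above pin down the merge semantics of the new `update_url_query` helper: sequences fan out into repeated parameters, scalars are stringified, bytes pass through, and an empty list removes a parameter. A rough Python 3 equivalent of that behavior (a sketch, not the library's actual implementation, which also covers Python 2 through its compat layer):

```python
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse

def update_url_query(url, query):
    # Merge `query` into the URL's existing query string. urlencode with
    # doseq=True fans out lists/tuples, keeps bytes, stringifies numbers,
    # and silently drops empty sequences (which is how a key is removed).
    parts = urlparse(url)
    qs = parse_qs(parts.query)
    qs.update(query)
    return urlunparse(parts._replace(query=urlencode(qs, doseq=True)))

print(update_url_query('http://example.com/path?manifest=f4m', {'manifest': []}))
# http://example.com/path
```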
@ -534,11 +577,11 @@ class TestUtil(unittest.TestCase):
|
||||
)
|
||||
self.assertEqual(
|
||||
escape_url('http://тест.рф/фрагмент'),
|
||||
'http://тест.рф/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
||||
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
|
||||
)
|
||||
self.assertEqual(
|
||||
escape_url('http://тест.рф/абв?абв=абв#абв'),
|
||||
'http://тест.рф/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
||||
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
|
||||
)
|
||||
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||
|
||||
@ -588,6 +631,44 @@ class TestUtil(unittest.TestCase):
|
||||
on = js_to_json('{"abc": "def",}')
|
||||
self.assertEqual(json.loads(on), {'abc': 'def'})
|
||||
|
||||
def test_extract_attributes(self):
|
||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
|
||||
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
|
||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'}) # XML
|
||||
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
|
||||
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
|
||||
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
|
||||
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
|
||||
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
|
||||
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
|
||||
self.assertEqual(extract_attributes('<e x >'), {'x': None})
|
||||
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
|
||||
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
|
||||
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
|
||||
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
|
||||
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
|
||||
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
|
||||
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
|
||||
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
|
||||
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
|
||||
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
|
||||
# "Narrow" Python builds don't support unicode code points outside BMP.
|
||||
try:
|
||||
compat_chr(0x10000)
|
||||
supports_outside_bmp = True
|
||||
except ValueError:
|
||||
supports_outside_bmp = False
|
||||
if supports_outside_bmp:
|
||||
self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
|
||||
|
||||
def test_clean_html(self):
|
||||
self.assertEqual(clean_html('a:\nb'), 'a: b')
|
||||
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
|
||||
@ -613,6 +694,17 @@ class TestUtil(unittest.TestCase):
|
||||
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
|
||||
self.assertEqual(parse_filesize('1,24 KB'), 1240)
|
||||
|
||||
def test_parse_count(self):
|
||||
self.assertEqual(parse_count(None), None)
|
||||
self.assertEqual(parse_count(''), None)
|
||||
self.assertEqual(parse_count('0'), 0)
|
||||
self.assertEqual(parse_count('1000'), 1000)
|
||||
self.assertEqual(parse_count('1.000'), 1000)
|
||||
self.assertEqual(parse_count('1.1k'), 1100)
|
||||
self.assertEqual(parse_count('1.1kk'), 1100000)
|
||||
self.assertEqual(parse_count('1.1kk '), 1100000)
|
||||
self.assertEqual(parse_count('1.1kk views'), 1100000)
|
||||
|
||||
     def test_version_tuple(self):
         self.assertEqual(version_tuple('1'), (1,))
         self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
@@ -792,6 +884,24 @@ The first line
             {'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
             ['--check-certificate=true'])
 
+    def test_ohdave_rsa_encrypt(self):
+        N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
+        e = 65537
+
+        self.assertEqual(
+            ohdave_rsa_encrypt(b'aa111222', e, N),
+            '726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
+
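ohdave_rsa_encrypt is textbook (unpadded) RSA as used by OHDave's JavaScript crypto: the plaintext bytes become one big integer, which is raised to e modulo N, and the result is returned as lowercase hex. A sketch of that arithmetic, assuming (as an illustration) that the input bytes are interpreted little-endian, i.e. reversed before conversion:

import binascii

def ohdave_rsa_encrypt_sketch(data, exponent, modulus):
    # int(hexlify(...), 16) builds the RSA message integer;
    # pow(m, e, N) is Python's built-in modular exponentiation.
    payload = int(binascii.hexlify(data[::-1]), 16)
    return '%x' % pow(payload, exponent, modulus)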
+    def test_encode_base_n(self):
+        self.assertEqual(encode_base_n(0, 30), '0')
+        self.assertEqual(encode_base_n(80, 30), '2k')
+
+        custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
+        self.assertEqual(encode_base_n(0, 30, custom_table), '9')
+        self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
+
+        self.assertRaises(ValueError, encode_base_n, 0, 70)
+        self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
 
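encode_base_n is ordinary base conversion against a digit table; the two ValueError assertions show that the base may not exceed the table length (the default table holds 62 digits, the custom one above 36). A sketch consistent with all six assertions (illustrative, not the shipped code):

def encode_base_n_sketch(num, n, table=None):
    full_table = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if table is None:
        table = full_table[:n]
    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))
    if num == 0:
        return table[0]
    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret

assert encode_base_n_sketch(80, 30) == '2k'
assert encode_base_n_sketch(80, 30, '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA') == '7P'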
 if __name__ == '__main__':
     unittest.main()

tox.ini (2 changed lines)
@@ -8,6 +8,6 @@ deps =
 passenv = HOME
 defaultargs = test --exclude test_download.py --exclude test_age_restriction.py
     --exclude test_subtitles.py --exclude test_write_annotations.py
-    --exclude test_youtube_lists.py
+    --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py
 commands = nosetests --verbose {posargs:{[testenv]defaultargs}}  # --with-coverage --cover-package=youtube_dl --cover-html
 # test.test_download:TestDownload.test_NowVideo
youtube_dl/YoutubeDL.py
@@ -24,9 +24,6 @@ import time
 import tokenize
 import traceback
 
-if os.name == 'nt':
-    import ctypes
-
 from .compat import (
     compat_basestring,
     compat_cookiejar,
@@ -34,6 +31,7 @@ from .compat import (
     compat_get_terminal_size,
     compat_http_client,
     compat_kwargs,
+    compat_os_name,
     compat_str,
     compat_tokenize_tokenize,
     compat_urllib_error,
@@ -41,6 +39,8 @@ from .compat import (
     compat_urllib_request_DataHandler,
 )
 from .utils import (
+    age_restricted,
+    args_to_str,
     ContentTooShortError,
     date_from_str,
     DateRange,
@@ -60,13 +60,16 @@ from .utils import (
     PagedList,
     parse_filesize,
     PerRequestProxyHandler,
-    PostProcessingError,
     platform_name,
+    PostProcessingError,
     preferredencoding,
+    prepend_extension,
     render_table,
+    replace_extension,
     SameFileError,
     sanitize_filename,
     sanitize_path,
+    sanitize_url,
     sanitized_Request,
     std_headers,
     subtitles_filename,
@@ -77,16 +80,13 @@ from .utils import (
     write_string,
     YoutubeDLCookieProcessor,
     YoutubeDLHandler,
-    prepend_extension,
-    replace_extension,
-    args_to_str,
-    age_restricted,
 )
 from .cache import Cache
 from .extractor import get_info_extractor, gen_extractors
 from .downloader import get_suitable_downloader
 from .downloader.rtmp import rtmpdump_version
 from .postprocessor import (
+    FFmpegFixupM3u8PP,
     FFmpegFixupM4aPP,
     FFmpegFixupStretchedPP,
     FFmpegMergerPP,
@@ -95,6 +95,9 @@ from .postprocessor import (
 )
 from .version import __version__
 
+if compat_os_name == 'nt':
+    import ctypes
+
 
 class YoutubeDL(object):
     """YoutubeDL class.
@@ -450,7 +453,7 @@ class YoutubeDL(object):
     def to_console_title(self, message):
         if not self.params.get('consoletitle', False):
             return
-        if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
+        if compat_os_name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
             # c_wchar_p() might not be necessary if `message` is
             # already of type unicode()
             ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
@@ -521,7 +524,7 @@ class YoutubeDL(object):
         else:
             if self.params.get('no_warnings'):
                 return
-            if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt':
+            if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
                 _msg_header = '\033[0;33mWARNING:\033[0m'
             else:
                 _msg_header = 'WARNING:'
@@ -533,7 +536,7 @@ class YoutubeDL(object):
         Do the same as trouble, but prefixes the message with 'ERROR:', colored
         in red if stderr is a tty file.
         '''
-        if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt':
+        if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
             _msg_header = '\033[0;31mERROR:\033[0m'
         else:
             _msg_header = 'ERROR:'
@@ -566,7 +569,7 @@ class YoutubeDL(object):
             elif template_dict.get('height'):
                 template_dict['resolution'] = '%sp' % template_dict['height']
             elif template_dict.get('width'):
-                template_dict['resolution'] = '?x%d' % template_dict['width']
+                template_dict['resolution'] = '%dx?' % template_dict['width']
 
         sanitize = lambda k, v: sanitize_filename(
             compat_str(v),
@@ -605,12 +608,12 @@ class YoutubeDL(object):
         if rejecttitle:
             if re.search(rejecttitle, title, re.IGNORECASE):
                 return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
-        date = info_dict.get('upload_date', None)
+        date = info_dict.get('upload_date')
         if date is not None:
             dateRange = self.params.get('daterange', DateRange())
             if date not in dateRange:
                 return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
-        view_count = info_dict.get('view_count', None)
+        view_count = info_dict.get('view_count')
         if view_count is not None:
             min_views = self.params.get('min_views')
             if min_views is not None and view_count < min_views:
@@ -747,18 +750,18 @@ class YoutubeDL(object):
                 new_result, download=download, extra_info=extra_info)
         elif result_type == 'playlist' or result_type == 'multi_video':
             # We process each entry in the playlist
-            playlist = ie_result.get('title', None) or ie_result.get('id', None)
+            playlist = ie_result.get('title') or ie_result.get('id')
             self.to_screen('[download] Downloading playlist: %s' % playlist)
 
             playlist_results = []
 
             playliststart = self.params.get('playliststart', 1) - 1
-            playlistend = self.params.get('playlistend', None)
+            playlistend = self.params.get('playlistend')
             # For backwards compatibility, interpret -1 as whole list
             if playlistend == -1:
                 playlistend = None
 
-            playlistitems_str = self.params.get('playlist_items', None)
+            playlistitems_str = self.params.get('playlist_items')
             playlistitems = None
             if playlistitems_str is not None:
                 def iter_playlistitems(format):
@@ -782,7 +785,7 @@ class YoutubeDL(object):
                 entries = ie_entries[playliststart:playlistend]
                 n_entries = len(entries)
                 self.to_screen(
-                    "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+                    '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
                     (ie_result['extractor'], playlist, n_all_entries, n_entries))
             elif isinstance(ie_entries, PagedList):
                 if playlistitems:
@@ -796,7 +799,7 @@ class YoutubeDL(object):
                         playliststart, playlistend)
                 n_entries = len(entries)
                 self.to_screen(
-                    "[%s] playlist %s: Downloading %d videos" %
+                    '[%s] playlist %s: Downloading %d videos' %
                     (ie_result['extractor'], playlist, n_entries))
             else:  # iterable
                 if playlistitems:
@@ -807,7 +810,7 @@ class YoutubeDL(object):
                         ie_entries, playliststart, playlistend))
                 n_entries = len(entries)
                 self.to_screen(
-                    "[%s] playlist %s: Downloading %d videos" %
+                    '[%s] playlist %s: Downloading %d videos' %
                     (ie_result['extractor'], playlist, n_entries))
 
             if self.params.get('playlistreverse', False):
@@ -903,7 +906,7 @@ class YoutubeDL(object):
             '*=': lambda attr, value: value in attr,
         }
         str_operator_rex = re.compile(r'''(?x)
-            \s*(?P<key>ext|acodec|vcodec|container|protocol)
+            \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
             \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
             \s*(?P<value>[a-zA-Z0-9._-]+)
             \s*$
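With format_id added to the key alternation, string format filters such as -f "best[format_id*=hd]" can now match on the format id. A standalone check of the widened regex (the operator list mirrors the surrounding code; the snippet itself is illustrative):

import re

STR_OPERATORS = ['=', '^=', '$=', '*=']
str_operator_rex = re.compile(r'''(?x)
    \s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
    \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
    \s*(?P<value>[a-zA-Z0-9._-]+)
    \s*$
    ''' % '|'.join(map(re.escape, STR_OPERATORS)))

m = str_operator_rex.search('format_id*=hd')
assert m.group('key') == 'format_id' and m.group('op') == '*=' and m.group('value') == 'hd'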
@@ -1227,11 +1230,16 @@ class YoutubeDL(object):
             t.get('preference'), t.get('width'), t.get('height'),
             t.get('id'), t.get('url')))
         for i, t in enumerate(thumbnails):
+            t['url'] = sanitize_url(t['url'])
             if t.get('width') and t.get('height'):
                 t['resolution'] = '%dx%d' % (t['width'], t['height'])
             if t.get('id') is None:
                 t['id'] = '%d' % i
 
+        if self.params.get('list_thumbnails'):
+            self.list_thumbnails(info_dict)
+            return
+
         if thumbnails and 'thumbnail' not in info_dict:
             info_dict['thumbnail'] = thumbnails[-1]['url']
 
@@ -1257,6 +1265,7 @@ class YoutubeDL(object):
         if subtitles:
             for _, subtitle in subtitles.items():
                 for subtitle_format in subtitle:
+                    subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                     if 'ext' not in subtitle_format:
                         subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
 
@@ -1286,6 +1295,8 @@ class YoutubeDL(object):
             if 'url' not in format:
                 raise ExtractorError('Missing "url" key in result (index %d)' % i)
 
+            format['url'] = sanitize_url(format['url'])
+
             if format.get('format_id') is None:
                 format['format_id'] = compat_str(i)
             else:
@@ -1333,9 +1344,6 @@ class YoutubeDL(object):
         if self.params.get('listformats'):
             self.list_formats(info_dict)
             return
-        if self.params.get('list_thumbnails'):
-            self.list_thumbnails(info_dict)
-            return
 
         req_format = self.params.get('format')
         if req_format is None:
@@ -1631,12 +1639,14 @@ class YoutubeDL(object):
                 self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
                 return
 
-        if success:
+        if success and filename != '-':
             # Fixup content
             fixup_policy = self.params.get('fixup')
             if fixup_policy is None:
                 fixup_policy = 'detect_or_warn'
 
+            INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
+
             stretched_ratio = info_dict.get('stretched_ratio')
             if stretched_ratio is not None and stretched_ratio != 1:
                 if fixup_policy == 'warn':
@@ -1649,15 +1659,18 @@ class YoutubeDL(object):
                         info_dict['__postprocessors'].append(stretched_pp)
                     else:
                         self.report_warning(
-                            '%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
-                                info_dict['id'], stretched_ratio))
+                            '%s: Non-uniform pixel ratio (%s). %s'
+                            % (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
                 else:
                     assert fixup_policy in ('ignore', 'never')
 
-            if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
+            if (info_dict.get('requested_formats') is None and
+                    info_dict.get('container') == 'm4a_dash'):
                 if fixup_policy == 'warn':
-                    self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
-                        info_dict['id']))
+                    self.report_warning(
+                        '%s: writing DASH m4a. '
+                        'Only some players support this container.'
+                        % info_dict['id'])
                 elif fixup_policy == 'detect_or_warn':
                     fixup_pp = FFmpegFixupM4aPP(self)
                     if fixup_pp.available:
@@ -1665,8 +1678,27 @@ class YoutubeDL(object):
                         info_dict['__postprocessors'].append(fixup_pp)
                     else:
                         self.report_warning(
-                            '%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
-                                info_dict['id']))
+                            '%s: writing DASH m4a. '
+                            'Only some players support this container. %s'
+                            % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
                 else:
                     assert fixup_policy in ('ignore', 'never')
+
+            if (info_dict.get('protocol') == 'm3u8_native' or
+                    info_dict.get('protocol') == 'm3u8' and
+                    self.params.get('hls_prefer_native')):
+                if fixup_policy == 'warn':
+                    self.report_warning('%s: malformated aac bitstream.' % (
+                        info_dict['id']))
+                elif fixup_policy == 'detect_or_warn':
+                    fixup_pp = FFmpegFixupM3u8PP(self)
+                    if fixup_pp.available:
+                        info_dict.setdefault('__postprocessors', [])
+                        info_dict['__postprocessors'].append(fixup_pp)
+                    else:
+                        self.report_warning(
+                            '%s: malformated aac bitstream. %s'
+                            % (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
+                else:
+                    assert fixup_policy in ('ignore', 'never')
 
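One subtlety in the new m3u8 fixup guard: Python binds and tighter than or, so the condition reads as

# (protocol == 'm3u8_native')
#     or ((protocol == 'm3u8') and self.params.get('hls_prefer_native'))

that is, files produced by the native HLS downloader are always candidates for the AAC bitstream fixup, while ffmpeg-driven m3u8 downloads are excluded.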
@@ -1809,7 +1841,7 @@ class YoutubeDL(object):
         if fdict.get('language'):
             if res:
                 res += ' '
-            res += '[%s]' % fdict['language']
+            res += '[%s] ' % fdict['language']
         if fdict.get('format_note') is not None:
             res += fdict['format_note'] + ' '
         if fdict.get('tbr') is not None:
@@ -1830,7 +1862,9 @@ class YoutubeDL(object):
             if fdict.get('vbr') is not None:
                 res += '%4dk' % fdict['vbr']
         if fdict.get('fps') is not None:
-            res += ', %sfps' % fdict['fps']
+            if res:
+                res += ', '
+            res += '%sfps' % fdict['fps']
         if fdict.get('acodec') is not None:
             if res:
                 res += ', '
@@ -1873,13 +1907,8 @@ class YoutubeDL(object):
     def list_thumbnails(self, info_dict):
         thumbnails = info_dict.get('thumbnails')
         if not thumbnails:
-            tn_url = info_dict.get('thumbnail')
-            if tn_url:
-                thumbnails = [{'id': '0', 'url': tn_url}]
-            else:
-                self.to_screen(
-                    '[info] No thumbnails present for %s' % info_dict['id'])
-                return
+            self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
+            return
 
         self.to_screen(
             '[info] Thumbnails for %s:' % info_dict['id'])
youtube_dl/__init__.py
@@ -144,14 +144,20 @@ def _real_main(argv=None):
         if numeric_limit is None:
             parser.error('invalid max_filesize specified')
         opts.max_filesize = numeric_limit
-    if opts.retries is not None:
-        if opts.retries in ('inf', 'infinite'):
-            opts_retries = float('inf')
+
+    def parse_retries(retries):
+        if retries in ('inf', 'infinite'):
+            parsed_retries = float('inf')
         else:
             try:
-                opts_retries = int(opts.retries)
+                parsed_retries = int(retries)
             except (TypeError, ValueError):
                 parser.error('invalid retry count specified')
+        return parsed_retries
+    if opts.retries is not None:
+        opts.retries = parse_retries(opts.retries)
+    if opts.fragment_retries is not None:
+        opts.fragment_retries = parse_retries(opts.fragment_retries)
     if opts.buffersize is not None:
         numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
         if numeric_buffersize is None:
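parse_retries gives --retries and the new --fragment-retries identical semantics. Hypothetically called on its own (it is nested inside _real_main, so these calls are not importable as-is):

# parse_retries('inf')      -> float('inf')
# parse_retries('infinite') -> float('inf')
# parse_retries('10')       -> 10
# parse_retries('ten')      -> parser.error('invalid retry count specified')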
@@ -299,7 +305,8 @@ def _real_main(argv=None):
         'force_generic_extractor': opts.force_generic_extractor,
         'ratelimit': opts.ratelimit,
         'nooverwrites': opts.nooverwrites,
-        'retries': opts_retries,
+        'retries': opts.retries,
+        'fragment_retries': opts.fragment_retries,
         'buffersize': opts.buffersize,
         'noresizebuffer': opts.noresizebuffer,
         'continuedl': opts.continue_dl,
@@ -355,6 +362,7 @@ def _real_main(argv=None):
         'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
         'encoding': opts.encoding,
         'extract_flat': opts.extract_flat,
+        'mark_watched': opts.mark_watched,
         'merge_output_format': opts.merge_output_format,
         'postprocessors': postprocessors,
         'fixup': opts.fixup,
youtube_dl/__main__.py
@@ -7,7 +7,7 @@ from __future__ import unicode_literals
 
 import sys
 
-if __package__ is None and not hasattr(sys, "frozen"):
+if __package__ is None and not hasattr(sys, 'frozen'):
     # direct call of __main__.py
     import os.path
     path = os.path.realpath(os.path.abspath(__file__))
youtube_dl/aes.py
@@ -161,7 +161,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
     nonce = data[:NONCE_LENGTH_BYTES]
     cipher = data[NONCE_LENGTH_BYTES:]
 
-    class Counter:
+    class Counter(object):
         __value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
 
         def next_value(self):
youtube_dl/compat.py
@@ -77,6 +77,11 @@ try:
 except ImportError:  # Python 2
     from urllib import urlretrieve as compat_urlretrieve
 
+try:
+    from html.parser import HTMLParser as compat_HTMLParser
+except ImportError:  # Python 2
+    from HTMLParser import HTMLParser as compat_HTMLParser
+
 
 try:
     from subprocess import DEVNULL
@@ -164,6 +169,31 @@ except ImportError:  # Python 2
         string = string.replace('+', ' ')
         return compat_urllib_parse_unquote(string, encoding, errors)
 
+try:
+    from urllib.parse import urlencode as compat_urllib_parse_urlencode
+except ImportError:  # Python 2
+    # Python 2 will choke in urlencode on mixture of byte and unicode strings.
+    # Possible solutions are to either port it from python 3 with all
+    # the friends or manually ensure input query contains only byte strings.
+    # We will stick with latter thus recursively encoding the whole query.
+    def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
+        def encode_elem(e):
+            if isinstance(e, dict):
+                e = encode_dict(e)
+            elif isinstance(e, (list, tuple,)):
+                e = encode_list(e)
+            elif isinstance(e, compat_str):
+                e = e.encode(encoding)
+            return e
+
+        def encode_dict(d):
+            return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
+
+        def encode_list(l):
+            return [encode_elem(e) for e in l]
+
+        return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
+
 
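For illustration, the failure mode the shim avoids (a hypothetical mixed query):

# On Python 2, urllib.urlencode({'q': u'Fáilte'}) raises UnicodeEncodeError,
# because quote_plus() coerces the unicode value with the ASCII codec.
# The recursive encode_elem() pass converts every unicode string to UTF-8
# bytes first, so this succeeds on both interpreters:
print(compat_urllib_parse_urlencode({'q': 'Fáilte', 'page': 1}))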
 try:
     from urllib.request import DataHandler as compat_urllib_request_DataHandler
 except ImportError:  # Python < 3.4
@@ -181,20 +211,20 @@ except ImportError:  # Python < 3.4
         #  parameter := attribute "=" value
         url = req.get_full_url()
 
-        scheme, data = url.split(":", 1)
-        mediatype, data = data.split(",", 1)
+        scheme, data = url.split(':', 1)
+        mediatype, data = data.split(',', 1)
 
         # even base64 encoded data URLs might be quoted so unquote in any case:
         data = compat_urllib_parse_unquote_to_bytes(data)
-        if mediatype.endswith(";base64"):
+        if mediatype.endswith(';base64'):
             data = binascii.a2b_base64(data)
             mediatype = mediatype[:-7]
 
         if not mediatype:
-            mediatype = "text/plain;charset=US-ASCII"
+            mediatype = 'text/plain;charset=US-ASCII'
 
         headers = email.message_from_string(
-            "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+            'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
 
         return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
 
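A worked example of the parsing rules above, using a hypothetical data URL:

import binascii

url = 'data:text/plain;base64,SGVsbG8='
scheme, data = url.split(':', 1)       # 'data', 'text/plain;base64,SGVsbG8='
mediatype, data = data.split(',', 1)   # 'text/plain;base64', 'SGVsbG8='
assert mediatype.endswith(';base64')
assert binascii.a2b_base64(data) == b'Hello'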
@@ -251,6 +281,16 @@ else:
                 el.text = el.text.decode('utf-8')
         return doc
 
+if sys.version_info < (2, 7):
+    # Here comes the crazy part: In 2.6, if the xpath is a unicode,
+    # .//node does not match if a node is a direct child of . !
+    def compat_xpath(xpath):
+        if isinstance(xpath, compat_str):
+            xpath = xpath.encode('ascii')
+        return xpath
+else:
+    compat_xpath = lambda xpath: xpath
+
 try:
     from urllib.parse import parse_qs as compat_parse_qs
 except ImportError:  # Python 2
@@ -268,7 +308,7 @@ except ImportError:  # Python 2
             nv = name_value.split('=', 1)
             if len(nv) != 2:
                 if strict_parsing:
-                    raise ValueError("bad query field: %r" % (name_value,))
+                    raise ValueError('bad query field: %r' % (name_value,))
                 # Handle case of a control-name with no equal sign
                 if keep_blank_values:
                     nv.append('')
@@ -326,6 +366,9 @@ def compat_ord(c):
         return ord(c)
 
 
+compat_os_name = os._name if os.name == 'java' else os.name
+
+
 if sys.version_info >= (3, 0):
     compat_getenv = os.getenv
     compat_expanduser = os.path.expanduser
@@ -346,7 +389,7 @@ else:
     # The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
     # for different platforms with correct environment variables decoding.
 
-    if os.name == 'posix':
+    if compat_os_name == 'posix':
         def compat_expanduser(path):
             """Expand ~ and ~user constructions.  If user or $HOME is unknown,
             do nothing."""
@@ -370,7 +413,7 @@ else:
                 userhome = pwent.pw_dir
             userhome = userhome.rstrip('/')
             return (userhome + path[i:]) or '/'
-    elif os.name == 'nt' or os.name == 'ce':
+    elif compat_os_name == 'nt' or compat_os_name == 'ce':
         def compat_expanduser(path):
             """Expand ~ and ~user constructs.
 
@@ -466,7 +509,7 @@ if sys.version_info < (2, 7):
         if err is not None:
             raise err
         else:
-            raise socket.error("getaddrinfo returns an empty list")
+            raise socket.error('getaddrinfo returns an empty list')
 else:
     compat_socket_create_connection = socket.create_connection
 
@@ -540,6 +583,7 @@ else:
     from tokenize import generate_tokens as compat_tokenize_tokenize
 
 __all__ = [
+    'compat_HTMLParser',
     'compat_HTTPError',
     'compat_basestring',
     'compat_chr',
@@ -556,6 +600,7 @@ __all__ = [
     'compat_itertools_count',
     'compat_kwargs',
     'compat_ord',
+    'compat_os_name',
     'compat_parse_qs',
     'compat_print',
     'compat_shlex_split',
@@ -568,6 +613,7 @@ __all__ = [
     'compat_urllib_parse_unquote',
     'compat_urllib_parse_unquote_plus',
    'compat_urllib_parse_unquote_to_bytes',
+    'compat_urllib_parse_urlencode',
     'compat_urllib_parse_urlparse',
     'compat_urllib_request',
     'compat_urllib_request_DataHandler',
@@ -575,6 +621,7 @@ __all__ = [
     'compat_urlparse',
     'compat_urlretrieve',
     'compat_xml_parse_error',
+    'compat_xpath',
     'shlex_quote',
     'subprocess_check_output',
     'workaround_optparse_bug9161',
youtube_dl/downloader/__init__.py
@@ -1,14 +1,16 @@
 from __future__ import unicode_literals
 
 from .common import FileDownloader
-from .external import get_external_downloader
 from .f4m import F4mFD
 from .hls import HlsFD
-from .hls import NativeHlsFD
 from .http import HttpFD
-from .rtsp import RtspFD
 from .rtmp import RtmpFD
 from .dash import DashSegmentsFD
+from .rtsp import RtspFD
+from .external import (
+    get_external_downloader,
+    FFmpegFD,
+)
 
 from ..utils import (
     determine_protocol,
@@ -16,8 +18,8 @@ from ..utils import (
 
 PROTOCOL_MAP = {
     'rtmp': RtmpFD,
-    'm3u8_native': NativeHlsFD,
-    'm3u8': HlsFD,
+    'm3u8_native': HlsFD,
+    'm3u8': FFmpegFD,
     'mms': RtspFD,
     'rtsp': RtspFD,
     'f4m': F4mFD,
@@ -30,14 +32,17 @@ def get_suitable_downloader(info_dict, params={}):
     protocol = determine_protocol(info_dict)
     info_dict['protocol'] = protocol
 
+    # if (info_dict.get('start_time') or info_dict.get('end_time')) and not info_dict.get('requested_formats') and FFmpegFD.can_download(info_dict):
+    #     return FFmpegFD
+
     external_downloader = params.get('external_downloader')
     if external_downloader is not None:
         ed = get_external_downloader(external_downloader)
-        if ed.supports(info_dict):
+        if ed.can_download(info_dict):
             return ed
 
     if protocol == 'm3u8' and params.get('hls_prefer_native'):
-        return NativeHlsFD
+        return HlsFD
 
     return PROTOCOL_MAP.get(protocol, HttpFD)

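Net effect of the remapping, on hypothetical inputs (HlsFD is now the native downloader, FFmpegFD the external one):

# m3u8 with hls_prefer_native set -> HlsFD (native)
fd = get_suitable_downloader(
    {'url': 'http://example.com/v.m3u8', 'protocol': 'm3u8'},
    {'hls_prefer_native': True})
# m3u8 otherwise                  -> FFmpegFD via PROTOCOL_MAP
# m3u8_native                     -> HlsFD
# anything unmapped (http, ...)   -> HttpFD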
youtube_dl/downloader/common.py
@@ -5,6 +5,7 @@ import re
 import sys
 import time
 
+from ..compat import compat_os_name
 from ..utils import (
     encodeFilename,
     error_to_compat_str,
@@ -114,6 +115,10 @@ class FileDownloader(object):
             return '%10s' % '---b/s'
         return '%10s' % ('%s/s' % format_bytes(speed))
 
+    @staticmethod
+    def format_retries(retries):
+        return 'inf' if retries == float('inf') else '%.0f' % retries
+
     @staticmethod
     def best_block_size(elapsed_time, bytes):
         new_min = max(bytes / 2.0, 1.0)
@@ -157,7 +162,7 @@ class FileDownloader(object):
 
     def slow_down(self, start_time, now, byte_counter):
         """Sleep if the download speed is over the rate limit."""
-        rate_limit = self.params.get('ratelimit', None)
+        rate_limit = self.params.get('ratelimit')
         if rate_limit is None or byte_counter == 0:
             return
         if now is None:
@@ -219,7 +224,7 @@ class FileDownloader(object):
         if self.params.get('progress_with_newline', False):
             self.to_screen(fullmsg)
         else:
-            if os.name == 'nt':
+            if compat_os_name == 'nt':
                 prev_len = getattr(self, '_report_progress_prev_line_length',
                                    0)
                 if prev_len > len(fullmsg):
@@ -296,7 +301,9 @@ class FileDownloader(object):
 
     def report_retry(self, count, retries):
         """Report retry in case of HTTP error 5xx"""
-        self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %.0f)...' % (count, retries))
+        self.to_screen(
+            '[download] Got server HTTP error. Retrying (attempt %d of %s)...'
+            % (count, self.format_retries(retries)))
 
     def report_file_already_downloaded(self, file_name):
         """Report file has already been fully downloaded."""
youtube_dl/downloader/dash.py
@@ -1,67 +1,81 @@
 from __future__ import unicode_literals
 
+import os
 import re
 
-from .common import FileDownloader
-from ..utils import sanitized_Request
+from .fragment import FragmentFD
+from ..compat import compat_urllib_error
+from ..utils import (
+    sanitize_open,
+    encodeFilename,
+)
 
 
-class DashSegmentsFD(FileDownloader):
+class DashSegmentsFD(FragmentFD):
     """
     Download segments in a DASH manifest
     """
+
+    FD_NAME = 'dashsegments'
+
     def real_download(self, filename, info_dict):
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
         base_url = info_dict['url']
-        segment_urls = info_dict['segment_urls']
+        segment_urls = [info_dict['segment_urls'][0]] if self.params.get('test', False) else info_dict['segment_urls']
+        initialization_url = info_dict.get('initialization_url')
 
-        is_test = self.params.get('test', False)
-        remaining_bytes = self._TEST_FILE_SIZE if is_test else None
-        byte_counter = 0
+        ctx = {
+            'filename': filename,
+            'total_frags': len(segment_urls) + (1 if initialization_url else 0),
+        }
 
-        def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
-            self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
-            req = sanitized_Request(target_url)
-            if remaining_bytes is not None:
-                req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
-
-            data = self.ydl.urlopen(req).read()
-
-            if remaining_bytes is not None:
-                data = data[:remaining_bytes]
-
-            outf.write(data)
-            return len(data)
+        self._prepare_and_start_frag_download(ctx)
 
         def combine_url(base_url, target_url):
             if re.match(r'^https?://', target_url):
                 return target_url
             return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
 
-        with open(tmpfilename, 'wb') as outf:
-            if info_dict.get('initialization_url'):
-                append_url_to_file(
-                    outf, combine_url(base_url, info_dict['initialization_url']),
-                    'initialization segment')
-            for i, segment_url in enumerate(segment_urls):
-                segment_len = append_url_to_file(
-                    outf, combine_url(base_url, segment_url),
-                    'segment %d / %d' % (i + 1, len(segment_urls)),
-                    remaining_bytes)
-                byte_counter += segment_len
-                if remaining_bytes is not None:
-                    remaining_bytes -= segment_len
-                    if remaining_bytes <= 0:
-                        break
+        segments_filenames = []
 
-        self.try_rename(tmpfilename, filename)
+        fragment_retries = self.params.get('fragment_retries', 0)
 
-        self._hook_progress({
-            'downloaded_bytes': byte_counter,
-            'total_bytes': byte_counter,
-            'filename': filename,
-            'status': 'finished',
-        })
+        def append_url_to_file(target_url, tmp_filename, segment_name):
+            target_filename = '%s-%s' % (tmp_filename, segment_name)
+            count = 0
+            while count <= fragment_retries:
+                try:
+                    success = ctx['dl'].download(target_filename, {'url': combine_url(base_url, target_url)})
+                    if not success:
+                        return False
+                    down, target_sanitized = sanitize_open(target_filename, 'rb')
+                    ctx['dest_stream'].write(down.read())
+                    down.close()
+                    segments_filenames.append(target_sanitized)
+                    break
+                except (compat_urllib_error.HTTPError, ) as err:
+                    # YouTube may often return 404 HTTP error for a fragment causing the
+                    # whole download to fail. However if the same fragment is immediately
+                    # retried with the same request data this usually succeeds (1-2 attemps
+                    # is usually enough) thus allowing to download the whole file successfully.
+                    # So, we will retry all fragments that fail with 404 HTTP error for now.
+                    if err.code != 404:
+                        raise
+                    # Retry fragment
+                    count += 1
+                    if count <= fragment_retries:
+                        self.report_retry_fragment(segment_name, count, fragment_retries)
+            if count > fragment_retries:
+                self.report_error('giving up after %s fragment retries' % fragment_retries)
+                return False
+
+        if initialization_url:
+            append_url_to_file(initialization_url, ctx['tmpfilename'], 'Init')
+        for i, segment_url in enumerate(segment_urls):
+            append_url_to_file(segment_url, ctx['tmpfilename'], 'Seg%d' % i)
+
+        self._finish_frag_download(ctx)
+
+        for segment_file in segments_filenames:
+            os.remove(encodeFilename(segment_file))
+
+        return True

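combine_url keeps absolute segment URLs untouched and joins relative ones onto the manifest base with exactly one slash; for example:

# combine_url('http://cdn/base', 'seg-1.m4s')   -> 'http://cdn/base/seg-1.m4s'
# combine_url('http://cdn/base/', 'seg-1.m4s')  -> 'http://cdn/base/seg-1.m4s'
# combine_url('http://cdn/base/', 'https://other/seg.m4s') -> 'https://other/seg.m4s'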
youtube_dl/downloader/external.py
@@ -2,8 +2,11 @@ from __future__ import unicode_literals
 
 import os.path
 import subprocess
+import sys
+import re
 
 from .common import FileDownloader
+from ..postprocessor.ffmpeg import FFmpegPostProcessor, EXT_TO_OUT_FORMATS
 from ..utils import (
     cli_option,
     cli_valueless_option,
@@ -11,6 +14,8 @@ from ..utils import (
     cli_configuration_args,
     encodeFilename,
     encodeArgument,
+    handle_youtubedl_headers,
+    check_executable,
 )
 
 
@@ -45,10 +50,18 @@ class ExternalFD(FileDownloader):
     def exe(self):
         return self.params.get('external_downloader')
 
+    @classmethod
+    def available(cls):
+        return check_executable(cls.get_basename(), [cls.AVAILABLE_OPT])
+
     @classmethod
     def supports(cls, info_dict):
         return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps')
 
+    @classmethod
+    def can_download(cls, info_dict):
+        return cls.available() and cls.supports(info_dict)
+
     def _option(self, command_option, param):
         return cli_option(self.params, command_option, param)
 
@@ -76,6 +89,8 @@ class ExternalFD(FileDownloader):
 
 
 class CurlFD(ExternalFD):
+    AVAILABLE_OPT = '-V'
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '--location', '-o', tmpfilename]
         for key, val in info_dict['http_headers'].items():
@@ -89,6 +104,8 @@ class CurlFD(ExternalFD):
 
 
 class AxelFD(ExternalFD):
+    AVAILABLE_OPT = '-V'
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-o', tmpfilename]
         for key, val in info_dict['http_headers'].items():
@@ -99,6 +116,8 @@ class AxelFD(ExternalFD):
 
 
 class WgetFD(ExternalFD):
+    AVAILABLE_OPT = '--version'
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-O', tmpfilename, '-nv', '--no-cookies']
         for key, val in info_dict['http_headers'].items():
@@ -112,6 +131,8 @@ class WgetFD(ExternalFD):
 
 
 class Aria2cFD(ExternalFD):
+    AVAILABLE_OPT = '-v'
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = [self.exe, '-c']
         cmd += self._configuration_args([
@@ -130,12 +151,112 @@ class Aria2cFD(ExternalFD):
 
 
 class HttpieFD(ExternalFD):
+    @classmethod
+    def available(cls):
+        return check_executable('http', ['--version'])
+
     def _make_cmd(self, tmpfilename, info_dict):
         cmd = ['http', '--download', '--output', tmpfilename, info_dict['url']]
         for key, val in info_dict['http_headers'].items():
             cmd += ['%s:%s' % (key, val)]
         return cmd
+
+
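For a hypothetical download, HttpieFD._make_cmd assembles a command of this shape (header arguments use httpie's Key:value syntax; the paths and header values here are invented):

http --download --output /tmp/video.mp4 http://example.com/video.mp4 User-Agent:Mozilla/5.0 Referer:http://example.com/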
+class FFmpegFD(ExternalFD):
+    @classmethod
+    def supports(cls, info_dict):
+        return info_dict['protocol'] in ('http', 'https', 'ftp', 'ftps', 'm3u8', 'rtsp', 'rtmp', 'mms')
+
+    @classmethod
+    def available(cls):
+        return FFmpegPostProcessor().available
+
+    def _call_downloader(self, tmpfilename, info_dict):
+        url = info_dict['url']
+        ffpp = FFmpegPostProcessor(downloader=self)
+        if not ffpp.available:
+            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
+            return False
+        ffpp.check_version()
+
+        args = [ffpp.executable, '-y']
+
+        args += self._configuration_args()
+
+        # start_time = info_dict.get('start_time') or 0
+        # if start_time:
+        #     args += ['-ss', compat_str(start_time)]
+        # end_time = info_dict.get('end_time')
+        # if end_time:
+        #     args += ['-t', compat_str(end_time - start_time)]
+
+        if info_dict['http_headers'] and re.match(r'^https?://', url):
+            # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
+            # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
+            headers = handle_youtubedl_headers(info_dict['http_headers'])
+            args += [
+                '-headers',
+                ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
+
+        protocol = info_dict.get('protocol')
+
+        if protocol == 'rtmp':
+            player_url = info_dict.get('player_url')
+            page_url = info_dict.get('page_url')
+            app = info_dict.get('app')
+            play_path = info_dict.get('play_path')
+            tc_url = info_dict.get('tc_url')
+            flash_version = info_dict.get('flash_version')
+            live = info_dict.get('rtmp_live', False)
+            if player_url is not None:
+                args += ['-rtmp_swfverify', player_url]
+            if page_url is not None:
+                args += ['-rtmp_pageurl', page_url]
+            if app is not None:
+                args += ['-rtmp_app', app]
+            if play_path is not None:
+                args += ['-rtmp_playpath', play_path]
+            if tc_url is not None:
+                args += ['-rtmp_tcurl', tc_url]
+            if flash_version is not None:
+                args += ['-rtmp_flashver', flash_version]
+            if live:
+                args += ['-rtmp_live', 'live']
+
+        args += ['-i', url, '-c', 'copy']
+        if protocol == 'm3u8':
+            if self.params.get('hls_use_mpegts', False):
+                args += ['-f', 'mpegts']
+            else:
+                args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
+        elif protocol == 'rtmp':
+            args += ['-f', 'flv']
+        else:
+            args += ['-f', EXT_TO_OUT_FORMATS.get(info_dict['ext'], info_dict['ext'])]
+
+        args = [encodeArgument(opt) for opt in args]
+        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
+
+        self._debug_cmd(args)
+
+        proc = subprocess.Popen(args, stdin=subprocess.PIPE)
+        try:
+            retval = proc.wait()
+        except KeyboardInterrupt:
+            # subprocces.run would send the SIGKILL signal to ffmpeg and the
+            # mp4 file couldn't be played, but if we ask ffmpeg to quit it
+            # produces a file that is playable (this is mostly useful for live
+            # streams). Note that Windows is not affected and produces playable
+            # files (see https://github.com/rg3/youtube-dl/issues/8300).
+            if sys.platform != 'win32':
+                proc.communicate(b'q')
+            raise
+        return retval
+
+
+class AVconvFD(FFmpegFD):
+    pass
 
 _BY_NAME = dict(
     (klass.get_basename(), klass)
     for name, klass in globals().items()
youtube_dl/downloader/f4m.py
@@ -223,6 +223,12 @@ def write_metadata_tag(stream, metadata):
         write_unsigned_int(stream, FLV_TAG_HEADER_LEN + len(metadata))
 
 
+def remove_encrypted_media(media):
+    return list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
+                                 'drmAdditionalHeaderSetId' not in e.attrib,
+                       media))
+
+
 def _add_ns(prop):
     return '{http://ns.adobe.com/f4m/1.0}%s' % prop
 
@@ -244,9 +250,7 @@ class F4mFD(FragmentFD):
         # without drmAdditionalHeaderId or drmAdditionalHeaderSetId attribute
         if 'id' not in e.attrib:
             self.report_error('Missing ID in f4m DRM')
-        media = list(filter(lambda e: 'drmAdditionalHeaderId' not in e.attrib and
-                                      'drmAdditionalHeaderSetId' not in e.attrib,
-                            media))
+        media = remove_encrypted_media(media)
         if not media:
             self.report_error('Unsupported DRM')
         return media
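Factored out, remove_encrypted_media can be exercised on a hypothetical manifest; only <media> entries without DRM attributes survive:

import xml.etree.ElementTree as etree

doc = etree.fromstring(
    '<manifest xmlns="http://ns.adobe.com/f4m/1.0">'
    '<media url="clear.f4m"/>'
    '<media url="drm.f4m" drmAdditionalHeaderId="h1"/>'
    '</manifest>')
media = remove_encrypted_media(doc.findall(_add_ns('media')))
assert [e.get('url') for e in media] == ['clear.f4m']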
youtube_dl/downloader/fragment.py
@@ -19,8 +19,17 @@ class HttpQuietDownloader(HttpFD):
 class FragmentFD(FileDownloader):
     """
     A base file downloader class for fragmented media (e.g. f4m/m3u8 manifests).
+
+    Available options:
+
+    fragment_retries:   Number of times to retry a fragment for HTTP error (DASH only)
     """
 
+    def report_retry_fragment(self, fragment_name, count, retries):
+        self.to_screen(
+            '[download] Got server HTTP error. Retrying fragment %s (attempt %d of %s)...'
+            % (fragment_name, count, self.format_retries(retries)))
+
     def _prepare_and_start_frag_download(self, ctx):
         self._prepare_frag_download(ctx)
         self._start_frag_download(ctx)
@@ -38,7 +47,7 @@ class FragmentFD(FileDownloader):
             'continuedl': True,
             'quiet': True,
             'noprogress': True,
-            'ratelimit': self.params.get('ratelimit', None),
+            'ratelimit': self.params.get('ratelimit'),
             'retries': self.params.get('retries', 0),
             'test': self.params.get('test', False),
         }
@@ -99,7 +108,8 @@ class FragmentFD(FileDownloader):
                     state['eta'] = self.calc_eta(
                         start, time_now, estimated_size,
                         state['downloaded_bytes'])
-                state['speed'] = s.get('speed')
+                state['speed'] = s.get('speed') or ctx.get('speed')
+                ctx['speed'] = state['speed']
                 ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
             self._hook_progress(state)
youtube_dl/downloader/hls.py
@@ -1,87 +1,19 @@
 from __future__ import unicode_literals
 
-import os
 import os.path
 import re
-import subprocess
-import sys
 
-from .common import FileDownloader
 from .fragment import FragmentFD
 
 from ..compat import compat_urlparse
-from ..postprocessor.ffmpeg import FFmpegPostProcessor
 from ..utils import (
-    encodeArgument,
     encodeFilename,
     sanitize_open,
-    handle_youtubedl_headers,
 )
 
 
-class HlsFD(FileDownloader):
-    def real_download(self, filename, info_dict):
-        url = info_dict['url']
-        self.report_destination(filename)
-        tmpfilename = self.temp_name(filename)
-
-        ffpp = FFmpegPostProcessor(downloader=self)
-        if not ffpp.available:
-            self.report_error('m3u8 download detected but ffmpeg or avconv could not be found. Please install one.')
-            return False
-        ffpp.check_version()
-
-        args = [ffpp.executable, '-y']
-
-        if info_dict['http_headers'] and re.match(r'^https?://', url):
-            # Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
-            # [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
-            headers = handle_youtubedl_headers(info_dict['http_headers'])
-            args += [
-                '-headers',
-                ''.join('%s: %s\r\n' % (key, val) for key, val in headers.items())]
-
-        args += ['-i', url, '-c', 'copy']
-        if self.params.get('hls_use_mpegts', False):
-            args += ['-f', 'mpegts']
-        else:
-            args += ['-f', 'mp4', '-bsf:a', 'aac_adtstoasc']
-
-        args = [encodeArgument(opt) for opt in args]
-        args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
-
-        self._debug_cmd(args)
-
-        proc = subprocess.Popen(args, stdin=subprocess.PIPE)
-        try:
-            retval = proc.wait()
-        except KeyboardInterrupt:
-            # subprocces.run would send the SIGKILL signal to ffmpeg and the
-            # mp4 file couldn't be played, but if we ask ffmpeg to quit it
-            # produces a file that is playable (this is mostly useful for live
-            # streams). Note that Windows is not affected and produces playable
-            # files (see https://github.com/rg3/youtube-dl/issues/8300).
-            if sys.platform != 'win32':
-                proc.communicate(b'q')
-            raise
-        if retval == 0:
-            fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
-            self.try_rename(tmpfilename, filename)
-            self._hook_progress({
-                'downloaded_bytes': fsize,
-                'total_bytes': fsize,
-                'filename': filename,
-                'status': 'finished',
-            })
-            return True
-        else:
-            self.to_stderr('\n')
-            self.report_error('%s exited with code %d' % (ffpp.basename, retval))
-            return False
-
-
-class NativeHlsFD(FragmentFD):
-    """ A more limited implementation that does not require ffmpeg """
+class HlsFD(FragmentFD):
+    """ A limited implementation that does not require ffmpeg """
 
     FD_NAME = 'hlsnative'
 
youtube_dl/downloader/http.py
@@ -140,8 +140,8 @@ class HttpFD(FileDownloader):
 
                 if data_len is not None:
                     data_len = int(data_len) + resume_len
-                    min_data_len = self.params.get("min_filesize", None)
-                    max_data_len = self.params.get("max_filesize", None)
+                    min_data_len = self.params.get('min_filesize')
+                    max_data_len = self.params.get('max_filesize')
                     if min_data_len is not None and data_len < min_data_len:
                         self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                         return False
youtube_dl/downloader/rtmp.py
@@ -94,15 +94,15 @@ class RtmpFD(FileDownloader):
             return proc.returncode
 
         url = info_dict['url']
-        player_url = info_dict.get('player_url', None)
-        page_url = info_dict.get('page_url', None)
-        app = info_dict.get('app', None)
-        play_path = info_dict.get('play_path', None)
-        tc_url = info_dict.get('tc_url', None)
-        flash_version = info_dict.get('flash_version', None)
+        player_url = info_dict.get('player_url')
+        page_url = info_dict.get('page_url')
+        app = info_dict.get('app')
+        play_path = info_dict.get('play_path')
+        tc_url = info_dict.get('tc_url')
+        flash_version = info_dict.get('flash_version')
         live = info_dict.get('rtmp_live', False)
-        conn = info_dict.get('rtmp_conn', None)
-        protocol = info_dict.get('rtmp_protocol', None)
+        conn = info_dict.get('rtmp_conn')
+        protocol = info_dict.get('rtmp_protocol')
         real_time = info_dict.get('rtmp_real_time', False)
         no_resume = info_dict.get('no_resume', False)
         continue_dl = self.params.get('continuedl', True)
youtube_dl/extractor/__init__.py
@@ -20,9 +20,13 @@ from .aftonbladet import AftonbladetIE
 from .airmozilla import AirMozillaIE
 from .aljazeera import AlJazeeraIE
 from .alphaporno import AlphaPornoIE
+from .animeondemand import AnimeOnDemandIE
 from .anitube import AnitubeIE
 from .anysex import AnySexIE
-from .aol import AolIE
+from .aol import (
+    AolIE,
+    AolFeaturesIE,
+)
 from .allocine import AllocineIE
 from .aparat import AparatIE
 from .appleconnect import AppleConnectIE
@@ -44,11 +48,13 @@ from .arte import (
     ArteTVFutureIE,
     ArteTVCinemaIE,
     ArteTVDDCIE,
+    ArteTVMagazineIE,
     ArteTVEmbedIE,
 )
 from .atresplayer import AtresPlayerIE
 from .atttechchannel import ATTTechChannelIE
 from .audimedia import AudiMediaIE
+from .audioboom import AudioBoomIE
 from .audiomack import AudiomackIE, AudiomackAlbumIE
 from .azubu import AzubuIE, AzubuLiveIE
 from .baidu import BaiduVideoIE
@@ -66,14 +72,17 @@ from .bet import BetIE
 from .bigflix import BigflixIE
 from .bild import BildIE
 from .bilibili import BiliBiliIE
+from .biobiochiletv import BioBioChileTVIE
 from .bleacherreport import (
     BleacherReportIE,
     BleacherReportCMSIE,
 )
 from .blinkx import BlinkxIE
 from .bloomberg import BloombergIE
+from .bokecc import BokeCCIE
 from .bpb import BpbIE
 from .br import BRIE
+from .bravotv import BravoTVIE
 from .breakcom import BreakIE
 from .brightcove import (
     BrightcoveLegacyIE,
@@ -100,6 +109,7 @@ from .cbsnews import (
 )
 from .cbssports import CBSSportsIE
 from .ccc import CCCIE
+from .cda import CDAIE
 from .ceskatelevize import CeskaTelevizeIE
 from .channel9 import Channel9IE
 from .chaturbate import ChaturbateIE
@@ -128,6 +138,7 @@ from .collegerama import CollegeRamaIE
 from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
 from .comcarcoff import ComCarCoffIE
 from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
+from .commonprotocols import RtmpIE
 from .condenast import CondeNastIE
 from .cracked import CrackedIE
 from .crackle import CrackleIE
@@ -182,6 +193,10 @@ from .dumpert import DumpertIE
 from .defense import DefenseGouvFrIE
 from .discovery import DiscoveryIE
 from .dropbox import DropboxIE
+from .dw import (
+    DWIE,
+    DWArticleIE,
+)
 from .eagleplatform import EaglePlatformIE
 from .ebaumsworld import EbaumsWorldIE
 from .echomsk import EchoMskIE
@@ -206,10 +221,7 @@ from .everyonesmixtape import EveryonesMixtapeIE
 from .exfm import ExfmIE
 from .expotv import ExpoTVIE
 from .extremetube import ExtremeTubeIE
-from .facebook import (
-    FacebookIE,
-    FacebookPostIE,
-)
+from .facebook import FacebookIE
 from .faz import FazIE
 from .fc2 import FC2IE
 from .fczenit import FczenitIE
@@ -274,6 +286,7 @@ from .goshgay import GoshgayIE
 from .gputechconf import GPUTechConfIE
 from .groupon import GrouponIE
 from .hark import HarkIE
+from .hbo import HBOIE
 from .hearthisat import HearThisAtIE
 from .heise import HeiseIE
 from .hellporno import HellPornoIE
@@ -337,6 +350,7 @@ from .konserthusetplay import KonserthusetPlayIE
 from .kontrtube import KontrTubeIE
 from .krasview import KrasViewIE
 from .ku6 import Ku6IE
+from .kusi import KUSIIE
 from .kuwo import (
     KuwoIE,
     KuwoAlbumIE,
@@ -349,10 +363,9 @@ from .la7 import LA7IE
 from .laola1tv import Laola1TvIE
 from .lecture2go import Lecture2GoIE
 from .lemonde import LemondeIE
-from .letv import (
-    LetvIE,
-    LetvTvIE,
-    LetvPlaylistIE,
+from .leeco import (
+    LeIE,
+    LePlaylistIE,
     LetvCloudIE,
 )
 from .libsyn import LibsynIE
@@ -381,6 +394,7 @@ from .lynda import (
 from .m6 import M6IE
 from .macgamestore import MacGameStoreIE
 from .mailru import MailRuIE
+from .makerschannel import MakersChannelIE
 from .makertv import MakerTVIE
 from .malemotion import MalemotionIE
 from .matchtv import MatchTVIE
@@ -390,11 +404,13 @@ from .metacritic import MetacriticIE
 from .mgoon import MgoonIE
 from .minhateca import MinhatecaIE
 from .ministrygrid import MinistryGridIE
+from .minoto import MinotoIE
 from .miomio import MioMioIE
 from .mit import TechTVMITIE, MITIE, OCWMITIE
 from .mitele import MiTeleIE
 from .mixcloud import MixcloudIE
 from .mlb import MLBIE
+from .mnet import MnetIE
 from .mpora import MporaIE
 from .moevideo import MoeVideoIE
 from .mofosex import MofosexIE
@@ -489,6 +505,7 @@ from .nowtv import (
     NowTVIE,
     NowTVListIE,
 )
+from .noz import NozIE
 from .npo import (
     NPOIE,
     NPOLiveIE,
@@ -502,6 +519,7 @@ from .npr import NprIE
 from .nrk import (
     NRKIE,
     NRKPlaylistIE,
+    NRKSkoleIE,
     NRKTVIE,
 )
 from .ntvde import NTVDeIE
@@ -518,6 +536,7 @@ from .ooyala import (
     OoyalaIE,
     OoyalaExternalIE,
 )
+from .openload import OpenloadIE
 from .ora import OraTVIE
 from .orf import (
     ORFTVthekIE,
@@ -552,6 +571,7 @@ from .pornhd import PornHdIE
 from .pornhub import (
     PornHubIE,
     PornHubPlaylistIE,
+    PornHubUserVideosIE,
 )
 from .pornotube import PornotubeIE
 from .pornovoisines import PornoVoisinesIE
@@ -585,6 +605,7 @@ from .regiotv import RegioTVIE
 from .restudy import RestudyIE
 from .reverbnation import ReverbNationIE
 from .revision3 import Revision3IE
+from .rice import RICEIE
 from .ringtv import RingTVIE
 from .ro220 import Ro220IE
 from .rottentomatoes import RottenTomatoesIE
@@ -611,6 +632,7 @@ from .ruutu import RuutuIE
 from .sandia import SandiaIE
 from .safari import (
     SafariIE,
+    SafariApiIE,
     SafariCourseIE,
 )
 from .sapo import SapoIE
@@ -619,6 +641,7 @@ from .sbs import SBSIE
 from .scivee import SciVeeIE
 from .screencast import ScreencastIE
 from .screencastomatic import ScreencastOMaticIE
+from .screenjunkies import ScreenJunkiesIE
 from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
 from .senateisvp import SenateISVPIE
 from .servingsys import ServingSysIE
@@ -664,7 +687,6 @@ from .southpark import (
     SouthParkEsIE,
     SouthParkNlIE
 )
-from .space import SpaceIE
 from .spankbang import SpankBangIE
 from .spankwire import SpankwireIE
 from .spiegel import SpiegelIE, SpiegelArticleIE
@@ -722,7 +744,9 @@ from .theplatform import (
     ThePlatformIE,
     ThePlatformFeedIE,
 )
+from .thescene import TheSceneIE
 from .thesixtyone import TheSixtyOneIE
+from .thestar import TheStarIE
 from .thisamericanlife import ThisAmericanLifeIE
 from .thisav import ThisAVIE
 from .tinypic import TinyPicIE
@@ -732,6 +756,7 @@ from .tmz import (
     TMZArticleIE,
 )
 from .tnaflix import (
+    TNAFlixNetworkEmbedIE,
     TNAFlixIE,
     EMPFlixIE,
     MovieFapIE,
@@ -768,6 +793,7 @@ from .tv2 import (
     TV2IE,
     TV2ArticleIE,
 )
+from .tv3 import TV3IE
 from .tv4 import TV4IE
 from .tvc import (
     TVCIE,
@@ -793,7 +819,11 @@ from .twitch import (
     TwitchBookmarksIE,
     TwitchStreamIE,
 )
-from .twitter import TwitterCardIE, TwitterIE
+from .twitter import (
+    TwitterCardIE,
+    TwitterIE,
+    TwitterAmplifyIE,
+)
 from .ubu import UbuIE
 from .udemy import (
     UdemyIE,
@@ -803,7 +833,9 @@ from .udn import UDNEmbedIE
 from .digiteka import DigitekaIE
 from .unistra import UnistraIE
 from .urort import UrortIE
+from .usatoday import USATodayIE
 from .ustream import UstreamIE, UstreamChannelIE
+from .ustudio import UstudioIE
 from .varzesh3 import Varzesh3IE
 from .vbox7 import Vbox7IE
 from .veehd import VeeHDIE
@@ -817,7 +849,10 @@ from .vgtv import (
     VGTVIE,
 )
 from .vh1 import VH1IE
-from .vice import ViceIE
+from .vice import (
+    ViceIE,
+    ViceShowIE,
+)
 from .viddler import ViddlerIE
 from .videodetective import VideoDetectiveIE
 from .videofyme import VideofyMeIE
@@ -844,6 +879,7 @@ from .vimeo import (
     VimeoChannelIE,
     VimeoGroupsIE,
     VimeoLikesIE,
+    VimeoOndemandIE,
     VimeoReviewIE,
     VimeoUserIE,
     VimeoWatchLaterIE,
@@ -925,7 +961,9 @@ from .youtube import (
     YoutubeChannelIE,
     YoutubeFavouritesIE,
     YoutubeHistoryIE,
+    YoutubeLiveIE,
     YoutubePlaylistIE,
+    YoutubePlaylistsIE,
     YoutubeRecommendedIE,
     YoutubeSearchDateIE,
     YoutubeSearchIE,
@@ -935,7 +973,6 @@ from .youtube import (
     YoutubeTruncatedIDIE,
     YoutubeTruncatedURLIE,
     YoutubeUserIE,
-    YoutubePlaylistsIE,
     YoutubeWatchLaterIE,
 )
 from .zapiks import ZapiksIE
youtube_dl/extractor/abc.py
@@ -12,7 +12,7 @@ from ..utils import (
 
 class ABCIE(InfoExtractor):
     IE_NAME = 'abc.net.au'
-    _VALID_URL = r'http://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
+    _VALID_URL = r'https?://www\.abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
youtube_dl/extractor/abc7news.py
@@ -44,6 +44,7 @@ class Abc7NewsIE(InfoExtractor):
             'contentURL', webpage, 'm3u8 url', fatal=True)
 
         formats = self._extract_m3u8_formats(m3u8, display_id, 'mp4')
+        self._sort_formats(formats)
 
         title = self._og_search_title(webpage).strip()
         description = self._og_search_description(webpage).strip()
youtube_dl/extractor/addanime.py
@@ -6,7 +6,7 @@ from .common import InfoExtractor
 from ..compat import (
     compat_HTTPError,
     compat_str,
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
 )
 from ..utils import (
@@ -16,7 +16,7 @@ from ..utils import (
 
 
 class AddAnimeIE(InfoExtractor):
-    _VALID_URL = r'http://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
+    _VALID_URL = r'https?://(?:\w+\.)?add-anime\.net/(?:watch_video\.php\?(?:.*?)v=|video/)(?P<id>[\w_]+)'
     _TESTS = [{
         'url': 'http://www.add-anime.net/watch_video.php?v=24MR3YO5SAS9',
         'md5': '72954ea10bc979ab5e2eb288b21425a0',
@@ -60,7 +60,7 @@ class AddAnimeIE(InfoExtractor):
             confirm_url = (
                 parsed_url.scheme + '://' + parsed_url.netloc +
                 action + '?' +
-                compat_urllib_parse.urlencode({
+                compat_urllib_parse_urlencode({
                     'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
             self._download_webpage(
                 confirm_url, video_id,
youtube_dl/extractor/aenetworks.py
@@ -28,7 +28,7 @@ class AENetworksIE(InfoExtractor):
         'info_dict': {
             'id': 'eg47EERs_JsZ',
             'ext': 'mp4',
-            'title': "Winter Is Coming",
+            'title': 'Winter Is Coming',
             'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
         },
         'params': {
@@ -6,7 +6,7 @@ from ..utils import int_or_none


class AftonbladetIE(InfoExtractor):
-    _VALID_URL = r'http://tv\.aftonbladet\.se/abtv/articles/(?P<id>[0-9]+)'
+    _VALID_URL = r'https?://tv\.aftonbladet\.se/abtv/articles/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://tv.aftonbladet.se/abtv/articles/36015',
        'info_dict': {
@@ -4,7 +4,7 @@ from .common import InfoExtractor


class AlJazeeraIE(InfoExtractor):
-    _VALID_URL = r'http://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'
+    _VALID_URL = r'https?://www\.aljazeera\.com/programmes/.*?/(?P<id>[^/]+)\.html'

    _TEST = {
        'url': 'http://www.aljazeera.com/programmes/the-slum/2014/08/deliverance-201482883754237240.html',
@@ -13,24 +13,18 @@ class AlJazeeraIE(InfoExtractor):
            'ext': 'mp4',
            'title': 'The Slum - Episode 1: Deliverance',
            'description': 'As a birth attendant advocating for family planning, Remy is on the frontline of Tondo\'s battle with overcrowding.',
-            'uploader': 'Al Jazeera English',
+            'uploader_id': '665003303001',
+            'timestamp': 1411116829,
+            'upload_date': '20140919',
        },
-        'add_ie': ['BrightcoveLegacy'],
+        'add_ie': ['BrightcoveNew'],
        'skip': 'Not accessible from Travis CI server',
    }
+    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/665003303001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        program_name = self._match_id(url)
        webpage = self._download_webpage(url, program_name)
        brightcove_id = self._search_regex(
            r'RenderPagesVideo\(\'(.+?)\'', webpage, 'brightcove id')
-
-        return {
-            '_type': 'url',
-            'url': (
-                'brightcove:'
-                'playerKey=AQ~~%2CAAAAmtVJIFk~%2CTVGOQ5ZTwJbeMWnq5d_H4MOM57xfzApc'
-                '&%40videoPlayer={0}'.format(brightcove_id)
-            ),
-            'ie_key': 'BrightcoveLegacy',
-        }
+        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
youtube_dl/extractor/animeondemand.py (new file, 242 lines)
@@ -0,0 +1,242 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
    compat_str,
)
from ..utils import (
    determine_ext,
    extract_attributes,
    ExtractorError,
    sanitized_Request,
    urlencode_postdata,
)


class AnimeOnDemandIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
    _LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
    _APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
    _NETRC_MACHINE = 'animeondemand'
    _TESTS = [{
        'url': 'https://www.anime-on-demand.de/anime/161',
        'info_dict': {
            'id': '161',
            'title': 'Grimgar, Ashes and Illusions (OmU)',
            'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
        },
        'playlist_mincount': 4,
    }, {
        # Film wording is used instead of Episode
        'url': 'https://www.anime-on-demand.de/anime/39',
        'only_matching': True,
    }, {
        # Episodes without titles
        'url': 'https://www.anime-on-demand.de/anime/162',
        'only_matching': True,
    }, {
        # ger/jap, Dub/OmU, account required
        'url': 'https://www.anime-on-demand.de/anime/169',
        'only_matching': True,
    }]

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        if '>Our licensing terms allow the distribution of animes only to German-speaking countries of Europe' in login_page:
            self.raise_geo_restricted(
                '%s is only available in German-speaking countries of Europe' % self.IE_NAME)

        login_form = self._form_hidden_inputs('new_user', login_page)

        login_form.update({
            'user[login]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = sanitized_Request(
            post_url, urlencode_postdata(login_form))
        request.add_header('Referer', self._LOGIN_URL)

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
            error = self._search_regex(
                r'<p class="alert alert-danger">(.+?)</p>',
                response, 'error', default=None)
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
        anime_id = self._match_id(url)

        webpage = self._download_webpage(url, anime_id)

        if 'data-playlist=' not in webpage:
            self._download_webpage(
                self._APPLY_HTML5_URL, anime_id,
                'Activating HTML5 beta', 'Unable to apply HTML5 beta')
            webpage = self._download_webpage(url, anime_id)

        csrf_token = self._html_search_meta(
            'csrf-token', webpage, 'csrf token', fatal=True)

        anime_title = self._html_search_regex(
            r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
            webpage, 'anime name')
        anime_description = self._html_search_regex(
            r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
            webpage, 'anime description', default=None)

        entries = []

        for num, episode_html in enumerate(re.findall(
                r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage), 1):
            episodebox_title = self._search_regex(
                (r'class="episodebox-title"[^>]+title=(["\'])(?P<title>.+?)\1',
                 r'class="episodebox-title"[^>]+>(?P<title>.+?)<'),
                episode_html, 'episodebox title', default=None, group='title')
            if not episodebox_title:
                continue

            episode_number = int(self._search_regex(
                r'(?:Episode|Film)\s*(\d+)',
                episodebox_title, 'episode number', default=num))
            episode_title = self._search_regex(
                r'(?:Episode|Film)\s*\d+\s*-\s*(.+)',
                episodebox_title, 'episode title', default=None)

            video_id = 'episode-%d' % episode_number

            common_info = {
                'id': video_id,
                'series': anime_title,
                'episode': episode_title,
                'episode_number': episode_number,
            }

            formats = []

            for input_ in re.findall(
                    r'<input[^>]+class=["\'].*?streamstarter_html5[^>]+>', episode_html):
                attributes = extract_attributes(input_)
                playlist_urls = []
                for playlist_key in ('data-playlist', 'data-otherplaylist'):
                    playlist_url = attributes.get(playlist_key)
                    if isinstance(playlist_url, compat_str) and re.match(
                            r'/?[\da-zA-Z]+', playlist_url):
                        playlist_urls.append(attributes[playlist_key])
                if not playlist_urls:
                    continue

                lang = attributes.get('data-lang')
                lang_note = attributes.get('value')

                for playlist_url in playlist_urls:
                    kind = self._search_regex(
                        r'videomaterialurl/\d+/([^/]+)/',
                        playlist_url, 'media kind', default=None)
                    format_id_list = []
                    if lang:
                        format_id_list.append(lang)
                    if kind:
                        format_id_list.append(kind)
                    if not format_id_list:
                        format_id_list.append(compat_str(num))
                    format_id = '-'.join(format_id_list)
                    format_note = ', '.join(filter(None, (kind, lang_note)))
                    request = sanitized_Request(
                        compat_urlparse.urljoin(url, playlist_url),
                        headers={
                            'X-Requested-With': 'XMLHttpRequest',
                            'X-CSRF-Token': csrf_token,
                            'Referer': url,
                            'Accept': 'application/json, text/javascript, */*; q=0.01',
                        })
                    playlist = self._download_json(
                        request, video_id, 'Downloading %s playlist JSON' % format_id,
                        fatal=False)
                    if not playlist:
                        continue
                    start_video = playlist.get('startvideo', 0)
                    playlist = playlist.get('playlist')
                    if not playlist or not isinstance(playlist, list):
                        continue
                    playlist = playlist[start_video]
                    title = playlist.get('title')
                    if not title:
                        continue
                    description = playlist.get('description')
                    for source in playlist.get('sources', []):
                        file_ = source.get('file')
                        if not file_:
                            continue
                        ext = determine_ext(file_)
                        format_id_list = [lang, kind]
                        if ext == 'm3u8':
                            format_id_list.append('hls')
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            format_id_list.append('dash')
                        format_id = '-'.join(filter(None, format_id_list))
                        if ext == 'm3u8':
                            file_formats = self._extract_m3u8_formats(
                                file_, video_id, 'mp4',
                                entry_protocol='m3u8_native', m3u8_id=format_id, fatal=False)
                        elif source.get('type') == 'video/dash' or ext == 'mpd':
                            continue
                            file_formats = self._extract_mpd_formats(
                                file_, video_id, mpd_id=format_id, fatal=False)
                        else:
                            continue
                        for f in file_formats:
                            f.update({
                                'language': lang,
                                'format_note': format_note,
                            })
                        formats.extend(file_formats)

            if formats:
                self._sort_formats(formats)
                f = common_info.copy()
                f.update({
                    'title': title,
                    'description': description,
                    'formats': formats,
                })
                entries.append(f)

            # Extract teaser only when full episode is not available
            if not formats:
                m = re.search(
                    r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
                    episode_html)
                if m:
                    f = common_info.copy()
                    f.update({
                        'id': '%s-teaser' % f['id'],
                        'title': m.group('title'),
                        'url': compat_urlparse.urljoin(url, m.group('href')),
                    })
                    entries.append(f)

        return self.playlist_result(entries, anime_id, anime_title, anime_description)
@@ -1,24 +1,11 @@
from __future__ import unicode_literals

-import re
-
from .common import InfoExtractor


class AolIE(InfoExtractor):
    IE_NAME = 'on.aol.com'
-    _VALID_URL = r'''(?x)
-        (?:
-            aol-video:|
-            http://on\.aol\.com/
-            (?:
-                video/.*-|
-                playlist/(?P<playlist_display_id>[^/?#]+?)-(?P<playlist_id>[0-9]+)[?#].*_videoid=
-            )
-        )
-        (?P<id>[0-9]+)
-        (?:$|\?)
-    '''
+    _VALID_URL = r'(?:aol-video:|https?://on\.aol\.com/video/.*-)(?P<id>[0-9]+)(?:$|\?)'

    _TESTS = [{
        'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
@@ -29,42 +16,31 @@ class AolIE(InfoExtractor):
            'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
        },
        'add_ie': ['FiveMin'],
-    }, {
-        'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
-        'info_dict': {
-            'id': '152147',
-            'title': 'Brace Yourself - Today\'s Weirdest News',
-        },
-        'playlist_mincount': 10,
    }]

    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        playlist_id = mobj.group('playlist_id')
-        if not playlist_id or self._downloader.params.get('noplaylist'):
-            return self.url_result('5min:%s' % video_id)
+        video_id = self._match_id(url)
+        return self.url_result('5min:%s' % video_id)

-        self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
-
-        webpage = self._download_webpage(url, playlist_id)
-        title = self._html_search_regex(
-            r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
-        playlist_html = self._search_regex(
-            r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
-            'playlist HTML')
-        entries = [{
-            '_type': 'url',
-            'url': 'aol-video:%s' % m.group('id'),
-            'ie_key': 'Aol',
-        } for m in re.finditer(
-            r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
-            playlist_html)]
+
+class AolFeaturesIE(InfoExtractor):
+    IE_NAME = 'features.aol.com'
+    _VALID_URL = r'https?://features\.aol\.com/video/(?P<id>[^/?#]+)'

-        return {
-            '_type': 'playlist',
-            'id': playlist_id,
-            'display_id': mobj.group('playlist_display_id'),
-            'title': title,
-            'entries': entries,
-        }
+    _TESTS = [{
+        'url': 'http://features.aol.com/video/behind-secret-second-careers-late-night-talk-show-hosts',
+        'md5': '7db483bb0c09c85e241f84a34238cc75',
+        'info_dict': {
+            'id': '519507715',
+            'ext': 'mp4',
+            'title': 'What To Watch - February 17, 2016',
+        },
+        'add_ie': ['FiveMin'],
+    }]
+
+    def _real_extract(self, url):
+        display_id = self._match_id(url)
+        webpage = self._download_webpage(url, display_id)
+        return self.url_result(self._search_regex(
+            r'<script type="text/javascript" src="(https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js[^"]+)"',
+            webpage, '5min embed url'), 'FiveMin')
@@ -12,7 +12,7 @@ from ..utils import (

class AppleTrailersIE(InfoExtractor):
    IE_NAME = 'appletrailers'
-    _VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
+    _VALID_URL = r'https?://(?:www\.|movie)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
    _TESTS = [{
        'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
        'info_dict': {
@@ -73,6 +73,9 @@ class AppleTrailersIE(InfoExtractor):
    }, {
        'url': 'http://trailers.apple.com/ca/metropole/autrui/',
        'only_matching': True,
+    }, {
+        'url': 'http://movietrailers.apple.com/trailers/focus_features/kuboandthetwostrings/',
+        'only_matching': True,
    }]

    _JSON_RE = r'iTunes.playURL\((.*?)\);'
@@ -23,7 +23,7 @@ from ..utils import (


class ArteTvIE(InfoExtractor):
-    _VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
+    _VALID_URL = r'https?://videos\.arte\.tv/(?P<lang>fr|de|en|es)/.*-(?P<id>.*?)\.html'
    IE_NAME = 'arte.tv'

    def _real_extract(self, url):
@@ -63,7 +63,7 @@ class ArteTvIE(InfoExtractor):

class ArteTVPlus7IE(InfoExtractor):
    IE_NAME = 'arte.tv:+7'
-    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/(?:(?:sendungen|emissions|embed)/)?(?P<id>[^/]+)/(?P<name>[^/?#&+])'

    @classmethod
    def _extract_url_info(cls, url):
@@ -102,23 +102,45 @@ class ArteTVPlus7IE(InfoExtractor):
        iframe_url = find_iframe_url(webpage, None)
        if not iframe_url:
            embed_url = self._html_search_regex(
-                r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url')
-            player = self._download_json(
-                embed_url, video_id, 'Downloading player page')
-            iframe_url = find_iframe_url(player['html'])
-        json_url = compat_parse_qs(
-            compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
-        return self._extract_from_json_url(json_url, video_id, lang)
+                r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url', default=None)
+            if embed_url:
+                player = self._download_json(
+                    embed_url, video_id, 'Downloading player page')
+                iframe_url = find_iframe_url(player['html'])
+        # en and es URLs produce react-based pages with different layout (e.g.
+        # http://www.arte.tv/guide/en/053330-002-A/carnival-italy?zone=world)
+        if not iframe_url:
+            program = self._search_regex(
+                r'program\s*:\s*({.+?["\']embed_html["\'].+?}),?\s*\n',
+                webpage, 'program', default=None)
+            if program:
+                embed_html = self._parse_json(program, video_id)
+                if embed_html:
+                    iframe_url = find_iframe_url(embed_html['embed_html'])
+        if iframe_url:
+            json_url = compat_parse_qs(
+                compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
+            if json_url:
+                title = self._search_regex(
+                    r'<h3[^>]+title=(["\'])(?P<title>.+?)\1',
+                    webpage, 'title', default=None, group='title')
+                return self._extract_from_json_url(json_url, video_id, lang, title=title)
+        # Different kind of embed URL (e.g.
+        # http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium)
+        embed_url = self._search_regex(
+            r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1',
+            webpage, 'embed url', group='url')
+        return self.url_result(embed_url)

-    def _extract_from_json_url(self, json_url, video_id, lang):
+    def _extract_from_json_url(self, json_url, video_id, lang, title=None):
        info = self._download_json(json_url, video_id)
        player_info = info['videoJsonPlayer']

        upload_date_str = player_info.get('shootingDate')
        if not upload_date_str:
-            upload_date_str = player_info.get('VDA', '').split(' ')[0]
+            upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]

-        title = player_info['VTI'].strip()
+        title = (player_info.get('VTI') or title or player_info['VID']).strip()
        subtitle = player_info.get('VSU', '').strip()
        if subtitle:
            title += ' - %s' % subtitle
@@ -132,27 +154,30 @@ class ArteTVPlus7IE(InfoExtractor):
        }
        qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])

+        LANGS = {
+            'fr': 'F',
+            'de': 'A',
+            'en': 'E[ANG]',
+            'es': 'E[ESP]',
+        }
+
        formats = []
        for format_id, format_dict in player_info['VSR'].items():
            f = dict(format_dict)
            versionCode = f.get('versionCode')
-
-            langcode = {
-                'fr': 'F',
-                'de': 'A',
-            }.get(lang, lang)
-            lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
-            lang_pref = (
-                None if versionCode is None else (
-                    10 if any(re.match(r, versionCode) for r in lang_rexs)
-                    else -10))
+            langcode = LANGS.get(lang, lang)
+            lang_rexs = [r'VO?%s-' % re.escape(langcode), r'VO?.-ST%s$' % re.escape(langcode)]
+            lang_pref = None
+            if versionCode:
+                matched_lang_rexs = [r for r in lang_rexs if re.match(r, versionCode)]
+                lang_pref = -10 if not matched_lang_rexs else 10 * len(matched_lang_rexs)
            source_pref = 0
            if versionCode is not None:
                # The original version with subtitles has lower relevance
-                if re.match(r'VO-ST(F|A)', versionCode):
+                if re.match(r'VO-ST(F|A|E)', versionCode):
                    source_pref -= 10
                # The version with sourds/mal subtitles has also lower relevance
-                elif re.match(r'VO?(F|A)-STM\1', versionCode):
+                elif re.match(r'VO?(F|A|E)-STM\1', versionCode):
                    source_pref -= 9
            format = {
                'format_id': format_id,
@@ -185,7 +210,7 @@ class ArteTVPlus7IE(InfoExtractor):
# It also uses the arte_vp_url url from the webpage to extract the information
class ArteTVCreativeIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:creative'
-    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
+    _VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de|en|es)/(?:magazine?/)?(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
@@ -209,7 +234,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):

class ArteTVFutureIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:future'
-    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(?P<id>.+)'
+    _VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'http://future.arte.tv/fr/info-sciences/les-ecrevisses-aussi-sont-anxieuses',
@@ -217,6 +242,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):
            'id': '050940-028-A',
            'ext': 'mp4',
            'title': 'Les écrevisses aussi peuvent être anxieuses',
+            'upload_date': '20140902',
        },
    }, {
        'url': 'http://future.arte.tv/fr/la-science-est-elle-responsable',
@@ -226,7 +252,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):

class ArteTVDDCIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:ddc'
-    _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
+    _VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>[^/?#&]+)'

    def _real_extract(self, url):
        video_id, lang = self._extract_url_info(url)
@@ -244,7 +270,7 @@ class ArteTVDDCIE(ArteTVPlus7IE):

class ArteTVConcertIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:concert'
-    _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
+    _VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'

    _TEST = {
        'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
@@ -261,7 +287,7 @@ class ArteTVConcertIE(ArteTVPlus7IE):

class ArteTVCinemaIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:cinema'
-    _VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
+    _VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>.+)'

    _TEST = {
        'url': 'http://cinema.arte.tv/de/node/38291',
@@ -276,6 +302,37 @@ class ArteTVCinemaIE(ArteTVPlus7IE):
    }


+class ArteTVMagazineIE(ArteTVPlus7IE):
+    IE_NAME = 'arte.tv:magazine'
+    _VALID_URL = r'https?://(?:www\.)?arte\.tv/magazine/[^/]+/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
+
+    _TESTS = [{
+        # Embedded via <iframe src="http://www.arte.tv/arte_vp/index.php?json_url=..."
+        'url': 'http://www.arte.tv/magazine/trepalium/fr/entretien-avec-le-realisateur-vincent-lannoo-trepalium',
+        'md5': '2a9369bcccf847d1c741e51416299f25',
+        'info_dict': {
+            'id': '065965-000-A',
+            'ext': 'mp4',
+            'title': 'Trepalium - Extrait Ep.01',
+            'upload_date': '20160121',
+        },
+    }, {
+        # Embedded via <iframe src="http://www.arte.tv/guide/fr/embed/054813-004-A/medium"
+        'url': 'http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium',
+        'md5': 'fedc64fc7a946110fe311634e79782ca',
+        'info_dict': {
+            'id': '054813-004_PLUS7-F',
+            'ext': 'mp4',
+            'title': 'Trepalium (4/6)',
+            'description': 'md5:10057003c34d54e95350be4f9b05cb40',
+            'upload_date': '20160218',
+        },
+    }, {
+        'url': 'http://www.arte.tv/magazine/metropolis/de/frank-woeste-german-paris-metropolis',
+        'only_matching': True,
+    }]
+
+
class ArteTVEmbedIE(ArteTVPlus7IE):
    IE_NAME = 'arte.tv:embed'
    _VALID_URL = r'''(?x)
@@ -6,16 +6,14 @@ import hashlib
import re

from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse,
-)
+from ..compat import compat_str
from ..utils import (
-    int_or_none,
-    float_or_none,
-    sanitized_Request,
-    xpath_text,
    ExtractorError,
+    float_or_none,
+    int_or_none,
+    sanitized_Request,
+    urlencode_postdata,
+    xpath_text,
)


@@ -86,7 +84,7 @@ class AtresPlayerIE(InfoExtractor):
        }

        request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
@@ -10,9 +10,9 @@ from ..utils import (


class AudiMediaIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
    _TEST = {
-        'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test',
+        'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
        'md5': '79a8b71c46d49042609795ab59779b66',
        'info_dict': {
            'id': '1565',
@@ -32,7 +32,10 @@ class AudiMediaIE(InfoExtractor):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

-        raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload')
+        raw_payload = self._search_regex([
+            r'class="amtv-embed"[^>]+id="([^"]+)"',
+            r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
+        ], webpage, 'raw payload')
        _, stage_mode, video_id, lang = raw_payload.split('-')

        # TODO: handle s and e stage_mode (live streams and ended live streams)
@@ -59,13 +62,19 @@ class AudiMediaIE(InfoExtractor):
            video_version_url = video_version.get('download_url') or video_version.get('stream_url')
            if not video_version_url:
                continue
-            formats.append({
+            f = {
                'url': video_version_url,
                'width': int_or_none(video_version.get('width')),
                'height': int_or_none(video_version.get('height')),
                'abr': int_or_none(video_version.get('audio_bitrate')),
                'vbr': int_or_none(video_version.get('video_bitrate')),
-            })
+            }
+            bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
+            if bitrate:
+                f.update({
+                    'format_id': 'http-%s' % bitrate,
+                })
+            formats.append(f)
        self._sort_formats(formats)

        return {
youtube_dl/extractor/audioboom.py (new file, 66 lines)
@@ -0,0 +1,66 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import float_or_none


class AudioBoomIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?audioboom\.com/boos/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://audioboom.com/boos/4279833-3-09-2016-czaban-hour-3?t=0',
        'md5': '63a8d73a055c6ed0f1e51921a10a5a76',
        'info_dict': {
            'id': '4279833',
            'ext': 'mp3',
            'title': '3/09/2016 Czaban Hour 3',
            'description': 'Guest: Nate Davis - NFL free agency, Guest: Stan Gans',
            'duration': 2245.72,
            'uploader': 'Steve Czaban',
            'uploader_url': 're:https?://(?:www\.)?audioboom\.com/channel/steveczabanyahoosportsradio',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        clip = None

        clip_store = self._parse_json(
            self._search_regex(
                r'data-new-clip-store=(["\'])(?P<json>{.*?"clipId"\s*:\s*%s.*?})\1' % video_id,
                webpage, 'clip store', default='{}', group='json'),
            video_id, fatal=False)
        if clip_store:
            clips = clip_store.get('clips')
            if clips and isinstance(clips, list) and isinstance(clips[0], dict):
                clip = clips[0]

        def from_clip(field):
            if clip:
                clip.get(field)

        audio_url = from_clip('clipURLPriorToLoading') or self._og_search_property(
            'audio', webpage, 'audio url')
        title = from_clip('title') or self._og_search_title(webpage)
        description = from_clip('description') or self._og_search_description(webpage)

        duration = float_or_none(from_clip('duration') or self._html_search_meta(
            'weibo:audio:duration', webpage))

        uploader = from_clip('author') or self._og_search_property(
            'audio:artist', webpage, 'uploader', fatal=False)
        uploader_url = from_clip('author_url') or self._html_search_meta(
            'audioboo:channel', webpage, 'uploader url')

        return {
            'id': video_id,
            'url': audio_url,
            'title': title,
            'description': description,
            'duration': duration,
            'uploader': uploader,
            'uploader_url': uploader_url,
        }
@@ -98,7 +98,7 @@ class AzubuIE(InfoExtractor):


class AzubuLiveIE(InfoExtractor):
-    _VALID_URL = r'http://www.azubu.tv/(?P<id>[^/]+)$'
+    _VALID_URL = r'https?://www.azubu.tv/(?P<id>[^/]+)$'

    _TEST = {
        'url': 'http://www.azubu.tv/MarsTVMDLen',
@@ -120,6 +120,7 @@ class AzubuLiveIE(InfoExtractor):
        bc_info = self._download_json(req, user)
        m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS')
        formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4')
+        self._sort_formats(formats)

        return {
            'id': info['id'],
@@ -9,7 +9,7 @@ from ..utils import unescapeHTML

class BaiduVideoIE(InfoExtractor):
    IE_DESC = '百度视频'
-    _VALID_URL = r'http://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
+    _VALID_URL = r'https?://v\.baidu\.com/(?P<type>[a-z]+)/(?P<id>\d+)\.htm'
    _TESTS = [{
        'url': 'http://v.baidu.com/comic/1069.htm?frp=bdbrand&q=%E4%B8%AD%E5%8D%8E%E5%B0%8F%E5%BD%93%E5%AE%B6',
        'info_dict': {
@@ -4,15 +4,13 @@ import re
import itertools

from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-    compat_str,
-)
+from ..compat import compat_str
from ..utils import (
    ExtractorError,
-    int_or_none,
    float_or_none,
+    int_or_none,
    sanitized_Request,
+    urlencode_postdata,
)


@@ -58,7 +56,7 @@ class BambuserIE(InfoExtractor):
        }

        request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
        request.add_header('Referer', self._LOGIN_URL)
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
@@ -10,7 +10,6 @@ from ..utils import (
    int_or_none,
    parse_duration,
    parse_iso8601,
-    remove_end,
    unescapeHTML,
)
from ..compat import (
@@ -86,7 +85,7 @@ class BBCCoUkIE(InfoExtractor):
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
-                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+                'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
                'duration': 5100,
            },
            'params': {
@@ -561,7 +560,7 @@ class BBCIE(BBCCoUkIE):
        'url': 'http://www.bbc.co.uk/blogs/adamcurtis/entries/3662a707-0af9-3149-963f-47bea720b460',
        'info_dict': {
            'id': '3662a707-0af9-3149-963f-47bea720b460',
-            'title': 'BBC Blogs - Adam Curtis - BUGGER',
+            'title': 'BUGGER',
        },
        'playlist_count': 18,
    }, {
@@ -670,9 +669,17 @@ class BBCIE(BBCCoUkIE):
        'url': 'http://www.bbc.com/sport/0/football/34475836',
        'info_dict': {
            'id': '34475836',
-            'title': 'What Liverpool can expect from Klopp',
+            'title': 'Jurgen Klopp: Furious football from a witty and winning coach',
        },
        'playlist_count': 3,
    }, {
+        # school report article with single video
+        'url': 'http://www.bbc.co.uk/schoolreport/35744779',
+        'info_dict': {
+            'id': '35744779',
+            'title': 'School which breaks down barriers in Jerusalem',
+        },
+        'playlist_count': 1,
+    }, {
        # single video with playlist URL from weather section
        'url': 'http://www.bbc.com/weather/features/33601775',
@@ -735,8 +742,17 @@ class BBCIE(BBCCoUkIE):

        json_ld_info = self._search_json_ld(webpage, playlist_id, default=None)
        timestamp = json_ld_info.get('timestamp')

        playlist_title = json_ld_info.get('title')
-        playlist_description = json_ld_info.get('description')
+        if not playlist_title:
+            playlist_title = self._og_search_title(
+                webpage, default=None) or self._html_search_regex(
+                r'<title>(.+?)</title>', webpage, 'playlist title', default=None)
+            if playlist_title:
+                playlist_title = re.sub(r'(.+)\s*-\s*BBC.*?$', r'\1', playlist_title).strip()
+
+        playlist_description = json_ld_info.get(
+            'description') or self._og_search_description(webpage, default=None)

        if not timestamp:
            timestamp = parse_iso8601(self._search_regex(
@@ -797,8 +813,6 @@ class BBCIE(BBCCoUkIE):
                    playlist.get('progressiveDownloadUrl'), playlist_id, timestamp))

        if entries:
-            playlist_title = playlist_title or remove_end(self._og_search_title(webpage), ' - BBC News')
-            playlist_description = playlist_description or self._og_search_description(webpage, default=None)
            return self.playlist_result(entries, playlist_id, playlist_title, playlist_description)

        # single video story (e.g. http://www.bbc.com/travel/story/20150625-sri-lankas-spicy-secret)
@@ -829,10 +843,6 @@ class BBCIE(BBCCoUkIE):
            'subtitles': subtitles,
        }

-        playlist_title = self._html_search_regex(
-            r'<title>(.*?)(?:\s*-\s*BBC [^ ]+)?</title>', webpage, 'playlist title')
-        playlist_description = self._og_search_description(webpage, default=None)
-
        def extract_all(pattern):
            return list(filter(None, map(
                lambda s: self._parse_json(s, playlist_id, fatal=False),
@@ -932,7 +942,7 @@ class BBCIE(BBCCoUkIE):


class BBCCoUkArticleIE(InfoExtractor):
-    _VALID_URL = 'http://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
+    _VALID_URL = r'https?://www.bbc.co.uk/programmes/articles/(?P<id>[a-zA-Z0-9]+)'
    IE_NAME = 'bbc.co.uk:article'
    IE_DESC = 'BBC articles'

@@ -8,7 +8,7 @@ from ..utils import url_basename


class BehindKinkIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
+    _VALID_URL = r'https?://(?:www\.)?behindkink\.com/(?P<year>[0-9]{4})/(?P<month>[0-9]{2})/(?P<day>[0-9]{2})/(?P<id>[^/#?_]+)'
    _TEST = {
        'url': 'http://www.behindkink.com/2014/12/05/what-are-you-passionate-about-marley-blaze/',
        'md5': '507b57d8fdcd75a41a9a7bdb7989c762',
@@ -94,6 +94,7 @@ class BetIE(InfoExtractor):
            xpath_with_ns('./media:thumbnail', NS_MAP)).get('url')

        formats = self._extract_smil_formats(smil_url, display_id)
+        self._sort_formats(formats)

        return {
            'id': video_id,
@@ -14,7 +14,7 @@ from ..utils import (


class BiliBiliIE(InfoExtractor):
-    _VALID_URL = r'http://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?'
+    _VALID_URL = r'https?://www\.bilibili\.(?:tv|com)/video/av(?P<id>\d+)(?:/index_(?P<page_num>\d+).html)?'

    _TESTS = [{
        'url': 'http://www.bilibili.tv/video/av1074402/',
youtube_dl/extractor/biobiochiletv.py (new file, 86 lines)
@@ -0,0 +1,86 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import remove_end


class BioBioChileTVIE(InfoExtractor):
    _VALID_URL = r'https?://tv\.biobiochile\.cl/notas/(?:[^/]+/)+(?P<id>[^/]+)\.shtml'

    _TESTS = [{
        'url': 'http://tv.biobiochile.cl/notas/2015/10/21/sobre-camaras-y-camarillas-parlamentarias.shtml',
        'md5': '26f51f03cf580265defefb4518faec09',
        'info_dict': {
            'id': 'sobre-camaras-y-camarillas-parlamentarias',
            'ext': 'mp4',
            'title': 'Sobre Cámaras y camarillas parlamentarias',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Fernando Atria',
        },
    }, {
        # different uploader layout
        'url': 'http://tv.biobiochile.cl/notas/2016/03/18/natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades.shtml',
        'md5': 'edc2e6b58974c46d5b047dea3c539ff3',
        'info_dict': {
            'id': 'natalia-valdebenito-repasa-a-diputado-hasbun-paso-a-la-categoria-de-hablar-brutalidades',
            'ext': 'mp4',
            'title': 'Natalia Valdebenito repasa a diputado Hasbún: Pasó a la categoría de hablar brutalidades',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': 'Piangella Obrador',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://tv.biobiochile.cl/notas/2015/10/22/ninos-transexuales-de-quien-es-la-decision.shtml',
        'only_matching': True,
    }, {
        'url': 'http://tv.biobiochile.cl/notas/2015/10/21/exclusivo-hector-pinto-formador-de-chupete-revela-version-del-ex-delantero-albo.shtml',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        title = remove_end(self._og_search_title(webpage), ' - BioBioChile TV')

        file_url = self._search_regex(
            r'loadFWPlayerVideo\([^,]+,\s*(["\'])(?P<url>.+?)\1',
            webpage, 'file url', group='url')

        base_url = self._search_regex(
            r'file\s*:\s*(["\'])(?P<url>.+?)\1\s*\+\s*fileURL', webpage,
            'base url', default='http://unlimited2-cl.digitalproserver.com/bbtv/',
            group='url')

        formats = self._extract_m3u8_formats(
            '%s%s/playlist.m3u8' % (base_url, file_url), video_id, 'mp4',
            entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
        f = {
            'url': '%s%s' % (base_url, file_url),
            'format_id': 'http',
            'protocol': 'http',
            'preference': 1,
        }
        if formats:
            f_copy = formats[-1].copy()
            f_copy.update(f)
            f = f_copy
        formats.append(f)
        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)
        uploader = self._html_search_regex(
            r'<a[^>]+href=["\']https?://busca\.biobiochile\.cl/author[^>]+>(.+?)</a>',
            webpage, 'uploader', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'formats': formats,
        }
@@ -28,10 +28,10 @@ class BleacherReportIE(InfoExtractor):
        'add_ie': ['Ooyala'],
    }, {
        'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
-        'md5': 'af5f90dc9c7ba1c19d0a3eac806bbf50',
+        'md5': '6a5cd403418c7b01719248ca97fb0692',
        'info_dict': {
            'id': '2586817',
-            'ext': 'mp4',
+            'ext': 'webm',
            'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo',
            'timestamp': 1446839961,
            'uploader': 'Sean Fay',
@@ -93,10 +93,14 @@ class BleacherReportCMSIE(AMPIE):
        'md5': '8c2c12e3af7805152675446c905d159b',
        'info_dict': {
            'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
-            'ext': 'flv',
+            'ext': 'mp4',
            'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
            'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        },
    }]

    def _real_extract(self, url):
youtube_dl/extractor/bokecc.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_parse_qs
from ..utils import ExtractorError


class BokeCCBaseIE(InfoExtractor):
    def _extract_bokecc_formats(self, webpage, video_id, format_id=None):
        player_params_str = self._html_search_regex(
            r'<(?:script|embed)[^>]+src="http://p\.bokecc\.com/player\?([^"]+)',
            webpage, 'player params')

        player_params = compat_parse_qs(player_params_str)

        info_xml = self._download_xml(
            'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % (
                player_params['siteid'][0], player_params['vid'][0]), video_id)

        formats = [{
            'format_id': format_id,
            'url': quality.find('./copy').attrib['playurl'],
            'preference': int(quality.attrib['value']),
        } for quality in info_xml.findall('./video/quality')]

        self._sort_formats(formats)

        return formats


class BokeCCIE(BokeCCBaseIE):
    _IE_DESC = 'CC视频'
    _VALID_URL = r'https?://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'

    _TESTS = [{
        'url': 'http://union.bokecc.com/playvideo.bo?vid=E44D40C15E65EA30&uid=CD0C5D3C8614B28B',
        'info_dict': {
            'id': 'CD0C5D3C8614B28B_E44D40C15E65EA30',
            'ext': 'flv',
            'title': 'BokeCC Video',
        },
    }]

    def _real_extract(self, url):
        qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query'))
        if not qs.get('vid') or not qs.get('uid'):
            raise ExtractorError('Invalid URL', expected=True)

        video_id = '%s_%s' % (qs['uid'][0], qs['vid'][0])

        webpage = self._download_webpage(url, video_id)

        return {
            'id': video_id,
            'title': 'BokeCC Video',  # no title provided in the webpage
            'formats': self._extract_bokecc_formats(webpage, video_id),
        }
@@ -12,7 +12,7 @@ from ..utils import (

class BpbIE(InfoExtractor):
    IE_DESC = 'Bundeszentrale für politische Bildung'
-    _VALID_URL = r'http://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'
+    _VALID_URL = r'https?://www\.bpb\.de/mediathek/(?P<id>[0-9]+)/'

    _TEST = {
        'url': 'http://www.bpb.de/mediathek/297/joachim-gauck-zu-1989-und-die-erinnerung-an-die-ddr',
youtube_dl/extractor/bravotv.py (new file, 28 lines)
@@ -0,0 +1,28 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import smuggle_url


class BravoTVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bravotv\.com/(?:[^/]+/)+videos/(?P<id>[^/?]+)'
    _TEST = {
        'url': 'http://www.bravotv.com/last-chance-kitchen/season-5/videos/lck-ep-12-fishy-finale',
        'md5': 'd60cdf68904e854fac669bd26cccf801',
        'info_dict': {
            'id': 'LitrBdX64qLn',
            'ext': 'mp4',
            'title': 'Last Chance Kitchen Returns',
            'description': 'S13: Last Chance Kitchen Returns for Top Chef Season 13',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        account_pid = self._search_regex(r'"account_pid"\s*:\s*"([^"]+)"', webpage, 'account pid')
        release_pid = self._search_regex(r'"release_pid"\s*:\s*"([^"]+)"', webpage, 'release pid')
        return self.url_result(smuggle_url(
            'http://link.theplatform.com/s/%s/%s?mbr=true&switch=progressive' % (account_pid, release_pid),
            {'force_smil_url': True}), 'ThePlatform', release_pid)
@@ -11,7 +11,7 @@ from ..utils import (


class BreakIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?break\.com/video/(?:[^/]+/)*.+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.break.com/video/when-girls-act-like-guys-2468056',
        'info_dict': {
@@ -9,10 +9,10 @@ from ..compat import (
    compat_etree_fromstring,
    compat_parse_qs,
    compat_str,
-    compat_urllib_parse,
    compat_urllib_parse_urlparse,
    compat_urlparse,
    compat_xml_parse_error,
+    compat_HTTPError,
)
from ..utils import (
    determine_ext,
@@ -23,16 +23,16 @@ from ..utils import (
    js_to_json,
    int_or_none,
    parse_iso8601,
-    sanitized_Request,
    unescapeHTML,
    unsmuggle_url,
+    update_url_query,
)


class BrightcoveLegacyIE(InfoExtractor):
    IE_NAME = 'brightcove:legacy'
    _VALID_URL = r'(?:https?://.*brightcove\.com/(services|viewer).*?\?|brightcove:)(?P<query>.*)'
-    _FEDERATED_URL_TEMPLATE = 'http://c.brightcove.com/services/viewer/htmlFederated?%s'
+    _FEDERATED_URL = 'http://c.brightcove.com/services/viewer/htmlFederated'

    _TESTS = [
        {
@@ -136,13 +136,16 @@ class BrightcoveLegacyIE(InfoExtractor):
        else:
            flashvars = {}

+        data_url = object_doc.attrib.get('data', '')
+        data_url_params = compat_parse_qs(compat_urllib_parse_urlparse(data_url).query)
+
        def find_param(name):
            if name in flashvars:
                return flashvars[name]
            node = find_xpath_attr(object_doc, './param', 'name', name)
            if node is not None:
                return node.attrib['value']
-            return None
+            return data_url_params.get(name)

        params = {}

@@ -155,8 +158,8 @@ class BrightcoveLegacyIE(InfoExtractor):
        # Not all pages define this value
        if playerKey is not None:
            params['playerKey'] = playerKey
-        # The three fields hold the id of the video
-        videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID')
+        # These fields hold the id of the video
+        videoPlayer = find_param('@videoPlayer') or find_param('videoId') or find_param('videoID') or find_param('@videoList')
        if videoPlayer is not None:
            params['@videoPlayer'] = videoPlayer
        linkBase = find_param('linkBaseURL')
@@ -184,8 +187,7 @@ class BrightcoveLegacyIE(InfoExtractor):

    @classmethod
    def _make_brightcove_url(cls, params):
-        data = compat_urllib_parse.urlencode(params)
-        return cls._FEDERATED_URL_TEMPLATE % data
+        return update_url_query(cls._FEDERATED_URL, params)

    @classmethod
    def _extract_brightcove_url(cls, webpage):
@@ -239,7 +241,7 @@ class BrightcoveLegacyIE(InfoExtractor):
            # We set the original url as the default 'Referer' header
            referer = smuggled_data.get('Referer', url)
            return self._get_video_info(
-                videoPlayer[0], query_str, query, referer=referer)
+                videoPlayer[0], query, referer=referer)
        elif 'playerKey' in query:
            player_key = query['playerKey']
            return self._get_playlist_info(player_key[0])
@@ -248,15 +250,14 @@ class BrightcoveLegacyIE(InfoExtractor):
            'Cannot find playerKey= variable. Did you forget quotes in a shell invocation?',
            expected=True)

-    def _get_video_info(self, video_id, query_str, query, referer=None):
-        request_url = self._FEDERATED_URL_TEMPLATE % query_str
-        req = sanitized_Request(request_url)
+    def _get_video_info(self, video_id, query, referer=None):
+        headers = {}
        linkBase = query.get('linkBaseURL')
        if linkBase is not None:
            referer = linkBase[0]
        if referer is not None:
-            req.add_header('Referer', referer)
-        webpage = self._download_webpage(req, video_id)
+            headers['Referer'] = referer
+        webpage = self._download_webpage(self._FEDERATED_URL, video_id, headers=headers, query=query)

        error_msg = self._html_search_regex(
            r"<h1>We're sorry.</h1>([\s\n]*<p>.*?</p>)+", webpage,
@@ -296,7 +297,7 @@ class BrightcoveLegacyIE(InfoExtractor):
            'uploader': video_info.get('publisherName'),
        }

-        renditions = video_info.get('renditions')
+        renditions = video_info.get('renditions', []) + video_info.get('IOSRenditions', [])
        if renditions:
            formats = []
            for rend in renditions:
@@ -318,13 +319,23 @@ class BrightcoveLegacyIE(InfoExtractor):
                if ext is None:
                    ext = determine_ext(url)
                size = rend.get('size')
-                formats.append({
+                a_format = {
                    'url': url,
                    'ext': ext,
                    'height': rend.get('frameHeight'),
                    'width': rend.get('frameWidth'),
                    'filesize': size if size != 0 else None,
-                })
+                }
+
+                # m3u8 manifests with remote == false are media playlists
+                # Not calling _extract_m3u8_formats here to save network traffic
+                if ext == 'm3u8':
+                    a_format.update({
+                        'ext': 'mp4',
+                        'protocol': 'm3u8',
+                    })
+
+                formats.append(a_format)
            self._sort_formats(formats)
            info['formats'] = formats
        elif video_info.get('FLVFullLengthURL') is not None:
@@ -355,7 +366,7 @@ class BrightcoveLegacyIE(InfoExtractor):

class BrightcoveNewIE(InfoExtractor):
    IE_NAME = 'brightcove:new'
-    _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>(?:ref:)?\d+)'
+    _VALID_URL = r'https?://players\.brightcove\.net/(?P<account_id>\d+)/(?P<player_id>[^/]+)_(?P<embed>[^/]+)/index\.html\?.*videoId=(?P<video_id>\d+|ref:[^&]+)'
    _TESTS = [{
        'url': 'http://players.brightcove.net/929656772001/e41d32dc-ec74-459e-a845-6c69f7b724ea_default/index.html?videoId=4463358922001',
        'md5': 'c8100925723840d4b0d243f7025703be',
@@ -391,6 +402,10 @@ class BrightcoveNewIE(InfoExtractor):
        # ref: prefixed video id
        'url': 'http://players.brightcove.net/3910869709001/21519b5c-4b3b-4363-accb-bdc8f358f823_default/index.html?videoId=ref:7069442',
        'only_matching': True,
+    }, {
+        # non numeric ref: prefixed video id
+        'url': 'http://players.brightcove.net/710858724001/default_default/index.html?videoId=ref:event-stream-356',
+        'only_matching': True,
    }]

    @staticmethod
@@ -410,8 +425,8 @@ class BrightcoveNewIE(InfoExtractor):

        # Look for iframe embeds [1]
        for _, url in re.findall(
-                r'<iframe[^>]+src=(["\'])((?:https?:)//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
-            entries.append(url)
+                r'<iframe[^>]+src=(["\'])((?:https?:)?//players\.brightcove\.net/\d+/[^/]+/index\.html.+?)\1', webpage):
+            entries.append(url if url.startswith('http') else 'http:' + url)

        # Look for embed_in_page embeds [2]
        for video_id, account_id, player_id, embed in re.findall(
@@ -420,11 +435,11 @@ class BrightcoveNewIE(InfoExtractor):
            # According to [4] data-video-id may be prefixed with ref:
            r'''(?sx)
                <video[^>]+
-                    data-video-id=["\']((?:ref:)?\d+)["\'][^>]*>.*?
+                    data-video-id=["\'](\d+|ref:[^"\']+)["\'][^>]*>.*?
                </video>.*?
                <script[^>]+
                    src=["\'](?:https?:)?//players\.brightcove\.net/
-                    (\d+)/([\da-f-]+)_([^/]+)/index\.min\.js
+                    (\d+)/([\da-f-]+)_([^/]+)/index(?:\.min)?\.js
            ''', webpage):
            entries.append(
                'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
@@ -454,24 +469,33 @@ class BrightcoveNewIE(InfoExtractor):
            r'policyKey\s*:\s*(["\'])(?P<pk>.+?)\1',
            webpage, 'policy key', group='pk')

-        req = sanitized_Request(
-            'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
-            % (account_id, video_id),
-            headers={'Accept': 'application/json;pk=%s' % policy_key})
-        json_data = self._download_json(req, video_id)
+        api_url = 'https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s' % (account_id, video_id)
+        try:
+            json_data = self._download_json(api_url, video_id, headers={
+                'Accept': 'application/json;pk=%s' % policy_key
+            })
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+                json_data = self._parse_json(e.cause.read().decode(), video_id)
+                raise ExtractorError(json_data[0]['message'], expected=True)
+            raise

        title = json_data['name']

        formats = []
        for source in json_data.get('sources', []):
+            container = source.get('container')
            source_type = source.get('type')
            src = source.get('src')
-            if source_type == 'application/x-mpegURL':
+            if source_type == 'application/x-mpegURL' or container == 'M2TS':
                if not src:
                    continue
                formats.extend(self._extract_m3u8_formats(
-                    src, video_id, 'mp4', entry_protocol='m3u8_native',
-                    m3u8_id='hls', fatal=False))
+                    src, video_id, 'mp4', m3u8_id='hls', fatal=False))
+            elif source_type == 'application/dash+xml':
+                if not src:
+                    continue
+                formats.extend(self._extract_mpd_formats(src, video_id, 'dash', fatal=False))
            else:
                streaming_src = source.get('streaming_src')
                stream_name, app_name = source.get('stream_name'), source.get('app_name')
@@ -479,15 +503,23 @@ class BrightcoveNewIE(InfoExtractor):
                    continue
                tbr = float_or_none(source.get('avg_bitrate'), 1000)
                height = int_or_none(source.get('height'))
+                width = int_or_none(source.get('width'))
                f = {
                    'tbr': tbr,
-                    'width': int_or_none(source.get('width')),
-                    'height': height,
                    'filesize': int_or_none(source.get('size')),
-                    'container': source.get('container'),
-                    'vcodec': source.get('codec'),
-                    'ext': source.get('container').lower(),
+                    'container': container,
+                    'ext': container.lower(),
                }
+                if width == 0 and height == 0:
+                    f.update({
+                        'vcodec': 'none',
+                    })
+                else:
+                    f.update({
+                        'width': width,
+                        'height': height,
+                        'vcodec': source.get('codec'),
+                    })

                def build_format_id(kind):
                    format_id = kind
@ -4,12 +4,13 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import js_to_json
|
||||
|
||||
|
||||
class C56IE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:(?:www|player)\.)?56\.com/(?:.+?/)?(?:v_|(?:play_album.+-))(?P<textid>.+?)\.(?:html|swf)'
|
||||
IE_NAME = '56.com'
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
|
||||
'md5': 'e59995ac63d0457783ea05f93f12a866',
|
||||
'info_dict': {
|
||||
@ -18,12 +19,29 @@ class C56IE(InfoExtractor):
|
||||
'title': '网事知多少 第32期:车怒',
|
||||
'duration': 283.813,
|
||||
},
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.56.com/u47/v_MTM5NjQ5ODc2.html',
|
||||
'md5': '',
|
||||
'info_dict': {
|
||||
'id': '82247482',
|
||||
'title': '爱的诅咒之杜鹃花开',
|
||||
},
|
||||
'playlist_count': 7,
|
||||
'add_ie': ['Sohu'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
|
||||
text_id = mobj.group('textid')
|
||||
|
||||
webpage = self._download_webpage(url, text_id)
|
||||
sohu_video_info_str = self._search_regex(
|
||||
r'var\s+sohuVideoInfo\s*=\s*({[^}]+});', webpage, 'Sohu video info', default=None)
|
||||
if sohu_video_info_str:
|
||||
sohu_video_info = self._parse_json(
|
||||
sohu_video_info_str, text_id, transform_source=js_to_json)
|
||||
return self.url_result(sohu_video_info['url'], 'Sohu')
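The new 56.com branch spots a page-embedded `sohuVideoInfo` JavaScript object, converts it to strict JSON via `js_to_json`, and delegates the URL to the Sohu extractor. A hedged sketch of the JS-object-to-JSON step (the page fragment and the crude converter are illustrative; the real `js_to_json` is considerably more robust):

```python
import json
import re

# Hypothetical page fragment; the real variable layout may differ.
webpage = "var sohuVideoInfo = {url: 'http://tv.sohu.com/some-video', vid: 82247482};"

m = re.search(r'var\s+sohuVideoInfo\s*=\s*({[^}]+});', webpage)
js_obj = m.group(1)

# Quote bare keys after '{' or ',' and switch single to double quotes.
as_json = re.sub(r'([{,]\s*)(\w+)\s*:', r'\1"\2":', js_obj).replace("'", '"')
info = json.loads(as_json)
print(info['url'])  # http://tv.sohu.com/some-video
```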

page = self._download_json(
'http://vxml.56.com/json/%s/' % text_id, text_id, 'Downloading video info')

@@ -6,7 +6,7 @@ import re

from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -16,7 +16,7 @@ from ..utils import (


class CamdemyIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?camdemy\.com/media/(?P<id>\d+)'
_VALID_URL = r'https?://(?:www\.)?camdemy\.com/media/(?P<id>\d+)'
_TESTS = [{
# single file
'url': 'http://www.camdemy.com/media/5181/',
@@ -104,7 +104,7 @@ class CamdemyIE(InfoExtractor):


class CamdemyFolderIE(InfoExtractor):
_VALID_URL = r'http://www.camdemy.com/folder/(?P<id>\d+)'
_VALID_URL = r'https?://www.camdemy.com/folder/(?P<id>\d+)'
_TESTS = [{
# links with trailing slash
'url': 'http://www.camdemy.com/folder/450',
@@ -139,7 +139,7 @@ class CamdemyFolderIE(InfoExtractor):
parsed_url = list(compat_urlparse.urlparse(url))
query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
query.update({'displayMode': 'list'})
parsed_url[4] = compat_urllib_parse.urlencode(query)
parsed_url[4] = compat_urllib_parse_urlencode(query)
final_url = compat_urlparse.urlunparse(parsed_url)

page = self._download_webpage(final_url, folder_id)
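The Camdemy folder hunk rewrites one query parameter while leaving the rest of the URL intact: parse the URL into its six components, rebuild the query dict, re-encode, and unparse. The same round trip in modern standard-library terms (a sketch, not the compat shims themselves):

```python
from urllib.parse import parse_qsl, urlencode, urlparse, urlunparse

url = 'http://www.camdemy.com/folder/450?page=2'
parts = list(urlparse(url))
query = dict(parse_qsl(parts[4]))      # {'page': '2'}
query.update({'displayMode': 'list'})  # force the list view, as above
parts[4] = urlencode(query)
print(urlunparse(parts))
# http://www.camdemy.com/folder/450?page=2&displayMode=list
```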

@@ -6,7 +6,7 @@ from ..utils import float_or_none

class CanvasIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?canvas\.be/video/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TEST = {
_TESTS = [{
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
'md5': 'ea838375a547ac787d4064d8c7860a6c',
'info_dict': {
@@ -18,7 +18,27 @@ class CanvasIE(InfoExtractor):
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 49.02,
}
}
}, {
# with subtitles
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
'info_dict': {
'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
'display_id': 'pieter-0167',
'ext': 'mp4',
'title': 'Pieter 0167',
'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 2553.08,
'subtitles': {
'nl': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True,
}
}]

def _real_extract(self, url):
display_id = self._match_id(url)
@@ -54,6 +74,14 @@ class CanvasIE(InfoExtractor):
})
self._sort_formats(formats)

subtitles = {}
subtitle_urls = data.get('subtitleUrls')
if isinstance(subtitle_urls, list):
for subtitle in subtitle_urls:
subtitle_url = subtitle.get('url')
if subtitle_url and subtitle.get('type') == 'CLOSED':
subtitles.setdefault('nl', []).append({'url': subtitle_url})
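The subtitles value assembled here follows the info-dict convention documented later in this diff: a dict mapping language codes to lists of subtitle entries. A compact sketch of the accumulation pattern with made-up data:

```python
subtitle_urls = [
    {'url': 'https://example.invalid/subs/nl.vtt', 'type': 'CLOSED'},
    {'url': 'https://example.invalid/subs/open.vtt', 'type': 'OPEN'},  # skipped
]

subtitles = {}
for subtitle in subtitle_urls:
    url = subtitle.get('url')
    # Only closed captions become selectable subtitle tracks.
    if url and subtitle.get('type') == 'CLOSED':
        subtitles.setdefault('nl', []).append({'url': url})

print(subtitles)  # {'nl': [{'url': 'https://example.invalid/subs/nl.vtt'}]}
```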

return {
'id': video_id,
'display_id': display_id,
@@ -62,4 +90,5 @@ class CanvasIE(InfoExtractor):
'formats': formats,
'duration': float_or_none(data.get('duration'), 1000),
'thumbnail': data.get('posterImageUrl'),
'subtitles': subtitles,
}

@@ -3,12 +3,15 @@ from __future__ import unicode_literals

from .common import InfoExtractor
from .theplatform import ThePlatformIE
from ..utils import parse_duration
from ..utils import (
parse_duration,
find_xpath_attr,
)


class CBSNewsIE(ThePlatformIE):
IE_DESC = 'CBS News'
_VALID_URL = r'http://(?:www\.)?cbsnews\.com/(?:news|videos)/(?P<id>[\da-z_-]+)'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/(?:news|videos)/(?P<id>[\da-z_-]+)'

_TESTS = [
{
@@ -46,6 +49,15 @@ class CBSNewsIE(ThePlatformIE):
},
]

def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
closed_caption_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', 'ClosedCaptionURL')
return {
'en': [{
'ext': 'ttml',
'url': closed_caption_e.attrib['value'],
}]
} if closed_caption_e is not None and closed_caption_e.attrib.get('value') else []

def _real_extract(self, url):
video_id = self._match_id(url)

@@ -61,18 +73,12 @@ class CBSNewsIE(ThePlatformIE):
thumbnail = item.get('mediaImage') or item.get('thumbnail')

subtitles = {}
if 'mpxRefId' in video_info:
subtitles['en'] = [{
'ext': 'ttml',
'url': 'http://www.cbsnews.com/videos/captions/%s.adb_xml' % video_info['mpxRefId'],
}]

formats = []
for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
pid = item.get('media' + format_id)
if not pid:
continue
release_url = 'http://link.theplatform.com/s/dJ5BDC/%s?format=SMIL&mbr=true' % pid
release_url = 'http://link.theplatform.com/s/dJ5BDC/%s?mbr=true' % pid
tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % pid)
formats.extend(tp_formats)
subtitles = self._merge_subtitles(subtitles, tp_subtitles)
@@ -90,7 +96,7 @@ class CBSNewsIE(ThePlatformIE):

class CBSNewsLiveVideoIE(InfoExtractor):
IE_DESC = 'CBS News Live Videos'
_VALID_URL = r'http://(?:www\.)?cbsnews\.com/live/video/(?P<id>[\da-z_-]+)'
_VALID_URL = r'https?://(?:www\.)?cbsnews\.com/live/video/(?P<id>[\da-z_-]+)'

_TEST = {
'url': 'http://www.cbsnews.com/live/video/clinton-sanders-prepare-to-face-off-in-nh/',
@@ -116,6 +122,7 @@ class CBSNewsLiveVideoIE(InfoExtractor):
for entry in f4m_formats:
# URLs without the extra param induce a 404 error
entry.update({'extra_param_to_segment_url': hdcore_sign})
self._sort_formats(f4m_formats)

return {
'id': video_id,

@@ -6,7 +6,7 @@ from .common import InfoExtractor


class CBSSportsIE(InfoExtractor):
_VALID_URL = r'http://www\.cbssports\.com/video/player/(?P<section>[^/]+)/(?P<id>[^/]+)'
_VALID_URL = r'https?://www\.cbssports\.com/video/player/(?P<section>[^/]+)/(?P<id>[^/]+)'

_TEST = {
'url': 'http://www.cbssports.com/video/player/tennis/318462531970/0/us-open-flashbacks-1990s',

@@ -45,7 +45,7 @@ class CCCIE(InfoExtractor):
title = self._html_search_regex(
r'(?s)<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
r"(?s)<h3>About</h3>(.+?)<h3>",
r'(?s)<h3>About</h3>(.+?)<h3>',
webpage, 'description', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r"(?s)<span[^>]+class='[^']*fa-calendar-o'[^>]*>(.+?)</span>",

youtube_dl/extractor/cda.py (new executable file, 96 lines)
@@ -0,0 +1,96 @@
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
decode_packed_codes,
ExtractorError,
parse_duration
)


class CDAIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
_TESTS = [{
'url': 'http://www.cda.pl/video/5749950c',
'md5': '6f844bf51b15f31fae165365707ae970',
'info_dict': {
'id': '5749950c',
'ext': 'mp4',
'height': 720,
'title': 'Oto dlaczego przed zakrętem należy zwolnić.',
'duration': 39
}
}, {
'url': 'http://www.cda.pl/video/57413289',
'md5': 'a88828770a8310fc00be6c95faf7f4d5',
'info_dict': {
'id': '57413289',
'ext': 'mp4',
'title': 'Lądowanie na lotnisku na Maderze',
'duration': 137
}
}, {
'url': 'http://ebd.cda.pl/0x0/5749950c',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage('http://ebd.cda.pl/0x0/' + video_id, video_id)

if 'Ten film jest dostępny dla użytkowników premium' in webpage:
raise ExtractorError('This video is only available for premium users.', expected=True)

title = self._html_search_regex(r'<title>(.+?)</title>', webpage, 'title')

formats = []

info_dict = {
'id': video_id,
'title': title,
'formats': formats,
'duration': None,
}

def extract_format(page, version):
unpacked = decode_packed_codes(page)
format_url = self._search_regex(
r"url:\\'(.+?)\\'", unpacked, '%s url' % version, fatal=False)
if not format_url:
return
f = {
'url': format_url,
}
m = re.search(
r'<a[^>]+data-quality="(?P<format_id>[^"]+)"[^>]+href="[^"]+"[^>]+class="[^"]*quality-btn-active[^"]*">(?P<height>[0-9]+)p',
page)
if m:
f.update({
'format_id': m.group('format_id'),
'height': int(m.group('height')),
})
info_dict['formats'].append(f)
if not info_dict['duration']:
info_dict['duration'] = parse_duration(self._search_regex(
r"duration:\\'(.+?)\\'", unpacked, 'duration', fatal=False))

extract_format(webpage, 'default')

for href, resolution in re.findall(
r'<a[^>]+data-quality="[^"]+"[^>]+href="([^"]+)"[^>]+class="quality-btn"[^>]*>([0-9]+p)',
webpage):
webpage = self._download_webpage(
href, video_id, 'Downloading %s version information' % resolution, fatal=False)
if not webpage:
# Manually report warning because empty page is returned when
# invalid version is requested.
self.report_warning('Unable to download %s version information' % resolution)
continue
extract_format(webpage, resolution)

self._sort_formats(formats)

return info_dict
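One detail worth calling out in the new CDA extractor: the inner `extract_format` closure mutates a shared `info_dict`, and `formats` is the very list object stored under `info_dict['formats']`, so appends through either name are visible to both, and the final `_sort_formats(formats)` sorts the list the returned dict already holds. A hedged sketch of that aliasing:

```python
formats = []
info_dict = {'formats': formats}

def extract_format(url):
    # Appending via info_dict mutates the same list bound to `formats`.
    info_dict['formats'].append({'url': url})

extract_format('https://example.invalid/video-720p.mp4')
formats.sort(key=lambda f: f['url'])  # stand-in for _sort_formats
print(info_dict['formats'])           # the appended entry is visible here too
```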
@@ -5,7 +5,6 @@ import re

from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_urlparse,
)
@@ -13,6 +12,7 @@ from ..utils import (
ExtractorError,
float_or_none,
sanitized_Request,
urlencode_postdata,
)


@@ -102,7 +102,7 @@ class CeskaTelevizeIE(InfoExtractor):

req = sanitized_Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
data=compat_urllib_parse.urlencode(data))
data=urlencode_postdata(data))

req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
@@ -129,7 +129,8 @@ class CeskaTelevizeIE(InfoExtractor):
formats = []
for format_id, stream_url in item['streamUrls'].items():
formats.extend(self._extract_m3u8_formats(
stream_url, playlist_id, 'mp4', entry_protocol='m3u8_native'))
stream_url, playlist_id, 'mp4',
entry_protocol='m3u8_native', fatal=False))
self._sort_formats(formats)

item_id = item.get('id') or item['assetId']
@@ -177,16 +178,16 @@ class CeskaTelevizeIE(InfoExtractor):
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)

def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
yield "{0} --> {1}".format(start, stop)
yield '{0} --> {1}'.format(start, stop)
else:
yield line

return "\r\n".join(_fix_subtitle(subtitles))
return '\r\n'.join(_fix_subtitle(subtitles))
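The timecode helper in this hunk peels off milliseconds, seconds, minutes and hours with successive modulo/floor-divide passes (the final 100 caps hours at two digits), then formats them back-to-front into an SRT timestamp. A worked restatement of the same routine:

```python
def msec_to_timecode(msec):
    # components collects [ms, s, min, h] in that order.
    components = []
    for divider in [1000, 60, 60, 100]:
        components.append(msec % divider)
        msec //= divider
    return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)

print(msec_to_timecode(3661123))  # 01:01:01,123
```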

@@ -48,6 +48,7 @@ class ChaturbateIE(InfoExtractor):
raise ExtractorError('Unable to find stream URL')

formats = self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4')
self._sort_formats(formats)

return {
'id': video_id,

@@ -21,6 +21,10 @@ class CinemassacreIE(InfoExtractor):
'title': '“Angry Video Game Nerd: The Movie” – Trailer',
'description': 'md5:fb87405fcb42a331742a0dce2708560b',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
@@ -31,14 +35,18 @@ class CinemassacreIE(InfoExtractor):
'upload_date': '20131002',
'title': 'The Mummy’s Hand (1940)',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
{
# Youtube embedded video
'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/',
'md5': 'df4cf8a1dcedaec79a73d96d83b99023',
'md5': 'ec9838a5520ef5409b3e4e42fcb0a3b9',
'info_dict': {
'id': 'OEVzPCY2T-g',
'ext': 'mp4',
'ext': 'webm',
'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles',
'upload_date': '20061207',
'uploader': 'Cinemassacre',
@@ -49,12 +57,12 @@ class CinemassacreIE(InfoExtractor):
{
# Youtube embedded video
'url': 'http://cinemassacre.com/2006/09/01/mckids/',
'md5': '6eb30961fa795fedc750eac4881ad2e1',
'md5': '7393c4e0f54602ad110c793eb7a6513a',
'info_dict': {
'id': 'FnxsNhuikpo',
'ext': 'mp4',
'ext': 'webm',
'upload_date': '20060901',
'uploader': 'Cinemassacre Extras',
'uploader': 'Cinemassacre Extra',
'description': 'md5:de9b751efa9e45fbaafd9c8a1123ed53',
'uploader_id': 'Cinemassacre',
'title': 'AVGN: McKids',
@@ -69,7 +77,11 @@ class CinemassacreIE(InfoExtractor):
'description': 'Let’s Play Mario Kart 64 !! Mario Kart 64 is a classic go-kart racing game released for the Nintendo 64 (N64). Today James & Mike do 4 player Battle Mode with Kyle and Bootsy!',
'title': 'Mario Kart 64 (Nintendo 64) James & Mike Mondays',
'upload_date': '20150525',
}
},
'params': {
# m3u8 download
'skip_download': True,
},
}
]


@@ -19,7 +19,7 @@ def _decode(s):
class CliphunterIE(InfoExtractor):
IE_NAME = 'cliphunter'

_VALID_URL = r'''(?x)http://(?:www\.)?cliphunter\.com/w/
_VALID_URL = r'''(?x)https?://(?:www\.)?cliphunter\.com/w/
(?P<id>[0-9]+)/
(?P<seo>.+?)(?:$|[#\?])
'''

@@ -8,7 +8,7 @@ from ..utils import (


class ClipsyndicateIE(InfoExtractor):
_VALID_URL = r'http://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'
_VALID_URL = r'https?://(?:chic|www)\.clipsyndicate\.com/video/play(list/\d+)?/(?P<id>\d+)'

_TESTS = [{
'url': 'http://www.clipsyndicate.com/video/play/4629301/brick_briscoe',

@@ -6,7 +6,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_HTTPError,
)
from ..utils import (
@@ -64,7 +64,7 @@ class CloudyIE(InfoExtractor):
'errorUrl': error_url,
})

data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
data_url = self._API_URL % (video_host, compat_urllib_parse_urlencode(form))
player_data = self._download_webpage(
data_url, video_id, 'Downloading player data')
data = compat_parse_qs(player_data)

@@ -12,7 +12,7 @@ from ..utils import (


class ClubicIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'
_VALID_URL = r'https?://(?:www\.)?clubic\.com/video/(?:[^/]+/)*video.*-(?P<id>[0-9]+)\.html'

_TESTS = [{
'url': 'http://www.clubic.com/video/clubic-week/video-clubic-week-2-0-le-fbi-se-lance-dans-la-photo-d-identite-448474.html',

@@ -51,9 +51,7 @@ class CNETIE(ThePlatformIE):
uploader = None
uploader_id = None

mpx_account = data['config']['uvpConfig']['default']['mpx_account']

metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id)
metadata = self.get_metadata('kYEXFC/%s' % list(vdata['files'].values())[0], video_id)
description = vdata.get('description') or metadata.get('description')
duration = int_or_none(vdata.get('duration')) or metadata.get('duration')

@@ -62,7 +60,7 @@ class CNETIE(ThePlatformIE):
for (fkey, vid) in vdata['files'].items():
if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']:
continue
release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid)
release_url = 'http://link.theplatform.com/s/kYEXFC/%s?mbr=true' % vid
if fkey == 'hds':
release_url += '&manifest=f4m'
tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey)

@@ -26,14 +26,14 @@ class CNNIE(InfoExtractor):
'upload_date': '20130609',
},
}, {
"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
"info_dict": {
'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
'info_dict': {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
"title": "Student's epic speech stuns new freshmen",
"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
"upload_date": "20130821",
'title': "Student's epic speech stuns new freshmen",
'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
'upload_date': '20130821',
}
}, {
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',

@@ -46,9 +46,9 @@ class CollegeRamaIE(InfoExtractor):
video_id = self._match_id(url)

player_options_request = {
"getPlayerOptionsRequest": {
"ResourceId": video_id,
"QueryString": "",
'getPlayerOptionsRequest': {
'ResourceId': video_id,
'QueryString': '',
}
}


@@ -11,7 +11,7 @@ from ..utils import (


class ComCarCoffIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
_VALID_URL = r'https?://(?:www\.)?comediansincarsgettingcoffee\.com/(?P<id>[a-z0-9\-]*)'
_TESTS = [{
'url': 'http://comediansincarsgettingcoffee.com/miranda-sings-happy-thanksgiving-miranda/',
'info_dict': {

@@ -5,7 +5,7 @@ import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -16,11 +16,11 @@ from ..utils import (

class ComedyCentralIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
(video-clips|episodes|cc-studios|video-collections|full-episodes)
(video-clips|episodes|cc-studios|video-collections|full-episodes|shows)
/(?P<title>.*)'''
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'

_TEST = {
_TESTS = [{
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
'info_dict': {
@@ -29,7 +29,10 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
},
}
}, {
'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/interviews/6yx39d/exclusive-rand-paul-extended-interview',
'only_matching': True,
}]


class ComedyCentralShowsIE(MTVServicesInfoExtractor):
@@ -192,13 +195,13 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]

uri = mMovieParams[0][1]
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)

index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse_urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')

@@ -15,14 +15,16 @@ import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_urllib_error,
compat_urllib_parse,
compat_urlparse,
compat_os_name,
compat_str,
compat_etree_fromstring,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
@@ -46,6 +48,8 @@ from ..utils import (
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_url_query,
)


@@ -103,7 +107,7 @@ class InfoExtractor(object):
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", or "m3u8_native".
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
@@ -156,12 +160,14 @@ class InfoExtractor(object):
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The main artist who created the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
@@ -341,7 +347,7 @@ class InfoExtractor(object):
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])

def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True):
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers=None, query=None):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
@@ -350,6 +356,12 @@ class InfoExtractor(object):
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# data, headers and query params will be ignored for `Request` objects
if isinstance(url_or_request, compat_str):
if query:
url_or_request = update_url_query(url_or_request, query)
if data or headers:
url_or_request = sanitized_Request(url_or_request, data, headers or {})
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
@@ -365,13 +377,13 @@ class InfoExtractor(object):
self._downloader.report_warning(errmsg)
return False
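With `data`, `headers` and `query` now threaded through `_request_webpage` (and, in the hunks that follow, through the webpage/XML/JSON download helpers), call sites no longer need to hand-build request objects. A hedged usage sketch; the endpoint and header values are invented, only the keyword arguments are from this diff:

```python
# Inside some extractor's _real_extract (illustrative only):
json_data = self._download_json(
    'http://api.example.invalid/v1/video', video_id,
    # Appended to the URL as ?id=...&format=json before the request is made.
    query={'id': video_id, 'format': 'json'},
    # A data payload turns the plain URL into a POST with these headers.
    data=b'payload', headers={'X-Requested-With': 'XMLHttpRequest'})
```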

def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None):
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers=None, query=None):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]

urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal)
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
@@ -424,7 +436,7 @@ class InfoExtractor(object):
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if os.name == 'nt':
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
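On the MAX_PATH workaround referenced here: classic Win32 file APIs limit paths to 260 characters, but an absolute path prefixed with `\\?\` bypasses that limit. A small standalone sketch of the same guard:

```python
import os

def windows_long_path(filename):
    # On Windows, prefix over-long absolute paths with \\?\ so the
    # Win32 file APIs accept them (MAX_PATH is 260 including the NUL).
    if os.name == 'nt':
        abspath = os.path.abspath(filename)
        if len(abspath) > 259:
            return '\\\\?\\' + abspath
    return filename
```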
@@ -458,13 +470,13 @@ class InfoExtractor(object):

return content

def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers=None, query=None):
""" Returns the data of the page as a string """
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding)
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
@@ -479,10 +491,10 @@ class InfoExtractor(object):

def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None):
transform_source=None, fatal=True, encoding=None, data=None, headers=None, query=None):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding)
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
@@ -493,10 +505,10 @@ class InfoExtractor(object):
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None):
fatal=True, encoding=None, data=None, headers=None, query=None):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding)
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
@@ -593,7 +605,7 @@ class InfoExtractor(object):
if mobj:
break

if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
@@ -636,7 +648,7 @@ class InfoExtractor(object):
downloader_params = self._downloader.params

# Attempt to use provided username and password or .netrc data
if downloader_params.get('username', None) is not None:
if downloader_params.get('username') is not None:
username = downloader_params['username']
password = downloader_params['password']
elif downloader_params.get('usenetrc', False):
@@ -663,7 +675,7 @@ class InfoExtractor(object):
return None
downloader_params = self._downloader.params

if downloader_params.get('twofactor', None) is not None:
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']

return compat_getpass('Type %s and press [Return]: ' % note)
@@ -744,7 +756,7 @@ class InfoExtractor(object):
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower(), None)
return RATING_TABLE.get(rating.lower())

def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
@@ -759,7 +771,7 @@ class InfoExtractor(object):
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower(), None)
return RATING_TABLE.get(family_friendly.lower())

def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
@@ -851,6 +863,7 @@ class InfoExtractor(object):
proto_preference = 0 if determine_protocol(f) in ['http', 'https'] else -0.1

if f.get('vcodec') == 'none':  # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
@@ -861,6 +874,8 @@ class InfoExtractor(object):
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none':  # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
@@ -899,6 +914,16 @@ class InfoExtractor(object):
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)

@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats

def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
@@ -952,12 +977,24 @@ class InfoExtractor(object):
if manifest is False:
return []

return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)

def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True):
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
@@ -977,7 +1014,8 @@ class InfoExtractor(object):
# bitrate in f4m downloader
if determine_ext(manifest_url) == 'f4m':
formats.extend(self._extract_f4m_formats(
manifest_url, video_id, preference, f4m_id, fatal=fatal))
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal))
continue
tbr = int_or_none(media_el.attrib.get('bitrate'))
formats.append({
@@ -989,8 +1027,6 @@ class InfoExtractor(object):
'height': int_or_none(media_el.attrib.get('height')),
'preference': preference,
})
self._sort_formats(formats)

return formats

def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
@@ -1022,11 +1058,21 @@ class InfoExtractor(object):
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
# A Media Playlist Tag MUST NOT appear in a Master Playlist
# https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlists
# https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
if '#EXT-X-TARGETDURATION' in m3u8_doc:

# We should try extracting formats only from master playlists [1], i.e.
# playlists that describe available qualities. On the other hand media
# playlists [2] should be returned as is since they contain just the media
# without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 2] master
# playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
# and MUST NOT appear in master playlist thus we can clearly detect media
# playlist with this criterion.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
if '#EXT-X-TARGETDURATION' in m3u8_doc:  # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
@@ -1073,25 +1119,34 @@ class InfoExtractor(object):
'protocol': entry_protocol,
'preference': preference,
}
codecs = last_info.get('CODECS')
if codecs:
# TODO: looks like video codec is not always necessarily goes first
va_codecs = codecs.split(',')
if va_codecs[0]:
f['vcodec'] = va_codecs[0]
if len(va_codecs) > 1 and va_codecs[1]:
f['acodec'] = va_codecs[1]
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
codecs = last_info.get('CODECS')
if codecs:
vcodec, acodec = [None] * 2
va_codecs = codecs.split(',')
if len(va_codecs) == 1:
# Audio only entries usually come with single codec and
# no resolution. For more robustness we also check it to
# be mp4 audio.
if not resolution and va_codecs[0].startswith('mp4a'):
vcodec, acodec = 'none', va_codecs[0]
else:
vcodec = va_codecs[0]
else:
vcodec, acodec = va_codecs[:2]
f.update({
'acodec': acodec,
'vcodec': vcodec,
})
if last_media is not None:
f['m3u8_media'] = last_media
last_media = None
formats.append(f)
last_info = {}
self._sort_formats(formats)
return formats

@staticmethod
@@ -1106,8 +1161,8 @@ class InfoExtractor(object):
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)

def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

if smil is False:
assert not fatal
@@ -1124,10 +1179,10 @@ class InfoExtractor(object):
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)

def _download_smil(self, smil_url, video_id, fatal=True):
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal)
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)

def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
@@ -1248,7 +1303,7 @@ class InfoExtractor(object):
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse.urlencode(f4m_params)
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue

@@ -1265,8 +1320,6 @@ class InfoExtractor(object):
})
continue

self._sort_formats(formats)

return formats

def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
@@ -1277,16 +1330,7 @@ class InfoExtractor(object):
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or determine_ext(src)
if not ext:
type_ = textstream.get('type')
SUBTITLES_TYPES = {
'text/vtt': 'vtt',
'text/srt': 'srt',
'application/smptett+xml': 'tt',
}
if type_ in SUBTITLES_TYPES:
ext = SUBTITLES_TYPES[type_]
ext = textstream.get('ext') or determine_ext(src) or mimetype2ext(textstream.get('type'))
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
@@ -1422,8 +1466,9 @@ class InfoExtractor(object):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
mime_type = representation_attrib.get('mimeType')
content_type = mime_type.split('/')[0] if mime_type else representation_attrib.get('contentType')
# According to page 41 of ISO/IEC 29001-1:2014, @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
@@ -1446,6 +1491,7 @@ class InfoExtractor(object):
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
@@ -1491,13 +1537,12 @@ class InfoExtractor(object):
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
self._sort_formats(formats)
return formats

def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime("%Y-%m-%d %H:%M")
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str

def _int(self, v, name, fatal=False, **kwargs):
@@ -1570,7 +1615,7 @@ class InfoExtractor(object):
return {}

def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
raise NotImplementedError('This method must be implemented by subclasses')

@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
@@ -1596,7 +1641,16 @@ class InfoExtractor(object):
return {}

def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError("This method must be implemented by subclasses")
raise NotImplementedError('This method must be implemented by subclasses')

def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)

def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')


class SearchInfoExtractor(InfoExtractor):
@@ -1636,7 +1690,7 @@ class SearchInfoExtractor(InfoExtractor):

def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError("This method must be implemented by subclasses")
raise NotImplementedError('This method must be implemented by subclasses')

@property
def SEARCH_KEY(self):

youtube_dl/extractor/commonprotocols.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from __future__ import unicode_literals

import os

from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import url_basename


class RtmpIE(InfoExtractor):
IE_DESC = False  # Do not list
_VALID_URL = r'(?i)rtmp[est]?://.+'

_TESTS = [{
'url': 'rtmp://cp44293.edgefcs.net/ondemand?auth=daEcTdydfdqcsb8cZcDbAaCbhamacbbawaS-bw7dBb-bWG-GqpGFqCpNCnGoyL&aifp=v001&slist=public/unsecure/audio/2c97899446428e4301471a8cb72b4b97--audio--pmg-20110908-0900a_flv_aac_med_int.mp4',
'only_matching': True,
}, {
'url': 'rtmp://edge.live.hitbox.tv/live/dimak',
'only_matching': True,
}]

def _real_extract(self, url):
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
title = compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
return {
'id': video_id,
'title': title,
'formats': [{
'url': url,
'ext': 'flv',
'format_id': compat_urlparse.urlparse(url).scheme,
}],
}
@@ -5,7 +5,7 @@ import re

from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
@@ -45,7 +45,7 @@ class CondeNastIE(InfoExtractor):
'wmagazine': 'W Magazine',
}

_VALID_URL = r'http://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
_VALID_URL = r'https?://(?:video|www|player)\.(?P<site>%s)\.com/(?P<type>watch|series|video|embed(?:js)?)/(?P<id>[^/?#]+)' % '|'.join(_SITES.keys())
IE_DESC = 'Condé Nast media group: %s' % ', '.join(sorted(_SITES.values()))

EMBED_URL = r'(?:https?:)?//player\.(?P<site>%s)\.com/(?P<type>embed(?:js)?)/.+?' % '|'.join(_SITES.keys())
@@ -97,7 +97,7 @@ class CondeNastIE(InfoExtractor):
video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
data = compat_urllib_parse.urlencode({'videoId': video_id,
data = compat_urllib_parse_urlencode({'videoId': video_id,
'playerId': player_id,
'target': target,
})

@@ -11,8 +11,8 @@ from math import pow, sqrt, floor
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
@@ -54,7 +54,7 @@ class CrunchyrollBaseIE(InfoExtractor):
def _real_initialize(self):
self._login()

def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None):
def _download_webpage(self, url_or_request, *args, **kwargs):
request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
else sanitized_Request(url_or_request))
# Accept-Language must be set explicitly to accept any language to avoid issues
@@ -65,8 +65,7 @@ class CrunchyrollBaseIE(InfoExtractor):
# Crunchyroll to not work in georestriction cases in some browsers that don't place
# the locale lang first in header. However allowing any language seems to workaround the issue.
request.add_header('Accept-Language', '*')
return super(CrunchyrollBaseIE, self)._download_webpage(
request, video_id, note, errnote, fatal, tries, timeout, encoding)
return super(CrunchyrollBaseIE, self)._download_webpage(request, *args, **kwargs)

@staticmethod
def _add_skip_wall(url):
@@ -79,7 +78,7 @@ class CrunchyrollBaseIE(InfoExtractor):
# See https://github.com/rg3/youtube-dl/issues/7202.
qs['skip_wall'] = ['1']
return compat_urlparse.urlunparse(
parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))


class CrunchyrollIE(CrunchyrollBaseIE):
@@ -180,40 +179,40 @@ class CrunchyrollIE(CrunchyrollBaseIE):
return assvalue

output = '[Script Info]\n'
output += 'Title: %s\n' % sub_root.attrib["title"]
output += 'Title: %s\n' % sub_root.attrib['title']
output += 'ScriptType: v4.00+\n'
output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
output += """ScaledBorderAndShadow: yes

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
output += 'Style: ' + style.attrib["name"]
output += ',' + style.attrib["font_name"]
output += ',' + style.attrib["font_size"]
output += ',' + style.attrib["primary_colour"]
output += ',' + style.attrib["secondary_colour"]
output += ',' + style.attrib["outline_colour"]
output += ',' + style.attrib["back_colour"]
output += ',' + ass_bool(style.attrib["bold"])
output += ',' + ass_bool(style.attrib["italic"])
output += ',' + ass_bool(style.attrib["underline"])
output += ',' + ass_bool(style.attrib["strikeout"])
output += ',' + style.attrib["scale_x"]
output += ',' + style.attrib["scale_y"]
output += ',' + style.attrib["spacing"]
output += ',' + style.attrib["angle"]
output += ',' + style.attrib["border_style"]
output += ',' + style.attrib["outline"]
output += ',' + style.attrib["shadow"]
output += ',' + style.attrib["alignment"]
output += ',' + style.attrib["margin_l"]
output += ',' + style.attrib["margin_r"]
output += ',' + style.attrib["margin_v"]
output += ',' + style.attrib["encoding"]
output += 'Style: ' + style.attrib['name']
output += ',' + style.attrib['font_name']
output += ',' + style.attrib['font_size']
output += ',' + style.attrib['primary_colour']
output += ',' + style.attrib['secondary_colour']
output += ',' + style.attrib['outline_colour']
output += ',' + style.attrib['back_colour']
output += ',' + ass_bool(style.attrib['bold'])
output += ',' + ass_bool(style.attrib['italic'])
output += ',' + ass_bool(style.attrib['underline'])
output += ',' + ass_bool(style.attrib['strikeout'])
output += ',' + style.attrib['scale_x']
output += ',' + style.attrib['scale_y']
output += ',' + style.attrib['spacing']
output += ',' + style.attrib['angle']
output += ',' + style.attrib['border_style']
output += ',' + style.attrib['outline']
output += ',' + style.attrib['shadow']
output += ',' + style.attrib['alignment']
output += ',' + style.attrib['margin_l']
output += ',' + style.attrib['margin_r']
output += ',' + style.attrib['margin_v']
output += ',' + style.attrib['encoding']
output += '\n'

output += """
@@ -222,15 +221,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
output += ',' + event.attrib["start"]
output += ',' + event.attrib["end"]
output += ',' + event.attrib["style"]
output += ',' + event.attrib["name"]
output += ',' + event.attrib["margin_l"]
output += ',' + event.attrib["margin_r"]
output += ',' + event.attrib["margin_v"]
output += ',' + event.attrib["effect"]
output += ',' + event.attrib["text"]
output += ',' + event.attrib['start']
output += ',' + event.attrib['end']
output += ',' + event.attrib['style']
output += ',' + event.attrib['name']
output += ',' + event.attrib['margin_l']
output += ',' + event.attrib['margin_r']
output += ',' + event.attrib['margin_v']
output += ',' + event.attrib['effect']
output += ',' + event.attrib['text']
output += '\n'

return output
@@ -309,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text

playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
playerdata_req = sanitized_Request(playerdata_url)
playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
playerdata_req.data = urlencode_postdata({'current_page': webpage_url})
playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')

@@ -323,7 +322,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
streamdata_req = sanitized_Request(
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
% (stream_id, stream_format, stream_quality),
compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
streamdata = self._download_xml(
streamdata_req, video_id,
@@ -376,7 +375,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text


class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
IE_NAME = "crunchyroll:playlist"
IE_NAME = 'crunchyroll:playlist'
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'

_TESTS = [{

@@ -15,7 +15,7 @@ from .senateisvp import SenateISVPIE


class CSpanIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)'
_VALID_URL = r'https?://(?:www\.)?c-span\.org/video/\?(?P<id>[0-9a-f]+)'
IE_DESC = 'C-SPAN'
_TESTS = [{
'url': 'http://www.c-span.org/video/?313572-1/HolderonV',

@@ -8,7 +8,7 @@ from ..utils import parse_iso8601, ExtractorError
class CtsNewsIE(InfoExtractor):
IE_DESC = '華視新聞'
# https connection failed (Connection reset)
_VALID_URL = r'http://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html'
_VALID_URL = r'https?://news\.cts\.com\.tw/[a-z]+/[a-z]+/\d+/(?P<id>\d+)\.html'
_TESTS = [{
'url': 'http://news.cts.com.tw/cts/international/201501/201501291578109.html',
'md5': 'a9875cb790252b08431186d741beaabe',

@@ -57,6 +57,7 @@ class CWTVIE(InfoExtractor):

formats = self._extract_m3u8_formats(
video_data['videos']['variantplaylist']['uri'], video_id, 'mp4')
self._sort_formats(formats)

thumbnails = [{
'url': image['uri'],

@@ -122,10 +122,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
description = self._og_search_description(webpage) or self._html_search_meta(
'description', webpage, 'description')

view_count = str_to_int(self._search_regex(
[r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
r'video_views_count[^>]+>\s+([\d\.,]+)'],
webpage, 'view count', fatal=False))
view_count_str = self._search_regex(
(r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
webpage, 'view count', fatal=False)
if view_count_str:
view_count_str = re.sub(r'\s', '', view_count_str)
view_count = str_to_int(view_count_str)
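The Dailymotion change splits extraction from parsing so whitespace (including non-breaking spaces matched by `\s`) and thousands separators can be stripped before the number is converted. In effect:

```python
import re

def parse_view_count(view_count_str):
    # Drop all whitespace first, then strip separators and convert;
    # the int(...) line is a stand-in for youtube-dl's str_to_int.
    view_count_str = re.sub(r'\s', '', view_count_str)
    return int(re.sub(r'[,.]', '', view_count_str))

print(parse_view_count('1 234 567'))  # 1234567
print(parse_view_count('1,234,567'))  # 1234567
```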
comment_count = int_or_none(self._search_regex(
r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
webpage, 'comment count', fatal=False))
@@ -396,13 +399,13 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor):
}]

@classmethod
def _extract_dmcloud_url(self, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage)
def _extract_dmcloud_url(cls, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
if mobj:
return mobj.group(1)

mobj = re.search(
r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL,
r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
webpage)
if mobj:
return mobj.group(1)
|
||||
|
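The rename from `self` to `cls` above is purely conventional: under `@classmethod` the first argument is always the class object, whatever it is named, so class attributes resolve without an instance. A toy illustration (class and attribute names are hypothetical):

    class Extractor(object):
        _VALID_EMBED_URL = r'https?://example\.invalid/embed/\d+'  # hypothetical pattern

        @classmethod
        def embed_pattern(cls):
            # 'cls' is the class itself, so class attributes resolve directly.
            return cls._VALID_EMBED_URL

    print(Extractor.embed_pattern())  # works with no instance involved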
@@ -8,8 +8,8 @@ import itertools
 from .common import InfoExtractor
 from ..compat import (
     compat_parse_qs,
-    compat_urllib_parse,
     compat_urllib_parse_unquote,
+    compat_urllib_parse_urlencode,
     compat_urlparse,
 )
 from ..utils import (
@@ -70,7 +70,7 @@ class DaumIE(InfoExtractor):

     def _real_extract(self, url):
         video_id = compat_urllib_parse_unquote(self._match_id(url))
-        query = compat_urllib_parse.urlencode({'vid': video_id})
+        query = compat_urllib_parse_urlencode({'vid': video_id})
         movie_data = self._download_json(
             'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query,
             video_id, 'Downloading video formats info')
@@ -86,7 +86,7 @@ class DaumIE(InfoExtractor):
         formats = []
         for format_el in movie_data['output_list']['output_list']:
             profile = format_el['profile']
-            format_query = compat_urllib_parse.urlencode({
+            format_query = compat_urllib_parse_urlencode({
                 'vid': video_id,
                 'profile': profile,
             })
@@ -6,7 +6,7 @@ import base64

 from .common import InfoExtractor
 from ..compat import (
-    compat_urllib_parse,
+    compat_urllib_parse_urlencode,
     compat_str,
 )
 from ..utils import (
@@ -15,6 +15,7 @@ from ..utils import (
     sanitized_Request,
     smuggle_url,
     unsmuggle_url,
+    urlencode_postdata,
 )


@@ -106,7 +107,7 @@ class DCNVideoIE(DCNBaseIE):

         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': video_data['id'],
                 'user_id': video_data['user_id'],
                 'signature': video_data['signature'],
@@ -133,7 +134,7 @@ class DCNLiveIE(DCNBaseIE):

         webpage = self._download_webpage(
             'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' +
-            compat_urllib_parse.urlencode({
+            compat_urllib_parse_urlencode({
                 'id': base64.b64encode(channel_data['user_id'].encode()).decode(),
                 'channelid': base64.b64encode(channel_data['id'].encode()).decode(),
                 'signature': channel_data['signature'],
@@ -174,7 +175,7 @@ class DCNSeasonIE(InfoExtractor):
         data['show_id'] = show_id
         request = sanitized_Request(
             'http://admin.mangomolo.com/analytics/index.php/plus/show',
-            compat_urllib_parse.urlencode(data),
+            urlencode_postdata(data),
             {
                 'Origin': 'http://www.dcndigital.ae',
                 'Content-Type': 'application/x-www-form-urlencoded'
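Note the split above: GET query strings keep using the urlencode shim, while POST bodies move to `urlencode_postdata`, which also encodes the result to bytes as urllib request objects require. A rough sketch of the distinction (simplified assumption of the helper in youtube_dl's utils):

    try:
        from urllib.parse import urlencode  # Python 3
    except ImportError:
        from urllib import urlencode        # Python 2

    def urlencode_postdata(data):
        # POST bodies must be bytes under Python 3; query strings stay text.
        return urlencode(data).encode('ascii')

    query = urlencode({'vid': '12345'})           # str, appended to a GET URL
    body = urlencode_postdata({'show_id': '67'})  # bytes, passed as POST data
    print(query, body)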
@@ -6,7 +6,7 @@ from ..compat import compat_str


 class DctpTvIE(InfoExtractor):
-    _VALID_URL = r'http://www.dctp.tv/(#/)?filme/(?P<id>.+?)/$'
+    _VALID_URL = r'https?://www.dctp.tv/(#/)?filme/(?P<id>.+?)/$'
     _TEST = {
         'url': 'http://www.dctp.tv/filme/videoinstallation-fuer-eine-kaufhausfassade/',
         'info_dict': {
@@ -5,7 +5,7 @@ from .common import InfoExtractor

 class DefenseGouvFrIE(InfoExtractor):
     IE_NAME = 'defense.gouv.fr'
-    _VALID_URL = r'http://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'
+    _VALID_URL = r'https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)'

     _TEST = {
         'url': 'http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1',
@@ -38,6 +38,7 @@ class DFBIE(InfoExtractor):
         token_el = f4m_info.find('token')
         manifest_url = token_el.attrib['url'] + '?' + 'hdnea=' + token_el.attrib['auth'] + '&hdcore=3.2.0'
         formats = self._extract_f4m_formats(manifest_url, display_id)
+        self._sort_formats(formats)

         return {
             'id': video_id,
@@ -9,7 +9,7 @@ from ..compat import compat_str


 class DiscoveryIE(InfoExtractor):
-    _VALID_URL = r'''(?x)http://(?:www\.)?(?:
+    _VALID_URL = r'''(?x)https?://(?:www\.)?(?:
             discovery|
             investigationdiscovery|
             discoverylife|
@@ -63,18 +63,23 @@ class DiscoveryIE(InfoExtractor):

         video_title = info.get('playlist_title') or info.get('video_title')

-        entries = [{
-            'id': compat_str(video_info['id']),
-            'formats': self._extract_m3u8_formats(
-                video_info['src'], display_id, 'mp4', 'm3u8_native', m3u8_id='hls',
-                note='Download m3u8 information for video %d' % (idx + 1)),
-            'title': video_info['title'],
-            'description': video_info.get('description'),
-            'duration': parse_duration(video_info.get('video_length')),
-            'webpage_url': video_info.get('href') or video_info.get('url'),
-            'thumbnail': video_info.get('thumbnailURL'),
-            'alt_title': video_info.get('secondary_title'),
-            'timestamp': parse_iso8601(video_info.get('publishedDate')),
-        } for idx, video_info in enumerate(info['playlist'])]
+        entries = []
+
+        for idx, video_info in enumerate(info['playlist']):
+            formats = self._extract_m3u8_formats(
+                video_info['src'], display_id, 'mp4', 'm3u8_native', m3u8_id='hls',
+                note='Download m3u8 information for video %d' % (idx + 1))
+            self._sort_formats(formats)
+            entries.append({
+                'id': compat_str(video_info['id']),
+                'formats': formats,
+                'title': video_info['title'],
+                'description': video_info.get('description'),
+                'duration': parse_duration(video_info.get('video_length')),
+                'webpage_url': video_info.get('href') or video_info.get('url'),
+                'thumbnail': video_info.get('thumbnailURL'),
+                'alt_title': video_info.get('secondary_title'),
+                'timestamp': parse_iso8601(video_info.get('publishedDate')),
+            })

         return self.playlist_result(entries, display_id, video_title)
@@ -10,7 +10,7 @@ from ..compat import (compat_str, compat_basestring)

 class DouyuTVIE(InfoExtractor):
     IE_DESC = '斗鱼'
-    _VALID_URL = r'http://(?:www\.)?douyutv\.com/(?P<id>[A-Za-z0-9]+)'
+    _VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(?P<id>[A-Za-z0-9]+)'
     _TESTS = [{
         'url': 'http://www.douyutv.com/iseven',
         'info_dict': {
@@ -18,7 +18,7 @@ class DouyuTVIE(InfoExtractor):
             'display_id': 'iseven',
             'ext': 'flv',
             'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
-            'description': 'md5:c93d6692dde6fe33809a46edcbecca44',
+            'description': 'md5:f34981259a03e980a3c6404190a3ed61',
             'thumbnail': 're:^https?://.*\.jpg$',
             'uploader': '7师傅',
             'uploader_id': '431925',
@@ -26,7 +26,7 @@ class DouyuTVIE(InfoExtractor):
         },
         'params': {
             'skip_download': True,
-        }
+        },
     }, {
         'url': 'http://www.douyutv.com/85982',
         'info_dict': {
@@ -42,7 +42,27 @@ class DouyuTVIE(InfoExtractor):
         },
         'params': {
             'skip_download': True,
-        }
+        },
+        'skip': 'Romm not found',
+    }, {
+        'url': 'http://www.douyutv.com/17732',
+        'info_dict': {
+            'id': '17732',
+            'display_id': '17732',
+            'ext': 'flv',
+            'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
+            'description': 'md5:f34981259a03e980a3c6404190a3ed61',
+            'thumbnail': 're:^https?://.*\.jpg$',
+            'uploader': '7师傅',
+            'uploader_id': '431925',
+            'is_live': True,
+        },
+        'params': {
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://www.douyu.com/xiaocang',
+        'only_matching': True,
     }]

     def _real_extract(self, url):
@@ -1,6 +1,8 @@
-# encoding: utf-8
+# coding: utf-8
 from __future__ import unicode_literals

+import json
+import re
 import time

 from .common import InfoExtractor
@@ -8,44 +10,127 @@ from ..utils import int_or_none


 class DPlayIE(InfoExtractor):
-    _VALID_URL = r'http://www\.dplay\.se/[^/]+/(?P<id>[^/?#]+)'
+    _VALID_URL = r'https?://(?P<domain>it\.dplay\.com|www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)'

-    _TEST = {
+    _TESTS = [{
+        'url': 'http://it.dplay.com/take-me-out/stagione-1-episodio-25/',
+        'info_dict': {
+            'id': '1255600',
+            'display_id': 'stagione-1-episodio-25',
+            'ext': 'mp4',
+            'title': 'Episodio 25',
+            'description': 'md5:cae5f40ad988811b197d2d27a53227eb',
+            'duration': 2761,
+            'timestamp': 1454701800,
+            'upload_date': '20160205',
+            'creator': 'RTIT',
+            'series': 'Take me out',
+            'season_number': 1,
+            'episode_number': 25,
+            'age_limit': 0,
+        },
+        'expected_warnings': ['Unable to download f4m manifest'],
+    }, {
         'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
         'info_dict': {
             'id': '3172',
-            'ext': 'mp4',
             'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
+            'ext': 'flv',
             'title': 'Svensken lär sig njuta av livet',
+            'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8',
+            'duration': 2650,
+            'timestamp': 1365454320,
+            'upload_date': '20130408',
+            'creator': 'Kanal 5 (Home)',
+            'series': 'Nugammalt - 77 händelser som format Sverige',
+            'season_number': 1,
+            'episode_number': 1,
+            'age_limit': 0,
         },
-    }
+    }, {
+        'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/',
+        'info_dict': {
+            'id': '70816',
+            'display_id': 'season-6-episode-12',
+            'ext': 'flv',
+            'title': 'Episode 12',
+            'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90',
+            'duration': 2563,
+            'timestamp': 1429696800,
+            'upload_date': '20150422',
+            'creator': 'Kanal 4',
+            'series': 'Mig og min mor',
+            'season_number': 6,
+            'episode_number': 12,
+            'age_limit': 0,
+        },
+    }, {
+        'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
-        display_id = self._match_id(url)
+        mobj = re.match(self._VALID_URL, url)
+        display_id = mobj.group('id')
+        domain = mobj.group('domain')

         webpage = self._download_webpage(url, display_id)

         video_id = self._search_regex(
-            r'data-video-id="(\d+)"', webpage, 'video id')
+            r'data-video-id=["\'](\d+)', webpage, 'video id')

         info = self._download_json(
-            'http://www.dplay.se/api/v2/ajax/videos?video_id=' + video_id,
+            'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id),
             video_id)['data'][0]

-        self._set_cookie(
-            'secure.dplay.se', 'dsc-geo',
-            '{"countryCode":"NL","expiry":%d}' % ((time.time() + 20 * 60) * 1000))
-        # TODO: consider adding support for 'stream_type=hds', it seems to
-        # require setting some cookies
-        manifest_url = self._download_json(
-            'https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls' % video_id,
-            video_id, 'Getting manifest url for hls stream')['hls']
-        formats = self._extract_m3u8_formats(
-            manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native')
+        title = info['title']
+
+        PROTOCOLS = ('hls', 'hds')
+        formats = []
+
+        def extract_formats(protocol, manifest_url):
+            if protocol == 'hls':
+                formats.extend(self._extract_m3u8_formats(
+                    manifest_url, video_id, ext='mp4',
+                    entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False))
+            elif protocol == 'hds':
+                formats.extend(self._extract_f4m_formats(
+                    manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0',
+                    video_id, f4m_id=protocol, fatal=False))
+
+        domain_tld = domain.split('.')[-1]
+        if domain_tld in ('se', 'dk'):
+            for protocol in PROTOCOLS:
+                self._set_cookie(
+                    'secure.dplay.%s' % domain_tld, 'dsc-geo',
+                    json.dumps({
+                        'countryCode': domain_tld.upper(),
+                        'expiry': (time.time() + 20 * 60) * 1000,
+                    }))
+                stream = self._download_json(
+                    'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s'
+                    % (domain_tld, video_id, protocol), video_id,
+                    'Downloading %s stream JSON' % protocol, fatal=False)
+                if stream and stream.get(protocol):
+                    extract_formats(protocol, stream[protocol])
+        else:
+            for protocol in PROTOCOLS:
+                if info.get(protocol):
+                    extract_formats(protocol, info[protocol])
+
+        self._sort_formats(formats)

         return {
             'id': video_id,
             'display_id': display_id,
-            'title': info['title'],
-            'formats': formats,
+            'title': title,
             'description': info.get('video_metadata_longDescription'),
             'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
             'timestamp': int_or_none(info.get('video_publish_date')),
             'creator': info.get('video_metadata_homeChannel'),
             'series': info.get('video_metadata_show'),
             'season_number': int_or_none(info.get('season')),
             'episode_number': int_or_none(info.get('episode')),
             'age_limit': int_or_none(info.get('minimum_age')),
+            'formats': formats,
         }
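The rewritten DPlay extractor builds the `dsc-geo` cookie with `json.dumps` instead of hand-rolled string formatting, which keeps the payload valid JSON whatever the values are. A standalone sketch of the payload (the 20-minute expiry in epoch milliseconds mirrors the diff; the TLD value is illustrative):

    import json
    import time

    domain_tld = 'se'  # illustrative; the extractor derives this from the matched URL
    cookie_value = json.dumps({
        'countryCode': domain_tld.upper(),
        'expiry': (time.time() + 20 * 60) * 1000,  # now + 20 minutes, in milliseconds
    })
    print(cookie_value)  # e.g. {"countryCode": "SE", "expiry": 1458...}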
@@ -6,7 +6,6 @@ import itertools
 from .amp import AMPIE
 from ..compat import (
     compat_HTTPError,
-    compat_urllib_parse,
     compat_urlparse,
 )
 from ..utils import (
@@ -14,6 +13,7 @@ from ..utils import (
     clean_html,
     int_or_none,
     sanitized_Request,
+    urlencode_postdata
 )


@@ -50,7 +50,7 @@ class DramaFeverBaseIE(AMPIE):
         }

         request = sanitized_Request(
-            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+            self._LOGIN_URL, urlencode_postdata(login_form))
         response = self._download_webpage(
             request, None, 'Logging in as %s' % username)
@@ -87,7 +87,7 @@ class DRBonanzaIE(InfoExtractor):

     formats = []
     for file in info['Files']:
-        if info['Type'] == "Video":
+        if info['Type'] == 'Video':
             if file['Type'] in video_types:
                 format = parse_filename_info(file['Location'])
                 format.update({
@@ -101,10 +101,10 @@ class DRBonanzaIE(InfoExtractor):
                 if '/bonanza/' in rtmp_url:
                     format['play_path'] = rtmp_url.split('/bonanza/')[1]
                 formats.append(format)
-            elif file['Type'] == "Thumb":
+            elif file['Type'] == 'Thumb':
                 thumbnail = file['Location']
-        elif info['Type'] == "Audio":
-            if file['Type'] == "Audio":
+        elif info['Type'] == 'Audio':
+            if file['Type'] == 'Audio':
                 format = parse_filename_info(file['Location'])
                 format.update({
                     'url': file['Location'],
@@ -112,7 +112,7 @@ class DRBonanzaIE(InfoExtractor):
                     'vcodec': 'none',
                 })
                 formats.append(format)
-            elif file['Type'] == "Thumb":
+            elif file['Type'] == 'Thumb':
                 thumbnail = file['Location']

     description = '%s\n%s\n%s\n' % (
@@ -7,7 +7,7 @@ from .zdf import ZDFIE

 class DreiSatIE(ZDFIE):
     IE_NAME = '3sat'
-    _VALID_URL = r'(?:http://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
+    _VALID_URL = r'(?:https?://)?(?:www\.)?3sat\.de/mediathek/(?:index\.php|mediathek\.php)?\?(?:(?:mode|display)=[^&]+&)*obj=(?P<id>[0-9]+)$'
     _TESTS = [
         {
             'url': 'http://www.3sat.de/mediathek/index.php?mode=play&obj=45918',
@@ -15,7 +15,7 @@ class DVTVIE(InfoExtractor):
     IE_NAME = 'dvtv'
     IE_DESC = 'http://video.aktualne.cz/'

-    _VALID_URL = r'http://video\.aktualne\.cz/(?:[^/]+/)+r~(?P<id>[0-9a-f]{32})'
+    _VALID_URL = r'https?://video\.aktualne\.cz/(?:[^/]+/)+r~(?P<id>[0-9a-f]{32})'

     _TESTS = [{
         'url': 'http://video.aktualne.cz/dvtv/vondra-o-ceskem-stoleti-pri-pohledu-na-havla-mi-bylo-trapne/r~e5efe9ca855511e4833a0025900fea04/',
youtube_dl/extractor/dw.py (new file, 85 lines)
@@ -0,0 +1,85 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..utils import int_or_none
+from ..compat import compat_urlparse
+
+
+class DWIE(InfoExtractor):
+    IE_NAME = 'dw'
+    _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+av-(?P<id>\d+)'
+    _TESTS = [{
+        # video
+        'url': 'http://www.dw.com/en/intelligent-light/av-19112290',
+        'md5': '7372046e1815c5a534b43f3c3c36e6e9',
+        'info_dict': {
+            'id': '19112290',
+            'ext': 'mp4',
+            'title': 'Intelligent light',
+            'description': 'md5:90e00d5881719f2a6a5827cb74985af1',
+            'upload_date': '20160311',
+        }
+    }, {
+        # audio
+        'url': 'http://www.dw.com/en/worldlink-my-business/av-19111941',
+        'md5': '2814c9a1321c3a51f8a7aeb067a360dd',
+        'info_dict': {
+            'id': '19111941',
+            'ext': 'mp3',
+            'title': 'WorldLink: My business',
+            'description': 'md5:bc9ca6e4e063361e21c920c53af12405',
+            'upload_date': '20160311',
+        }
+    }]
+
+    def _real_extract(self, url):
+        media_id = self._match_id(url)
+        webpage = self._download_webpage(url, media_id)
+        hidden_inputs = self._hidden_inputs(webpage)
+        title = hidden_inputs['media_title']
+
+        if hidden_inputs.get('player_type') == 'video' and hidden_inputs.get('stream_file') == '1':
+            formats = self._extract_smil_formats(
+                'http://www.dw.com/smil/v-%s' % media_id, media_id,
+                transform_source=lambda s: s.replace(
+                    'rtmp://tv-od.dw.de/flash/',
+                    'http://tv-download.dw.de/dwtv_video/flv/'))
+            self._sort_formats(formats)
+        else:
+            formats = [{'url': hidden_inputs['file_name']}]
+
+        return {
+            'id': media_id,
+            'title': title,
+            'description': self._og_search_description(webpage),
+            'thumbnail': hidden_inputs.get('preview_image'),
+            'duration': int_or_none(hidden_inputs.get('file_duration')),
+            'upload_date': hidden_inputs.get('display_date'),
+            'formats': formats,
+        }
+
+
+class DWArticleIE(InfoExtractor):
+    IE_NAME = 'dw:article'
+    _VALID_URL = r'https?://(?:www\.)?dw\.com/(?:[^/]+/)+a-(?P<id>\d+)'
+    _TEST = {
+        'url': 'http://www.dw.com/en/no-hope-limited-options-for-refugees-in-idomeni/a-19111009',
+        'md5': '8ca657f9d068bbef74d6fc38b97fc869',
+        'info_dict': {
+            'id': '19105868',
+            'ext': 'mp4',
+            'title': 'The harsh life of refugees in Idomeni',
+            'description': 'md5:196015cc7e48ebf474db9399420043c7',
+            'upload_date': '20160310',
+        }
+    }
+
+    def _real_extract(self, url):
+        article_id = self._match_id(url)
+        webpage = self._download_webpage(url, article_id)
+        hidden_inputs = self._hidden_inputs(webpage)
+        media_id = hidden_inputs['media_id']
+        media_path = self._search_regex(r'href="([^"]+av-%s)"\s+class="overlayLink"' % media_id, webpage, 'media url')
+        media_url = compat_urlparse.urljoin(url, media_path)
+        return self.url_result(media_url, 'DW', media_id)
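The new DW extractor rewrites RTMP URLs inside the SMIL manifest to plain HTTP download URLs via `transform_source`, which runs over the manifest text before it is parsed. A minimal standalone illustration of that substitution (the two hosts come from the diff; the sample SMIL snippet is made up):

    def rewrite_smil(smil_text):
        # Swap the RTMP flash host for the progressive-download host so the
        # plain HTTP downloader can fetch the files.
        return smil_text.replace(
            'rtmp://tv-od.dw.de/flash/',
            'http://tv-download.dw.de/dwtv_video/flv/')

    smil = '<video src="rtmp://tv-od.dw.de/flash/example.mp4"/>'
    print(rewrite_smil(smil))
    # -> <video src="http://tv-download.dw.de/dwtv_video/flv/example.mp4"/>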
@@ -7,7 +7,7 @@ from .common import InfoExtractor


 class EchoMskIE(InfoExtractor):
-    _VALID_URL = r'http://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)'
+    _VALID_URL = r'https?://(?:www\.)?echo\.msk\.ru/sounds/(?P<id>\d+)'
     _TEST = {
         'url': 'http://www.echo.msk.ru/sounds/1464134.html',
         'md5': '2e44b3b78daff5b458e4dbc37f191f7c',
@@ -17,85 +17,85 @@ class EightTracksIE(InfoExtractor):
     IE_NAME = '8tracks'
     _VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
     _TEST = {
-        "name": "EightTracks",
-        "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
-        "info_dict": {
+        'name': 'EightTracks',
+        'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
+        'info_dict': {
             'id': '1336550',
             'display_id': 'youtube-dl-test-tracks-a',
-            "description": "test chars: \"'/\\ä↭",
-            "title": "youtube-dl test tracks \"'/\\ä↭<>",
+            'description': "test chars: \"'/\\ä↭",
+            'title': "youtube-dl test tracks \"'/\\ä↭<>",
         },
-        "playlist": [
+        'playlist': [
             {
-                "md5": "96ce57f24389fc8734ce47f4c1abcc55",
-                "info_dict": {
-                    "id": "11885610",
-                    "ext": "m4a",
-                    "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': '96ce57f24389fc8734ce47f4c1abcc55',
+                'info_dict': {
+                    'id': '11885610',
+                    'ext': 'm4a',
+                    'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "4ab26f05c1f7291ea460a3920be8021f",
-                "info_dict": {
-                    "id": "11885608",
-                    "ext": "m4a",
-                    "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': '4ab26f05c1f7291ea460a3920be8021f',
+                'info_dict': {
+                    'id': '11885608',
+                    'ext': 'm4a',
+                    'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "d30b5b5f74217410f4689605c35d1fd7",
-                "info_dict": {
-                    "id": "11885679",
-                    "ext": "m4a",
-                    "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': 'd30b5b5f74217410f4689605c35d1fd7',
+                'info_dict': {
+                    'id': '11885679',
+                    'ext': 'm4a',
+                    'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "4eb0a669317cd725f6bbd336a29f923a",
-                "info_dict": {
-                    "id": "11885680",
-                    "ext": "m4a",
-                    "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': '4eb0a669317cd725f6bbd336a29f923a',
+                'info_dict': {
+                    'id': '11885680',
+                    'ext': 'm4a',
+                    'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "1893e872e263a2705558d1d319ad19e8",
-                "info_dict": {
-                    "id": "11885682",
-                    "ext": "m4a",
-                    "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': '1893e872e263a2705558d1d319ad19e8',
+                'info_dict': {
+                    'id': '11885682',
+                    'ext': 'm4a',
+                    'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "b673c46f47a216ab1741ae8836af5899",
-                "info_dict": {
-                    "id": "11885683",
-                    "ext": "m4a",
-                    "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': 'b673c46f47a216ab1741ae8836af5899',
+                'info_dict': {
+                    'id': '11885683',
+                    'ext': 'm4a',
+                    'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "1d74534e95df54986da7f5abf7d842b7",
-                "info_dict": {
-                    "id": "11885684",
-                    "ext": "m4a",
-                    "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': '1d74534e95df54986da7f5abf7d842b7',
+                'info_dict': {
+                    'id': '11885684',
+                    'ext': 'm4a',
+                    'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             },
             {
-                "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
-                "info_dict": {
-                    "id": "11885685",
-                    "ext": "m4a",
-                    "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
-                    "uploader_id": "ytdl"
+                'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
+                'info_dict': {
+                    'id': '11885685',
+                    'ext': 'm4a',
+                    'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+                    'uploader_id': 'ytdl'
                 }
             }
         ]
@@ -72,7 +72,7 @@ class EllenTVClipsIE(InfoExtractor):
     def _extract_playlist(self, webpage):
         json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
         try:
-            return json.loads("[{" + json_string + "}]")
+            return json.loads('[{' + json_string + '}]')
         except ValueError as ve:
             raise ExtractorError('Failed to download JSON', cause=ve)

@@ -9,7 +9,7 @@ class ElPaisIE(InfoExtractor):
     _VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
     IE_DESC = 'El País'

-    _TEST = {
+    _TESTS = [{
         'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',
         'md5': '98406f301f19562170ec071b83433d55',
         'info_dict': {
@@ -19,30 +19,41 @@ class ElPaisIE(InfoExtractor):
             'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.',
             'upload_date': '20140206',
         }
-    }
+    }, {
+        'url': 'http://elcomidista.elpais.com/elcomidista/2016/02/24/articulo/1456340311_668921.html#?id_externo_nwl=newsletter_diaria20160303t',
+        'md5': '3bd5b09509f3519d7d9e763179b013de',
+        'info_dict': {
+            'id': '1456340311_668921',
+            'ext': 'mp4',
+            'title': 'Cómo hacer el mejor café con cafetera italiana',
+            'description': 'Que sí, que las cápsulas son cómodas. Pero si le pides algo más a la vida, quizá deberías aprender a usar bien la cafetera italiana. No tienes más que ver este vídeo y seguir sus siete normas básicas.',
+            'upload_date': '20160303',
+        }
+    }]

     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)

         prefix = self._html_search_regex(
-            r'var url_cache = "([^"]+)";', webpage, 'URL prefix')
+            r'var\s+url_cache\s*=\s*"([^"]+)";', webpage, 'URL prefix')
         video_suffix = self._search_regex(
-            r"URLMediaFile = url_cache \+ '([^']+)'", webpage, 'video URL')
+            r"(?:URLMediaFile|urlVideo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'", webpage, 'video URL')
         video_url = prefix + video_suffix
         thumbnail_suffix = self._search_regex(
-            r"URLMediaStill = url_cache \+ '([^']+)'", webpage, 'thumbnail URL',
-            fatal=False)
+            r"(?:URLMediaStill|urlFotogramaFijo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'",
+            webpage, 'thumbnail URL', fatal=False)
         thumbnail = (
             None if thumbnail_suffix is None
             else prefix + thumbnail_suffix)
         title = self._html_search_regex(
-            '<h2 class="entry-header entry-title.*?>(.*?)</h2>',
+            (r"tituloVideo\s*=\s*'([^']+)'", webpage, 'title',
+             r'<h2 class="entry-header entry-title.*?>(.*?)</h2>'),
             webpage, 'title')
-        date_str = self._search_regex(
+        upload_date = unified_strdate(self._search_regex(
             r'<p class="date-header date-int updated"\s+title="([^"]+)">',
-            webpage, 'upload date', fatal=False)
-        upload_date = (None if date_str is None else unified_strdate(date_str))
+            webpage, 'upload date', default=None) or self._html_search_meta(
+            'datePublished', webpage, 'timestamp'))

         return {
             'id': video_id,
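The loosened El País patterns combine `(?:A|B)` alternation with `\s*` around `=` and `+`, so both the old player markup and the redesigned one match. A small self-contained check of the video-URL pattern (the pattern is from the diff; the sample markup lines are made up):

    import re

    pattern = r"(?:URLMediaFile|urlVideo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'"

    old_markup = "URLMediaFile = url_cache + '/video/123.mp4'"
    new_markup = "urlVideo_7=url_cache+'/video/456.mp4'"

    assert re.search(pattern, old_markup).group(1) == '/video/123.mp4'
    assert re.search(pattern, new_markup).group(1) == '/video/456.mp4'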
@@ -1,21 +1,13 @@
 from __future__ import unicode_literals

-import re
-
 from .common import InfoExtractor
-from ..utils import (
-    url_basename,
-)


 class EngadgetIE(InfoExtractor):
-    _VALID_URL = r'''(?x)https?://www.engadget.com/
-        (?:video(?:/5min)?/(?P<id>\d+)|
-            [\d/]+/.*?)
-    '''
+    _VALID_URL = r'https?://www.engadget.com/video/(?P<id>\d+)'

     _TEST = {
-        'url': 'http://www.engadget.com/video/5min/518153925/',
+        'url': 'http://www.engadget.com/video/518153925/',
         'md5': 'c6820d4828a5064447a4d9fc73f312c9',
         'info_dict': {
             'id': '518153925',
@@ -27,15 +19,4 @@ class EngadgetIE(InfoExtractor):

     def _real_extract(self, url):
         video_id = self._match_id(url)

-        if video_id is not None:
-            return self.url_result('5min:%s' % video_id)
-        else:
-            title = url_basename(url)
-            webpage = self._download_webpage(url, title)
-            ids = re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
-            return {
-                '_type': 'playlist',
-                'title': title,
-                'entries': [self.url_result('5min:%s' % vid) for vid in ids]
-            }
+        return self.url_result('5min:%s' % video_id)
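The simplified Engadget extractor now only delegates to the 5min extractor via a `5min:<id>` pseudo-URL; `url_result` returns a result dict that tells the core to re-dispatch extraction. A sketch of the shape of that dict (a simplified stand-in for the real helper in youtube-dl's common extractor base):

    def url_result(url, ie=None, video_id=None):
        # Simplified stand-in: the core sees _type == 'url' and re-runs
        # extraction on the new URL, hinted toward the named extractor.
        result = {'_type': 'url', 'url': url}
        if ie is not None:
            result['ie_key'] = ie
        if video_id is not None:
            result['id'] = video_id
        return result

    print(url_result('5min:518153925'))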
Some files were not shown because too many files have changed in this diff.