Compare commits: 2016.02.13...2016.03.06

238 commits

SHA1 | Author | Date | |
---|---|---|---|
bdf7f13954 | |||
0f56a4b443 | |||
1b5284b13f | |||
d1e4a464cd | |||
ff059017c0 | |||
f22ba4bd60 | |||
1db772673e | |||
75313f2baa | |||
090eb8e25f | |||
a9793f58a1 | |||
7177fd24f8 | |||
1e501f6c40 | |||
2629a3802c | |||
51ce91174b | |||
107d0c421a | |||
18b0b23992 | |||
d1b29d1342 | |||
845817aadf | |||
3233a68fbb | |||
cf074e5ddd | |||
002c755248 | |||
d627cec608 | |||
1315224cbb | |||
7760b9ff4d | |||
28559564b2 | |||
fa880d20ad | |||
ae7d31af1c | |||
9d303bf29b | |||
5f1688f271 | |||
1d4c9ed90c | |||
d48352fb5d | |||
6d6536acb2 | |||
b6f94d81ea | |||
8477a69283 | |||
d58cb3ec7e | |||
8a370aedac | |||
24ca0e9c0b | |||
e1dd521e49 | |||
1255733945 | |||
3201a67f61 | |||
d0ff690d68 | |||
fb640d0a3d | |||
38f9ef31dc | |||
a8276b2680 | |||
ececca6cde | |||
8bbb4b56ee | |||
539a1641c6 | |||
1b0635aba3 | |||
429491f531 | |||
e9c0cdd389 | |||
0cae023b24 | |||
8ee239e921 | |||
fa9e259fd9 | |||
f3bdae76de | |||
03879ff054 | |||
c8398a9b87 | |||
b8972bd69d | |||
0ae937a798 | |||
4459bef203 | |||
e07237f640 | |||
8c5a994424 | |||
2eb25b256b | |||
f3bc19a989 | |||
7a8fef3173 | |||
7465e7e42d | |||
5e73a67d44 | |||
2316dc2b9a | |||
a2d7797cee | |||
fd050249af | |||
7bcd2830dd | |||
47462a125b | |||
7caf9830b0 | |||
2bc0c46f98 | |||
3318832e9d | |||
e7d2084568 | |||
c2d3cb4c63 | |||
c48dd4400f | |||
e38cafe986 | |||
85ca019d96 | |||
4a5ba28a87 | |||
82156fdbf0 | |||
6114090418 | |||
3099b31276 | |||
f17f86513e | |||
90f794c6c3 | |||
66ca2cfddd | |||
269dd2c6a7 | |||
e7998f59aa | |||
9fb556eef0 | |||
e781ab63db | |||
3e76968220 | |||
2812c24c16 | |||
d77ab8e255 | |||
4b3cd7316c | |||
6dae56384a | |||
2b2dfae83e | |||
6c10dbeae9 | |||
9173202b84 | |||
8870bb4653 | |||
7a0e7779fe | |||
a048ffc9b0 | |||
4587915b2a | |||
da665ddc25 | |||
5add979d91 | |||
20afe8bd14 | |||
940b606a07 | |||
9505053704 | |||
2c9ca78281 | |||
63719a8ac3 | |||
8fab62482a | |||
d6e9c2706f | |||
f7f2e53a0a | |||
9cdffeeb3f | |||
fbb6edd298 | |||
5eb6bdced4 | |||
5633b4d39d | |||
4435c6e98e | |||
2ebd2eac88 | |||
b78b292f0c | |||
efbd6fb8bb | |||
680079be39 | |||
e4fc8d2ebe | |||
f52354a889 | |||
59f898b7a7 | |||
8f4a2124a9 | |||
481888294d | |||
d1e440a4a1 | |||
81bdc8fdf6 | |||
e048d87fc9 | |||
e26cde0927 | |||
20108c6b90 | |||
9195ef745a | |||
d0459c530d | |||
f160785c5c | |||
5c0a57185c | |||
43479d9e9d | |||
c0da50d2b2 | |||
c24883a1c0 | |||
1b77ee6248 | |||
bf4b3b6bd9 | |||
efbeddead3 | |||
3cfeb1624a | |||
b95dc034ca | |||
86a7dbe66e | |||
b43a7a92cd | |||
6563d31710 | |||
cf89ba9eff | |||
9b01272832 | |||
58525c94d5 | |||
621bd0cda9 | |||
1610f770d7 | |||
0fc871d2f0 | |||
1ad6143061 | |||
92da3cd848 | |||
6212bcb191 | |||
d69abbd3f0 | |||
1d00a8823e | |||
5d6e1011df | |||
f5bdb44443 | |||
7efc1c2b49 | |||
132e3b74bd | |||
bdbf4ba40e | |||
acb6e97e6a | |||
445d72b8b5 | |||
92c5e11b40 | |||
0dd046c16c | |||
305168ca3e | |||
b72f6163dc | |||
33d4fdabfa | |||
cafcf657a4 | |||
101067de12 | |||
7360db05b4 | |||
c1c05c67ea | |||
399a76e67b | |||
765ac263db | |||
a4e4d7dfcd | |||
73f9c2867d | |||
9c86d50916 | |||
1d14c75f55 | |||
99709cc3f1 | |||
5bc880b988 | |||
958759f44b | |||
86bf29050e | |||
04cbc4980d | |||
8765151c8a | |||
8ec64ac683 | |||
ed8648a322 | |||
88641243ab | |||
40e146aa1e | |||
f3f9cd9234 | |||
ebf1b291d0 | |||
bc7a9cd8fb | |||
d48502b82a | |||
479ec54a8d | |||
49625662a9 | |||
8b809a079a | |||
778433cb90 | |||
411cb8f476 | |||
63bf4f0dc0 | |||
80e59a0d5d | |||
8bbd3d1476 | |||
e725e4bced | |||
08d65046f0 | |||
44b9745000 | |||
9654fc875b | |||
0f425e65ec | |||
199e724291 | |||
e277f2a63b | |||
f4db09178a | |||
86be3cdc2a | |||
cb64ccc715 | |||
f66a3c7bc2 | |||
fe80df3080 | |||
1932476c13 | |||
d2c1f79f20 | |||
8eacae8cf9 | |||
c8a80fd818 | |||
b9e8d7140a | |||
6eff2605d6 | |||
fd7a3ea4a4 | |||
8d3eeb36d7 | |||
8e0548e180 | |||
a517bb4b1e | |||
9dcefb23a1 | |||
d9da74bc06 | |||
5e19323ed9 | |||
611c1dd96e | |||
d800609c62 | |||
c78c9cd10d | |||
e76394f36c | |||
080e09557d | |||
fca2e6d5a6 | |||
b45f2b1d6e | |||
fc2e70ee90 | |||
b4561e857f | |||
7023251239 | |||
e2bd68c901 | |||
c43fe0268c |
.gitignore (vendored): 3 changes

@@ -1,5 +1,6 @@
*.pyc
*.pyo
*.class
*~
*.DS_Store
wine-py2exe/
@@ -32,4 +33,4 @@ test/testdata
.tox
youtube-dl.zsh
.idea
.idea/*
.idea/*
AUTHORS: 4 changes

@@ -157,3 +157,7 @@ Founder Fang
Andrew Alexeyew
Saso Bezlaj
Erwin de Haan
Jens Wille
Robin Houtevelts
Patrick Griffis
Aidan Rowe
@ -1,6 +1,6 @@
|
||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. add `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||
```
|
||||
$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
|
||||
$ youtube-dl -v <your command line>
|
||||
[debug] System config: []
|
||||
[debug] User config: []
|
||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||
@ -92,7 +92,9 @@ If you want to create a build of youtube-dl yourself, you'll need
|
||||
|
||||
### Adding support for a new site
|
||||
|
||||
If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||
If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites, thus pull requests adding support for them **will be rejected**.

After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||
|
||||
1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
|
||||
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
|
||||
@ -140,16 +142,17 @@ If you want to add support for a new site, you can follow this quick list (assum
|
||||
```
|
||||
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
|
||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc.
|
||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
|
||||
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
||||
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
|
||||
8. Keep in mind that the only mandatory fields in the info dict for a successful extraction are `id`, `title` and either `url` or `formats`, i.e. the critical data without which the extraction does not make any sense. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from the aforementioned mandatory ones should be treated **as optional**, and the extraction should be **tolerant** of situations where the sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** so as not to break the extraction of the general-purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into the resulting info dict as `description`, you should be prepared for this key to be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`/`_html_search_regex`. (A short illustrative sketch of this pattern follows this checklist.)
|
||||
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
||||
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||
|
||||
$ git add youtube_dl/extractor/__init__.py
|
||||
$ git add youtube_dl/extractor/yourextractor.py
|
||||
$ git commit -m '[yourextractor] Add new extractor'
|
||||
$ git push origin yourextractor
|
||||
|
||||
10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||
11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||
|
||||
In any case, thank you very much for your contributions!
|
||||
|
||||
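To make step 8 concrete, here is a small, hypothetical `_real_extract` fragment. The API endpoint, the `meta` keys and the regular expression are invented for illustration; `_match_id`, `_download_webpage`, `_download_json`, `_search_regex` and the info-dict fields are the real helpers and fields described in `common.py`:

```python
def _real_extract(self, url):
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)
    # `meta` is a hypothetical intermediate metadata dict
    meta = self._download_json(
        'http://yourextractor.com/api/video/%s' % video_id, video_id)

    # mandatory fields: let a missing key fail loudly
    title = meta['title']

    return {
        'id': video_id,
        'title': title,
        'url': meta['url'],
        # optional field: never assume the key is present
        'description': meta.get('summary'),
        # optional data scraped from the page: fatal=False turns a failed
        # match into a warning instead of aborting the extraction
        'uploader': self._search_regex(
            r'uploaded by:\s*([^<]+)', webpage, 'uploader', fatal=False),
    }
```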
|
Makefile: 3 changes

@@ -3,6 +3,7 @@ all: youtube-dl README.md CONTRIBUTING.md README.txt youtube-dl.1 youtube-dl.bas
clean:
	rm -rf youtube-dl.1.temp.md youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/ youtube-dl.tar.gz youtube-dl.zsh youtube-dl.fish *.dump *.part *.info.json *.mp4 *.flv *.mp3 *.avi CONTRIBUTING.md.tmp youtube-dl youtube-dl.exe
	find . -name "*.pyc" -delete
	find . -name "*.class" -delete

PREFIX ?= /usr/local
BINDIR ?= $(PREFIX)/bin
@@ -44,7 +45,7 @@ test:
ot: offlinetest

offlinetest: codetest
	nosetests --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py
	$(PYTHON) -m nose --verbose test --exclude test_download.py --exclude test_age_restriction.py --exclude test_subtitles.py --exclude test_write_annotations.py --exclude test_youtube_lists.py --exclude test_iqiyi_sdk_interpreter.py

tar: youtube-dl.tar.gz
README.md: 53 changes
@ -80,6 +80,8 @@ which means you can modify it, redistribute it or use it however you like.
|
||||
on Windows)
|
||||
--flat-playlist Do not extract the videos of a playlist,
|
||||
only list them.
|
||||
--mark-watched Mark videos watched (YouTube only)
|
||||
--no-mark-watched Do not mark videos watched (YouTube only)
|
||||
--no-color Do not emit color codes in output
|
||||
|
||||
## Network Options:
|
||||
@ -409,13 +411,18 @@ which means you can modify it, redistribute it or use it however you like.
|
||||
|
||||
# CONFIGURATION
|
||||
|
||||
You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`. For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime and use a proxy:
|
||||
You can configure youtube-dl by placing any supported command line option to a configuration file. On Linux, the system wide configuration file is located at `/etc/youtube-dl.conf` and the user wide configuration file at `~/.config/youtube-dl/config`. On Windows, the user wide configuration file locations are `%APPDATA%\youtube-dl\config.txt` or `C:\Users\<user name>\youtube-dl.conf`.
|
||||
|
||||
For example, with the following configuration file youtube-dl will always extract the audio, not copy the mtime, use a proxy and save all videos under `Movies` directory in your home directory:
|
||||
```
|
||||
--extract-audio
|
||||
-x
|
||||
--no-mtime
|
||||
--proxy 127.0.0.1:3128
|
||||
-o ~/Movies/%(title)s.%(ext)s
|
||||
```
|
||||
|
||||
Note that options in a configuration file are just the same options, a.k.a. switches, used in regular command-line calls, so there **must be no whitespace** after `-` or `--`, e.g. `-o` or `--proxy` but not `- o` or `-- proxy`.
|
||||
|
||||
You can use `--ignore-config` if you want to disable the configuration file for a particular youtube-dl run.
|
||||
|
||||
### Authentication with `.netrc` file
|
||||
@ -440,7 +447,11 @@ On Windows you may also need to setup the `%HOME%` environment variable manually
|
||||
|
||||
# OUTPUT TEMPLATE
|
||||
|
||||
The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
|
||||
The `-o` option allows users to indicate a template for the output file names.
|
||||
|
||||
**tl;dr:** [navigate me to examples](#output-template-examples).
|
||||
|
||||
The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parentheses, followed by a lowercase S. Allowed names are:
|
||||
|
||||
- `id`: Video identifier
|
||||
- `title`: Video title
|
||||
@ -449,6 +460,7 @@ The `-o` option allows users to indicate a template for the output file names. T
|
||||
- `alt_title`: A secondary title of the video
|
||||
- `display_id`: An alternative identifier for the video
|
||||
- `uploader`: Full name of the video uploader
|
||||
- `license`: License name the video is licensed under
|
||||
- `creator`: The main artist who created the video
|
||||
- `release_date`: The date (YYYYMMDD) when the video was released
|
||||
- `timestamp`: UNIX timestamp of the moment the video became available
|
||||
@ -513,7 +525,9 @@ The current default template is `%(title)s-%(id)s.%(ext)s`.
|
||||
|
||||
In some cases, you don't want special characters such as 中, spaces, or &, for example when transferring the downloaded filename to a Windows system or passing it through an 8-bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
|
||||
|
||||
Examples (note on Windows you may need to use double quotes instead of single):
|
||||
#### Output template examples
|
||||
|
||||
Note on Windows you may need to use double quotes instead of single.
|
||||
|
||||
```bash
|
||||
$ youtube-dl --get-filename -o '%(title)s.%(ext)s' BaW_jenozKc
|
||||
@ -525,6 +539,9 @@ youtube-dl_test_video_.mp4 # A simple file name
|
||||
# Download YouTube playlist videos in separate directory indexed by video order in a playlist
|
||||
$ youtube-dl -o '%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re
|
||||
|
||||
# Download all playlists of YouTube channel/user keeping each playlist in separate directory:
|
||||
$ youtube-dl -o '%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s' https://www.youtube.com/user/TheLinuxFoundation/playlists
|
||||
|
||||
# Download Udemy course keeping each chapter in separate directory under MyVideos directory in your home
|
||||
$ youtube-dl -u user -p password -o '~/MyVideos/%(playlist)s/%(chapter_number)s - %(chapter)s/%(title)s.%(ext)s' https://www.udemy.com/java-tutorial/
|
||||
|
||||
@ -543,6 +560,8 @@ But sometimes you may want to download in a different format, for example when y
|
||||
|
||||
The general syntax for format selection is `--format FORMAT` or shorter `-f FORMAT` where `FORMAT` is a *selector expression*, i.e. an expression that describes format or formats you would like to download.
|
||||
|
||||
**tl;dr:** [navigate me to examples](#format-selection-examples).
|
||||
|
||||
The simplest case is requesting a specific format, for example with `-f 22` you can download the format with format code equal to 22. You can get the list of available format codes for particular video using `--list-formats` or `-F`. Note that these format codes are extractor specific.
|
||||
|
||||
You can also use a file extension (currently `3gp`, `aac`, `flv`, `m4a`, `mp3`, `mp4`, `ogg`, `wav`, `webm` are supported) to download best quality format of particular file extension served as a single file, e.g. `-f webm` will download best quality format with `webm` extension served as a single file.
|
||||
@ -588,11 +607,14 @@ You can merge the video and audio of two formats into a single file using `-f <v
|
||||
|
||||
Format selectors can also be grouped using parentheses, for example if you want to download the best mp4 and webm formats with a height lower than 480 you can use `-f '(mp4,webm)[height<480]'`.
|
||||
|
||||
Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see #5447, #5456). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
|
||||
Since the end of April 2015 and version 2015.04.26 youtube-dl uses `-f bestvideo+bestaudio/best` as default format selection (see [#5447](https://github.com/rg3/youtube-dl/issues/5447), [#5456](https://github.com/rg3/youtube-dl/issues/5456)). If ffmpeg or avconv are installed this results in downloading `bestvideo` and `bestaudio` separately and muxing them together into a single file giving the best overall quality available. Otherwise it falls back to `best` and results in downloading the best available quality served as a single file. `best` is also needed for videos that don't come from YouTube because they don't provide the audio and video in two different files. If you want to only download some DASH formats (for example if you are not interested in getting videos with a resolution higher than 1080p), you can add `-f bestvideo[height<=?1080]+bestaudio/best` to your configuration file. Note that if you use youtube-dl to stream to `stdout` (and most likely to pipe it to your media player then), i.e. you explicitly specify output template as `-o -`, youtube-dl still uses `-f best` format selection in order to start content delivery immediately to your player and not to wait until `bestvideo` and `bestaudio` are downloaded and muxed.
|
||||
|
||||
If you want to preserve the old format selection behavior (prior to youtube-dl 2015.04.26), i.e. you want to download the best available quality media served as a single file, you should explicitly specify your choice with `-f best`. You may want to add it to the [configuration file](#configuration) in order not to type it every time you run youtube-dl.
|
||||
|
||||
Examples (note on Windows you may need to use double quotes instead of single):
|
||||
#### Format selection examples
|
||||
|
||||
Note on Windows you may need to use double quotes instead of single.
|
||||
|
||||
```bash
|
||||
# Download best mp4 format available or any other best if no mp4 available
|
||||
$ youtube-dl -f 'bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best'
|
||||
@ -733,7 +755,7 @@ means you're using an outdated version of Python. Please update to Python 2.6 or
|
||||
|
||||
### What is this binary file? Where has the code gone?
|
||||
|
||||
Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
|
||||
Since June 2012 ([#342](https://github.com/rg3/youtube-dl/issues/342)) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.
|
||||
|
||||
### The exe throws a *Runtime error from Visual C++*
|
||||
|
||||
@ -816,7 +838,9 @@ If you want to create a build of youtube-dl yourself, you'll need
|
||||
|
||||
### Adding support for a new site
|
||||
|
||||
If you want to add support for a new site, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||
If you want to add support for a new site, first of all **make sure** this site is **not dedicated to [copyright infringement](#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. youtube-dl does **not support** such sites, thus pull requests adding support for them **will be rejected**.

After you have ensured this site is distributing its content legally, you can follow this quick list (assuming your service is called `yourextractor`):
|
||||
|
||||
1. [Fork this repository](https://github.com/rg3/youtube-dl/fork)
|
||||
2. Check out the source code with `git clone git@github.com:YOUR_GITHUB_USERNAME/youtube-dl.git`
|
||||
@ -864,16 +888,17 @@ If you want to add support for a new site, you can follow this quick list (assum
|
||||
```
|
||||
5. Add an import in [`youtube_dl/extractor/__init__.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/__init__.py).
|
||||
6. Run `python test/test_download.py TestDownload.test_YourExtractor`. This *should fail* at first, but you can continually re-run it until you're done. If you decide to add more than one test, then rename ``_TEST`` to ``_TESTS`` and make it into a list of dictionaries. The tests will then be named `TestDownload.test_YourExtractor`, `TestDownload.test_YourExtractor_1`, `TestDownload.test_YourExtractor_2`, etc. (A minimal `_TESTS` skeleton is sketched right after this list.)
|
||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py#L62-L200). Add tests and code for as many as you want.
|
||||
8. If you can, check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
||||
9. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||
7. Have a look at [`youtube_dl/extractor/common.py`](https://github.com/rg3/youtube-dl/blob/master/youtube_dl/extractor/common.py) for possible helper methods and a [detailed description of what your extractor should and may return](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L68-L226). Add tests and code for as many as you want.
|
||||
8. Keep in mind that the only mandatory fields in the info dict for a successful extraction are `id`, `title` and either `url` or `formats`, i.e. the critical data without which the extraction does not make any sense. This means that [any field](https://github.com/rg3/youtube-dl/blob/58525c94d547be1c8167d16c298bdd75506db328/youtube_dl/extractor/common.py#L138-L226) apart from the aforementioned mandatory ones should be treated **as optional**, and the extraction should be **tolerant** of situations where the sources for these fields can potentially be unavailable (even if they are always available at the moment) and **future-proof** so as not to break the extraction of the general-purpose mandatory fields. For example, if you have some intermediate dict `meta` that is a source of metadata and it has a key `summary` that you want to extract and put into the resulting info dict as `description`, you should be prepared for this key to be missing from the `meta` dict, i.e. you should extract it as `meta.get('summary')` and not `meta['summary']`. Similarly, you should pass `fatal=False` when extracting optional data from a webpage with `_search_regex`/`_html_search_regex`.
|
||||
9. Check the code with [flake8](https://pypi.python.org/pypi/flake8).
|
||||
10. When the tests pass, [add](http://git-scm.com/docs/git-add) the new files and [commit](http://git-scm.com/docs/git-commit) them and [push](http://git-scm.com/docs/git-push) the result, like this:
|
||||
|
||||
$ git add youtube_dl/extractor/__init__.py
|
||||
$ git add youtube_dl/extractor/yourextractor.py
|
||||
$ git commit -m '[yourextractor] Add new extractor'
|
||||
$ git push origin yourextractor
|
||||
|
||||
10. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||
11. Finally, [create a pull request](https://help.github.com/articles/creating-a-pull-request). We'll then review and merge it.
|
||||
|
||||
In any case, thank you very much for your contributions!
|
||||
|
||||
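As a companion to step 6, here is a minimal, hypothetical `_TESTS` list for the `yourextractor` example (the URLs and metadata are placeholders, not a real site; `only_matching` marks a URL-matching-only test for which no download is attempted):

```python
from .common import InfoExtractor


class YourExtractorIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?yourextractor\.com/watch/(?P<id>[0-9]+)'
    _TESTS = [{
        # full download test: checks the md5 of the (truncated) download
        # plus every field listed in info_dict
        'url': 'http://yourextractor.com/watch/42',
        'md5': 'TODO: md5 sum of the first 10241 bytes of the video file (use --test)',
        'info_dict': {
            'id': '42',
            'ext': 'mp4',
            'title': 'Video title goes here',
        },
    }, {
        # URL-matching-only test: no download is attempted
        'url': 'http://yourextractor.com/embed/42',
        'only_matching': True,
    }]
```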
@ -935,9 +960,9 @@ with youtube_dl.YoutubeDL(ydl_opts) as ydl:
|
||||
|
||||
Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/issues>. Unless you were prompted so or there is another pertinent reason (e.g. GitHub fails to accept the bug report), please do not send bug reports via personal email. For discussions, join us in the IRC channel [#youtube-dl](irc://chat.freenode.net/#youtube-dl) on freenode ([webchat](http://webchat.freenode.net/?randomnick=1&channels=youtube-dl)).
|
||||
|
||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. add `-v` flag to your command line, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||
**Please include the full output of youtube-dl when run with `-v`**, i.e. **add** `-v` flag to **your command line**, copy the **whole** output and post it in the issue body wrapped in \`\`\` for better formatting. It should look similar to this:
|
||||
```
|
||||
$ youtube-dl -v http://www.youtube.com/watch?v=BaW_jenozKcj
|
||||
$ youtube-dl -v <your command line>
|
||||
[debug] System config: []
|
||||
[debug] User config: []
|
||||
[debug] Command-line args: [u'-v', u'http://www.youtube.com/watch?v=BaW_jenozKcj']
|
||||
|
@ -30,6 +30,7 @@
|
||||
- **AlJazeera**
|
||||
- **Allocine**
|
||||
- **AlphaPorno**
|
||||
- **AnimeOnDemand**
|
||||
- **anitube.se**
|
||||
- **AnySex**
|
||||
- **Aparat**
|
||||
@ -49,6 +50,7 @@
|
||||
- **arte.tv:ddc**
|
||||
- **arte.tv:embed**
|
||||
- **arte.tv:future**
|
||||
- **arte.tv:magazine**
|
||||
- **AtresPlayer**
|
||||
- **ATTTechChannel**
|
||||
- **AudiMedia**
|
||||
@ -75,6 +77,7 @@
|
||||
- **BleacherReportCMS**
|
||||
- **blinkx**
|
||||
- **Bloomberg**
|
||||
- **BokeCC**
|
||||
- **Bpb**: Bundeszentrale für politische Bildung
|
||||
- **BR**: Bayerischer Rundfunk Mediathek
|
||||
- **Break**
|
||||
@ -191,6 +194,7 @@
|
||||
- **faz.net**
|
||||
- **fc2**
|
||||
- **Fczenit**
|
||||
- **features.aol.com**
|
||||
- **fernsehkritik.tv**
|
||||
- **Firstpost**
|
||||
- **FiveTV**
|
||||
@ -290,6 +294,7 @@
|
||||
- **kontrtube**: KontrTube.ru - Труба зовёт
|
||||
- **KrasView**: Красвью
|
||||
- **Ku6**
|
||||
- **KUSI**
|
||||
- **kuwo:album**: 酷我音乐 - 专辑
|
||||
- **kuwo:category**: 酷我音乐 - 分类
|
||||
- **kuwo:chart**: 酷我音乐 - 排行榜
|
||||
@ -298,12 +303,11 @@
|
||||
- **kuwo:song**: 酷我音乐
|
||||
- **la7.tv**
|
||||
- **Laola1Tv**
|
||||
- **Le**: 乐视网
|
||||
- **Lecture2Go**
|
||||
- **Lemonde**
|
||||
- **Letv**: 乐视网
|
||||
- **LePlaylist**
|
||||
- **LetvCloud**: 乐视云
|
||||
- **LetvPlaylist**
|
||||
- **LetvTv**
|
||||
- **Libsyn**
|
||||
- **life:embed**
|
||||
- **lifenews**: LIFE | NEWS
|
||||
@ -360,7 +364,7 @@
|
||||
- **MySpace:album**
|
||||
- **MySpass**
|
||||
- **Myvi**
|
||||
- **myvideo**
|
||||
- **myvideo** (Currently broken)
|
||||
- **MyVidster**
|
||||
- **n-tv.de**
|
||||
- **NationalGeographic**
|
||||
@ -410,6 +414,7 @@
|
||||
- **NowTV** (Currently broken)
|
||||
- **NowTVList**
|
||||
- **nowvideo**: NowVideo
|
||||
- **Noz**
|
||||
- **npo**: npo.nl and ntr.nl
|
||||
- **npo.nl:live**
|
||||
- **npo.nl:radio**
|
||||
@ -417,6 +422,7 @@
|
||||
- **Npr**
|
||||
- **NRK**
|
||||
- **NRKPlaylist**
|
||||
- **NRKSkole**: NRK Skole
|
||||
- **NRKTV**: NRK TV and NRK Radio
|
||||
- **ntv.ru**
|
||||
- **Nuvid**
|
||||
@ -460,6 +466,7 @@
|
||||
- **PornHd**
|
||||
- **PornHub**
|
||||
- **PornHubPlaylist**
|
||||
- **PornHubUserVideos**
|
||||
- **Pornotube**
|
||||
- **PornoVoisines**
|
||||
- **PornoXO**
|
||||
@ -522,6 +529,7 @@
|
||||
- **screen.yahoo:search**: Yahoo screen search
|
||||
- **Screencast**
|
||||
- **ScreencastOMatic**
|
||||
- **ScreenJunkies**
|
||||
- **ScreenwaveMedia**
|
||||
- **SenateISVP**
|
||||
- **ServingSys**
|
||||
@ -555,7 +563,6 @@
|
||||
- **southpark.de**
|
||||
- **southpark.nl**
|
||||
- **southparkstudios.dk**
|
||||
- **Space**
|
||||
- **SpankBang**
|
||||
- **Spankwire**
|
||||
- **Spiegel**
|
||||
@ -615,6 +622,7 @@
|
||||
- **TMZ**
|
||||
- **TMZArticle**
|
||||
- **TNAFlix**
|
||||
- **TNAFlixNetworkEmbed**
|
||||
- **toggle**
|
||||
- **tou.tv**
|
||||
- **Toypics**: Toypics user profile
|
||||
@ -655,6 +663,7 @@
|
||||
- **twitch:video**
|
||||
- **twitch:vod**
|
||||
- **twitter**
|
||||
- **twitter:amplify**
|
||||
- **twitter:card**
|
||||
- **Ubu**
|
||||
- **udemy**
|
||||
@ -664,6 +673,7 @@
|
||||
- **Urort**: NRK P3 Urørt
|
||||
- **ustream**
|
||||
- **ustream:channel**
|
||||
- **Ustudio**
|
||||
- **Varzesh3**
|
||||
- **Vbox7**
|
||||
- **VeeHD**
|
||||
@ -679,7 +689,7 @@
|
||||
- **video.mit.edu**
|
||||
- **VideoDetective**
|
||||
- **videofy.me**
|
||||
- **VideoMega** (Currently broken)
|
||||
- **VideoMega**
|
||||
- **videomore**
|
||||
- **videomore:season**
|
||||
- **videomore:video**
|
||||
|
@ -11,8 +11,11 @@ import sys
|
||||
|
||||
import youtube_dl.extractor
|
||||
from youtube_dl import YoutubeDL
|
||||
from youtube_dl.utils import (
|
||||
from youtube_dl.compat import (
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
)
|
||||
from youtube_dl.utils import (
|
||||
preferredencoding,
|
||||
write_string,
|
||||
)
|
||||
@ -42,7 +45,7 @@ def report_warning(message):
|
||||
Print the message to stderr, it will be prefixed with 'WARNING:'
|
||||
If stderr is a tty file the 'WARNING:' will be colored
|
||||
'''
|
||||
if sys.stderr.isatty() and os.name != 'nt':
|
||||
if sys.stderr.isatty() and compat_os_name != 'nt':
|
||||
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||
else:
|
||||
_msg_header = 'WARNING:'
|
||||
|
@ -234,7 +234,7 @@ class TestFormatSelection(unittest.TestCase):
|
||||
|
||||
def test_youtube_format_selection(self):
|
||||
order = [
|
||||
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
|
||||
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '17', '36', '13',
|
||||
# Apple HTTP Live Streaming
|
||||
'96', '95', '94', '93', '92', '132', '151',
|
||||
# 3D
|
||||
|
@ -52,7 +52,12 @@ class TestHTTP(unittest.TestCase):
|
||||
('localhost', 0), HTTPTestRequestHandler)
|
||||
self.httpd.socket = ssl.wrap_socket(
|
||||
self.httpd.socket, certfile=certfn, server_side=True)
|
||||
self.port = self.httpd.socket.getsockname()[1]
|
||||
if os.name == 'java':
|
||||
# In Jython SSLSocket is not a subclass of socket.socket
|
||||
sock = self.httpd.socket.sock
|
||||
else:
|
||||
sock = self.httpd.socket
|
||||
self.port = sock.getsockname()[1]
|
||||
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
|
||||
self.server_thread.daemon = True
|
||||
self.server_thread.start()
|
||||
|
test/test_iqiyi_sdk_interpreter.py (new file): 47 additions

@@ -0,0 +1,47 @@
#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import FakeYDL
from youtube_dl.extractor import IqiyiIE


class IqiyiIEWithCredentials(IqiyiIE):
    def _get_login_info(self):
        return 'foo', 'bar'


class WarningLogger(object):
    def __init__(self):
        self.messages = []

    def warning(self, msg):
        self.messages.append(msg)

    def debug(self, msg):
        pass

    def error(self, msg):
        pass


class TestIqiyiSDKInterpreter(unittest.TestCase):
    def test_iqiyi_sdk_interpreter(self):
        '''
        Test the functionality of IqiyiSDKInterpreter by trying to log in

        If `sign` is incorrect, /validate call throws an HTTP 556 error
        '''
        logger = WarningLogger()
        ie = IqiyiIEWithCredentials(FakeYDL({'logger': logger}))
        ie._login()
        self.assertTrue('unable to log in:' in logger.messages[0])


if __name__ == '__main__':
    unittest.main()
@ -18,6 +18,7 @@ import xml.etree.ElementTree
|
||||
from youtube_dl.utils import (
|
||||
age_restricted,
|
||||
args_to_str,
|
||||
encode_base_n,
|
||||
clean_html,
|
||||
DateRange,
|
||||
detect_exe_version,
|
||||
@ -35,6 +36,7 @@ from youtube_dl.utils import (
|
||||
is_html,
|
||||
js_to_json,
|
||||
limit_length,
|
||||
ohdave_rsa_encrypt,
|
||||
OnDemandPagedList,
|
||||
orderedSet,
|
||||
parse_duration,
|
||||
@ -59,6 +61,7 @@ from youtube_dl.utils import (
|
||||
lowercase_escape,
|
||||
url_basename,
|
||||
urlencode_postdata,
|
||||
update_url_query,
|
||||
version_tuple,
|
||||
xpath_with_ns,
|
||||
xpath_element,
|
||||
@ -74,6 +77,8 @@ from youtube_dl.utils import (
|
||||
)
|
||||
from youtube_dl.compat import (
|
||||
compat_etree_fromstring,
|
||||
compat_urlparse,
|
||||
compat_parse_qs,
|
||||
)
|
||||
|
||||
|
||||
@ -248,6 +253,7 @@ class TestUtil(unittest.TestCase):
|
||||
self.assertEqual(
|
||||
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
|
||||
'20150202')
|
||||
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
|
||||
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
|
||||
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
|
||||
|
||||
@ -451,6 +457,40 @@ class TestUtil(unittest.TestCase):
|
||||
data = urlencode_postdata({'username': 'foo@bar.com', 'password': '1234'})
|
||||
self.assertTrue(isinstance(data, bytes))
|
||||
|
||||
def test_update_url_query(self):
|
||||
def query_dict(url):
|
||||
return compat_parse_qs(compat_urlparse.urlparse(url).query)
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
|
||||
query_dict('http://example.com/path?quality=HD&format=mp4'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
|
||||
query_dict('http://example.com/path?system=LINUX&system=WINDOWS'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
|
||||
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
|
||||
query_dict('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path?manifest=f4m', {'manifest': []})),
|
||||
query_dict('http://example.com/path'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
|
||||
query_dict('http://example.com/path?system=LINUX'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
|
||||
query_dict('http://example.com/path?fields=id,formats,subtitles'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'width': 1080, 'height': 720})),
|
||||
query_dict('http://example.com/path?width=1080&height=720'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'bitrate': 5020.43})),
|
||||
query_dict('http://example.com/path?bitrate=5020.43'))
|
||||
self.assertEqual(query_dict(update_url_query(
|
||||
'http://example.com/path', {'test': '第二行тест'})),
|
||||
query_dict('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
|
||||
|
||||
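# (Illustrative sketch, not part of the diff.) A rough, Python 3 stdlib-only
# reimplementation that behaves consistently with the test cases above; the
# real youtube_dl.utils.update_url_query goes through the project's compat_*
# wrappers so that it also runs on Python 2.
#
#     from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
#
#     def update_url_query_sketch(url, query):
#         parsed = urlparse(url)
#         qs = parse_qs(parsed.query)
#         qs.update(query)  # values may be strings, bytes, numbers or sequences
#         # doseq=True expands sequence values into repeated parameters and
#         # drops keys whose value is an empty sequence (the 'manifest' case)
#         return urlunparse(parsed._replace(query=urlencode(qs, doseq=True)))
#
# e.g. update_url_query_sketch('http://example.com/path?manifest=f4m', {'manifest': []})
#      -> 'http://example.com/path'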
def test_dict_get(self):
|
||||
FALSE_VALUES = {
|
||||
'none': None,
|
||||
@ -792,6 +832,24 @@ The first line
|
||||
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
|
||||
['--check-certificate=true'])
|
||||
|
||||
def test_ohdave_rsa_encrypt(self):
|
||||
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
|
||||
e = 65537
|
||||
|
||||
self.assertEqual(
|
||||
ohdave_rsa_encrypt(b'aa111222', e, N),
|
||||
'726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
|
||||
|
||||
def test_encode_base_n(self):
|
||||
self.assertEqual(encode_base_n(0, 30), '0')
|
||||
self.assertEqual(encode_base_n(80, 30), '2k')
|
||||
|
||||
custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
|
||||
self.assertEqual(encode_base_n(0, 30, custom_table), '9')
|
||||
self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
|
||||
|
||||
self.assertRaises(ValueError, encode_base_n, 0, 70)
|
||||
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
|
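# (Illustrative sketch, not part of the diff.) test_encode_base_n above
# exercises a plain base-N encoder; a minimal implementation consistent with
# those assertions looks roughly like this (the real helper lives in
# youtube_dl.utils):
#
#     DEFAULT_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
#
#     def encode_base_n_sketch(num, n, table=None):
#         table = table or DEFAULT_TABLE[:n]
#         if n > len(table):
#             raise ValueError('base %d exceeds table length %d' % (n, len(table)))
#         if num == 0:
#             return table[0]
#         encoded = ''
#         while num:
#             encoded = table[num % n] + encoded
#             num //= n
#         return encoded
#
# encode_base_n_sketch(80, 30) == '2k'; with the custom table above, '7P'.
# test_ohdave_rsa_encrypt, in turn, checks textbook RSA as used for the iQiyi
# login: the plaintext bytes are mapped to an integer m and the result is the
# lowercase hex of pow(m, e, N).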
@ -24,9 +24,6 @@ import time
|
||||
import tokenize
|
||||
import traceback
|
||||
|
||||
if os.name == 'nt':
|
||||
import ctypes
|
||||
|
||||
from .compat import (
|
||||
compat_basestring,
|
||||
compat_cookiejar,
|
||||
@ -34,6 +31,7 @@ from .compat import (
|
||||
compat_get_terminal_size,
|
||||
compat_http_client,
|
||||
compat_kwargs,
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
compat_tokenize_tokenize,
|
||||
compat_urllib_error,
|
||||
@ -87,6 +85,7 @@ from .extractor import get_info_extractor, gen_extractors
|
||||
from .downloader import get_suitable_downloader
|
||||
from .downloader.rtmp import rtmpdump_version
|
||||
from .postprocessor import (
|
||||
FFmpegFixupM3u8PP,
|
||||
FFmpegFixupM4aPP,
|
||||
FFmpegFixupStretchedPP,
|
||||
FFmpegMergerPP,
|
||||
@ -95,6 +94,9 @@ from .postprocessor import (
|
||||
)
|
||||
from .version import __version__
|
||||
|
||||
if compat_os_name == 'nt':
|
||||
import ctypes
|
||||
|
||||
|
||||
class YoutubeDL(object):
|
||||
"""YoutubeDL class.
|
||||
@ -450,7 +452,7 @@ class YoutubeDL(object):
|
||||
def to_console_title(self, message):
|
||||
if not self.params.get('consoletitle', False):
|
||||
return
|
||||
if os.name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
|
||||
if compat_os_name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
|
||||
# c_wchar_p() might not be necessary if `message` is
|
||||
# already of type unicode()
|
||||
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
|
||||
@ -521,7 +523,7 @@ class YoutubeDL(object):
|
||||
else:
|
||||
if self.params.get('no_warnings'):
|
||||
return
|
||||
if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt':
|
||||
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
|
||||
_msg_header = '\033[0;33mWARNING:\033[0m'
|
||||
else:
|
||||
_msg_header = 'WARNING:'
|
||||
@ -533,7 +535,7 @@ class YoutubeDL(object):
|
||||
Do the same as trouble, but prefixes the message with 'ERROR:', colored
|
||||
in red if stderr is a tty file.
|
||||
'''
|
||||
if not self.params.get('no_color') and self._err_file.isatty() and os.name != 'nt':
|
||||
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
|
||||
_msg_header = '\033[0;31mERROR:\033[0m'
|
||||
else:
|
||||
_msg_header = 'ERROR:'
|
||||
@ -566,7 +568,7 @@ class YoutubeDL(object):
|
||||
elif template_dict.get('height'):
|
||||
template_dict['resolution'] = '%sp' % template_dict['height']
|
||||
elif template_dict.get('width'):
|
||||
template_dict['resolution'] = '?x%d' % template_dict['width']
|
||||
template_dict['resolution'] = '%dx?' % template_dict['width']
|
||||
|
||||
sanitize = lambda k, v: sanitize_filename(
|
||||
compat_str(v),
|
||||
@ -605,12 +607,12 @@ class YoutubeDL(object):
|
||||
if rejecttitle:
|
||||
if re.search(rejecttitle, title, re.IGNORECASE):
|
||||
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
|
||||
date = info_dict.get('upload_date', None)
|
||||
date = info_dict.get('upload_date')
|
||||
if date is not None:
|
||||
dateRange = self.params.get('daterange', DateRange())
|
||||
if date not in dateRange:
|
||||
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
|
||||
view_count = info_dict.get('view_count', None)
|
||||
view_count = info_dict.get('view_count')
|
||||
if view_count is not None:
|
||||
min_views = self.params.get('min_views')
|
||||
if min_views is not None and view_count < min_views:
|
||||
@ -747,18 +749,18 @@ class YoutubeDL(object):
|
||||
new_result, download=download, extra_info=extra_info)
|
||||
elif result_type == 'playlist' or result_type == 'multi_video':
|
||||
# We process each entry in the playlist
|
||||
playlist = ie_result.get('title', None) or ie_result.get('id', None)
|
||||
playlist = ie_result.get('title') or ie_result.get('id')
|
||||
self.to_screen('[download] Downloading playlist: %s' % playlist)
|
||||
|
||||
playlist_results = []
|
||||
|
||||
playliststart = self.params.get('playliststart', 1) - 1
|
||||
playlistend = self.params.get('playlistend', None)
|
||||
playlistend = self.params.get('playlistend')
|
||||
# For backwards compatibility, interpret -1 as whole list
|
||||
if playlistend == -1:
|
||||
playlistend = None
|
||||
|
||||
playlistitems_str = self.params.get('playlist_items', None)
|
||||
playlistitems_str = self.params.get('playlist_items')
|
||||
playlistitems = None
|
||||
if playlistitems_str is not None:
|
||||
def iter_playlistitems(format):
|
||||
@ -782,7 +784,7 @@ class YoutubeDL(object):
|
||||
entries = ie_entries[playliststart:playlistend]
|
||||
n_entries = len(entries)
|
||||
self.to_screen(
|
||||
"[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
|
||||
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
|
||||
(ie_result['extractor'], playlist, n_all_entries, n_entries))
|
||||
elif isinstance(ie_entries, PagedList):
|
||||
if playlistitems:
|
||||
@ -796,7 +798,7 @@ class YoutubeDL(object):
|
||||
playliststart, playlistend)
|
||||
n_entries = len(entries)
|
||||
self.to_screen(
|
||||
"[%s] playlist %s: Downloading %d videos" %
|
||||
'[%s] playlist %s: Downloading %d videos' %
|
||||
(ie_result['extractor'], playlist, n_entries))
|
||||
else: # iterable
|
||||
if playlistitems:
|
||||
@ -807,7 +809,7 @@ class YoutubeDL(object):
|
||||
ie_entries, playliststart, playlistend))
|
||||
n_entries = len(entries)
|
||||
self.to_screen(
|
||||
"[%s] playlist %s: Downloading %d videos" %
|
||||
'[%s] playlist %s: Downloading %d videos' %
|
||||
(ie_result['extractor'], playlist, n_entries))
|
||||
|
||||
if self.params.get('playlistreverse', False):
|
||||
@ -1631,12 +1633,14 @@ class YoutubeDL(object):
|
||||
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
|
||||
return
|
||||
|
||||
if success:
|
||||
if success and filename != '-':
|
||||
# Fixup content
|
||||
fixup_policy = self.params.get('fixup')
|
||||
if fixup_policy is None:
|
||||
fixup_policy = 'detect_or_warn'
|
||||
|
||||
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
|
||||
|
||||
stretched_ratio = info_dict.get('stretched_ratio')
|
||||
if stretched_ratio is not None and stretched_ratio != 1:
|
||||
if fixup_policy == 'warn':
|
||||
@ -1649,15 +1653,18 @@ class YoutubeDL(object):
|
||||
info_dict['__postprocessors'].append(stretched_pp)
|
||||
else:
|
||||
self.report_warning(
|
||||
'%s: Non-uniform pixel ratio (%s). Install ffmpeg or avconv to fix this automatically.' % (
|
||||
info_dict['id'], stretched_ratio))
|
||||
'%s: Non-uniform pixel ratio (%s). %s'
|
||||
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
|
||||
else:
|
||||
assert fixup_policy in ('ignore', 'never')
|
||||
|
||||
if info_dict.get('requested_formats') is None and info_dict.get('container') == 'm4a_dash':
|
||||
if (info_dict.get('requested_formats') is None and
|
||||
info_dict.get('container') == 'm4a_dash'):
|
||||
if fixup_policy == 'warn':
|
||||
self.report_warning('%s: writing DASH m4a. Only some players support this container.' % (
|
||||
info_dict['id']))
|
||||
self.report_warning(
|
||||
'%s: writing DASH m4a. '
|
||||
'Only some players support this container.'
|
||||
% info_dict['id'])
|
||||
elif fixup_policy == 'detect_or_warn':
|
||||
fixup_pp = FFmpegFixupM4aPP(self)
|
||||
if fixup_pp.available:
|
||||
@ -1665,8 +1672,27 @@ class YoutubeDL(object):
|
||||
info_dict['__postprocessors'].append(fixup_pp)
|
||||
else:
|
||||
self.report_warning(
|
||||
'%s: writing DASH m4a. Only some players support this container. Install ffmpeg or avconv to fix this automatically.' % (
|
||||
info_dict['id']))
|
||||
'%s: writing DASH m4a. '
|
||||
'Only some players support this container. %s'
|
||||
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
|
||||
else:
|
||||
assert fixup_policy in ('ignore', 'never')
|
||||
|
||||
if (info_dict.get('protocol') == 'm3u8_native' or
|
||||
info_dict.get('protocol') == 'm3u8' and
|
||||
self.params.get('hls_prefer_native')):
|
||||
if fixup_policy == 'warn':
|
||||
self.report_warning('%s: malformated aac bitstream.' % (
|
||||
info_dict['id']))
|
||||
elif fixup_policy == 'detect_or_warn':
|
||||
fixup_pp = FFmpegFixupM3u8PP(self)
|
||||
if fixup_pp.available:
|
||||
info_dict.setdefault('__postprocessors', [])
|
||||
info_dict['__postprocessors'].append(fixup_pp)
|
||||
else:
|
||||
self.report_warning(
|
||||
'%s: malformated aac bitstream. %s'
|
||||
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
|
||||
else:
|
||||
assert fixup_policy in ('ignore', 'never')
|
||||
|
||||
|
@ -355,6 +355,7 @@ def _real_main(argv=None):
|
||||
'youtube_include_dash_manifest': opts.youtube_include_dash_manifest,
|
||||
'encoding': opts.encoding,
|
||||
'extract_flat': opts.extract_flat,
|
||||
'mark_watched': opts.mark_watched,
|
||||
'merge_output_format': opts.merge_output_format,
|
||||
'postprocessors': postprocessors,
|
||||
'fixup': opts.fixup,
|
||||
|
@ -7,7 +7,7 @@ from __future__ import unicode_literals
|
||||
|
||||
import sys
|
||||
|
||||
if __package__ is None and not hasattr(sys, "frozen"):
|
||||
if __package__ is None and not hasattr(sys, 'frozen'):
|
||||
# direct call of __main__.py
|
||||
import os.path
|
||||
path = os.path.realpath(os.path.abspath(__file__))
|
||||
|
@ -161,7 +161,7 @@ def aes_decrypt_text(data, password, key_size_bytes):
|
||||
nonce = data[:NONCE_LENGTH_BYTES]
|
||||
cipher = data[NONCE_LENGTH_BYTES:]
|
||||
|
||||
class Counter:
|
||||
class Counter(object):
|
||||
__value = nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)
|
||||
|
||||
def next_value(self):
|
||||
|
@ -181,20 +181,20 @@ except ImportError: # Python < 3.4
|
||||
# parameter := attribute "=" value
|
||||
url = req.get_full_url()
|
||||
|
||||
scheme, data = url.split(":", 1)
|
||||
mediatype, data = data.split(",", 1)
|
||||
scheme, data = url.split(':', 1)
|
||||
mediatype, data = data.split(',', 1)
|
||||
|
||||
# even base64 encoded data URLs might be quoted so unquote in any case:
|
||||
data = compat_urllib_parse_unquote_to_bytes(data)
|
||||
if mediatype.endswith(";base64"):
|
||||
if mediatype.endswith(';base64'):
|
||||
data = binascii.a2b_base64(data)
|
||||
mediatype = mediatype[:-7]
|
||||
|
||||
if not mediatype:
|
||||
mediatype = "text/plain;charset=US-ASCII"
|
||||
mediatype = 'text/plain;charset=US-ASCII'
|
||||
|
||||
headers = email.message_from_string(
|
||||
"Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
|
||||
'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
|
||||
|
||||
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
|
||||
|
||||
@ -268,7 +268,7 @@ except ImportError: # Python 2
|
||||
nv = name_value.split('=', 1)
|
||||
if len(nv) != 2:
|
||||
if strict_parsing:
|
||||
raise ValueError("bad query field: %r" % (name_value,))
|
||||
raise ValueError('bad query field: %r' % (name_value,))
|
||||
# Handle case of a control-name with no equal sign
|
||||
if keep_blank_values:
|
||||
nv.append('')
|
||||
@ -326,6 +326,9 @@ def compat_ord(c):
|
||||
return ord(c)
|
||||
|
||||
|
||||
compat_os_name = os._name if os.name == 'java' else os.name
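# (Illustrative note, not part of the diff.) Under Jython, os.name is 'java'
# on every platform, while os._name carries the underlying platform name, so
# checks that previously read `os.name != 'nt'` are rewritten throughout this
# changeset to use compat_os_name instead, e.g. in YoutubeDL.report_warning:
#
#     if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
#         _msg_header = '\033[0;33mWARNING:\033[0m'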
|
||||
|
||||
|
||||
if sys.version_info >= (3, 0):
|
||||
compat_getenv = os.getenv
|
||||
compat_expanduser = os.path.expanduser
|
||||
@ -346,7 +349,7 @@ else:
|
||||
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
|
||||
# for different platforms with correct environment variables decoding.
|
||||
|
||||
if os.name == 'posix':
|
||||
if compat_os_name == 'posix':
|
||||
def compat_expanduser(path):
|
||||
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
|
||||
do nothing."""
|
||||
@ -370,7 +373,7 @@ else:
|
||||
userhome = pwent.pw_dir
|
||||
userhome = userhome.rstrip('/')
|
||||
return (userhome + path[i:]) or '/'
|
||||
elif os.name == 'nt' or os.name == 'ce':
|
||||
elif compat_os_name == 'nt' or compat_os_name == 'ce':
|
||||
def compat_expanduser(path):
|
||||
"""Expand ~ and ~user constructs.
|
||||
|
||||
@ -466,7 +469,7 @@ if sys.version_info < (2, 7):
|
||||
if err is not None:
|
||||
raise err
|
||||
else:
|
||||
raise socket.error("getaddrinfo returns an empty list")
|
||||
raise socket.error('getaddrinfo returns an empty list')
|
||||
else:
|
||||
compat_socket_create_connection = socket.create_connection
|
||||
|
||||
@ -556,6 +559,7 @@ __all__ = [
|
||||
'compat_itertools_count',
|
||||
'compat_kwargs',
|
||||
'compat_ord',
|
||||
'compat_os_name',
|
||||
'compat_parse_qs',
|
||||
'compat_print',
|
||||
'compat_shlex_split',
|
||||
|
@ -5,6 +5,7 @@ import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
from ..compat import compat_os_name
|
||||
from ..utils import (
|
||||
encodeFilename,
|
||||
error_to_compat_str,
|
||||
@ -157,7 +158,7 @@ class FileDownloader(object):
|
||||
|
||||
def slow_down(self, start_time, now, byte_counter):
|
||||
"""Sleep if the download speed is over the rate limit."""
|
||||
rate_limit = self.params.get('ratelimit', None)
|
||||
rate_limit = self.params.get('ratelimit')
|
||||
if rate_limit is None or byte_counter == 0:
|
||||
return
|
||||
if now is None:
|
||||
@ -219,7 +220,7 @@ class FileDownloader(object):
|
||||
if self.params.get('progress_with_newline', False):
|
||||
self.to_screen(fullmsg)
|
||||
else:
|
||||
if os.name == 'nt':
|
||||
if compat_os_name == 'nt':
|
||||
prev_len = getattr(self, '_report_progress_prev_line_length',
|
||||
0)
|
||||
if prev_len > len(fullmsg):
|
||||
|
@ -1,67 +1,59 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
from .common import FileDownloader
|
||||
from ..utils import sanitized_Request
|
||||
from .fragment import FragmentFD
|
||||
from ..utils import (
|
||||
sanitize_open,
|
||||
encodeFilename,
|
||||
)
|
||||
|
||||
|
||||
class DashSegmentsFD(FileDownloader):
|
||||
class DashSegmentsFD(FragmentFD):
|
||||
"""
|
||||
Download segments in a DASH manifest
|
||||
"""
|
||||
|
||||
FD_NAME = 'dashsegments'
|
||||
|
||||
def real_download(self, filename, info_dict):
|
||||
self.report_destination(filename)
|
||||
tmpfilename = self.temp_name(filename)
|
||||
base_url = info_dict['url']
|
||||
segment_urls = info_dict['segment_urls']
|
||||
segment_urls = [info_dict['segment_urls'][0]] if self.params.get('test', False) else info_dict['segment_urls']
|
||||
initialization_url = info_dict.get('initialization_url')
|
||||
|
||||
is_test = self.params.get('test', False)
|
||||
remaining_bytes = self._TEST_FILE_SIZE if is_test else None
|
||||
byte_counter = 0
|
||||
ctx = {
|
||||
'filename': filename,
|
||||
'total_frags': len(segment_urls) + (1 if initialization_url else 0),
|
||||
}
|
||||
|
||||
def append_url_to_file(outf, target_url, target_name, remaining_bytes=None):
|
||||
self.to_screen('[DashSegments] %s: Downloading %s' % (info_dict['id'], target_name))
|
||||
req = sanitized_Request(target_url)
|
||||
if remaining_bytes is not None:
|
||||
req.add_header('Range', 'bytes=0-%d' % (remaining_bytes - 1))
|
||||
|
||||
data = self.ydl.urlopen(req).read()
|
||||
|
||||
if remaining_bytes is not None:
|
||||
data = data[:remaining_bytes]
|
||||
|
||||
outf.write(data)
|
||||
return len(data)
|
||||
self._prepare_and_start_frag_download(ctx)
|
||||
|
||||
def combine_url(base_url, target_url):
|
||||
if re.match(r'^https?://', target_url):
|
||||
return target_url
|
||||
return '%s%s%s' % (base_url, '' if base_url.endswith('/') else '/', target_url)
|
||||
|
||||
with open(tmpfilename, 'wb') as outf:
|
||||
if info_dict.get('initialization_url'):
|
||||
append_url_to_file(
|
||||
outf, combine_url(base_url, info_dict['initialization_url']),
|
||||
'initialization segment')
|
||||
for i, segment_url in enumerate(segment_urls):
|
||||
segment_len = append_url_to_file(
|
||||
outf, combine_url(base_url, segment_url),
|
||||
'segment %d / %d' % (i + 1, len(segment_urls)),
|
||||
remaining_bytes)
|
||||
byte_counter += segment_len
|
||||
if remaining_bytes is not None:
|
||||
remaining_bytes -= segment_len
|
||||
if remaining_bytes <= 0:
|
||||
break
|
||||
segments_filenames = []
|
||||
|
||||
self.try_rename(tmpfilename, filename)
|
||||
def append_url_to_file(target_url, target_filename):
|
||||
success = ctx['dl'].download(target_filename, {'url': combine_url(base_url, target_url)})
|
||||
if not success:
|
||||
return False
|
||||
down, target_sanitized = sanitize_open(target_filename, 'rb')
|
||||
ctx['dest_stream'].write(down.read())
|
||||
down.close()
|
||||
segments_filenames.append(target_sanitized)
|
||||
|
||||
self._hook_progress({
|
||||
'downloaded_bytes': byte_counter,
|
||||
'total_bytes': byte_counter,
|
||||
'filename': filename,
|
||||
'status': 'finished',
|
||||
})
|
||||
if initialization_url:
|
||||
append_url_to_file(initialization_url, ctx['tmpfilename'] + '-Init')
|
||||
for i, segment_url in enumerate(segment_urls):
|
||||
segment_filename = '%s-Seg%d' % (ctx['tmpfilename'], i)
|
||||
append_url_to_file(segment_url, segment_filename)
|
||||
|
||||
self._finish_frag_download(ctx)
|
||||
|
||||
for segment_file in segments_filenames:
|
||||
os.remove(encodeFilename(segment_file))
|
||||
|
||||
return True
|
||||
|
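The rewrite above moves DashSegmentsFD off its ad-hoc HTTP reads and onto the shared FragmentFD machinery: declare the expected fragment count in a context dict, download each segment to its own temporary file, append it to the destination stream, and remove the temporaries afterwards. A minimal standalone sketch of that pattern, assuming a caller-supplied fetch_fragment(url, path) in place of ctx['dl'].download() (all names here are placeholders, not part of the diff):

import os

def download_fragments(fragment_urls, dest_path, fetch_fragment):
    # fetch_fragment(url, path) stands in for the inner HTTP downloader;
    # it saves one fragment to its own temporary file and returns success.
    fragment_files = []
    with open(dest_path, 'wb') as dest_stream:
        for i, url in enumerate(fragment_urls):
            frag_path = '%s-Frag%d' % (dest_path, i)
            if not fetch_fragment(url, frag_path):
                continue  # a failed fragment is simply skipped in this sketch
            with open(frag_path, 'rb') as frag:
                dest_stream.write(frag.read())
            fragment_files.append(frag_path)
    # the per-fragment files are only needed until they are appended
    for frag_path in fragment_files:
        os.remove(frag_path)
    return dest_path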
@@ -38,7 +38,7 @@ class FragmentFD(FileDownloader):
             'continuedl': True,
             'quiet': True,
             'noprogress': True,
-            'ratelimit': self.params.get('ratelimit', None),
+            'ratelimit': self.params.get('ratelimit'),
             'retries': self.params.get('retries', 0),
             'test': self.params.get('test', False),
         }
@@ -99,7 +99,8 @@ class FragmentFD(FileDownloader):
                 state['eta'] = self.calc_eta(
                     start, time_now, estimated_size,
                     state['downloaded_bytes'])
-                state['speed'] = s.get('speed')
+                state['speed'] = s.get('speed') or ctx.get('speed')
+                ctx['speed'] = state['speed']
                 ctx['prev_frag_downloaded_bytes'] = frag_downloaded_bytes
             self._hook_progress(state)
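The extra ctx['speed'] bookkeeping above keeps the last measured speed around so the progress hook does not report None in the gaps between fragments. A rough illustration of the idea, with report_progress as a stand-in for the real hook:

def report_progress(state, ctx, fragment_speed):
    # Fall back to the speed remembered from the previous fragment when the
    # current reading is missing (e.g. right at a fragment boundary).
    state['speed'] = fragment_speed or ctx.get('speed')
    ctx['speed'] = state['speed']
    return state

ctx = {}
print(report_progress({}, ctx, 1024.0))  # uses the fresh measurement
print(report_progress({}, ctx, None))    # falls back to 1024.0 remembered in ctx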
@@ -140,8 +140,8 @@ class HttpFD(FileDownloader):

             if data_len is not None:
                 data_len = int(data_len) + resume_len
-                min_data_len = self.params.get("min_filesize", None)
-                max_data_len = self.params.get("max_filesize", None)
+                min_data_len = self.params.get('min_filesize')
+                max_data_len = self.params.get('max_filesize')
                 if min_data_len is not None and data_len < min_data_len:
                     self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
                     return False
@@ -94,15 +94,15 @@ class RtmpFD(FileDownloader):
             return proc.returncode

         url = info_dict['url']
-        player_url = info_dict.get('player_url', None)
-        page_url = info_dict.get('page_url', None)
-        app = info_dict.get('app', None)
-        play_path = info_dict.get('play_path', None)
-        tc_url = info_dict.get('tc_url', None)
-        flash_version = info_dict.get('flash_version', None)
+        player_url = info_dict.get('player_url')
+        page_url = info_dict.get('page_url')
+        app = info_dict.get('app')
+        play_path = info_dict.get('play_path')
+        tc_url = info_dict.get('tc_url')
+        flash_version = info_dict.get('flash_version')
         live = info_dict.get('rtmp_live', False)
-        conn = info_dict.get('rtmp_conn', None)
-        protocol = info_dict.get('rtmp_protocol', None)
+        conn = info_dict.get('rtmp_conn')
+        protocol = info_dict.get('rtmp_protocol')
         real_time = info_dict.get('rtmp_real_time', False)
         no_resume = info_dict.get('no_resume', False)
         continue_dl = self.params.get('continuedl', True)
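The .get() clean-ups above are behaviour-preserving: dict.get() already returns None when the key is absent, so spelling the None default out is redundant. For example:

info_dict = {'url': 'rtmp://example.invalid/app/stream'}  # example data only
assert info_dict.get('player_url') is None
assert info_dict.get('player_url', None) is None  # identical result, more noise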
@ -20,9 +20,13 @@ from .aftonbladet import AftonbladetIE
|
||||
from .airmozilla import AirMozillaIE
|
||||
from .aljazeera import AlJazeeraIE
|
||||
from .alphaporno import AlphaPornoIE
|
||||
from .animeondemand import AnimeOnDemandIE
|
||||
from .anitube import AnitubeIE
|
||||
from .anysex import AnySexIE
|
||||
from .aol import AolIE
|
||||
from .aol import (
|
||||
AolIE,
|
||||
AolFeaturesIE,
|
||||
)
|
||||
from .allocine import AllocineIE
|
||||
from .aparat import AparatIE
|
||||
from .appleconnect import AppleConnectIE
|
||||
@ -44,6 +48,7 @@ from .arte import (
|
||||
ArteTVFutureIE,
|
||||
ArteTVCinemaIE,
|
||||
ArteTVDDCIE,
|
||||
ArteTVMagazineIE,
|
||||
ArteTVEmbedIE,
|
||||
)
|
||||
from .atresplayer import AtresPlayerIE
|
||||
@ -72,6 +77,7 @@ from .bleacherreport import (
|
||||
)
|
||||
from .blinkx import BlinkxIE
|
||||
from .bloomberg import BloombergIE
|
||||
from .bokecc import BokeCCIE
|
||||
from .bpb import BpbIE
|
||||
from .br import BRIE
|
||||
from .breakcom import BreakIE
|
||||
@ -337,6 +343,7 @@ from .konserthusetplay import KonserthusetPlayIE
|
||||
from .kontrtube import KontrTubeIE
|
||||
from .krasview import KrasViewIE
|
||||
from .ku6 import Ku6IE
|
||||
from .kusi import KUSIIE
|
||||
from .kuwo import (
|
||||
KuwoIE,
|
||||
KuwoAlbumIE,
|
||||
@ -349,10 +356,9 @@ from .la7 import LA7IE
|
||||
from .laola1tv import Laola1TvIE
|
||||
from .lecture2go import Lecture2GoIE
|
||||
from .lemonde import LemondeIE
|
||||
from .letv import (
|
||||
LetvIE,
|
||||
LetvTvIE,
|
||||
LetvPlaylistIE,
|
||||
from .leeco import (
|
||||
LeIE,
|
||||
LePlaylistIE,
|
||||
LetvCloudIE,
|
||||
)
|
||||
from .libsyn import LibsynIE
|
||||
@ -489,6 +495,7 @@ from .nowtv import (
|
||||
NowTVIE,
|
||||
NowTVListIE,
|
||||
)
|
||||
from .noz import NozIE
|
||||
from .npo import (
|
||||
NPOIE,
|
||||
NPOLiveIE,
|
||||
@ -502,6 +509,7 @@ from .npr import NprIE
|
||||
from .nrk import (
|
||||
NRKIE,
|
||||
NRKPlaylistIE,
|
||||
NRKSkoleIE,
|
||||
NRKTVIE,
|
||||
)
|
||||
from .ntvde import NTVDeIE
|
||||
@ -552,6 +560,7 @@ from .pornhd import PornHdIE
|
||||
from .pornhub import (
|
||||
PornHubIE,
|
||||
PornHubPlaylistIE,
|
||||
PornHubUserVideosIE,
|
||||
)
|
||||
from .pornotube import PornotubeIE
|
||||
from .pornovoisines import PornoVoisinesIE
|
||||
@ -619,6 +628,7 @@ from .sbs import SBSIE
|
||||
from .scivee import SciVeeIE
|
||||
from .screencast import ScreencastIE
|
||||
from .screencastomatic import ScreencastOMaticIE
|
||||
from .screenjunkies import ScreenJunkiesIE
|
||||
from .screenwavemedia import ScreenwaveMediaIE, TeamFourIE
|
||||
from .senateisvp import SenateISVPIE
|
||||
from .servingsys import ServingSysIE
|
||||
@ -664,7 +674,6 @@ from .southpark import (
|
||||
SouthParkEsIE,
|
||||
SouthParkNlIE
|
||||
)
|
||||
from .space import SpaceIE
|
||||
from .spankbang import SpankBangIE
|
||||
from .spankwire import SpankwireIE
|
||||
from .spiegel import SpiegelIE, SpiegelArticleIE
|
||||
@ -732,6 +741,7 @@ from .tmz import (
|
||||
TMZArticleIE,
|
||||
)
|
||||
from .tnaflix import (
|
||||
TNAFlixNetworkEmbedIE,
|
||||
TNAFlixIE,
|
||||
EMPFlixIE,
|
||||
MovieFapIE,
|
||||
@ -793,7 +803,11 @@ from .twitch import (
|
||||
TwitchBookmarksIE,
|
||||
TwitchStreamIE,
|
||||
)
|
||||
from .twitter import TwitterCardIE, TwitterIE
|
||||
from .twitter import (
|
||||
TwitterCardIE,
|
||||
TwitterIE,
|
||||
TwitterAmplifyIE,
|
||||
)
|
||||
from .ubu import UbuIE
|
||||
from .udemy import (
|
||||
UdemyIE,
|
||||
@ -804,6 +818,7 @@ from .digiteka import DigitekaIE
|
||||
from .unistra import UnistraIE
|
||||
from .urort import UrortIE
|
||||
from .ustream import UstreamIE, UstreamChannelIE
|
||||
from .ustudio import UstudioIE
|
||||
from .varzesh3 import Varzesh3IE
|
||||
from .vbox7 import Vbox7IE
|
||||
from .veehd import VeeHDIE
|
||||
|
@ -28,7 +28,7 @@ class AENetworksIE(InfoExtractor):
|
||||
'info_dict': {
|
||||
'id': 'eg47EERs_JsZ',
|
||||
'ext': 'mp4',
|
||||
'title': "Winter Is Coming",
|
||||
'title': 'Winter Is Coming',
|
||||
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
|
||||
},
|
||||
'params': {
|
||||
|
160
youtube_dl/extractor/animeondemand.py
Normal file
160
youtube_dl/extractor/animeondemand.py
Normal file
@ -0,0 +1,160 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
encode_dict,
|
||||
ExtractorError,
|
||||
sanitized_Request,
|
||||
urlencode_postdata,
|
||||
)
|
||||
|
||||
|
||||
class AnimeOnDemandIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?anime-on-demand\.de/anime/(?P<id>\d+)'
|
||||
_LOGIN_URL = 'https://www.anime-on-demand.de/users/sign_in'
|
||||
_APPLY_HTML5_URL = 'https://www.anime-on-demand.de/html5apply'
|
||||
_NETRC_MACHINE = 'animeondemand'
|
||||
_TEST = {
|
||||
'url': 'https://www.anime-on-demand.de/anime/161',
|
||||
'info_dict': {
|
||||
'id': '161',
|
||||
'title': 'Grimgar, Ashes and Illusions (OmU)',
|
||||
'description': 'md5:6681ce3c07c7189d255ac6ab23812d31',
|
||||
},
|
||||
'playlist_mincount': 4,
|
||||
}
|
||||
|
||||
def _login(self):
|
||||
(username, password) = self._get_login_info()
|
||||
if username is None:
|
||||
return
|
||||
|
||||
login_page = self._download_webpage(
|
||||
self._LOGIN_URL, None, 'Downloading login page')
|
||||
|
||||
login_form = self._form_hidden_inputs('new_user', login_page)
|
||||
|
||||
login_form.update({
|
||||
'user[login]': username,
|
||||
'user[password]': password,
|
||||
})
|
||||
|
||||
post_url = self._search_regex(
|
||||
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
|
||||
'post url', default=self._LOGIN_URL, group='url')
|
||||
|
||||
if not post_url.startswith('http'):
|
||||
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
|
||||
|
||||
request = sanitized_Request(
|
||||
post_url, urlencode_postdata(encode_dict(login_form)))
|
||||
request.add_header('Referer', self._LOGIN_URL)
|
||||
|
||||
response = self._download_webpage(
|
||||
request, None, 'Logging in as %s' % username)
|
||||
|
||||
if all(p not in response for p in ('>Logout<', 'href="/users/sign_out"')):
|
||||
error = self._search_regex(
|
||||
r'<p class="alert alert-danger">(.+?)</p>',
|
||||
response, 'error', default=None)
|
||||
if error:
|
||||
raise ExtractorError('Unable to login: %s' % error, expected=True)
|
||||
raise ExtractorError('Unable to log in')
|
||||
|
||||
def _real_initialize(self):
|
||||
self._login()
|
||||
|
||||
def _real_extract(self, url):
|
||||
anime_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, anime_id)
|
||||
|
||||
if 'data-playlist=' not in webpage:
|
||||
self._download_webpage(
|
||||
self._APPLY_HTML5_URL, anime_id,
|
||||
'Activating HTML5 beta', 'Unable to apply HTML5 beta')
|
||||
webpage = self._download_webpage(url, anime_id)
|
||||
|
||||
csrf_token = self._html_search_meta(
|
||||
'csrf-token', webpage, 'csrf token', fatal=True)
|
||||
|
||||
anime_title = self._html_search_regex(
|
||||
r'(?s)<h1[^>]+itemprop="name"[^>]*>(.+?)</h1>',
|
||||
webpage, 'anime name')
|
||||
anime_description = self._html_search_regex(
|
||||
r'(?s)<div[^>]+itemprop="description"[^>]*>(.+?)</div>',
|
||||
webpage, 'anime description', default=None)
|
||||
|
||||
entries = []
|
||||
|
||||
for episode_html in re.findall(r'(?s)<h3[^>]+class="episodebox-title".+?>Episodeninhalt<', webpage):
|
||||
m = re.search(
|
||||
r'class="episodebox-title"[^>]+title="Episode (?P<number>\d+) - (?P<title>.+?)"', episode_html)
|
||||
if not m:
|
||||
continue
|
||||
|
||||
episode_number = int(m.group('number'))
|
||||
episode_title = m.group('title')
|
||||
video_id = 'episode-%d' % episode_number
|
||||
|
||||
common_info = {
|
||||
'id': video_id,
|
||||
'series': anime_title,
|
||||
'episode': episode_title,
|
||||
'episode_number': episode_number,
|
||||
}
|
||||
|
||||
formats = []
|
||||
|
||||
playlist_url = self._search_regex(
|
||||
r'data-playlist=(["\'])(?P<url>.+?)\1',
|
||||
episode_html, 'data playlist', default=None, group='url')
|
||||
if playlist_url:
|
||||
request = sanitized_Request(
|
||||
compat_urlparse.urljoin(url, playlist_url),
|
||||
headers={
|
||||
'X-Requested-With': 'XMLHttpRequest',
|
||||
'X-CSRF-Token': csrf_token,
|
||||
'Referer': url,
|
||||
'Accept': 'application/json, text/javascript, */*; q=0.01',
|
||||
})
|
||||
|
||||
playlist = self._download_json(
|
||||
request, video_id, 'Downloading playlist JSON', fatal=False)
|
||||
if playlist:
|
||||
playlist = playlist['playlist'][0]
|
||||
title = playlist['title']
|
||||
description = playlist.get('description')
|
||||
for source in playlist.get('sources', []):
|
||||
file_ = source.get('file')
|
||||
if file_ and determine_ext(file_) == 'm3u8':
|
||||
formats = self._extract_m3u8_formats(
|
||||
file_, video_id, 'mp4',
|
||||
entry_protocol='m3u8_native', m3u8_id='hls')
|
||||
|
||||
if formats:
|
||||
f = common_info.copy()
|
||||
f.update({
|
||||
'title': title,
|
||||
'description': description,
|
||||
'formats': formats,
|
||||
})
|
||||
entries.append(f)
|
||||
|
||||
m = re.search(
|
||||
r'data-dialog-header=(["\'])(?P<title>.+?)\1[^>]+href=(["\'])(?P<href>.+?)\3[^>]*>Teaser<',
|
||||
episode_html)
|
||||
if m:
|
||||
f = common_info.copy()
|
||||
f.update({
|
||||
'id': '%s-teaser' % f['id'],
|
||||
'title': m.group('title'),
|
||||
'url': compat_urlparse.urljoin(url, m.group('href')),
|
||||
})
|
||||
entries.append(f)
|
||||
|
||||
return self.playlist_result(entries, anime_id, anime_title, anime_description)
|
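The new extractor above fetches each episode playlist through an XMLHttpRequest-style request that carries the page's CSRF token. A simplified sketch of that request shape using only the standard library (the URL, referer and token values are placeholders, and the real code uses youtube-dl's sanitized_Request instead):

import json
from urllib.request import Request, urlopen

def fetch_playlist(playlist_url, page_url, csrf_token):
    # Mirror the headers the site expects for its AJAX playlist endpoint.
    req = Request(playlist_url, headers={
        'X-Requested-With': 'XMLHttpRequest',
        'X-CSRF-Token': csrf_token,
        'Referer': page_url,
        'Accept': 'application/json, text/javascript, */*; q=0.01',
    })
    return json.loads(urlopen(req).read().decode('utf-8'))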
@ -1,24 +1,11 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class AolIE(InfoExtractor):
|
||||
IE_NAME = 'on.aol.com'
|
||||
_VALID_URL = r'''(?x)
|
||||
(?:
|
||||
aol-video:|
|
||||
http://on\.aol\.com/
|
||||
(?:
|
||||
video/.*-|
|
||||
playlist/(?P<playlist_display_id>[^/?#]+?)-(?P<playlist_id>[0-9]+)[?#].*_videoid=
|
||||
)
|
||||
)
|
||||
(?P<id>[0-9]+)
|
||||
(?:$|\?)
|
||||
'''
|
||||
_VALID_URL = r'(?:aol-video:|http://on\.aol\.com/video/.*-)(?P<id>[0-9]+)(?:$|\?)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://on.aol.com/video/u-s--official-warns-of-largest-ever-irs-phone-scam-518167793?icid=OnHomepageC2Wide_MustSee_Img',
|
||||
@ -29,42 +16,31 @@ class AolIE(InfoExtractor):
|
||||
'title': 'U.S. Official Warns Of \'Largest Ever\' IRS Phone Scam',
|
||||
},
|
||||
'add_ie': ['FiveMin'],
|
||||
}, {
|
||||
'url': 'http://on.aol.com/playlist/brace-yourself---todays-weirdest-news-152147?icid=OnHomepageC4_Omg_Img#_videoid=518184316',
|
||||
'info_dict': {
|
||||
'id': '152147',
|
||||
'title': 'Brace Yourself - Today\'s Weirdest News',
|
||||
},
|
||||
'playlist_mincount': 10,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
playlist_id = mobj.group('playlist_id')
|
||||
if not playlist_id or self._downloader.params.get('noplaylist'):
|
||||
return self.url_result('5min:%s' % video_id)
|
||||
video_id = self._match_id(url)
|
||||
return self.url_result('5min:%s' % video_id)
|
||||
|
||||
self.to_screen('Downloading playlist %s - add --no-playlist to just download video %s' % (playlist_id, video_id))
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
title = self._html_search_regex(
|
||||
r'<h1 class="video-title[^"]*">(.+?)</h1>', webpage, 'title')
|
||||
playlist_html = self._search_regex(
|
||||
r"(?s)<ul\s+class='video-related[^']*'>(.*?)</ul>", webpage,
|
||||
'playlist HTML')
|
||||
entries = [{
|
||||
'_type': 'url',
|
||||
'url': 'aol-video:%s' % m.group('id'),
|
||||
'ie_key': 'Aol',
|
||||
} for m in re.finditer(
|
||||
r"<a\s+href='.*videoid=(?P<id>[0-9]+)'\s+class='video-thumb'>",
|
||||
playlist_html)]
|
||||
class AolFeaturesIE(InfoExtractor):
|
||||
IE_NAME = 'features.aol.com'
|
||||
_VALID_URL = r'http://features\.aol\.com/video/(?P<id>[^/?#]+)'
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': playlist_id,
|
||||
'display_id': mobj.group('playlist_display_id'),
|
||||
'title': title,
|
||||
'entries': entries,
|
||||
}
|
||||
_TESTS = [{
|
||||
'url': 'http://features.aol.com/video/behind-secret-second-careers-late-night-talk-show-hosts',
|
||||
'md5': '7db483bb0c09c85e241f84a34238cc75',
|
||||
'info_dict': {
|
||||
'id': '519507715',
|
||||
'ext': 'mp4',
|
||||
'title': 'What To Watch - February 17, 2016',
|
||||
},
|
||||
'add_ie': ['FiveMin'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
return self.url_result(self._search_regex(
|
||||
r'<script type="text/javascript" src="(https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js[^"]+)"',
|
||||
webpage, '5min embed url'), 'FiveMin')
|
||||
|
@ -12,7 +12,7 @@ from ..utils import (
|
||||
|
||||
class AppleTrailersIE(InfoExtractor):
|
||||
IE_NAME = 'appletrailers'
|
||||
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
|
||||
_VALID_URL = r'https?://(?:www\.|movie)?trailers\.apple\.com/(?:trailers|ca)/(?P<company>[^/]+)/(?P<movie>[^/]+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://trailers.apple.com/trailers/wb/manofsteel/',
|
||||
'info_dict': {
|
||||
@ -73,6 +73,9 @@ class AppleTrailersIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://trailers.apple.com/ca/metropole/autrui/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://movietrailers.apple.com/trailers/focus_features/kuboandthetwostrings/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_JSON_RE = r'iTunes.playURL\((.*?)\);'
|
||||
|
@ -23,7 +23,7 @@ from ..utils import (
|
||||
|
||||
|
||||
class ArteTvIE(InfoExtractor):
|
||||
_VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
|
||||
_VALID_URL = r'http://videos\.arte\.tv/(?P<lang>fr|de|en|es)/.*-(?P<id>.*?)\.html'
|
||||
IE_NAME = 'arte.tv'
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -63,7 +63,7 @@ class ArteTvIE(InfoExtractor):
|
||||
|
||||
class ArteTVPlus7IE(InfoExtractor):
|
||||
IE_NAME = 'arte.tv:+7'
|
||||
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
|
||||
_VALID_URL = r'https?://(?:www\.)?arte\.tv/guide/(?P<lang>fr|de|en|es)/(?:(?:sendungen|emissions|embed)/)?(?P<id>[^/]+)/(?P<name>[^/?#&+])'
|
||||
|
||||
@classmethod
|
||||
def _extract_url_info(cls, url):
|
||||
@ -102,13 +102,32 @@ class ArteTVPlus7IE(InfoExtractor):
|
||||
iframe_url = find_iframe_url(webpage, None)
|
||||
if not iframe_url:
|
||||
embed_url = self._html_search_regex(
|
||||
r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url')
|
||||
player = self._download_json(
|
||||
embed_url, video_id, 'Downloading player page')
|
||||
iframe_url = find_iframe_url(player['html'])
|
||||
json_url = compat_parse_qs(
|
||||
compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
|
||||
return self._extract_from_json_url(json_url, video_id, lang)
|
||||
r'arte_vp_url_oembed=\'([^\']+?)\'', webpage, 'embed url', default=None)
|
||||
if embed_url:
|
||||
player = self._download_json(
|
||||
embed_url, video_id, 'Downloading player page')
|
||||
iframe_url = find_iframe_url(player['html'])
|
||||
# en and es URLs produce react-based pages with different layout (e.g.
|
||||
# http://www.arte.tv/guide/en/053330-002-A/carnival-italy?zone=world)
|
||||
if not iframe_url:
|
||||
program = self._search_regex(
|
||||
r'program\s*:\s*({.+?["\']embed_html["\'].+?}),?\s*\n',
|
||||
webpage, 'program', default=None)
|
||||
if program:
|
||||
embed_html = self._parse_json(program, video_id)
|
||||
if embed_html:
|
||||
iframe_url = find_iframe_url(embed_html['embed_html'])
|
||||
if iframe_url:
|
||||
json_url = compat_parse_qs(
|
||||
compat_urllib_parse_urlparse(iframe_url).query)['json_url'][0]
|
||||
if json_url:
|
||||
return self._extract_from_json_url(json_url, video_id, lang)
|
||||
# Different kind of embed URL (e.g.
|
||||
# http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium)
|
||||
embed_url = self._search_regex(
|
||||
r'<iframe[^>]+src=(["\'])(?P<url>.+?)\1',
|
||||
webpage, 'embed url', group='url')
|
||||
return self.url_result(embed_url)
|
||||
|
||||
def _extract_from_json_url(self, json_url, video_id, lang):
|
||||
info = self._download_json(json_url, video_id)
|
||||
@ -116,7 +135,7 @@ class ArteTVPlus7IE(InfoExtractor):
|
||||
|
||||
upload_date_str = player_info.get('shootingDate')
|
||||
if not upload_date_str:
|
||||
upload_date_str = player_info.get('VDA', '').split(' ')[0]
|
||||
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
|
||||
|
||||
title = player_info['VTI'].strip()
|
||||
subtitle = player_info.get('VSU', '').strip()
|
||||
@ -132,27 +151,30 @@ class ArteTVPlus7IE(InfoExtractor):
|
||||
}
|
||||
qfunc = qualities(['HQ', 'MQ', 'EQ', 'SQ'])
|
||||
|
||||
LANGS = {
|
||||
'fr': 'F',
|
||||
'de': 'A',
|
||||
'en': 'E[ANG]',
|
||||
'es': 'E[ESP]',
|
||||
}
|
||||
|
||||
formats = []
|
||||
for format_id, format_dict in player_info['VSR'].items():
|
||||
f = dict(format_dict)
|
||||
versionCode = f.get('versionCode')
|
||||
|
||||
langcode = {
|
||||
'fr': 'F',
|
||||
'de': 'A',
|
||||
}.get(lang, lang)
|
||||
lang_rexs = [r'VO?%s' % langcode, r'VO?.-ST%s' % langcode]
|
||||
lang_pref = (
|
||||
None if versionCode is None else (
|
||||
10 if any(re.match(r, versionCode) for r in lang_rexs)
|
||||
else -10))
|
||||
langcode = LANGS.get(lang, lang)
|
||||
lang_rexs = [r'VO?%s-' % re.escape(langcode), r'VO?.-ST%s$' % re.escape(langcode)]
|
||||
lang_pref = None
|
||||
if versionCode:
|
||||
matched_lang_rexs = [r for r in lang_rexs if re.match(r, versionCode)]
|
||||
lang_pref = -10 if not matched_lang_rexs else 10 * len(matched_lang_rexs)
|
||||
source_pref = 0
|
||||
if versionCode is not None:
|
||||
# The original version with subtitles has lower relevance
|
||||
if re.match(r'VO-ST(F|A)', versionCode):
|
||||
if re.match(r'VO-ST(F|A|E)', versionCode):
|
||||
source_pref -= 10
|
||||
# The version with sourds/mal subtitles has also lower relevance
|
||||
elif re.match(r'VO?(F|A)-STM\1', versionCode):
|
||||
elif re.match(r'VO?(F|A|E)-STM\1', versionCode):
|
||||
source_pref -= 9
|
||||
format = {
|
||||
'format_id': format_id,
|
||||
@ -185,7 +207,7 @@ class ArteTVPlus7IE(InfoExtractor):
|
||||
# It also uses the arte_vp_url url from the webpage to extract the information
|
||||
class ArteTVCreativeIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:creative'
|
||||
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de)/(?:magazine?/)?(?P<id>[^?#]+)'
|
||||
_VALID_URL = r'https?://creative\.arte\.tv/(?P<lang>fr|de|en|es)/(?:magazine?/)?(?P<id>[^/?#&]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://creative.arte.tv/de/magazin/agentur-amateur-corporate-design',
|
||||
@ -209,7 +231,7 @@ class ArteTVCreativeIE(ArteTVPlus7IE):
|
||||
|
||||
class ArteTVFutureIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:future'
|
||||
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de)/(?P<id>.+)'
|
||||
_VALID_URL = r'https?://future\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://future.arte.tv/fr/info-sciences/les-ecrevisses-aussi-sont-anxieuses',
|
||||
@ -217,6 +239,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):
|
||||
'id': '050940-028-A',
|
||||
'ext': 'mp4',
|
||||
'title': 'Les écrevisses aussi peuvent être anxieuses',
|
||||
'upload_date': '20140902',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://future.arte.tv/fr/la-science-est-elle-responsable',
|
||||
@ -226,7 +249,7 @@ class ArteTVFutureIE(ArteTVPlus7IE):
|
||||
|
||||
class ArteTVDDCIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:ddc'
|
||||
_VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>.+)'
|
||||
_VALID_URL = r'https?://ddc\.arte\.tv/(?P<lang>emission|folge)/(?P<id>[^/?#&]+)'
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id, lang = self._extract_url_info(url)
|
||||
@ -244,7 +267,7 @@ class ArteTVDDCIE(ArteTVPlus7IE):
|
||||
|
||||
class ArteTVConcertIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:concert'
|
||||
_VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
|
||||
_VALID_URL = r'https?://concert\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://concert.arte.tv/de/notwist-im-pariser-konzertclub-divan-du-monde',
|
||||
@ -261,7 +284,7 @@ class ArteTVConcertIE(ArteTVPlus7IE):
|
||||
|
||||
class ArteTVCinemaIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:cinema'
|
||||
_VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>de|fr)/(?P<id>.+)'
|
||||
_VALID_URL = r'https?://cinema\.arte\.tv/(?P<lang>fr|de|en|es)/(?P<id>.+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://cinema.arte.tv/de/node/38291',
|
||||
@ -276,6 +299,37 @@ class ArteTVCinemaIE(ArteTVPlus7IE):
|
||||
}
|
||||
|
||||
|
||||
class ArteTVMagazineIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:magazine'
|
||||
_VALID_URL = r'https?://(?:www\.)?arte\.tv/magazine/[^/]+/(?P<lang>fr|de|en|es)/(?P<id>[^/?#&]+)'
|
||||
|
||||
_TESTS = [{
|
||||
# Embedded via <iframe src="http://www.arte.tv/arte_vp/index.php?json_url=..."
|
||||
'url': 'http://www.arte.tv/magazine/trepalium/fr/entretien-avec-le-realisateur-vincent-lannoo-trepalium',
|
||||
'md5': '2a9369bcccf847d1c741e51416299f25',
|
||||
'info_dict': {
|
||||
'id': '065965-000-A',
|
||||
'ext': 'mp4',
|
||||
'title': 'Trepalium - Extrait Ep.01',
|
||||
'upload_date': '20160121',
|
||||
},
|
||||
}, {
|
||||
# Embedded via <iframe src="http://www.arte.tv/guide/fr/embed/054813-004-A/medium"
|
||||
'url': 'http://www.arte.tv/magazine/trepalium/fr/episode-0406-replay-trepalium',
|
||||
'md5': 'fedc64fc7a946110fe311634e79782ca',
|
||||
'info_dict': {
|
||||
'id': '054813-004_PLUS7-F',
|
||||
'ext': 'mp4',
|
||||
'title': 'Trepalium (4/6)',
|
||||
'description': 'md5:10057003c34d54e95350be4f9b05cb40',
|
||||
'upload_date': '20160218',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.arte.tv/magazine/metropolis/de/frank-woeste-german-paris-metropolis',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
|
||||
class ArteTVEmbedIE(ArteTVPlus7IE):
|
||||
IE_NAME = 'arte.tv:embed'
|
||||
_VALID_URL = r'''(?x)
|
||||
|
@ -10,9 +10,9 @@ from ..utils import (
|
||||
|
||||
|
||||
class AudiMediaIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?audimedia\.tv/(?:en|de)/vid/(?P<id>[^/?#]+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
|
||||
_TEST = {
|
||||
'url': 'https://audimedia.tv/en/vid/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test',
|
||||
'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
|
||||
'md5': '79a8b71c46d49042609795ab59779b66',
|
||||
'info_dict': {
|
||||
'id': '1565',
|
||||
@ -32,7 +32,10 @@ class AudiMediaIE(InfoExtractor):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
raw_payload = self._search_regex(r'<script[^>]+class="amtv-embed"[^>]+id="([^"]+)"', webpage, 'raw payload')
|
||||
raw_payload = self._search_regex([
|
||||
r'class="amtv-embed"[^>]+id="([^"]+)"',
|
||||
r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
|
||||
], webpage, 'raw payload')
|
||||
_, stage_mode, video_id, lang = raw_payload.split('-')
|
||||
|
||||
# TODO: handle s and e stage_mode (live streams and ended live streams)
|
||||
@ -59,13 +62,19 @@ class AudiMediaIE(InfoExtractor):
|
||||
video_version_url = video_version.get('download_url') or video_version.get('stream_url')
|
||||
if not video_version_url:
|
||||
continue
|
||||
formats.append({
|
||||
f = {
|
||||
'url': video_version_url,
|
||||
'width': int_or_none(video_version.get('width')),
|
||||
'height': int_or_none(video_version.get('height')),
|
||||
'abr': int_or_none(video_version.get('audio_bitrate')),
|
||||
'vbr': int_or_none(video_version.get('video_bitrate')),
|
||||
})
|
||||
}
|
||||
bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
|
||||
if bitrate:
|
||||
f.update({
|
||||
'format_id': 'http-%s' % bitrate,
|
||||
})
|
||||
formats.append(f)
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
|
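The refactor above only assigns a format_id when a bitrate can actually be read out of the stream URL, instead of appending the dict unconditionally. A small sketch of that guard (the URLs are made up for illustration):

import re

def build_format(video_version_url):
    f = {'url': video_version_url}
    m = re.search(r'(\d+)k', video_version_url)  # e.g. ..._1500k.mp4
    if m:
        f['format_id'] = 'http-%s' % m.group(1)
    return f

print(build_format('https://example.invalid/clip_1500k.mp4'))
print(build_format('https://example.invalid/clip.mp4'))  # no bitrate hint, no format_id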
@ -86,7 +86,7 @@ class BBCCoUkIE(InfoExtractor):
|
||||
'id': 'b00yng1d',
|
||||
'ext': 'flv',
|
||||
'title': 'The Voice UK: Series 3: Blind Auditions 5',
|
||||
'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
|
||||
'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
|
||||
'duration': 5100,
|
||||
},
|
||||
'params': {
|
||||
|
@ -28,10 +28,10 @@ class BleacherReportIE(InfoExtractor):
|
||||
'add_ie': ['Ooyala'],
|
||||
}, {
|
||||
'url': 'http://bleacherreport.com/articles/2586817-aussie-golfers-get-fright-of-their-lives-after-being-chased-by-angry-kangaroo',
|
||||
'md5': 'af5f90dc9c7ba1c19d0a3eac806bbf50',
|
||||
'md5': '6a5cd403418c7b01719248ca97fb0692',
|
||||
'info_dict': {
|
||||
'id': '2586817',
|
||||
'ext': 'mp4',
|
||||
'ext': 'webm',
|
||||
'title': 'Aussie Golfers Get Fright of Their Lives After Being Chased by Angry Kangaroo',
|
||||
'timestamp': 1446839961,
|
||||
'uploader': 'Sean Fay',
|
||||
@ -93,10 +93,14 @@ class BleacherReportCMSIE(AMPIE):
|
||||
'md5': '8c2c12e3af7805152675446c905d159b',
|
||||
'info_dict': {
|
||||
'id': '8fd44c2f-3dc5-4821-9118-2c825a98c0e1',
|
||||
'ext': 'flv',
|
||||
'ext': 'mp4',
|
||||
'title': 'Cena vs. Rollins Would Expose the Heavyweight Division',
|
||||
'description': 'md5:984afb4ade2f9c0db35f3267ed88b36e',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
60
youtube_dl/extractor/bokecc.py
Normal file
60
youtube_dl/extractor/bokecc.py
Normal file
@ -0,0 +1,60 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_parse_qs
|
||||
from ..utils import ExtractorError
|
||||
|
||||
|
||||
class BokeCCBaseIE(InfoExtractor):
|
||||
def _extract_bokecc_formats(self, webpage, video_id, format_id=None):
|
||||
player_params_str = self._html_search_regex(
|
||||
r'<(?:script|embed)[^>]+src="http://p\.bokecc\.com/player\?([^"]+)',
|
||||
webpage, 'player params')
|
||||
|
||||
player_params = compat_parse_qs(player_params_str)
|
||||
|
||||
info_xml = self._download_xml(
|
||||
'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % (
|
||||
player_params['siteid'][0], player_params['vid'][0]), video_id)
|
||||
|
||||
formats = [{
|
||||
'format_id': format_id,
|
||||
'url': quality.find('./copy').attrib['playurl'],
|
||||
'preference': int(quality.attrib['value']),
|
||||
} for quality in info_xml.findall('./video/quality')]
|
||||
|
||||
self._sort_formats(formats)
|
||||
|
||||
return formats
|
||||
|
||||
|
||||
class BokeCCIE(BokeCCBaseIE):
|
||||
_IE_DESC = 'CC视频'
|
||||
_VALID_URL = r'http://union\.bokecc\.com/playvideo\.bo\?(?P<query>.*)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://union.bokecc.com/playvideo.bo?vid=E44D40C15E65EA30&uid=CD0C5D3C8614B28B',
|
||||
'info_dict': {
|
||||
'id': 'CD0C5D3C8614B28B_E44D40C15E65EA30',
|
||||
'ext': 'flv',
|
||||
'title': 'BokeCC Video',
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
qs = compat_parse_qs(re.match(self._VALID_URL, url).group('query'))
|
||||
if not qs.get('vid') or not qs.get('uid'):
|
||||
raise ExtractorError('Invalid URL', expected=True)
|
||||
|
||||
video_id = '%s_%s' % (qs['uid'][0], qs['vid'][0])
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': 'BokeCC Video', # no title provided in the webpage
|
||||
'formats': self._extract_bokecc_formats(webpage, video_id),
|
||||
}
|
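Splitting the player-params and playinfo XML handling into BokeCCBaseIE lets other extractors reuse it. A hypothetical subclass would look roughly like this (the site, URL pattern and page layout are invented for illustration):

from .bokecc import BokeCCBaseIE  # relative import as used inside youtube_dl/extractor/

class SomeSiteIE(BokeCCBaseIE):  # hypothetical subclass, not part of the diff
    _VALID_URL = r'https?://(?:www\.)?example\.invalid/video/(?P<id>\d+)'

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            # the shared helper locates the p.bokecc.com player params in the
            # page and turns the playinfo XML qualities into format dicts
            'formats': self._extract_bokecc_formats(webpage, video_id),
        }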
@ -4,12 +4,13 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import js_to_json
|
||||
|
||||
|
||||
class C56IE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:(?:www|player)\.)?56\.com/(?:.+?/)?(?:v_|(?:play_album.+-))(?P<textid>.+?)\.(?:html|swf)'
|
||||
IE_NAME = '56.com'
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
'url': 'http://www.56.com/u39/v_OTM0NDA3MTY.html',
|
||||
'md5': 'e59995ac63d0457783ea05f93f12a866',
|
||||
'info_dict': {
|
||||
@ -18,12 +19,29 @@ class C56IE(InfoExtractor):
|
||||
'title': '网事知多少 第32期:车怒',
|
||||
'duration': 283.813,
|
||||
},
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.56.com/u47/v_MTM5NjQ5ODc2.html',
|
||||
'md5': '',
|
||||
'info_dict': {
|
||||
'id': '82247482',
|
||||
'title': '爱的诅咒之杜鹃花开',
|
||||
},
|
||||
'playlist_count': 7,
|
||||
'add_ie': ['Sohu'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url, flags=re.VERBOSE)
|
||||
text_id = mobj.group('textid')
|
||||
|
||||
webpage = self._download_webpage(url, text_id)
|
||||
sohu_video_info_str = self._search_regex(
|
||||
r'var\s+sohuVideoInfo\s*=\s*({[^}]+});', webpage, 'Sohu video info', default=None)
|
||||
if sohu_video_info_str:
|
||||
sohu_video_info = self._parse_json(
|
||||
sohu_video_info_str, text_id, transform_source=js_to_json)
|
||||
return self.url_result(sohu_video_info['url'], 'Sohu')
|
||||
|
||||
page = self._download_json(
|
||||
'http://vxml.56.com/json/%s/' % text_id, text_id, 'Downloading video info')
|
||||
|
||||
|
@ -6,7 +6,7 @@ from ..utils import float_or_none
|
||||
|
||||
class CanvasIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?canvas\.be/video/(?:[^/]+/)*(?P<id>[^/?#&]+)'
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
|
||||
'md5': 'ea838375a547ac787d4064d8c7860a6c',
|
||||
'info_dict': {
|
||||
@ -18,7 +18,27 @@ class CanvasIE(InfoExtractor):
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'duration': 49.02,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
# with subtitles
|
||||
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
|
||||
'info_dict': {
|
||||
'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
|
||||
'display_id': 'pieter-0167',
|
||||
'ext': 'mp4',
|
||||
'title': 'Pieter 0167',
|
||||
'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'duration': 2553.08,
|
||||
'subtitles': {
|
||||
'nl': [{
|
||||
'ext': 'vtt',
|
||||
}],
|
||||
},
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
@ -54,6 +74,14 @@ class CanvasIE(InfoExtractor):
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
subtitles = {}
|
||||
subtitle_urls = data.get('subtitleUrls')
|
||||
if isinstance(subtitle_urls, list):
|
||||
for subtitle in subtitle_urls:
|
||||
subtitle_url = subtitle.get('url')
|
||||
if subtitle_url and subtitle.get('type') == 'CLOSED':
|
||||
subtitles.setdefault('nl', []).append({'url': subtitle_url})
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
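The subtitle handling added above collects only the 'CLOSED' entries from subtitleUrls into the {language: [{'url': ...}]} shape that info dicts expect. Roughly:

def collect_subtitles(subtitle_urls, lang='nl'):
    subtitles = {}
    if isinstance(subtitle_urls, list):
        for subtitle in subtitle_urls:
            subtitle_url = subtitle.get('url')
            if subtitle_url and subtitle.get('type') == 'CLOSED':
                subtitles.setdefault(lang, []).append({'url': subtitle_url})
    return subtitles

# example data shaped like the site's JSON, values invented for illustration
print(collect_subtitles([{'type': 'CLOSED', 'url': 'https://example.invalid/pieter.vtt'}]))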
@ -62,4 +90,5 @@ class CanvasIE(InfoExtractor):
|
||||
'formats': formats,
|
||||
'duration': float_or_none(data.get('duration'), 1000),
|
||||
'thumbnail': data.get('posterImageUrl'),
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
@ -3,7 +3,10 @@ from __future__ import unicode_literals
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .theplatform import ThePlatformIE
|
||||
from ..utils import parse_duration
|
||||
from ..utils import (
|
||||
parse_duration,
|
||||
find_xpath_attr,
|
||||
)
|
||||
|
||||
|
||||
class CBSNewsIE(ThePlatformIE):
|
||||
@ -46,6 +49,15 @@ class CBSNewsIE(ThePlatformIE):
|
||||
},
|
||||
]
|
||||
|
||||
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
|
||||
closed_caption_e = find_xpath_attr(smil, self._xpath_ns('.//param', namespace), 'name', 'ClosedCaptionURL')
|
||||
return {
|
||||
'en': [{
|
||||
'ext': 'ttml',
|
||||
'url': closed_caption_e.attrib['value'],
|
||||
}]
|
||||
} if closed_caption_e is not None and closed_caption_e.attrib.get('value') else []
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
@ -61,12 +73,6 @@ class CBSNewsIE(ThePlatformIE):
|
||||
thumbnail = item.get('mediaImage') or item.get('thumbnail')
|
||||
|
||||
subtitles = {}
|
||||
if 'mpxRefId' in video_info:
|
||||
subtitles['en'] = [{
|
||||
'ext': 'ttml',
|
||||
'url': 'http://www.cbsnews.com/videos/captions/%s.adb_xml' % video_info['mpxRefId'],
|
||||
}]
|
||||
|
||||
formats = []
|
||||
for format_id in ['RtmpMobileLow', 'RtmpMobileHigh', 'Hls', 'RtmpDesktop']:
|
||||
pid = item.get('media' + format_id)
|
||||
|
@ -45,7 +45,7 @@ class CCCIE(InfoExtractor):
|
||||
title = self._html_search_regex(
|
||||
r'(?s)<h1>(.*?)</h1>', webpage, 'title')
|
||||
description = self._html_search_regex(
|
||||
r"(?s)<h3>About</h3>(.+?)<h3>",
|
||||
r'(?s)<h3>About</h3>(.+?)<h3>',
|
||||
webpage, 'description', fatal=False)
|
||||
upload_date = unified_strdate(self._html_search_regex(
|
||||
r"(?s)<span[^>]+class='[^']*fa-calendar-o'[^>]*>(.+?)</span>",
|
||||
|
@ -177,16 +177,16 @@ class CeskaTelevizeIE(InfoExtractor):
|
||||
for divider in [1000, 60, 60, 100]:
|
||||
components.append(msec % divider)
|
||||
msec //= divider
|
||||
return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
|
||||
return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)
|
||||
|
||||
def _fix_subtitle(subtitle):
|
||||
for line in subtitle.splitlines():
|
||||
m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
|
||||
m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
|
||||
if m:
|
||||
yield m.group(1)
|
||||
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
|
||||
yield "{0} --> {1}".format(start, stop)
|
||||
yield '{0} --> {1}'.format(start, stop)
|
||||
else:
|
||||
yield line
|
||||
|
||||
return "\r\n".join(_fix_subtitle(subtitles))
|
||||
return '\r\n'.join(_fix_subtitle(subtitles))
|
||||
|
@ -21,6 +21,10 @@ class CinemassacreIE(InfoExtractor):
|
||||
'title': '“Angry Video Game Nerd: The Movie” – Trailer',
|
||||
'description': 'md5:fb87405fcb42a331742a0dce2708560b',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
},
|
||||
{
|
||||
'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
|
||||
@ -31,14 +35,18 @@ class CinemassacreIE(InfoExtractor):
|
||||
'upload_date': '20131002',
|
||||
'title': 'The Mummy’s Hand (1940)',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
},
|
||||
{
|
||||
# Youtube embedded video
|
||||
'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/',
|
||||
'md5': 'df4cf8a1dcedaec79a73d96d83b99023',
|
||||
'md5': 'ec9838a5520ef5409b3e4e42fcb0a3b9',
|
||||
'info_dict': {
|
||||
'id': 'OEVzPCY2T-g',
|
||||
'ext': 'mp4',
|
||||
'ext': 'webm',
|
||||
'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles',
|
||||
'upload_date': '20061207',
|
||||
'uploader': 'Cinemassacre',
|
||||
@ -49,12 +57,12 @@ class CinemassacreIE(InfoExtractor):
|
||||
{
|
||||
# Youtube embedded video
|
||||
'url': 'http://cinemassacre.com/2006/09/01/mckids/',
|
||||
'md5': '6eb30961fa795fedc750eac4881ad2e1',
|
||||
'md5': '7393c4e0f54602ad110c793eb7a6513a',
|
||||
'info_dict': {
|
||||
'id': 'FnxsNhuikpo',
|
||||
'ext': 'mp4',
|
||||
'ext': 'webm',
|
||||
'upload_date': '20060901',
|
||||
'uploader': 'Cinemassacre Extras',
|
||||
'uploader': 'Cinemassacre Extra',
|
||||
'description': 'md5:de9b751efa9e45fbaafd9c8a1123ed53',
|
||||
'uploader_id': 'Cinemassacre',
|
||||
'title': 'AVGN: McKids',
|
||||
@ -69,7 +77,11 @@ class CinemassacreIE(InfoExtractor):
|
||||
'description': 'Let’s Play Mario Kart 64 !! Mario Kart 64 is a classic go-kart racing game released for the Nintendo 64 (N64). Today James & Mike do 4 player Battle Mode with Kyle and Bootsy!',
|
||||
'title': 'Mario Kart 64 (Nintendo 64) James & Mike Mondays',
|
||||
'upload_date': '20150525',
|
||||
}
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}
|
||||
]
|
||||
|
||||
|
@ -51,9 +51,7 @@ class CNETIE(ThePlatformIE):
|
||||
uploader = None
|
||||
uploader_id = None
|
||||
|
||||
mpx_account = data['config']['uvpConfig']['default']['mpx_account']
|
||||
|
||||
metadata = self.get_metadata('%s/%s' % (mpx_account, list(vdata['files'].values())[0]), video_id)
|
||||
metadata = self.get_metadata('kYEXFC/%s' % list(vdata['files'].values())[0], video_id)
|
||||
description = vdata.get('description') or metadata.get('description')
|
||||
duration = int_or_none(vdata.get('duration')) or metadata.get('duration')
|
||||
|
||||
@ -62,7 +60,7 @@ class CNETIE(ThePlatformIE):
|
||||
for (fkey, vid) in vdata['files'].items():
|
||||
if fkey == 'hls_phone' and 'hls_tablet' in vdata['files']:
|
||||
continue
|
||||
release_url = 'http://link.theplatform.com/s/%s/%s?format=SMIL&mbr=true' % (mpx_account, vid)
|
||||
release_url = 'http://link.theplatform.com/s/kYEXFC/%s?format=SMIL&mbr=true' % vid
|
||||
if fkey == 'hds':
|
||||
release_url += '&manifest=f4m'
|
||||
tp_formats, tp_subtitles = self._extract_theplatform_smil(release_url, video_id, 'Downloading %s SMIL data' % fkey)
|
||||
|
@ -26,14 +26,14 @@ class CNNIE(InfoExtractor):
|
||||
'upload_date': '20130609',
|
||||
},
|
||||
}, {
|
||||
"url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
|
||||
"md5": "b5cc60c60a3477d185af8f19a2a26f4e",
|
||||
"info_dict": {
|
||||
'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
|
||||
'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
|
||||
'info_dict': {
|
||||
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
|
||||
'ext': 'mp4',
|
||||
"title": "Student's epic speech stuns new freshmen",
|
||||
"description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
|
||||
"upload_date": "20130821",
|
||||
'title': "Student's epic speech stuns new freshmen",
|
||||
'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
|
||||
'upload_date': '20130821',
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
|
||||
|
@ -46,9 +46,9 @@ class CollegeRamaIE(InfoExtractor):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
player_options_request = {
|
||||
"getPlayerOptionsRequest": {
|
||||
"ResourceId": video_id,
|
||||
"QueryString": "",
|
||||
'getPlayerOptionsRequest': {
|
||||
'ResourceId': video_id,
|
||||
'QueryString': '',
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -16,11 +16,11 @@ from ..utils import (
|
||||
|
||||
class ComedyCentralIE(MTVServicesInfoExtractor):
|
||||
_VALID_URL = r'''(?x)https?://(?:www\.)?cc\.com/
|
||||
(video-clips|episodes|cc-studios|video-collections|full-episodes)
|
||||
(video-clips|episodes|cc-studios|video-collections|full-episodes|shows)
|
||||
/(?P<title>.*)'''
|
||||
_FEED_URL = 'http://comedycentral.com/feeds/mrss/'
|
||||
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
'url': 'http://www.cc.com/video-clips/kllhuv/stand-up-greg-fitzsimmons--uncensored---too-good-of-a-mother',
|
||||
'md5': 'c4f48e9eda1b16dd10add0744344b6d8',
|
||||
'info_dict': {
|
||||
@ -29,7 +29,10 @@ class ComedyCentralIE(MTVServicesInfoExtractor):
|
||||
'title': 'CC:Stand-Up|Greg Fitzsimmons: Life on Stage|Uncensored - Too Good of a Mother',
|
||||
'description': 'After a certain point, breastfeeding becomes c**kblocking.',
|
||||
},
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.cc.com/shows/the-daily-show-with-trevor-noah/interviews/6yx39d/exclusive-rand-paul-extended-interview',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
|
||||
class ComedyCentralShowsIE(MTVServicesInfoExtractor):
|
||||
@ -192,7 +195,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
|
||||
if len(altMovieParams) == 0:
|
||||
raise ExtractorError('unable to find Flash URL in webpage ' + url)
|
||||
else:
|
||||
mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
|
||||
mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]
|
||||
|
||||
uri = mMovieParams[0][1]
|
||||
# Correct cc.com in uri
|
||||
|
@ -15,13 +15,14 @@ import math
|
||||
from ..compat import (
|
||||
compat_cookiejar,
|
||||
compat_cookies,
|
||||
compat_etree_fromstring,
|
||||
compat_getpass,
|
||||
compat_http_client,
|
||||
compat_os_name,
|
||||
compat_str,
|
||||
compat_urllib_error,
|
||||
compat_urllib_parse,
|
||||
compat_urlparse,
|
||||
compat_str,
|
||||
compat_etree_fromstring,
|
||||
)
|
||||
from ..utils import (
|
||||
NO_DEFAULT,
|
||||
@ -46,6 +47,7 @@ from ..utils import (
|
||||
xpath_with_ns,
|
||||
determine_protocol,
|
||||
parse_duration,
|
||||
mimetype2ext,
|
||||
)
|
||||
|
||||
|
||||
@ -156,12 +158,14 @@ class InfoExtractor(object):
|
||||
thumbnail: Full URL to a video thumbnail image.
|
||||
description: Full video description.
|
||||
uploader: Full name of the video uploader.
|
||||
license: License name the video is licensed under.
|
||||
creator: The main artist who created the video.
|
||||
release_date: The date (YYYYMMDD) when the video was released.
|
||||
timestamp: UNIX timestamp of the moment the video became available.
|
||||
upload_date: Video upload date (YYYYMMDD).
|
||||
If not explicitly set, calculated from timestamp.
|
||||
uploader_id: Nickname or id of the video uploader.
|
||||
uploader_url: Full URL to a personal webpage of the video uploader.
|
||||
location: Physical location where the video was filmed.
|
||||
subtitles: The available subtitles as a dictionary in the format
|
||||
{language: subformats}. "subformats" is a list sorted from
|
||||
@@ -424,7 +428,7 @@ class InfoExtractor(object):
             self.to_screen('Saving request to ' + filename)
             # Working around MAX_PATH limitation on Windows (see
             # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
-            if os.name == 'nt':
+            if compat_os_name == 'nt':
                 absfilepath = os.path.abspath(filename)
                 if len(absfilepath) > 259:
                     filename = '\\\\?\\' + absfilepath
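The os.name checks are switched to compat_os_name so that non-CPython runtimes report a sensible platform value. A plausible shape for such a shim (the Jython special case here is an assumption for illustration, not quoted from the diff):

import os

# On Jython os.name is 'java' and the real platform hides in os._name;
# everywhere else this is just os.name.
compat_os_name = os._name if os.name == 'java' else os.name

if compat_os_name == 'nt':
    print('Windows-specific path handling goes here')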
@ -593,7 +597,7 @@ class InfoExtractor(object):
|
||||
if mobj:
|
||||
break
|
||||
|
||||
if not self._downloader.params.get('no_color') and os.name != 'nt' and sys.stderr.isatty():
|
||||
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
|
||||
_name = '\033[0;34m%s\033[0m' % name
|
||||
else:
|
||||
_name = name
|
||||
@ -636,7 +640,7 @@ class InfoExtractor(object):
|
||||
downloader_params = self._downloader.params
|
||||
|
||||
# Attempt to use provided username and password or .netrc data
|
||||
if downloader_params.get('username', None) is not None:
|
||||
if downloader_params.get('username') is not None:
|
||||
username = downloader_params['username']
|
||||
password = downloader_params['password']
|
||||
elif downloader_params.get('usenetrc', False):
|
||||
@ -663,7 +667,7 @@ class InfoExtractor(object):
|
||||
return None
|
||||
downloader_params = self._downloader.params
|
||||
|
||||
if downloader_params.get('twofactor', None) is not None:
|
||||
if downloader_params.get('twofactor') is not None:
|
||||
return downloader_params['twofactor']
|
||||
|
||||
return compat_getpass('Type %s and press [Return]: ' % note)
|
||||
@ -744,7 +748,7 @@ class InfoExtractor(object):
|
||||
'mature': 17,
|
||||
'restricted': 19,
|
||||
}
|
||||
return RATING_TABLE.get(rating.lower(), None)
|
||||
return RATING_TABLE.get(rating.lower())
|
||||
|
||||
def _family_friendly_search(self, html):
|
||||
# See http://schema.org/VideoObject
|
||||
@ -759,7 +763,7 @@ class InfoExtractor(object):
|
||||
'0': 18,
|
||||
'false': 18,
|
||||
}
|
||||
return RATING_TABLE.get(family_friendly.lower(), None)
|
||||
return RATING_TABLE.get(family_friendly.lower())
|
||||
|
||||
def _twitter_search_player(self, html):
|
||||
return self._html_search_meta('twitter:player', html,
|
||||
@ -899,6 +903,16 @@ class InfoExtractor(object):
|
||||
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
|
||||
formats)
|
||||
|
||||
@staticmethod
|
||||
def _remove_duplicate_formats(formats):
|
||||
format_urls = set()
|
||||
unique_formats = []
|
||||
for f in formats:
|
||||
if f['url'] not in format_urls:
|
||||
format_urls.add(f['url'])
|
||||
unique_formats.append(f)
|
||||
formats[:] = unique_formats
|
||||
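The new helper above dedupes formats in place by URL, keeping the first occurrence and the list object's identity, which matters because callers hold references to the same list. A quick standalone illustration of the same logic:

def remove_duplicate_formats(formats):
    format_urls = set()
    unique_formats = []
    for f in formats:
        if f['url'] not in format_urls:
            format_urls.add(f['url'])
            unique_formats.append(f)
    formats[:] = unique_formats  # mutate in place so existing references see the change

formats = [{'url': 'a', 'format_id': 'hls'}, {'url': 'a', 'format_id': 'http'}, {'url': 'b'}]
alias = formats
remove_duplicate_formats(formats)
print(alias)  # [{'url': 'a', 'format_id': 'hls'}, {'url': 'b'}]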
|
||||
def _is_valid_url(self, url, video_id, item='video'):
|
||||
url = self._proto_relative_url(url, scheme='http:')
|
||||
# For now assume non HTTP(S) URLs always valid
|
||||
@ -1022,11 +1036,21 @@ class InfoExtractor(object):
|
||||
return []
|
||||
m3u8_doc, urlh = res
|
||||
m3u8_url = urlh.geturl()
|
||||
# A Media Playlist Tag MUST NOT appear in a Master Playlist
|
||||
# https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
|
||||
# The EXT-X-TARGETDURATION tag is REQUIRED for every M3U8 Media Playlists
|
||||
# https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
|
||||
if '#EXT-X-TARGETDURATION' in m3u8_doc:
|
||||
|
||||
# We should try extracting formats only from master playlists [1], i.e.
|
||||
# playlists that describe available qualities. On the other hand media
|
||||
# playlists [2] should be returned as is since they contain just the media
|
||||
# without qualities renditions.
|
||||
# Fortunately, master playlist can be easily distinguished from media
|
||||
# playlist based on particular tags availability. As of [1, 2] master
|
||||
# playlist tags MUST NOT appear in a media playlist and vice versa.
|
||||
# As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
|
||||
# and MUST NOT appear in master playlist thus we can clearly detect media
|
||||
# playlist with this criterion.
|
||||
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
|
||||
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
|
||||
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
|
||||
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
|
||||
return [{
|
||||
'url': m3u8_url,
|
||||
'format_id': m3u8_id,
|
||||
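The expanded comment block above spells out why #EXT-X-TARGETDURATION is a reliable discriminator: it is required in every media playlist and forbidden in master playlists, so its presence means there are no per-quality renditions to enumerate. A compact sketch of that check:

def is_media_playlist(m3u8_doc):
    # Media playlists must carry EXT-X-TARGETDURATION; master playlists must not.
    return '#EXT-X-TARGETDURATION' in m3u8_doc

master = '#EXTM3U\n#EXT-X-STREAM-INF:BANDWIDTH=1280000\nchunklist.m3u8\n'
media = '#EXTM3U\n#EXT-X-TARGETDURATION:10\n#EXTINF:9.009,\nseg0.ts\n'
print(is_media_playlist(master), is_media_playlist(media))  # False True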
@ -1073,19 +1097,29 @@ class InfoExtractor(object):
|
||||
'protocol': entry_protocol,
|
||||
'preference': preference,
|
||||
}
|
||||
codecs = last_info.get('CODECS')
|
||||
if codecs:
|
||||
# TODO: looks like video codec is not always necessarily goes first
|
||||
va_codecs = codecs.split(',')
|
||||
if va_codecs[0]:
|
||||
f['vcodec'] = va_codecs[0]
|
||||
if len(va_codecs) > 1 and va_codecs[1]:
|
||||
f['acodec'] = va_codecs[1]
|
||||
resolution = last_info.get('RESOLUTION')
|
||||
if resolution:
|
||||
width_str, height_str = resolution.split('x')
|
||||
f['width'] = int(width_str)
|
||||
f['height'] = int(height_str)
|
||||
codecs = last_info.get('CODECS')
|
||||
if codecs:
|
||||
vcodec, acodec = [None] * 2
|
||||
va_codecs = codecs.split(',')
|
||||
if len(va_codecs) == 1:
|
||||
# Audio only entries usually come with single codec and
|
||||
# no resolution. For more robustness we also check it to
|
||||
# be mp4 audio.
|
||||
if not resolution and va_codecs[0].startswith('mp4a'):
|
||||
vcodec, acodec = 'none', va_codecs[0]
|
||||
else:
|
||||
vcodec = va_codecs[0]
|
||||
else:
|
||||
vcodec, acodec = va_codecs[:2]
|
||||
f.update({
|
||||
'acodec': acodec,
|
||||
'vcodec': vcodec,
|
||||
})
|
||||
if last_media is not None:
|
||||
f['m3u8_media'] = last_media
|
||||
last_media = None
|
||||
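The CODECS handling above distinguishes audio-only variants: a single codec string starting with mp4a and no RESOLUTION attribute is treated as audio, otherwise the first entry is the video codec and the second, if present, the audio codec. Standalone, the logic looks roughly like this:

def split_codecs(codecs, resolution=None):
    vcodec = acodec = None
    if codecs:
        va_codecs = codecs.split(',')
        if len(va_codecs) == 1:
            # audio-only entries usually come with a single mp4a codec and no resolution
            if not resolution and va_codecs[0].startswith('mp4a'):
                vcodec, acodec = 'none', va_codecs[0]
            else:
                vcodec = va_codecs[0]
        else:
            vcodec, acodec = va_codecs[:2]
    return vcodec, acodec

print(split_codecs('avc1.4d401f,mp4a.40.2', '1280x720'))  # ('avc1.4d401f', 'mp4a.40.2')
print(split_codecs('mp4a.40.2'))                          # ('none', 'mp4a.40.2')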
@ -1277,16 +1311,7 @@ class InfoExtractor(object):
|
||||
if not src or src in urls:
|
||||
continue
|
||||
urls.append(src)
|
||||
ext = textstream.get('ext') or determine_ext(src)
|
||||
if not ext:
|
||||
type_ = textstream.get('type')
|
||||
SUBTITLES_TYPES = {
|
||||
'text/vtt': 'vtt',
|
||||
'text/srt': 'srt',
|
||||
'application/smptett+xml': 'tt',
|
||||
}
|
||||
if type_ in SUBTITLES_TYPES:
|
||||
ext = SUBTITLES_TYPES[type_]
|
||||
ext = textstream.get('ext') or determine_ext(src) or mimetype2ext(textstream.get('type'))
|
||||
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
|
||||
subtitles.setdefault(lang, []).append({
|
||||
'url': src,
|
||||
@ -1497,7 +1522,7 @@ class InfoExtractor(object):
|
||||
def _live_title(self, name):
|
||||
""" Generate the title for a live video """
|
||||
now = datetime.datetime.now()
|
||||
now_str = now.strftime("%Y-%m-%d %H:%M")
|
||||
now_str = now.strftime('%Y-%m-%d %H:%M')
|
||||
return name + ' ' + now_str
|
||||
|
||||
def _int(self, v, name, fatal=False, **kwargs):
|
||||
@ -1570,7 +1595,7 @@ class InfoExtractor(object):
|
||||
return {}
|
||||
|
||||
def _get_subtitles(self, *args, **kwargs):
|
||||
raise NotImplementedError("This method must be implemented by subclasses")
|
||||
raise NotImplementedError('This method must be implemented by subclasses')
|
||||
|
||||
@staticmethod
|
||||
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
|
||||
@ -1596,7 +1621,16 @@ class InfoExtractor(object):
|
||||
return {}
|
||||
|
||||
def _get_automatic_captions(self, *args, **kwargs):
|
||||
raise NotImplementedError("This method must be implemented by subclasses")
|
||||
raise NotImplementedError('This method must be implemented by subclasses')
|
||||
|
||||
def mark_watched(self, *args, **kwargs):
|
||||
if (self._downloader.params.get('mark_watched', False) and
|
||||
(self._get_login_info()[0] is not None or
|
||||
self._downloader.params.get('cookiefile') is not None)):
|
||||
self._mark_watched(*args, **kwargs)
|
||||
|
||||
def _mark_watched(self, *args, **kwargs):
|
||||
raise NotImplementedError('This method must be implemented by subclasses')
|
||||
|
||||
|
||||
class SearchInfoExtractor(InfoExtractor):
|
||||
@ -1636,7 +1670,7 @@ class SearchInfoExtractor(InfoExtractor):
|
||||
|
||||
def _get_n_results(self, query, n):
|
||||
"""Get a specified number of results for a query"""
|
||||
raise NotImplementedError("This method must be implemented by subclasses")
|
||||
raise NotImplementedError('This method must be implemented by subclasses')
|
||||
|
||||
@property
|
||||
def SEARCH_KEY(self):
|
||||
|
@@ -180,40 +180,40 @@ class CrunchyrollIE(CrunchyrollBaseIE):
            return assvalue

        output = '[Script Info]\n'
        output += 'Title: %s\n' % sub_root.attrib["title"]
        output += 'Title: %s\n' % sub_root.attrib['title']
        output += 'ScriptType: v4.00+\n'
        output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
        output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
        output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
        output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
        output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
        output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
        output += """ScaledBorderAndShadow: yes

[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
        for style in sub_root.findall('./styles/style'):
            output += 'Style: ' + style.attrib["name"]
            output += ',' + style.attrib["font_name"]
            output += ',' + style.attrib["font_size"]
            output += ',' + style.attrib["primary_colour"]
            output += ',' + style.attrib["secondary_colour"]
            output += ',' + style.attrib["outline_colour"]
            output += ',' + style.attrib["back_colour"]
            output += ',' + ass_bool(style.attrib["bold"])
            output += ',' + ass_bool(style.attrib["italic"])
            output += ',' + ass_bool(style.attrib["underline"])
            output += ',' + ass_bool(style.attrib["strikeout"])
            output += ',' + style.attrib["scale_x"]
            output += ',' + style.attrib["scale_y"]
            output += ',' + style.attrib["spacing"]
            output += ',' + style.attrib["angle"]
            output += ',' + style.attrib["border_style"]
            output += ',' + style.attrib["outline"]
            output += ',' + style.attrib["shadow"]
            output += ',' + style.attrib["alignment"]
            output += ',' + style.attrib["margin_l"]
            output += ',' + style.attrib["margin_r"]
            output += ',' + style.attrib["margin_v"]
            output += ',' + style.attrib["encoding"]
            output += 'Style: ' + style.attrib['name']
            output += ',' + style.attrib['font_name']
            output += ',' + style.attrib['font_size']
            output += ',' + style.attrib['primary_colour']
            output += ',' + style.attrib['secondary_colour']
            output += ',' + style.attrib['outline_colour']
            output += ',' + style.attrib['back_colour']
            output += ',' + ass_bool(style.attrib['bold'])
            output += ',' + ass_bool(style.attrib['italic'])
            output += ',' + ass_bool(style.attrib['underline'])
            output += ',' + ass_bool(style.attrib['strikeout'])
            output += ',' + style.attrib['scale_x']
            output += ',' + style.attrib['scale_y']
            output += ',' + style.attrib['spacing']
            output += ',' + style.attrib['angle']
            output += ',' + style.attrib['border_style']
            output += ',' + style.attrib['outline']
            output += ',' + style.attrib['shadow']
            output += ',' + style.attrib['alignment']
            output += ',' + style.attrib['margin_l']
            output += ',' + style.attrib['margin_r']
            output += ',' + style.attrib['margin_v']
            output += ',' + style.attrib['encoding']
            output += '\n'

        output += """
@@ -222,15 +222,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
        for event in sub_root.findall('./events/event'):
            output += 'Dialogue: 0'
            output += ',' + event.attrib["start"]
            output += ',' + event.attrib["end"]
            output += ',' + event.attrib["style"]
            output += ',' + event.attrib["name"]
            output += ',' + event.attrib["margin_l"]
            output += ',' + event.attrib["margin_r"]
            output += ',' + event.attrib["margin_v"]
            output += ',' + event.attrib["effect"]
            output += ',' + event.attrib["text"]
            output += ',' + event.attrib['start']
            output += ',' + event.attrib['end']
            output += ',' + event.attrib['style']
            output += ',' + event.attrib['name']
            output += ',' + event.attrib['margin_l']
            output += ',' + event.attrib['margin_r']
            output += ',' + event.attrib['margin_v']
            output += ',' + event.attrib['effect']
            output += ',' + event.attrib['text']
            output += '\n'

        return output
@@ -376,7 +376,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text


class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
    IE_NAME = "crunchyroll:playlist"
    IE_NAME = 'crunchyroll:playlist'
    _VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'

    _TESTS = [{
@@ -122,10 +122,13 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
        description = self._og_search_description(webpage) or self._html_search_meta(
            'description', webpage, 'description')

        view_count = str_to_int(self._search_regex(
            [r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:(\d+)"',
             r'video_views_count[^>]+>\s+([\d\.,]+)'],
            webpage, 'view count', fatal=False))
        view_count_str = self._search_regex(
            (r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([\s\d,.]+)"',
             r'video_views_count[^>]+>\s+([\s\d\,.]+)'),
            webpage, 'view count', fatal=False)
        if view_count_str:
            view_count_str = re.sub(r'\s', '', view_count_str)
        view_count = str_to_int(view_count_str)
        comment_count = int_or_none(self._search_regex(
            r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserComments:(\d+)"',
            webpage, 'comment count', fatal=False))
@@ -396,13 +399,13 @@ class DailymotionCloudIE(DailymotionBaseInfoExtractor):
    }]

    @classmethod
    def _extract_dmcloud_url(self, webpage):
        mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % self._VALID_EMBED_URL, webpage)
    def _extract_dmcloud_url(cls, webpage):
        mobj = re.search(r'<iframe[^>]+src=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL, webpage)
        if mobj:
            return mobj.group(1)

        mobj = re.search(
            r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % self._VALID_EMBED_URL,
            r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](%s)[\'"]' % cls._VALID_EMBED_URL,
            webpage)
        if mobj:
            return mobj.group(1)
@@ -18,7 +18,7 @@ class DouyuTVIE(InfoExtractor):
            'display_id': 'iseven',
            'ext': 'flv',
            'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:c93d6692dde6fe33809a46edcbecca44',
            'description': 'md5:f34981259a03e980a3c6404190a3ed61',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': '7师傅',
            'uploader_id': '431925',
@@ -26,7 +26,7 @@ class DouyuTVIE(InfoExtractor):
        },
        'params': {
            'skip_download': True,
        }
        },
    }, {
        'url': 'http://www.douyutv.com/85982',
        'info_dict': {
@@ -42,7 +42,24 @@ class DouyuTVIE(InfoExtractor):
        },
        'params': {
            'skip_download': True,
        }
        },
        'skip': 'Romm not found',
    }, {
        'url': 'http://www.douyutv.com/17732',
        'info_dict': {
            'id': '17732',
            'display_id': '17732',
            'ext': 'flv',
            'title': 're:^清晨醒脑!T-ara根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'md5:f34981259a03e980a3c6404190a3ed61',
            'thumbnail': 're:^https?://.*\.jpg$',
            'uploader': '7师傅',
            'uploader_id': '431925',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
@ -1,6 +1,8 @@
|
||||
# encoding: utf-8
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import json
|
||||
import re
|
||||
import time
|
||||
|
||||
from .common import InfoExtractor
|
||||
@ -8,44 +10,125 @@ from ..utils import int_or_none
|
||||
|
||||
|
||||
class DPlayIE(InfoExtractor):
|
||||
_VALID_URL = r'http://www\.dplay\.se/[^/]+/(?P<id>[^/?#]+)'
|
||||
_VALID_URL = r'http://(?P<domain>it\.dplay\.com|www\.dplay\.(?:dk|se|no))/[^/]+/(?P<id>[^/?#]+)'
|
||||
|
||||
_TEST = {
|
||||
_TESTS = [{
|
||||
'url': 'http://it.dplay.com/take-me-out/stagione-1-episodio-25/',
|
||||
'info_dict': {
|
||||
'id': '1255600',
|
||||
'display_id': 'stagione-1-episodio-25',
|
||||
'ext': 'mp4',
|
||||
'title': 'Episodio 25',
|
||||
'description': 'md5:cae5f40ad988811b197d2d27a53227eb',
|
||||
'duration': 2761,
|
||||
'timestamp': 1454701800,
|
||||
'upload_date': '20160205',
|
||||
'creator': 'RTIT',
|
||||
'series': 'Take me out',
|
||||
'season_number': 1,
|
||||
'episode_number': 25,
|
||||
'age_limit': 0,
|
||||
},
|
||||
'expected_warnings': ['Unable to download f4m manifest'],
|
||||
}, {
|
||||
'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/',
|
||||
'info_dict': {
|
||||
'id': '3172',
|
||||
'ext': 'mp4',
|
||||
'display_id': 'season-1-svensken-lar-sig-njuta-av-livet',
|
||||
'ext': 'flv',
|
||||
'title': 'Svensken lär sig njuta av livet',
|
||||
'description': 'md5:d3819c9bccffd0fe458ca42451dd50d8',
|
||||
'duration': 2650,
|
||||
'timestamp': 1365454320,
|
||||
'upload_date': '20130408',
|
||||
'creator': 'Kanal 5 (Home)',
|
||||
'series': 'Nugammalt - 77 händelser som format Sverige',
|
||||
'season_number': 1,
|
||||
'episode_number': 1,
|
||||
'age_limit': 0,
|
||||
},
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.dplay.dk/mig-og-min-mor/season-6-episode-12/',
|
||||
'info_dict': {
|
||||
'id': '70816',
|
||||
'display_id': 'season-6-episode-12',
|
||||
'ext': 'flv',
|
||||
'title': 'Episode 12',
|
||||
'description': 'md5:9c86e51a93f8a4401fc9641ef9894c90',
|
||||
'duration': 2563,
|
||||
'timestamp': 1429696800,
|
||||
'upload_date': '20150422',
|
||||
'creator': 'Kanal 4',
|
||||
'series': 'Mig og min mor',
|
||||
'season_number': 6,
|
||||
'episode_number': 12,
|
||||
'age_limit': 0,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.dplay.no/pga-tour/season-1-hoydepunkter-18-21-februar/',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
display_id = mobj.group('id')
|
||||
domain = mobj.group('domain')
|
||||
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
video_id = self._search_regex(
|
||||
r'data-video-id="(\d+)"', webpage, 'video id')
|
||||
r'data-video-id=["\'](\d+)', webpage, 'video id')
|
||||
|
||||
info = self._download_json(
|
||||
'http://www.dplay.se/api/v2/ajax/videos?video_id=' + video_id,
|
||||
'http://%s/api/v2/ajax/videos?video_id=%s' % (domain, video_id),
|
||||
video_id)['data'][0]
|
||||
|
||||
self._set_cookie(
|
||||
'secure.dplay.se', 'dsc-geo',
|
||||
'{"countryCode":"NL","expiry":%d}' % ((time.time() + 20 * 60) * 1000))
|
||||
# TODO: consider adding support for 'stream_type=hds', it seems to
|
||||
# require setting some cookies
|
||||
manifest_url = self._download_json(
|
||||
'https://secure.dplay.se/secure/api/v2/user/authorization/stream/%s?stream_type=hls' % video_id,
|
||||
video_id, 'Getting manifest url for hls stream')['hls']
|
||||
formats = self._extract_m3u8_formats(
|
||||
manifest_url, video_id, ext='mp4', entry_protocol='m3u8_native')
|
||||
title = info['title']
|
||||
|
||||
PROTOCOLS = ('hls', 'hds')
|
||||
formats = []
|
||||
|
||||
def extract_formats(protocol, manifest_url):
|
||||
if protocol == 'hls':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
manifest_url, video_id, ext='mp4',
|
||||
entry_protocol='m3u8_native', m3u8_id=protocol, fatal=False))
|
||||
elif protocol == 'hds':
|
||||
formats.extend(self._extract_f4m_formats(
|
||||
manifest_url + '&hdcore=3.8.0&plugin=flowplayer-3.8.0.0',
|
||||
video_id, f4m_id=protocol, fatal=False))
|
||||
|
||||
domain_tld = domain.split('.')[-1]
|
||||
if domain_tld in ('se', 'dk'):
|
||||
for protocol in PROTOCOLS:
|
||||
self._set_cookie(
|
||||
'secure.dplay.%s' % domain_tld, 'dsc-geo',
|
||||
json.dumps({
|
||||
'countryCode': domain_tld.upper(),
|
||||
'expiry': (time.time() + 20 * 60) * 1000,
|
||||
}))
|
||||
stream = self._download_json(
|
||||
'https://secure.dplay.%s/secure/api/v2/user/authorization/stream/%s?stream_type=%s'
|
||||
% (domain_tld, video_id, protocol), video_id,
|
||||
'Downloading %s stream JSON' % protocol, fatal=False)
|
||||
if stream and stream.get(protocol):
|
||||
extract_formats(protocol, stream[protocol])
|
||||
else:
|
||||
for protocol in PROTOCOLS:
|
||||
if info.get(protocol):
|
||||
extract_formats(protocol, info[protocol])
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'display_id': display_id,
|
||||
'title': info['title'],
|
||||
'formats': formats,
|
||||
'title': title,
|
||||
'description': info.get('video_metadata_longDescription'),
|
||||
'duration': int_or_none(info.get('video_metadata_length'), scale=1000),
|
||||
'timestamp': int_or_none(info.get('video_publish_date')),
|
||||
'creator': info.get('video_metadata_homeChannel'),
|
||||
'series': info.get('video_metadata_show'),
|
||||
'season_number': int_or_none(info.get('season')),
|
||||
'episode_number': int_or_none(info.get('episode')),
|
||||
'age_limit': int_or_none(info.get('minimum_age')),
|
||||
'formats': formats,
|
||||
}
|
||||
|
@@ -87,7 +87,7 @@ class DRBonanzaIE(InfoExtractor):

        formats = []
        for file in info['Files']:
            if info['Type'] == "Video":
            if info['Type'] == 'Video':
                if file['Type'] in video_types:
                    format = parse_filename_info(file['Location'])
                    format.update({
@@ -101,10 +101,10 @@ class DRBonanzaIE(InfoExtractor):
                    if '/bonanza/' in rtmp_url:
                        format['play_path'] = rtmp_url.split('/bonanza/')[1]
                    formats.append(format)
                elif file['Type'] == "Thumb":
                elif file['Type'] == 'Thumb':
                    thumbnail = file['Location']
            elif info['Type'] == "Audio":
                if file['Type'] == "Audio":
            elif info['Type'] == 'Audio':
                if file['Type'] == 'Audio':
                    format = parse_filename_info(file['Location'])
                    format.update({
                        'url': file['Location'],
@@ -112,7 +112,7 @@ class DRBonanzaIE(InfoExtractor):
                        'vcodec': 'none',
                    })
                    formats.append(format)
                elif file['Type'] == "Thumb":
                elif file['Type'] == 'Thumb':
                    thumbnail = file['Location']

        description = '%s\n%s\n%s\n' % (
@ -17,85 +17,85 @@ class EightTracksIE(InfoExtractor):
|
||||
IE_NAME = '8tracks'
|
||||
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
|
||||
_TEST = {
|
||||
"name": "EightTracks",
|
||||
"url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
|
||||
"info_dict": {
|
||||
'name': 'EightTracks',
|
||||
'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
|
||||
'info_dict': {
|
||||
'id': '1336550',
|
||||
'display_id': 'youtube-dl-test-tracks-a',
|
||||
"description": "test chars: \"'/\\ä↭",
|
||||
"title": "youtube-dl test tracks \"'/\\ä↭<>",
|
||||
'description': "test chars: \"'/\\ä↭",
|
||||
'title': "youtube-dl test tracks \"'/\\ä↭<>",
|
||||
},
|
||||
"playlist": [
|
||||
'playlist': [
|
||||
{
|
||||
"md5": "96ce57f24389fc8734ce47f4c1abcc55",
|
||||
"info_dict": {
|
||||
"id": "11885610",
|
||||
"ext": "m4a",
|
||||
"title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': '96ce57f24389fc8734ce47f4c1abcc55',
|
||||
'info_dict': {
|
||||
'id': '11885610',
|
||||
'ext': 'm4a',
|
||||
'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "4ab26f05c1f7291ea460a3920be8021f",
|
||||
"info_dict": {
|
||||
"id": "11885608",
|
||||
"ext": "m4a",
|
||||
"title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': '4ab26f05c1f7291ea460a3920be8021f',
|
||||
'info_dict': {
|
||||
'id': '11885608',
|
||||
'ext': 'm4a',
|
||||
'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "d30b5b5f74217410f4689605c35d1fd7",
|
||||
"info_dict": {
|
||||
"id": "11885679",
|
||||
"ext": "m4a",
|
||||
"title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': 'd30b5b5f74217410f4689605c35d1fd7',
|
||||
'info_dict': {
|
||||
'id': '11885679',
|
||||
'ext': 'm4a',
|
||||
'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "4eb0a669317cd725f6bbd336a29f923a",
|
||||
"info_dict": {
|
||||
"id": "11885680",
|
||||
"ext": "m4a",
|
||||
"title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': '4eb0a669317cd725f6bbd336a29f923a',
|
||||
'info_dict': {
|
||||
'id': '11885680',
|
||||
'ext': 'm4a',
|
||||
'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "1893e872e263a2705558d1d319ad19e8",
|
||||
"info_dict": {
|
||||
"id": "11885682",
|
||||
"ext": "m4a",
|
||||
"title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': '1893e872e263a2705558d1d319ad19e8',
|
||||
'info_dict': {
|
||||
'id': '11885682',
|
||||
'ext': 'm4a',
|
||||
'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "b673c46f47a216ab1741ae8836af5899",
|
||||
"info_dict": {
|
||||
"id": "11885683",
|
||||
"ext": "m4a",
|
||||
"title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': 'b673c46f47a216ab1741ae8836af5899',
|
||||
'info_dict': {
|
||||
'id': '11885683',
|
||||
'ext': 'm4a',
|
||||
'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "1d74534e95df54986da7f5abf7d842b7",
|
||||
"info_dict": {
|
||||
"id": "11885684",
|
||||
"ext": "m4a",
|
||||
"title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': '1d74534e95df54986da7f5abf7d842b7',
|
||||
'info_dict': {
|
||||
'id': '11885684',
|
||||
'ext': 'm4a',
|
||||
'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
},
|
||||
{
|
||||
"md5": "f081f47af8f6ae782ed131d38b9cd1c0",
|
||||
"info_dict": {
|
||||
"id": "11885685",
|
||||
"ext": "m4a",
|
||||
"title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
|
||||
"uploader_id": "ytdl"
|
||||
'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
|
||||
'info_dict': {
|
||||
'id': '11885685',
|
||||
'ext': 'm4a',
|
||||
'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
|
||||
'uploader_id': 'ytdl'
|
||||
}
|
||||
}
|
||||
]
|
||||
|
@@ -72,7 +72,7 @@ class EllenTVClipsIE(InfoExtractor):
    def _extract_playlist(self, webpage):
        json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
        try:
            return json.loads("[{" + json_string + "}]")
            return json.loads('[{' + json_string + '}]')
        except ValueError as ve:
            raise ExtractorError('Failed to download JSON', cause=ve)
@@ -9,7 +9,7 @@ class ElPaisIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^.]+\.)?elpais\.com/.*/(?P<id>[^/#?]+)\.html(?:$|[?#])'
    IE_DESC = 'El País'

    _TEST = {
    _TESTS = [{
        'url': 'http://blogs.elpais.com/la-voz-de-inaki/2014/02/tiempo-nuevo-recetas-viejas.html',
        'md5': '98406f301f19562170ec071b83433d55',
        'info_dict': {
@@ -19,30 +19,41 @@ class ElPaisIE(InfoExtractor):
            'description': 'De lunes a viernes, a partir de las ocho de la mañana, Iñaki Gabilondo nos cuenta su visión de la actualidad nacional e internacional.',
            'upload_date': '20140206',
        }
    }
    }, {
        'url': 'http://elcomidista.elpais.com/elcomidista/2016/02/24/articulo/1456340311_668921.html#?id_externo_nwl=newsletter_diaria20160303t',
        'md5': '3bd5b09509f3519d7d9e763179b013de',
        'info_dict': {
            'id': '1456340311_668921',
            'ext': 'mp4',
            'title': 'Cómo hacer el mejor café con cafetera italiana',
            'description': 'Que sí, que las cápsulas son cómodas. Pero si le pides algo más a la vida, quizá deberías aprender a usar bien la cafetera italiana. No tienes más que ver este vídeo y seguir sus siete normas básicas.',
            'upload_date': '20160303',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        prefix = self._html_search_regex(
            r'var url_cache = "([^"]+)";', webpage, 'URL prefix')
            r'var\s+url_cache\s*=\s*"([^"]+)";', webpage, 'URL prefix')
        video_suffix = self._search_regex(
            r"URLMediaFile = url_cache \+ '([^']+)'", webpage, 'video URL')
            r"(?:URLMediaFile|urlVideo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'", webpage, 'video URL')
        video_url = prefix + video_suffix
        thumbnail_suffix = self._search_regex(
            r"URLMediaStill = url_cache \+ '([^']+)'", webpage, 'thumbnail URL',
            fatal=False)
            r"(?:URLMediaStill|urlFotogramaFijo_\d+)\s*=\s*url_cache\s*\+\s*'([^']+)'",
            webpage, 'thumbnail URL', fatal=False)
        thumbnail = (
            None if thumbnail_suffix is None
            else prefix + thumbnail_suffix)
        title = self._html_search_regex(
            '<h2 class="entry-header entry-title.*?>(.*?)</h2>',
            (r"tituloVideo\s*=\s*'([^']+)'", webpage, 'title',
             r'<h2 class="entry-header entry-title.*?>(.*?)</h2>'),
            webpage, 'title')
        date_str = self._search_regex(
        upload_date = unified_strdate(self._search_regex(
            r'<p class="date-header date-int updated"\s+title="([^"]+)">',
            webpage, 'upload date', fatal=False)
        upload_date = (None if date_str is None else unified_strdate(date_str))
            webpage, 'upload date', default=None) or self._html_search_meta(
            'datePublished', webpage, 'timestamp'))

        return {
            'id': video_id,
@@ -1,21 +1,13 @@
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    url_basename,
)


class EngadgetIE(InfoExtractor):
    _VALID_URL = r'''(?x)https?://www.engadget.com/
        (?:video(?:/5min)?/(?P<id>\d+)|
            [\d/]+/.*?)
        '''
    _VALID_URL = r'https?://www.engadget.com/video/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.engadget.com/video/5min/518153925/',
        'url': 'http://www.engadget.com/video/518153925/',
        'md5': 'c6820d4828a5064447a4d9fc73f312c9',
        'info_dict': {
            'id': '518153925',
@@ -27,15 +19,4 @@ class EngadgetIE(InfoExtractor):

    def _real_extract(self, url):
        video_id = self._match_id(url)

        if video_id is not None:
            return self.url_result('5min:%s' % video_id)
        else:
            title = url_basename(url)
            webpage = self._download_webpage(url, title)
            ids = re.findall(r'<iframe[^>]+?playList=(\d+)', webpage)
            return {
                '_type': 'playlist',
                'title': title,
                'entries': [self.url_result('5min:%s' % vid) for vid in ids]
            }
        return self.url_result('5min:%s' % video_id)
@@ -14,14 +14,14 @@ class EveryonesMixtapeIE(InfoExtractor):

    _TESTS = [{
        'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
        "info_dict": {
        'info_dict': {
            'id': '5bfseWNmlds',
            'ext': 'mp4',
            "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
            "uploader": "FKR.TV",
            "uploader_id": "frenchkissrecords",
            "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
            "upload_date": "20081015"
            'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
            'uploader': 'FKR.TV',
            'uploader_id': 'frenchkissrecords',
            'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
            'upload_date': '20081015'
        },
        'params': {
            'skip_download': True, # This is simply YouTube
@@ -41,7 +41,7 @@ class ExfmIE(InfoExtractor):
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        song_id = mobj.group('id')
        info_url = "http://ex.fm/api/v3/song/%s" % song_id
        info_url = 'http://ex.fm/api/v3/song/%s' % song_id
        info = self._download_json(info_url, song_id)['song']
        song_url = info['url']
        if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
@@ -34,8 +34,9 @@ class FacebookIE(InfoExtractor):
                                video/video\.php|
                                photo\.php|
                                video\.php|
                                video/embed
                            )\?(?:.*?)(?:v|video_id)=|
                                video/embed|
                                story\.php
                            )\?(?:.*?)(?:v|video_id|story_fbid)=|
                            [^/]+/videos/(?:[^/]+/)?
                        )|
                    facebook:
@@ -92,6 +93,9 @@ class FacebookIE(InfoExtractor):
    }, {
        'url': 'facebook:544765982287235',
        'only_matching': True,
    }, {
        'url': 'https://m.facebook.com/story.php?story_fbid=1035862816472149&id=116132035111903',
        'only_matching': True,
    }]

    def _login(self):
@@ -186,7 +190,7 @@ class FacebookIE(InfoExtractor):
        if not video_data:
            server_js_data = self._parse_json(self._search_regex(
                r'handleServerJS\(({.+})\);', webpage, 'server js data'), video_id)
            for item in server_js_data['instances']:
            for item in server_js_data.get('instances', []):
                if item[1][0] == 'VideoConfig':
                    video_data = video_data_list2dict(item[2][0]['videoData'])
                    break
@@ -208,10 +212,13 @@ class FacebookIE(InfoExtractor):
                for src_type in ('src', 'src_no_ratelimit'):
                    src = f[0].get('%s_%s' % (quality, src_type))
                    if src:
                        preference = -10 if format_id == 'progressive' else 0
                        if quality == 'hd':
                            preference += 5
                        formats.append({
                            'format_id': '%s_%s_%s' % (format_id, quality, src_type),
                            'url': src,
                            'preference': -10 if format_id == 'progressive' else 0,
                            'preference': preference,
                        })
            dash_manifest = f[0].get('dash_manifest')
            if dash_manifest:
@@ -52,7 +52,7 @@ class FazIE(InfoExtractor):
        formats = []
        for pref, code in enumerate(['LOW', 'HIGH', 'HQ']):
            encoding = xpath_element(encodings, code)
            if encoding:
            if encoding is not None:
                encoding_url = xpath_text(encoding, 'FILENAME')
                if encoding_url:
                    formats.append({
@@ -87,7 +87,7 @@ class FC2IE(InfoExtractor):
        mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()

        info_url = (
            "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
            'http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&'.
            format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))

        info_webpage = self._download_webpage(
@ -1,5 +1,7 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_parse,
|
||||
@ -16,12 +18,7 @@ from ..utils import (
|
||||
|
||||
class FiveMinIE(InfoExtractor):
|
||||
IE_NAME = '5min'
|
||||
_VALID_URL = r'''(?x)
|
||||
(?:https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?:.*?&)?playList=|
|
||||
https?://(?:(?:massively|www)\.)?joystiq\.com/video/|
|
||||
5min:)
|
||||
(?P<id>\d+)
|
||||
'''
|
||||
_VALID_URL = r'(?:5min:(?P<id>\d+)(?::(?P<sid>\d+))?|https?://[^/]*?5min\.com/Scripts/PlayerSeed\.js\?(?P<query>.*))'
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
@ -45,6 +42,7 @@ class FiveMinIE(InfoExtractor):
|
||||
'title': 'How to Make a Next-Level Fruit Salad',
|
||||
'duration': 184,
|
||||
},
|
||||
'skip': 'no longer available',
|
||||
},
|
||||
]
|
||||
_ERRORS = {
|
||||
@ -91,20 +89,33 @@ class FiveMinIE(InfoExtractor):
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
sid = mobj.group('sid')
|
||||
|
||||
if mobj.group('query'):
|
||||
qs = compat_parse_qs(mobj.group('query'))
|
||||
if not qs.get('playList'):
|
||||
raise ExtractorError('Invalid URL', expected=True)
|
||||
video_id = qs['playList'][0]
|
||||
if qs.get('sid'):
|
||||
sid = qs['sid'][0]
|
||||
|
||||
embed_url = 'https://embed.5min.com/playerseed/?playList=%s' % video_id
|
||||
embed_page = self._download_webpage(embed_url, video_id,
|
||||
'Downloading embed page')
|
||||
sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
|
||||
query = compat_urllib_parse.urlencode({
|
||||
'func': 'GetResults',
|
||||
'playlist': video_id,
|
||||
'sid': sid,
|
||||
'isPlayerSeed': 'true',
|
||||
'url': embed_url,
|
||||
})
|
||||
if not sid:
|
||||
embed_page = self._download_webpage(embed_url, video_id,
|
||||
'Downloading embed page')
|
||||
sid = self._search_regex(r'sid=(\d+)', embed_page, 'sid')
|
||||
|
||||
response = self._download_json(
|
||||
'https://syn.5min.com/handlers/SenseHandler.ashx?' + query,
|
||||
'https://syn.5min.com/handlers/SenseHandler.ashx?' +
|
||||
compat_urllib_parse.urlencode({
|
||||
'func': 'GetResults',
|
||||
'playlist': video_id,
|
||||
'sid': sid,
|
||||
'isPlayerSeed': 'true',
|
||||
'url': embed_url,
|
||||
}),
|
||||
video_id)
|
||||
if not response['success']:
|
||||
raise ExtractorError(
|
||||
@ -118,9 +129,7 @@ class FiveMinIE(InfoExtractor):
|
||||
parsed_video_url = compat_urllib_parse_urlparse(compat_parse_qs(
|
||||
compat_urllib_parse_urlparse(info['EmbededURL']).query)['videoUrl'][0])
|
||||
for rendition in info['Renditions']:
|
||||
if rendition['RenditionType'] == 'm3u8':
|
||||
formats.extend(self._extract_m3u8_formats(rendition['Url'], video_id, m3u8_id='hls'))
|
||||
elif rendition['RenditionType'] == 'aac':
|
||||
if rendition['RenditionType'] == 'aac' or rendition['RenditionType'] == 'm3u8':
|
||||
continue
|
||||
else:
|
||||
rendition_url = compat_urlparse.urlunparse(parsed_video_url._replace(path=replace_extension(parsed_video_url.path.replace('//', '/%s/' % rendition['ID']), rendition['RenditionType'])))
|
||||
|
@@ -36,6 +36,10 @@ class FoxNewsIE(AMPIE):
            # 'upload_date': '20141204',
            'thumbnail': 're:^https?://.*\.jpg$',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    },
    {
        'url': 'http://video.foxnews.com/v/video-embed.html?video_id=3937480&d=video.foxnews.com',
@@ -10,7 +10,7 @@ class FranceInterIE(InfoExtractor):
    _TEST = {
        'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
        'md5': '4764932e466e6f6c79c317d2e74f6884',
        "info_dict": {
        'info_dict': {
            'id': '793962',
            'ext': 'mp3',
            'title': 'L’Histoire dans les jeux vidéo',
@@ -289,7 +289,7 @@ class FranceTVIE(FranceTVBaseInfoExtractor):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_id, catalogue = self._html_search_regex(
            r'href="http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
            r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"',
            webpage, 'video ID').split('@')
        return self._extract_video(video_id, catalogue)
@@ -14,7 +14,7 @@ class FreespeechIE(InfoExtractor):
        'url': 'https://www.freespeech.org/video/obama-romney-campaign-colorado-ahead-debate-0',
        'info_dict': {
            'id': 'poKsVCZ64uU',
            'ext': 'mp4',
            'ext': 'webm',
            'title': 'Obama, Romney Campaign in Colorado Ahead of Debate',
            'description': 'Obama, Romney Campaign in Colorado Ahead of Debate',
            'uploader': 'freespeechtv',
@@ -12,8 +12,8 @@ class FreeVideoIE(InfoExtractor):
        'info_dict': {
            'id': 'vysukany-zadecek-22033',
            'ext': 'mp4',
            "title": "vysukany-zadecek-22033",
            "age_limit": 18,
            'title': 'vysukany-zadecek-22033',
            'age_limit': 18,
        },
        'skip': 'Blocked outside .cz',
    }
@@ -47,6 +47,7 @@ from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
@@ -1573,6 +1574,11 @@ class GenericIE(InfoExtractor):
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'VK')

        # Look for embedded Odnoklassniki player
        mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
        if mobj is not None:
            return self.url_result(mobj.group('url'), 'Odnoklassniki')

        # Look for embedded ivi player
        mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
        if mobj is not None:
@@ -1628,6 +1634,11 @@ class GenericIE(InfoExtractor):
        if xhamster_urls:
            return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed')

        # Look for embedded TNAFlixNetwork player
        tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
        if tnaflix_urls:
            return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())

        # Look for embedded Tvigle player
        mobj = re.search(
            r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
@@ -65,7 +65,7 @@ class GloboIE(InfoExtractor):
        'only_matching': True,
    }]

    class MD5:
    class MD5(object):
        HEX_FORMAT_LOWERCASE = 0
        HEX_FORMAT_UPPERCASE = 1
        BASE64_PAD_CHARACTER_DEFAULT_COMPLIANCE = ''
@@ -82,7 +82,7 @@ class GoogleDriveIE(InfoExtractor):
        return {
            'id': video_id,
            'title': title,
            'thumbnail': self._og_search_thumbnail(webpage),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
        }
@@ -11,8 +11,8 @@ class HentaiStigmaIE(InfoExtractor):
        'info_dict': {
            'id': 'inyouchuu-etsu-bonus',
            'ext': 'mp4',
            "title": "Inyouchuu Etsu Bonus",
            "age_limit": 18,
            'title': 'Inyouchuu Etsu Bonus',
            'age_limit': 18,
        }
    }
@@ -42,7 +42,7 @@ class ImdbIE(InfoExtractor):
            for f_url, f_name in extra_formats]
        format_pages.append(player_page)

        quality = qualities(['SD', '480p', '720p'])
        quality = qualities(('SD', '480p', '720p', '1080p'))
        formats = []
        for format_page in format_pages:
            json_data = self._search_regex(
@@ -73,7 +73,7 @@ class IndavideoEmbedIE(InfoExtractor):
            'url': self._proto_relative_url(thumbnail)
        } for thumbnail in video.get('thumbnails', [])]

        tags = [tag['title'] for tag in video.get('tags', [])]
        tags = [tag['title'] for tag in video.get('tags') or []]

        return {
            'id': video.get('id') or video_id,
@@ -4,15 +4,12 @@ from __future__ import unicode_literals

import base64

from .common import InfoExtractor
from ..compat import (
    compat_urllib_parse_unquote,
    compat_parse_qs,
)
from ..compat import compat_urllib_parse_unquote
from ..utils import determine_ext
from .bokecc import BokeCCBaseIE


class InfoQIE(InfoExtractor):
class InfoQIE(BokeCCBaseIE):
    _VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'

    _TESTS = [{
@@ -38,26 +35,6 @@ class InfoQIE(InfoExtractor):
        },
    }]

    def _extract_bokecc_videos(self, webpage, video_id):
        # TODO: bokecc.com is a Chinese video cloud platform
        # It should have an independent extractor but I don't have other
        # examples using bokecc
        player_params_str = self._html_search_regex(
            r'<script[^>]+src="http://p\.bokecc\.com/player\?([^"]+)',
            webpage, 'player params', default=None)

        player_params = compat_parse_qs(player_params_str)

        info_xml = self._download_xml(
            'http://p.bokecc.com/servlet/playinfo?uid=%s&vid=%s&m=1' % (
                player_params['siteid'][0], player_params['vid'][0]), video_id)

        return [{
            'format_id': 'bokecc',
            'url': quality.find('./copy').attrib['playurl'],
            'preference': int(quality.attrib['value']),
        } for quality in info_xml.findall('./video/quality')]

    def _extract_rtmp_videos(self, webpage):
        # The server URL is hardcoded
        video_url = 'rtmpe://video.infoq.com/cfx/st/'
@@ -101,7 +78,7 @@ class InfoQIE(InfoExtractor):

        if '/cn/' in url:
            # for China videos, HTTP video URL exists but always fails with 403
            formats = self._extract_bokecc_videos(webpage, video_id)
            formats = self._extract_bokecc_formats(webpage, video_id)
        else:
            formats = self._extract_rtmp_videos(webpage) + self._extract_http_videos(webpage)
@ -2,14 +2,163 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import hashlib
|
||||
import itertools
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urllib_parse
|
||||
from ..utils import ExtractorError
|
||||
from ..compat import (
|
||||
compat_parse_qs,
|
||||
compat_str,
|
||||
compat_urllib_parse,
|
||||
compat_urllib_parse_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
decode_packed_codes,
|
||||
ExtractorError,
|
||||
ohdave_rsa_encrypt,
|
||||
remove_start,
|
||||
sanitized_Request,
|
||||
urlencode_postdata,
|
||||
url_basename,
|
||||
)
|
||||
|
||||
|
||||
def md5_text(text):
|
||||
return hashlib.md5(text.encode('utf-8')).hexdigest()
|
||||
|
||||
|
||||
class IqiyiSDK(object):
|
||||
def __init__(self, target, ip, timestamp):
|
||||
self.target = target
|
||||
self.ip = ip
|
||||
self.timestamp = timestamp
|
||||
|
||||
@staticmethod
|
||||
def split_sum(data):
|
||||
return compat_str(sum(map(lambda p: int(p, 16), list(data))))
|
||||
|
||||
@staticmethod
|
||||
def digit_sum(num):
|
||||
if isinstance(num, int):
|
||||
num = compat_str(num)
|
||||
return compat_str(sum(map(int, num)))
|
||||
|
||||
def even_odd(self):
|
||||
even = self.digit_sum(compat_str(self.timestamp)[::2])
|
||||
odd = self.digit_sum(compat_str(self.timestamp)[1::2])
|
||||
return even, odd
|
||||
|
||||
def preprocess(self, chunksize):
|
||||
self.target = md5_text(self.target)
|
||||
chunks = []
|
||||
for i in range(32 // chunksize):
|
||||
chunks.append(self.target[chunksize * i:chunksize * (i + 1)])
|
||||
if 32 % chunksize:
|
||||
chunks.append(self.target[32 - 32 % chunksize:])
|
||||
return chunks, list(map(int, self.ip.split('.')))
|
||||
|
||||
def mod(self, modulus):
|
||||
chunks, ip = self.preprocess(32)
|
||||
self.target = chunks[0] + ''.join(map(lambda p: compat_str(p % modulus), ip))
|
||||
|
||||
def split(self, chunksize):
|
||||
modulus_map = {
|
||||
4: 256,
|
||||
5: 10,
|
||||
8: 100,
|
||||
}
|
||||
|
||||
chunks, ip = self.preprocess(chunksize)
|
||||
ret = ''
|
||||
for i in range(len(chunks)):
|
||||
ip_part = compat_str(ip[i] % modulus_map[chunksize]) if i < 4 else ''
|
||||
if chunksize == 8:
|
||||
ret += ip_part + chunks[i]
|
||||
else:
|
||||
ret += chunks[i] + ip_part
|
||||
self.target = ret
|
||||
|
||||
def handle_input16(self):
|
||||
self.target = md5_text(self.target)
|
||||
self.target = self.split_sum(self.target[:16]) + self.target + self.split_sum(self.target[16:])
|
||||
|
||||
def handle_input8(self):
|
||||
self.target = md5_text(self.target)
|
||||
ret = ''
|
||||
for i in range(4):
|
||||
part = self.target[8 * i:8 * (i + 1)]
|
||||
ret += self.split_sum(part) + part
|
||||
self.target = ret
|
||||
|
||||
def handleSum(self):
|
||||
self.target = md5_text(self.target)
|
||||
self.target = self.split_sum(self.target) + self.target
|
||||
|
||||
def date(self, scheme):
|
||||
self.target = md5_text(self.target)
|
||||
d = time.localtime(self.timestamp)
|
||||
strings = {
|
||||
'y': compat_str(d.tm_year),
|
||||
'm': '%02d' % d.tm_mon,
|
||||
'd': '%02d' % d.tm_mday,
|
||||
}
|
||||
self.target += ''.join(map(lambda c: strings[c], list(scheme)))
|
||||
|
||||
def split_time_even_odd(self):
|
||||
even, odd = self.even_odd()
|
||||
self.target = odd + md5_text(self.target) + even
|
||||
|
||||
def split_time_odd_even(self):
|
||||
even, odd = self.even_odd()
|
||||
self.target = even + md5_text(self.target) + odd
|
||||
|
||||
def split_ip_time_sum(self):
|
||||
chunks, ip = self.preprocess(32)
|
||||
self.target = compat_str(sum(ip)) + chunks[0] + self.digit_sum(self.timestamp)
|
||||
|
||||
def split_time_ip_sum(self):
|
||||
chunks, ip = self.preprocess(32)
|
||||
self.target = self.digit_sum(self.timestamp) + chunks[0] + compat_str(sum(ip))
|
||||
|
||||
|
||||
class IqiyiSDKInterpreter(object):
|
||||
def __init__(self, sdk_code):
|
||||
self.sdk_code = sdk_code
|
||||
|
||||
def run(self, target, ip, timestamp):
|
||||
self.sdk_code = decode_packed_codes(self.sdk_code)
|
||||
|
||||
functions = re.findall(r'input=([a-zA-Z0-9]+)\(input', self.sdk_code)
|
||||
|
||||
sdk = IqiyiSDK(target, ip, timestamp)
|
||||
|
||||
other_functions = {
|
||||
'handleSum': sdk.handleSum,
|
||||
'handleInput8': sdk.handle_input8,
|
||||
'handleInput16': sdk.handle_input16,
|
||||
'splitTimeEvenOdd': sdk.split_time_even_odd,
|
||||
'splitTimeOddEven': sdk.split_time_odd_even,
|
||||
'splitIpTimeSum': sdk.split_ip_time_sum,
|
||||
'splitTimeIpSum': sdk.split_time_ip_sum,
|
||||
}
|
||||
for function in functions:
|
||||
if re.match(r'mod\d+', function):
|
||||
sdk.mod(int(function[3:]))
|
||||
elif re.match(r'date[ymd]{3}', function):
|
||||
sdk.date(function[4:])
|
||||
elif re.match(r'split\d+', function):
|
||||
sdk.split(int(function[5:]))
|
||||
elif function in other_functions:
|
||||
other_functions[function]()
|
||||
else:
|
||||
raise ExtractorError('Unknown funcion %s' % function)
|
||||
|
||||
return sdk.target
|
||||
|
||||
|
||||
class IqiyiIE(InfoExtractor):
|
||||
@ -18,6 +167,8 @@ class IqiyiIE(InfoExtractor):
|
||||
|
||||
_VALID_URL = r'http://(?:[^.]+\.)?iqiyi\.com/.+\.html'
|
||||
|
||||
_NETRC_MACHINE = 'iqiyi'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.iqiyi.com/v_19rrojlavg.html',
|
||||
'md5': '2cb594dc2781e6c941a110d8f358118b',
|
||||
@ -93,6 +244,35 @@ class IqiyiIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://yule.iqiyi.com/pcb.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# VIP-only video. The first 2 parts (6 minutes) are available without login
|
||||
# MD5 sums omitted as values are different on Travis CI and my machine
|
||||
'url': 'http://www.iqiyi.com/v_19rrny4w8w.html',
|
||||
'info_dict': {
|
||||
'id': 'f3cf468b39dddb30d676f89a91200dc1',
|
||||
'title': '泰坦尼克号',
|
||||
},
|
||||
'playlist': [{
|
||||
'info_dict': {
|
||||
'id': 'f3cf468b39dddb30d676f89a91200dc1_part1',
|
||||
'ext': 'f4v',
|
||||
'title': '泰坦尼克号',
|
||||
},
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': 'f3cf468b39dddb30d676f89a91200dc1_part2',
|
||||
'ext': 'f4v',
|
||||
'title': '泰坦尼克号',
|
||||
},
|
||||
}],
|
||||
'expected_warnings': ['Needs a VIP account for full video'],
|
||||
}, {
|
||||
'url': 'http://www.iqiyi.com/a_19rrhb8ce1.html',
|
||||
'info_dict': {
|
||||
'id': '202918101',
|
||||
'title': '灌篮高手 国语版',
|
||||
},
|
||||
'playlist_count': 101,
|
||||
}]
|
||||
|
||||
_FORMATS_MAP = [
|
||||
@ -104,11 +284,98 @@ class IqiyiIE(InfoExtractor):
|
||||
('10', 'h1'),
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def md5_text(text):
|
||||
return hashlib.md5(text.encode('utf-8')).hexdigest()
|
||||
def _real_initialize(self):
|
||||
self._login()
|
||||
|
||||
def construct_video_urls(self, data, video_id, _uuid):
|
||||
@staticmethod
|
||||
def _rsa_fun(data):
|
||||
# public key extracted from http://static.iqiyi.com/js/qiyiV2/20160129180840/jobs/i18n/i18nIndex.js
|
||||
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
|
||||
e = 65537
|
||||
|
||||
return ohdave_rsa_encrypt(data, e, N)
|
||||
|
||||
def _login(self):
|
||||
(username, password) = self._get_login_info()
|
||||
|
||||
# No authentication to be performed
|
||||
if not username:
|
||||
return True
|
||||
|
||||
data = self._download_json(
|
||||
'http://kylin.iqiyi.com/get_token', None,
|
||||
note='Get token for logging', errnote='Unable to get token for logging')
|
||||
sdk = data['sdk']
|
||||
timestamp = int(time.time())
|
||||
target = '/apis/reglogin/login.action?lang=zh_TW&area_code=null&email=%s&passwd=%s&agenttype=1&from=undefined&keeplogin=0&piccode=&fromurl=&_pos=1' % (
|
||||
username, self._rsa_fun(password.encode('utf-8')))
|
||||
|
||||
interp = IqiyiSDKInterpreter(sdk)
|
||||
sign = interp.run(target, data['ip'], timestamp)
|
||||
|
||||
validation_params = {
|
||||
'target': target,
|
||||
'server': 'BEA3AA1908656AABCCFF76582C4C6660',
|
||||
'token': data['token'],
|
||||
'bird_src': 'f8d91d57af224da7893dd397d52d811a',
|
||||
'sign': sign,
|
||||
'bird_t': timestamp,
|
||||
}
|
||||
validation_result = self._download_json(
|
||||
'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None,
|
||||
note='Validate credentials', errnote='Unable to validate credentials')
|
||||
|
||||
MSG_MAP = {
|
||||
'P00107': 'please login via the web interface and enter the CAPTCHA code',
|
||||
'P00117': 'bad username or password',
|
||||
}
|
||||
|
||||
code = validation_result['code']
|
||||
if code != 'A00000':
|
||||
msg = MSG_MAP.get(code)
|
||||
if not msg:
|
||||
msg = 'error %s' % code
|
||||
if validation_result.get('msg'):
|
||||
msg += ': ' + validation_result['msg']
|
||||
self._downloader.report_warning('unable to log in: ' + msg)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _authenticate_vip_video(self, api_video_url, video_id, tvid, _uuid, do_report_warning):
|
||||
auth_params = {
|
||||
# version and platform hard-coded in com/qiyi/player/core/model/remote/AuthenticationRemote.as
|
||||
'version': '2.0',
|
||||
'platform': 'b6c13e26323c537d',
|
||||
'aid': tvid,
|
||||
'tvid': tvid,
|
||||
'uid': '',
|
||||
'deviceId': _uuid,
|
||||
'playType': 'main', # XXX: always main?
|
||||
'filename': os.path.splitext(url_basename(api_video_url))[0],
|
||||
}
|
||||
|
||||
qd_items = compat_parse_qs(compat_urllib_parse_urlparse(api_video_url).query)
|
||||
for key, val in qd_items.items():
|
||||
auth_params[key] = val[0]
|
||||
|
||||
auth_req = sanitized_Request(
|
||||
'http://api.vip.iqiyi.com/services/ckn.action',
|
||||
urlencode_postdata(auth_params))
|
||||
# iQiyi server throws HTTP 405 error without the following header
|
||||
auth_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
|
||||
auth_result = self._download_json(
|
||||
auth_req, video_id,
|
||||
note='Downloading video authentication JSON',
|
||||
errnote='Unable to download video authentication JSON')
|
||||
if auth_result['code'] == 'Q00506': # requires a VIP account
|
||||
if do_report_warning:
|
||||
self.report_warning('Needs a VIP account for full video')
|
||||
return False
|
||||
|
||||
return auth_result
|
||||
|
||||
def construct_video_urls(self, data, video_id, _uuid, tvid):
|
||||
def do_xor(x, y):
|
||||
a = y % 3
|
||||
if a == 1:
|
||||
@ -134,9 +401,10 @@ class IqiyiIE(InfoExtractor):
|
||||
note='Download path key of segment %d for format %s' % (segment_index + 1, format_id)
|
||||
)['t']
|
||||
t = str(int(math.floor(int(tm) / (600.0))))
|
||||
return self.md5_text(t + mg + x)
|
||||
return md5_text(t + mg + x)
|
||||
|
||||
video_urls_dict = {}
|
||||
need_vip_warning_report = True
|
||||
for format_item in data['vp']['tkl'][0]['vs']:
|
||||
if 0 < int(format_item['bid']) <= 10:
|
||||
format_id = self.get_format(format_item['bid'])
|
||||
@ -155,11 +423,13 @@ class IqiyiIE(InfoExtractor):
|
||||
vl = segment['l']
|
||||
if not vl.startswith('/'):
|
||||
vl = get_encode_code(vl)
|
||||
key = get_path_key(
|
||||
vl.split('/')[-1].split('.')[0], format_id, segment_index)
|
||||
is_vip_video = '/vip/' in vl
|
||||
filesize = segment['b']
|
||||
base_url = data['vp']['du'].split('/')
|
||||
base_url.insert(-1, key)
|
||||
if not is_vip_video:
|
||||
key = get_path_key(
|
||||
vl.split('/')[-1].split('.')[0], format_id, segment_index)
|
||||
base_url.insert(-1, key)
|
||||
base_url = '/'.join(base_url)
|
||||
param = {
|
||||
'su': _uuid,
|
||||
@ -170,8 +440,23 @@ class IqiyiIE(InfoExtractor):
|
||||
'ct': '',
|
||||
'tn': str(int(time.time()))
|
||||
}
|
||||
api_video_url = base_url + vl + '?' + \
|
||||
compat_urllib_parse.urlencode(param)
|
||||
api_video_url = base_url + vl
|
||||
if is_vip_video:
|
||||
api_video_url = api_video_url.replace('.f4v', '.hml')
|
||||
auth_result = self._authenticate_vip_video(
|
||||
api_video_url, video_id, tvid, _uuid, need_vip_warning_report)
|
||||
if auth_result is False:
|
||||
need_vip_warning_report = False
|
||||
break
|
||||
param.update({
|
||||
't': auth_result['data']['t'],
|
||||
# cid is hard-coded in com/qiyi/player/core/player/RuntimeData.as
|
||||
'cid': 'afbe8fd3d73448c9',
|
||||
'vid': video_id,
|
||||
'QY00001': auth_result['data']['u'],
|
||||
})
|
||||
api_video_url += '?' if '?' not in api_video_url else '&'
|
||||
api_video_url += compat_urllib_parse.urlencode(param)
|
||||
js = self._download_json(
|
||||
api_video_url, video_id,
|
||||
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
|
||||
@ -195,16 +480,17 @@ class IqiyiIE(InfoExtractor):
|
||||
tail = tm + tvid
|
||||
param = {
|
||||
'key': 'fvip',
|
||||
'src': self.md5_text('youtube-dl'),
|
||||
'src': md5_text('youtube-dl'),
|
||||
'tvId': tvid,
|
||||
'vid': video_id,
|
||||
'vinfo': 1,
|
||||
'tm': tm,
|
||||
'enc': self.md5_text(enc_key + tail),
|
||||
'enc': md5_text(enc_key + tail),
|
||||
'qyid': _uuid,
|
||||
'tn': random.random(),
|
||||
'um': 0,
|
||||
'authkey': self.md5_text(self.md5_text('') + tail),
|
||||
'authkey': md5_text(md5_text('') + tail),
|
||||
'k_tag': 1,
|
||||
}
|
||||
|
||||
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
|
||||
@ -212,40 +498,75 @@ class IqiyiIE(InfoExtractor):
|
||||
raw_data = self._download_json(api_url, video_id)
|
||||
return raw_data
|
||||
|
||||
def get_enc_key(self, swf_url, video_id):
|
||||
def get_enc_key(self, video_id):
|
||||
# TODO: automatic key extraction
|
||||
# last update at 2016-01-22 for Zombie::bite
|
||||
enc_key = '6ab6d0280511493ba85594779759d4ed'
|
||||
return enc_key
|
||||
|
||||
def _extract_playlist(self, webpage):
|
||||
PAGE_SIZE = 50
|
||||
|
||||
links = re.findall(
|
||||
r'<a[^>]+class="site-piclist_pic_link"[^>]+href="(http://www\.iqiyi\.com/.+\.html)"',
|
||||
webpage)
|
||||
if not links:
|
||||
return
|
||||
|
||||
album_id = self._search_regex(
|
||||
r'albumId\s*:\s*(\d+),', webpage, 'album ID')
|
||||
album_title = self._search_regex(
|
||||
r'data-share-title="([^"]+)"', webpage, 'album title', fatal=False)
|
||||
|
||||
entries = list(map(self.url_result, links))
|
||||
|
||||
# Start from 2 because links in the first page are already on webpage
|
||||
for page_num in itertools.count(2):
|
||||
pagelist_page = self._download_webpage(
|
||||
'http://cache.video.qiyi.com/jp/avlist/%s/%d/%d/' % (album_id, page_num, PAGE_SIZE),
|
||||
album_id,
|
||||
note='Download playlist page %d' % page_num,
|
||||
errnote='Failed to download playlist page %d' % page_num)
|
||||
pagelist = self._parse_json(
|
||||
remove_start(pagelist_page, 'var tvInfoJs='), album_id)
|
||||
vlist = pagelist['data']['vlist']
|
||||
for item in vlist:
|
||||
entries.append(self.url_result(item['vurl']))
|
||||
if len(vlist) < PAGE_SIZE:
|
||||
break
|
||||
|
||||
return self.playlist_result(entries, album_id, album_title)
|
||||
|
||||
def _real_extract(self, url):
|
||||
webpage = self._download_webpage(
|
||||
url, 'temp_id', note='download video page')
|
||||
|
||||
# There's no simple way to determine whether an URL is a playlist or not
|
||||
# So detect it
|
||||
playlist_result = self._extract_playlist(webpage)
|
||||
if playlist_result:
|
||||
return playlist_result
|
||||
|
||||
tvid = self._search_regex(
|
||||
r'data-player-tvid\s*=\s*[\'"](\d+)', webpage, 'tvid')
|
||||
video_id = self._search_regex(
|
||||
r'data-player-videoid\s*=\s*[\'"]([a-f\d]+)', webpage, 'video_id')
|
||||
swf_url = self._search_regex(
|
||||
r'(http://[^\'"]+MainPlayer[^.]+\.swf)', webpage, 'swf player URL')
|
||||
_uuid = uuid.uuid4().hex
|
||||
|
||||
enc_key = self.get_enc_key(swf_url, video_id)
|
||||
enc_key = self.get_enc_key(video_id)
|
||||
|
||||
raw_data = self.get_raw_data(tvid, video_id, enc_key, _uuid)
|
||||
|
||||
if raw_data['code'] != 'A000000':
|
||||
raise ExtractorError('Unable to load data. Error code: ' + raw_data['code'])
|
||||
|
||||
if not raw_data['data']['vp']['tkl']:
|
||||
raise ExtractorError('No support iQiqy VIP video')
|
||||
|
||||
data = raw_data['data']
|
||||
|
||||
title = data['vi']['vn']
|
||||
|
||||
# generate video_urls_dict
|
||||
video_urls_dict = self.construct_video_urls(
|
||||
data, video_id, _uuid)
|
||||
data, video_id, _uuid, tvid)
|
||||
|
||||
# construct info
|
||||
entries = []
|
||||
|
@@ -30,7 +30,7 @@ class JeuxVideoIE(InfoExtractor):
        webpage = self._download_webpage(url, title)
        title = self._html_search_meta('name', webpage) or self._og_search_title(webpage)
        config_url = self._html_search_regex(
            r'data-src="(/contenu/medias/video.php.*?)"',
            r'data-src(?:set-video)?="(/contenu/medias/video.php.*?)"',
            webpage, 'config URL')
        config_url = 'http://www.jeuxvideo.com' + config_url
@ -7,7 +7,46 @@ from .common import InfoExtractor
|
||||
from ..utils import int_or_none
|
||||
|
||||
|
||||
class JWPlatformIE(InfoExtractor):
|
||||
class JWPlatformBaseIE(InfoExtractor):
|
||||
def _parse_jwplayer_data(self, jwplayer_data, video_id, require_title=True):
|
||||
video_data = jwplayer_data['playlist'][0]
|
||||
subtitles = {}
|
||||
for track in video_data['tracks']:
|
||||
if track['kind'] == 'captions':
|
||||
subtitles[track['label']] = [{'url': self._proto_relative_url(track['file'])}]
|
||||
|
||||
formats = []
|
||||
for source in video_data['sources']:
|
||||
source_url = self._proto_relative_url(source['file'])
|
||||
source_type = source.get('type') or ''
|
||||
if source_type in ('application/vnd.apple.mpegurl', 'hls'):
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
source_url, video_id, 'mp4', 'm3u8_native', fatal=False))
|
||||
elif source_type.startswith('audio'):
|
||||
formats.append({
|
||||
'url': source_url,
|
||||
'vcodec': 'none',
|
||||
})
|
||||
else:
|
||||
formats.append({
|
||||
'url': source_url,
|
||||
'width': int_or_none(source.get('width')),
|
||||
'height': int_or_none(source.get('height')),
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_data['title'] if require_title else video_data.get('title'),
|
||||
'description': video_data.get('description'),
|
||||
'thumbnail': self._proto_relative_url(video_data.get('image')),
|
||||
'timestamp': int_or_none(video_data.get('pubdate')),
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
|
||||
class JWPlatformIE(JWPlatformBaseIE):
|
||||
_VALID_URL = r'(?:https?://content\.jwplatform\.com/(?:feeds|players|jw6)/|jwplatform:)(?P<id>[a-zA-Z0-9]{8})'
|
||||
_TEST = {
|
||||
'url': 'http://content.jwplatform.com/players/nPripu9l-ALJ3XQCI.js',
|
||||
@ -33,38 +72,4 @@ class JWPlatformIE(InfoExtractor):
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
json_data = self._download_json('http://content.jwplatform.com/feeds/%s.json' % video_id, video_id)
|
||||
video_data = json_data['playlist'][0]
|
||||
subtitles = {}
|
||||
for track in video_data['tracks']:
|
||||
if track['kind'] == 'captions':
|
||||
subtitles[track['label']] = [{'url': self._proto_relative_url(track['file'])}]
|
||||
|
||||
formats = []
|
||||
for source in video_data['sources']:
|
||||
source_url = self._proto_relative_url(source['file'])
|
||||
source_type = source.get('type') or ''
|
||||
if source_type == 'application/vnd.apple.mpegurl':
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
source_url, video_id, 'mp4', 'm3u8_native', fatal=False))
|
||||
elif source_type.startswith('audio'):
|
||||
formats.append({
|
||||
'url': source_url,
|
||||
'vcodec': 'none',
|
||||
})
|
||||
else:
|
||||
formats.append({
|
||||
'url': source_url,
|
||||
'width': int_or_none(source.get('width')),
|
||||
'height': int_or_none(source.get('height')),
|
||||
})
|
||||
self._sort_formats(formats)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': video_data['title'],
|
||||
'description': video_data.get('description'),
|
||||
'thumbnail': self._proto_relative_url(video_data.get('image')),
|
||||
'timestamp': int_or_none(video_data.get('pubdate')),
|
||||
'subtitles': subtitles,
|
||||
'formats': formats,
|
||||
}
|
||||
return self._parse_jwplayer_data(json_data, video_id)
|
||||
|
@@ -28,7 +28,7 @@ class KankanIE(InfoExtractor):

        title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
        surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
        gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
        gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
        gcid = gcids[-1]

        info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
@ -14,10 +14,10 @@ class KhanAcademyIE(InfoExtractor):

    _TESTS = [{
        'url': 'http://www.khanacademy.org/video/one-time-pad',
        'md5': '7021db7f2d47d4fff89b13177cb1e8f4',
        'md5': '7b391cce85e758fb94f763ddc1bbb979',
        'info_dict': {
            'id': 'one-time-pad',
            'ext': 'mp4',
            'ext': 'webm',
            'title': 'The one-time pad',
            'description': 'The perfect cipher',
            'duration': 176,

99
youtube_dl/extractor/kusi.py
Normal file
@ -0,0 +1,99 @@
# coding: utf-8
from __future__ import unicode_literals

import random
import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
from ..utils import (
    int_or_none,
    float_or_none,
    timeconvert,
    update_url_query,
    xpath_text,
)


class KUSIIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?kusi\.com/(?P<path>story/.+|video\?clipId=(?P<clipId>\d+))'
    _TESTS = [{
        'url': 'http://www.kusi.com/story/31183873/turko-files-case-closed-put-on-hold',
        'md5': 'f926e7684294cf8cb7bdf8858e1b3988',
        'info_dict': {
            'id': '12203019',
            'ext': 'mp4',
            'title': 'Turko Files: Case Closed! & Put On Hold!',
            'duration': 231.0,
            'upload_date': '20160210',
            'timestamp': 1455087571,
            'thumbnail': 're:^https?://.*\.jpg$'
        },
    }, {
        'url': 'http://kusi.com/video?clipId=12203019',
        'info_dict': {
            'id': '12203019',
            'ext': 'mp4',
            'title': 'Turko Files: Case Closed! & Put On Hold!',
            'duration': 231.0,
            'upload_date': '20160210',
            'timestamp': 1455087571,
            'thumbnail': 're:^https?://.*\.jpg$'
        },
        'params': {
            'skip_download': True,  # Same as previous one
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        clip_id = mobj.group('clipId')
        video_id = clip_id or mobj.group('path')

        webpage = self._download_webpage(url, video_id)

        if clip_id is None:
            video_id = clip_id = self._html_search_regex(
                r'"clipId"\s*,\s*"(\d+)"', webpage, 'clip id')

        affiliate_id = self._search_regex(
            r'affiliateId\s*:\s*\'([^\']+)\'', webpage, 'affiliate id')

        # See __Packages/worldnow/model/GalleryModel.as of WNGallery.swf
        xml_url = update_url_query('http://www.kusi.com/build.asp', {
            'buildtype': 'buildfeaturexmlrequest',
            'featureType': 'Clip',
            'featureid': clip_id,
            'affiliateno': affiliate_id,
            'clientgroupid': '1',
            'rnd': int(round(random.random() * 1000000)),
        })

        doc = self._download_xml(xml_url, video_id)

        video_title = xpath_text(doc, 'HEADLINE', fatal=True)
        duration = float_or_none(xpath_text(doc, 'DURATION'), scale=1000)
        description = xpath_text(doc, 'ABSTRACT')
        thumbnail = xpath_text(doc, './THUMBNAILIMAGE/FILENAME')
        creation_time = timeconvert(xpath_text(doc, 'rfc822creationdate'))

        quality_options = doc.find('{http://search.yahoo.com/mrss/}group').findall('{http://search.yahoo.com/mrss/}content')
        formats = []
        for quality in quality_options:
            formats.append({
                'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
                'height': int_or_none(quality.attrib.get('height')),
                'width': int_or_none(quality.attrib.get('width')),
                'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': video_title,
            'description': description,
            'duration': duration,
            'formats': formats,
            'thumbnail': thumbnail,
            'timestamp': creation_time,
        }
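For context, not part of the diff: update_url_query() from youtube_dl.utils, used by the new KUSI extractor above, merges a dict of query parameters into a base URL, so the GalleryModel XML request is built without manual string concatenation. Roughly:

from youtube_dl.utils import update_url_query

# Single-parameter example with an invented clip id; the extractor passes all
# of its parameters in one call the same way.
url = update_url_query('http://www.kusi.com/build.asp', {'featureid': '12203019'})
# -> 'http://www.kusi.com/build.asp?featureid=12203019'
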
@ -68,6 +68,7 @@ class KuwoIE(KuwoBaseIE):
            'id': '6446136',
            'ext': 'mp3',
            'title': '心',
            'description': 'md5:b2ab6295d014005bfc607525bfc1e38a',
            'creator': 'IU',
            'upload_date': '20150518',
        },

@ -1,86 +1,125 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import random
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_parse,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
sanitized_Request,
|
||||
unified_strdate,
|
||||
urlencode_postdata,
|
||||
xpath_element,
|
||||
xpath_text,
|
||||
)
|
||||
|
||||
|
||||
class Laola1TvIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/.*?/(?P<id>[0-9]+)\.html'
|
||||
_TEST = {
|
||||
_VALID_URL = r'https?://(?:www\.)?laola1\.tv/(?P<lang>[a-z]+)-(?P<portal>[a-z]+)/[^/]+/(?P<slug>[^/?#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie/227883.html',
|
||||
'info_dict': {
|
||||
'id': '227883',
|
||||
'ext': 'mp4',
|
||||
'display_id': 'straubing-tigers-koelner-haie',
|
||||
'ext': 'flv',
|
||||
'title': 'Straubing Tigers - Kölner Haie',
|
||||
'categories': ['Eishockey'],
|
||||
'upload_date': '20140912',
|
||||
'is_live': False,
|
||||
'categories': ['Eishockey'],
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.laola1.tv/de-de/video/straubing-tigers-koelner-haie',
|
||||
'info_dict': {
|
||||
'id': '464602',
|
||||
'display_id': 'straubing-tigers-koelner-haie',
|
||||
'ext': 'flv',
|
||||
'title': 'Straubing Tigers - Kölner Haie',
|
||||
'upload_date': '20160129',
|
||||
'is_live': False,
|
||||
'categories': ['Eishockey'],
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
mobj = re.match(self._VALID_URL, url)
|
||||
video_id = mobj.group('id')
|
||||
display_id = mobj.group('slug')
|
||||
lang = mobj.group('lang')
|
||||
portal = mobj.group('portal')
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
iframe_url = self._search_regex(
|
||||
r'<iframe[^>]*?class="main_tv_player"[^>]*?src="([^"]+)"',
|
||||
webpage, 'iframe URL')
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
iframe = self._download_webpage(
|
||||
iframe_url, video_id, note='Downloading iframe')
|
||||
flashvars_m = re.findall(
|
||||
r'flashvars\.([_a-zA-Z0-9]+)\s*=\s*"([^"]*)";', iframe)
|
||||
flashvars = dict((m[0], m[1]) for m in flashvars_m)
|
||||
iframe_url = self._search_regex(
|
||||
r'<iframe[^>]*?id="videoplayer"[^>]*?src="([^"]+)"',
|
||||
webpage, 'iframe url')
|
||||
|
||||
video_id = self._search_regex(
|
||||
r'videoid=(\d+)', iframe_url, 'video id')
|
||||
|
||||
iframe = self._download_webpage(compat_urlparse.urljoin(
|
||||
url, iframe_url), display_id, 'Downloading iframe')
|
||||
|
||||
partner_id = self._search_regex(
|
||||
r'partnerid\s*:\s*"([^"]+)"', iframe, 'partner id')
|
||||
r'partnerid\s*:\s*(["\'])(?P<partner_id>.+?)\1',
|
||||
iframe, 'partner id', group='partner_id')
|
||||
|
||||
xml_url = ('http://www.laola1.tv/server/hd_video.php?' +
|
||||
'play=%s&partner=%s&portal=%s&v5ident=&lang=%s' % (
|
||||
video_id, partner_id, portal, lang))
|
||||
hd_doc = self._download_xml(xml_url, video_id)
|
||||
hd_doc = self._download_xml(
|
||||
'http://www.laola1.tv/server/hd_video.php?%s'
|
||||
% compat_urllib_parse.urlencode({
|
||||
'play': video_id,
|
||||
'partner': partner_id,
|
||||
'portal': portal,
|
||||
'lang': lang,
|
||||
'v5ident': '',
|
||||
}), display_id)
|
||||
|
||||
title = xpath_text(hd_doc, './/video/title', fatal=True)
|
||||
flash_url = xpath_text(hd_doc, './/video/url', fatal=True)
|
||||
uploader = xpath_text(hd_doc, './/video/meta_organistation')
|
||||
is_live = xpath_text(hd_doc, './/video/islive') == 'true'
|
||||
_v = lambda x, **k: xpath_text(hd_doc, './/video/' + x, **k)
|
||||
title = _v('title', fatal=True)
|
||||
|
||||
categories = xpath_text(hd_doc, './/video/meta_sports')
|
||||
if categories:
|
||||
categories = categories.split(',')
|
||||
req = sanitized_Request(
|
||||
'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' %
|
||||
compat_urllib_parse.urlencode({
|
||||
'videoId': video_id,
|
||||
'target': '2',
|
||||
'label': 'laola1tv',
|
||||
'area': _v('area'),
|
||||
}),
|
||||
urlencode_postdata(
|
||||
dict((i, v) for i, v in enumerate(_v('req_liga_abos').split(',')))))
|
||||
|
||||
ident = random.randint(10000000, 99999999)
|
||||
token_url = '%s&ident=%s&klub=0&unikey=0×tamp=%s&auth=%s' % (
|
||||
flash_url, ident, flashvars['timestamp'], flashvars['auth'])
|
||||
token_url = self._download_json(req, display_id)['data']['stream-access'][0]
|
||||
token_doc = self._download_xml(token_url, display_id, 'Downloading token')
|
||||
|
||||
token_doc = self._download_xml(
|
||||
token_url, video_id, note='Downloading token')
|
||||
token_attrib = token_doc.find('.//token').attrib
|
||||
if token_attrib.get('auth') in ('blocked', 'restricted'):
|
||||
token_attrib = xpath_element(token_doc, './/token').attrib
|
||||
token_auth = token_attrib['auth']
|
||||
|
||||
if token_auth in ('blocked', 'restricted', 'error'):
|
||||
raise ExtractorError(
|
||||
'Token error: %s' % token_attrib.get('comment'), expected=True)
|
||||
'Token error: %s' % token_attrib['comment'], expected=True)
|
||||
|
||||
video_url = '%s?hdnea=%s&hdcore=3.2.0' % (
|
||||
token_attrib['url'], token_attrib['auth'])
|
||||
formats = self._extract_f4m_formats(
|
||||
'%s?hdnea=%s&hdcore=3.2.0' % (token_attrib['url'], token_auth),
|
||||
video_id, f4m_id='hds')
|
||||
|
||||
categories_str = _v('meta_sports')
|
||||
categories = categories_str.split(',') if categories_str else []
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'is_live': is_live,
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'url': video_url,
|
||||
'uploader': uploader,
|
||||
'upload_date': unified_strdate(_v('time_date')),
|
||||
'uploader': _v('meta_organisation'),
|
||||
'categories': categories,
|
||||
'ext': 'mp4',
|
||||
'is_live': _v('islive') == 'true',
|
||||
'formats': formats,
|
||||
}
|
||||
|
@ -1,36 +1,39 @@
|
||||
# coding: utf-8
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import base64
|
||||
import datetime
|
||||
import hashlib
|
||||
import re
|
||||
import time
|
||||
import base64
|
||||
import hashlib
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_parse,
|
||||
compat_ord,
|
||||
compat_str,
|
||||
compat_urllib_parse,
|
||||
)
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
encode_data_uri,
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
orderedSet,
|
||||
parse_iso8601,
|
||||
sanitized_Request,
|
||||
int_or_none,
|
||||
str_or_none,
|
||||
encode_data_uri,
|
||||
url_basename,
|
||||
)
|
||||
|
||||
|
||||
class LetvIE(InfoExtractor):
|
||||
class LeIE(InfoExtractor):
|
||||
IE_DESC = '乐视网'
|
||||
_VALID_URL = r'http://www\.letv\.com/ptv/vplay/(?P<id>\d+).html'
|
||||
_VALID_URL = r'http://www\.le\.com/ptv/vplay/(?P<id>\d+)\.html'
|
||||
|
||||
_URL_TEMPLATE = 'http://www.le.com/ptv/vplay/%s.html'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.letv.com/ptv/vplay/22005890.html',
|
||||
'url': 'http://www.le.com/ptv/vplay/22005890.html',
|
||||
'md5': 'edadcfe5406976f42f9f266057ee5e40',
|
||||
'info_dict': {
|
||||
'id': '22005890',
|
||||
@ -42,7 +45,7 @@ class LetvIE(InfoExtractor):
|
||||
'hls_prefer_native': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.letv.com/ptv/vplay/1415246.html',
|
||||
'url': 'http://www.le.com/ptv/vplay/1415246.html',
|
||||
'info_dict': {
|
||||
'id': '1415246',
|
||||
'ext': 'mp4',
|
||||
@ -54,7 +57,7 @@ class LetvIE(InfoExtractor):
|
||||
},
|
||||
}, {
|
||||
'note': 'This video is available only in Mainland China, thus a proxy is needed',
|
||||
'url': 'http://www.letv.com/ptv/vplay/1118082.html',
|
||||
'url': 'http://www.le.com/ptv/vplay/1118082.html',
|
||||
'md5': '2424c74948a62e5f31988438979c5ad1',
|
||||
'info_dict': {
|
||||
'id': '1118082',
|
||||
@ -94,17 +97,16 @@ class LetvIE(InfoExtractor):
|
||||
return encrypted_data
|
||||
encrypted_data = encrypted_data[5:]
|
||||
|
||||
_loc4_ = bytearray()
|
||||
while encrypted_data:
|
||||
b = compat_ord(encrypted_data[0])
|
||||
_loc4_.extend([b // 16, b & 0x0f])
|
||||
encrypted_data = encrypted_data[1:]
|
||||
_loc4_ = bytearray(2 * len(encrypted_data))
|
||||
for idx, val in enumerate(encrypted_data):
|
||||
b = compat_ord(val)
|
||||
_loc4_[2 * idx] = b // 16
|
||||
_loc4_[2 * idx + 1] = b % 16
|
||||
idx = len(_loc4_) - 11
|
||||
_loc4_ = _loc4_[idx:] + _loc4_[:idx]
|
||||
_loc7_ = bytearray()
|
||||
while _loc4_:
|
||||
_loc7_.append(_loc4_[0] * 16 + _loc4_[1])
|
||||
_loc4_ = _loc4_[2:]
|
||||
_loc7_ = bytearray(len(encrypted_data))
|
||||
for i in range(len(encrypted_data)):
|
||||
_loc7_[i] = _loc4_[2 * i] * 16 + _loc4_[2 * i + 1]
|
||||
|
||||
return bytes(_loc7_)
|
||||
|
||||
@ -117,10 +119,10 @@ class LetvIE(InfoExtractor):
|
||||
'splatid': 101,
|
||||
'format': 1,
|
||||
'tkey': self.calc_time_key(int(time.time())),
|
||||
'domain': 'www.letv.com'
|
||||
'domain': 'www.le.com'
|
||||
}
|
||||
play_json_req = sanitized_Request(
|
||||
'http://api.letv.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
|
||||
'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
|
||||
)
|
||||
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
|
||||
if cn_verification_proxy:
|
||||
@ -193,26 +195,51 @@ class LetvIE(InfoExtractor):
|
||||
}
|
||||
|
||||
|
||||
class LetvTvIE(InfoExtractor):
|
||||
_VALID_URL = r'http://www.letv.com/tv/(?P<id>\d+).html'
|
||||
class LePlaylistIE(InfoExtractor):
|
||||
_VALID_URL = r'http://[a-z]+\.le\.com/[a-z]+/(?P<id>[a-z0-9_]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.letv.com/tv/46177.html',
|
||||
'url': 'http://www.le.com/tv/46177.html',
|
||||
'info_dict': {
|
||||
'id': '46177',
|
||||
'title': '美人天下',
|
||||
'description': 'md5:395666ff41b44080396e59570dbac01c'
|
||||
},
|
||||
'playlist_count': 35
|
||||
}, {
|
||||
'url': 'http://tv.le.com/izt/wuzetian/index.html',
|
||||
'info_dict': {
|
||||
'id': 'wuzetian',
|
||||
'title': '武媚娘传奇',
|
||||
'description': 'md5:e12499475ab3d50219e5bba00b3cb248'
|
||||
},
|
||||
# This playlist contains some extra videos other than the drama itself
|
||||
'playlist_mincount': 96
|
||||
}, {
|
||||
'url': 'http://tv.le.com/pzt/lswjzzjc/index.shtml',
|
||||
# This series is moved to http://www.le.com/tv/10005297.html
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.le.com/comic/92063.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://list.le.com/listn/c1009_sc532002_d2_p1_o1.html',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
return False if LeIE.suitable(url) else super(LePlaylistIE, cls).suitable(url)
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
page = self._download_webpage(url, playlist_id)
|
||||
|
||||
media_urls = list(set(re.findall(
|
||||
r'http://www.letv.com/ptv/vplay/\d+.html', page)))
|
||||
entries = [self.url_result(media_url, ie='Letv')
|
||||
for media_url in media_urls]
|
||||
# Currently old domain names are still used in playlists
|
||||
media_ids = orderedSet(re.findall(
|
||||
r'<a[^>]+href="http://www\.letv\.com/ptv/vplay/(\d+)\.html', page))
|
||||
entries = [self.url_result(LeIE._URL_TEMPLATE % media_id, ie='Le')
|
||||
for media_id in media_ids]
|
||||
|
||||
title = self._html_search_meta('keywords', page,
|
||||
fatal=False).split(',')[0]
|
||||
@ -222,31 +249,9 @@ class LetvTvIE(InfoExtractor):
|
||||
playlist_description=description)
|
||||
|
||||
|
||||
class LetvPlaylistIE(LetvTvIE):
|
||||
_VALID_URL = r'http://tv.letv.com/[a-z]+/(?P<id>[a-z]+)/index.s?html'
|
||||
_TESTS = [{
|
||||
'url': 'http://tv.letv.com/izt/wuzetian/index.html',
|
||||
'info_dict': {
|
||||
'id': 'wuzetian',
|
||||
'title': '武媚娘传奇',
|
||||
'description': 'md5:e12499475ab3d50219e5bba00b3cb248'
|
||||
},
|
||||
# This playlist contains some extra videos other than the drama itself
|
||||
'playlist_mincount': 96
|
||||
}, {
|
||||
'url': 'http://tv.letv.com/pzt/lswjzzjc/index.shtml',
|
||||
'info_dict': {
|
||||
'id': 'lswjzzjc',
|
||||
# The title should be "劲舞青春", but I can't find a simple way to
|
||||
# determine the playlist title
|
||||
'title': '乐视午间自制剧场',
|
||||
'description': 'md5:b1eef244f45589a7b5b1af9ff25a4489'
|
||||
},
|
||||
'playlist_mincount': 7
|
||||
}]
|
||||
|
||||
|
||||
class LetvCloudIE(InfoExtractor):
|
||||
# Most of *.letv.com is changed to *.le.com on 2016/01/02
|
||||
# but yuntv.letv.com is kept, so also keep the extractor name
|
||||
IE_DESC = '乐视云'
|
||||
_VALID_URL = r'https?://yuntv\.letv\.com/bcloud.html\?.+'
|
||||
|
||||
@ -327,7 +332,7 @@ class LetvCloudIE(InfoExtractor):
|
||||
formats.append({
|
||||
'url': url,
|
||||
'ext': determine_ext(decoded_url),
|
||||
'format_id': int_or_none(play_url.get('vtype')),
|
||||
'format_id': str_or_none(play_url.get('vtype')),
|
||||
'format_note': str_or_none(play_url.get('definition')),
|
||||
'width': int_or_none(play_url.get('vwidth')),
|
||||
'height': int_or_none(play_url.get('vheight')),
|
@ -20,18 +20,18 @@ class LifeNewsIE(InfoExtractor):
|
||||
_VALID_URL = r'http://lifenews\.ru/(?:mobile/)?(?P<section>news|video)/(?P<id>\d+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://lifenews.ru/news/126342',
|
||||
'md5': 'e1b50a5c5fb98a6a544250f2e0db570a',
|
||||
# single video embedded via video/source
|
||||
'url': 'http://lifenews.ru/news/98736',
|
||||
'md5': '77c95eaefaca216e32a76a343ad89d23',
|
||||
'info_dict': {
|
||||
'id': '126342',
|
||||
'id': '98736',
|
||||
'ext': 'mp4',
|
||||
'title': 'МВД разыскивает мужчин, оставивших в IKEA сумку с автоматом',
|
||||
'description': 'Камеры наблюдения гипермаркета зафиксировали троих мужчин, спрятавших оружейный арсенал в камере хранения.',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'upload_date': '20140130',
|
||||
'title': 'Мужчина нашел дома архив оборонного завода',
|
||||
'description': 'md5:3b06b1b39b5e2bea548e403d99b8bf26',
|
||||
'upload_date': '20120805',
|
||||
}
|
||||
}, {
|
||||
# video in <iframe>
|
||||
# single video embedded via iframe
|
||||
'url': 'http://lifenews.ru/news/152125',
|
||||
'md5': '77d19a6f0886cd76bdbf44b4d971a273',
|
||||
'info_dict': {
|
||||
@ -42,15 +42,33 @@ class LifeNewsIE(InfoExtractor):
|
||||
'upload_date': '20150402',
|
||||
}
|
||||
}, {
|
||||
# two videos embedded via iframe
|
||||
'url': 'http://lifenews.ru/news/153461',
|
||||
'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795',
|
||||
'info_dict': {
|
||||
'id': '153461',
|
||||
'ext': 'mp4',
|
||||
'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве',
|
||||
'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.',
|
||||
'upload_date': '20150505',
|
||||
}
|
||||
},
|
||||
'playlist': [{
|
||||
'md5': '9b6ef8bc0ffa25aebc8bdb40d89ab795',
|
||||
'info_dict': {
|
||||
'id': '153461-video1',
|
||||
'ext': 'mp4',
|
||||
'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 1)',
|
||||
'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.',
|
||||
'upload_date': '20150505',
|
||||
},
|
||||
}, {
|
||||
'md5': 'ebb3bf3b1ce40e878d0d628e93eb0322',
|
||||
'info_dict': {
|
||||
'id': '153461-video2',
|
||||
'ext': 'mp4',
|
||||
'title': 'В Москве спасли потерявшегося медвежонка, который спрятался на дереве (Видео 2)',
|
||||
'description': 'Маленький хищник не смог найти дорогу домой и обрел временное убежище на тополе недалеко от жилого массива, пока его не нашла соседская собака.',
|
||||
'upload_date': '20150505',
|
||||
},
|
||||
}],
|
||||
}, {
|
||||
'url': 'http://lifenews.ru/video/13035',
|
||||
'only_matching': True,
|
||||
@ -65,10 +83,14 @@ class LifeNewsIE(InfoExtractor):
|
||||
'http://lifenews.ru/%s/%s' % (section, video_id),
|
||||
video_id, 'Downloading page')
|
||||
|
||||
videos = re.findall(r'<video.*?poster="(?P<poster>[^"]+)".*?src="(?P<video>[^"]+)".*?></video>', webpage)
|
||||
iframe_link = self._html_search_regex(
|
||||
'<iframe[^>]+src=["\']([^"\']+)["\']', webpage, 'iframe link', default=None)
|
||||
if not videos and not iframe_link:
|
||||
video_urls = re.findall(
|
||||
r'<video[^>]+><source[^>]+src=["\'](.+?)["\']', webpage)
|
||||
|
||||
iframe_links = re.findall(
|
||||
r'<iframe[^>]+src=["\']((?:https?:)?//embed\.life\.ru/embed/.+?)["\']',
|
||||
webpage)
|
||||
|
||||
if not video_urls and not iframe_links:
|
||||
raise ExtractorError('No media links available for %s' % video_id)
|
||||
|
||||
title = remove_end(
|
||||
@ -95,31 +117,44 @@ class LifeNewsIE(InfoExtractor):
|
||||
'upload_date': upload_date,
|
||||
}
|
||||
|
||||
def make_entry(video_id, media, video_number=None):
|
||||
def make_entry(video_id, video_url, index=None):
|
||||
cur_info = dict(common_info)
|
||||
cur_info.update({
|
||||
'id': video_id,
|
||||
'url': media[1],
|
||||
'thumbnail': media[0],
|
||||
'title': title if video_number is None else '%s-video%s' % (title, video_number),
|
||||
'id': video_id if not index else '%s-video%s' % (video_id, index),
|
||||
'url': video_url,
|
||||
'title': title if not index else '%s (Видео %s)' % (title, index),
|
||||
})
|
||||
return cur_info
|
||||
|
||||
if iframe_link:
|
||||
iframe_link = self._proto_relative_url(iframe_link, 'http:')
|
||||
cur_info = dict(common_info)
|
||||
cur_info.update({
|
||||
'_type': 'url_transparent',
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'url': iframe_link,
|
||||
})
|
||||
def make_video_entry(video_id, video_url, index=None):
|
||||
video_url = compat_urlparse.urljoin(url, video_url)
|
||||
return make_entry(video_id, video_url, index)
|
||||
|
||||
def make_iframe_entry(video_id, video_url, index=None):
|
||||
video_url = self._proto_relative_url(video_url, 'http:')
|
||||
cur_info = make_entry(video_id, video_url, index)
|
||||
cur_info['_type'] = 'url_transparent'
|
||||
return cur_info
|
||||
|
||||
if len(videos) == 1:
|
||||
return make_entry(video_id, videos[0])
|
||||
else:
|
||||
return [make_entry(video_id, media, video_number + 1) for video_number, media in enumerate(videos)]
|
||||
if len(video_urls) == 1 and not iframe_links:
|
||||
return make_video_entry(video_id, video_urls[0])
|
||||
|
||||
if len(iframe_links) == 1 and not video_urls:
|
||||
return make_iframe_entry(video_id, iframe_links[0])
|
||||
|
||||
entries = []
|
||||
|
||||
if video_urls:
|
||||
for num, video_url in enumerate(video_urls, 1):
|
||||
entries.append(make_video_entry(video_id, video_url, num))
|
||||
|
||||
if iframe_links:
|
||||
for num, iframe_link in enumerate(iframe_links, len(video_urls) + 1):
|
||||
entries.append(make_iframe_entry(video_id, iframe_link, num))
|
||||
|
||||
playlist = common_info.copy()
|
||||
playlist.update(self.playlist_result(entries, video_id, title, description))
|
||||
return playlist
|
||||
|
||||
|
||||
class LifeEmbedIE(InfoExtractor):
|
||||
|
@ -47,7 +47,7 @@ class LiveLeakIE(InfoExtractor):
        'info_dict': {
            'id': '801_1409392012',
            'ext': 'mp4',
            'description': "Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.",
            'description': 'Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.',
            'uploader': 'bony333',
            'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
        }

@ -64,7 +64,7 @@ class LivestreamIE(InfoExtractor):
    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        base_ele = find_xpath_attr(
            smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
        base = base_ele.get('content') if base_ele else 'http://livestreamvod-f.akamaihd.net/'
        base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/'

        formats = []
        video_nodes = smil.findall(self._xpath_ns('.//video', namespace))

@ -14,7 +14,7 @@ from ..utils import (
|
||||
|
||||
class MDRIE(InfoExtractor):
|
||||
IE_DESC = 'MDR.DE and KiKA'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+(?P<id>\d+)(?:_.+?)?\.html'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:mdr|kika)\.de/(?:.*)/[a-z]+-?(?P<id>\d+)(?:_.+?)?\.html'
|
||||
|
||||
_TESTS = [{
|
||||
# MDR regularly deletes its videos
|
||||
@ -60,6 +60,9 @@ class MDRIE(InfoExtractor):
|
||||
}, {
|
||||
'url': 'http://www.kika.de/sendungen/einzelsendungen/weihnachtsprogramm/einzelsendung2534.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.mdr.de/mediathek/mdr-videos/a/video-1334.html',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
@ -68,8 +71,8 @@ class MDRIE(InfoExtractor):
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
data_url = self._search_regex(
|
||||
r'dataURL\s*:\s*(["\'])(?P<url>/.+/(?:video|audio)[0-9]+-avCustom\.xml)\1',
|
||||
webpage, 'data url', group='url')
|
||||
r'(?:dataURL|playerXml(?:["\'])?)\s*:\s*(["\'])(?P<url>\\?/.+/(?:video|audio)-?[0-9]+-avCustom\.xml)\1',
|
||||
webpage, 'data url', default=None, group='url').replace('\/', '/')
|
||||
|
||||
doc = self._download_xml(
|
||||
compat_urlparse.urljoin(url, data_url), video_id)
|
||||
|
@ -99,7 +99,7 @@ class OCWMITIE(InfoExtractor):
        'url': 'http://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-041-probabilistic-systems-analysis-and-applied-probability-fall-2010/video-lectures/lecture-7-multiple-variables-expectations-independence/',
        'info_dict': {
            'id': 'EObHWIEKGjA',
            'ext': 'mp4',
            'ext': 'webm',
            'title': 'Lecture 7: Multiple Discrete Random Variables: Expectations, Conditioning, Independence',
            'description': 'In this lecture, the professor discussed multiple random variables, expectations, and binomial distribution.',
            'upload_date': '20121109',

@ -38,7 +38,7 @@ class MofosexIE(InfoExtractor):
        path = compat_urllib_parse_urlparse(video_url).path
        extension = os.path.splitext(path)[1][1:]
        format = path.split('/')[5].split('_')[:2]
        format = "-".join(format)
        format = '-'.join(format)

        age_limit = self._rta_search(webpage)

@ -5,6 +5,7 @@ import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
str_to_int,
|
||||
unified_strdate,
|
||||
)
|
||||
@ -12,55 +13,62 @@ from ..utils import (
|
||||
|
||||
class MotherlessIE(InfoExtractor):
|
||||
_VALID_URL = r'http://(?:www\.)?motherless\.com/(?:g/[a-z0-9_]+/)?(?P<id>[A-Z0-9]+)'
|
||||
_TESTS = [
|
||||
{
|
||||
'url': 'http://motherless.com/AC3FFE1',
|
||||
'md5': '310f62e325a9fafe64f68c0bccb6e75f',
|
||||
'info_dict': {
|
||||
'id': 'AC3FFE1',
|
||||
'ext': 'mp4',
|
||||
'title': 'Fucked in the ass while playing PS3',
|
||||
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
|
||||
'upload_date': '20100913',
|
||||
'uploader_id': 'famouslyfuckedup',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://motherless.com/532291B',
|
||||
'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
|
||||
'info_dict': {
|
||||
'id': '532291B',
|
||||
'ext': 'mp4',
|
||||
'title': 'Amazing girl playing the omegle game, PERFECT!',
|
||||
'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen', 'game', 'hairy'],
|
||||
'upload_date': '20140622',
|
||||
'uploader_id': 'Sulivana7x',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
},
|
||||
{
|
||||
'url': 'http://motherless.com/g/cosplay/633979F',
|
||||
'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
|
||||
'info_dict': {
|
||||
'id': '633979F',
|
||||
'ext': 'mp4',
|
||||
'title': 'Turtlette',
|
||||
'categories': ['superheroine heroine superher'],
|
||||
'upload_date': '20140827',
|
||||
'uploader_id': 'shade0230',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
_TESTS = [{
|
||||
'url': 'http://motherless.com/AC3FFE1',
|
||||
'md5': '310f62e325a9fafe64f68c0bccb6e75f',
|
||||
'info_dict': {
|
||||
'id': 'AC3FFE1',
|
||||
'ext': 'mp4',
|
||||
'title': 'Fucked in the ass while playing PS3',
|
||||
'categories': ['Gaming', 'anal', 'reluctant', 'rough', 'Wife'],
|
||||
'upload_date': '20100913',
|
||||
'uploader_id': 'famouslyfuckedup',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
]
|
||||
}, {
|
||||
'url': 'http://motherless.com/532291B',
|
||||
'md5': 'bc59a6b47d1f958e61fbd38a4d31b131',
|
||||
'info_dict': {
|
||||
'id': '532291B',
|
||||
'ext': 'mp4',
|
||||
'title': 'Amazing girl playing the omegle game, PERFECT!',
|
||||
'categories': ['Amateur', 'webcam', 'omegle', 'pink', 'young', 'masturbate', 'teen',
|
||||
'game', 'hairy'],
|
||||
'upload_date': '20140622',
|
||||
'uploader_id': 'Sulivana7x',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
},
|
||||
'skip': '404',
|
||||
}, {
|
||||
'url': 'http://motherless.com/g/cosplay/633979F',
|
||||
'md5': '0b2a43f447a49c3e649c93ad1fafa4a0',
|
||||
'info_dict': {
|
||||
'id': '633979F',
|
||||
'ext': 'mp4',
|
||||
'title': 'Turtlette',
|
||||
'categories': ['superheroine heroine superher'],
|
||||
'upload_date': '20140827',
|
||||
'uploader_id': 'shade0230',
|
||||
'thumbnail': 're:http://.*\.jpg',
|
||||
'age_limit': 18,
|
||||
}
|
||||
}, {
|
||||
# no keywords
|
||||
'url': 'http://motherless.com/8B4BBC1',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
if any(p in webpage for p in (
|
||||
'<title>404 - MOTHERLESS.COM<',
|
||||
">The page you're looking for cannot be found.<")):
|
||||
raise ExtractorError('Video %s does not exist' % video_id, expected=True)
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'id="view-upload-title">\s+([^<]+)<', webpage, 'title')
|
||||
video_url = self._html_search_regex(
|
||||
@ -86,7 +94,7 @@ class MotherlessIE(InfoExtractor):
|
||||
r'"thumb-member-username">\s+<a href="/m/([^"]+)"',
|
||||
webpage, 'uploader_id')
|
||||
|
||||
categories = self._html_search_meta('keywords', webpage)
|
||||
categories = self._html_search_meta('keywords', webpage, default=None)
|
||||
if categories:
|
||||
categories = [cat.strip() for cat in categories.split(',')]
|
||||
|
||||
|
@ -11,6 +11,7 @@ from ..utils import (
|
||||
ExtractorError,
|
||||
find_xpath_attr,
|
||||
fix_xml_ampersands,
|
||||
float_or_none,
|
||||
HEADRequest,
|
||||
sanitized_Request,
|
||||
unescapeHTML,
|
||||
@ -110,7 +111,8 @@ class MTVServicesInfoExtractor(InfoExtractor):
|
||||
uri = itemdoc.find('guid').text
|
||||
video_id = self._id_from_uri(uri)
|
||||
self.report_extraction(video_id)
|
||||
mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
|
||||
content_el = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content')))
|
||||
mediagen_url = content_el.attrib['url']
|
||||
# Remove the templates, like &device={device}
|
||||
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
|
||||
if 'acceptMethods' not in mediagen_url:
|
||||
@ -165,6 +167,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
|
||||
'id': video_id,
|
||||
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
|
||||
'description': description,
|
||||
'duration': float_or_none(content_el.attrib.get('duration')),
|
||||
}
|
||||
|
||||
def _get_feed_query(self, uri):
|
||||
|
@ -18,8 +18,8 @@ class MySpassIE(InfoExtractor):
        'info_dict': {
            'id': '11741',
            'ext': 'mp4',
            "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
            "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
            'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
            'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
        },
    }

@ -19,6 +19,7 @@ from ..utils import (


class MyVideoIE(InfoExtractor):
    _WORKING = False
    _VALID_URL = r'http://(?:www\.)?myvideo\.de/(?:[^/]+/)?watch/(?P<id>[0-9]+)/[^?/]+.*'
    IE_NAME = 'myvideo'
    _TEST = {

@ -1,18 +1,26 @@
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import functools
|
||||
import os.path
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import (
|
||||
compat_urllib_parse,
|
||||
compat_urlparse,
|
||||
)
|
||||
from ..utils import (
|
||||
parse_duration,
|
||||
int_or_none,
|
||||
OnDemandPagedList,
|
||||
parse_duration,
|
||||
remove_start,
|
||||
xpath_text,
|
||||
xpath_attr,
|
||||
)
|
||||
|
||||
|
||||
class NBAIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)?video/(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
|
||||
_VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?P<path>(?:[^/]+/)+(?P<id>[^?]*?))/?(?:/index\.html)?(?:\?.*)?$'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
|
||||
'md5': '9e7729d3010a9c71506fd1248f74e4f4',
|
||||
@ -44,14 +52,101 @@ class NBAIE(InfoExtractor):
|
||||
'timestamp': 1432134543,
|
||||
'upload_date': '20150520',
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.nba.com/clippers/news/doc-rivers-were-not-trading-blake',
|
||||
'info_dict': {
|
||||
'id': '1455672027478-Doc_Feb16_720',
|
||||
'ext': 'mp4',
|
||||
'title': 'Practice: Doc Rivers - 2/16/16',
|
||||
'description': 'Head Coach Doc Rivers addresses the media following practice.',
|
||||
'upload_date': '20160217',
|
||||
'timestamp': 1455672000,
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#',
|
||||
'info_dict': {
|
||||
'id': 'timberwolves',
|
||||
'title': 'Shootaround Access - Dec. 12 | Andrew Wiggins',
|
||||
},
|
||||
'playlist_count': 30,
|
||||
'params': {
|
||||
# Download the whole playlist takes too long time
|
||||
'playlist_items': '1-30',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.nba.com/timberwolves/wiggins-shootaround#',
|
||||
'info_dict': {
|
||||
'id': 'Wigginsmp4',
|
||||
'ext': 'mp4',
|
||||
'title': 'Shootaround Access - Dec. 12 | Andrew Wiggins',
|
||||
'description': 'Wolves rookie Andrew Wiggins addresses the media after Friday\'s shootaround.',
|
||||
'upload_date': '20141212',
|
||||
'timestamp': 1418418600,
|
||||
},
|
||||
'params': {
|
||||
'noplaylist': True,
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
}]
|
||||
|
||||
_PAGE_SIZE = 30
|
||||
|
||||
def _fetch_page(self, team, video_id, page):
|
||||
search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse.urlencode({
|
||||
'type': 'teamvideo',
|
||||
'start': page * self._PAGE_SIZE + 1,
|
||||
'npp': (page + 1) * self._PAGE_SIZE + 1,
|
||||
'sort': 'recent',
|
||||
'output': 'json',
|
||||
'site': team,
|
||||
})
|
||||
results = self._download_json(
|
||||
search_url, video_id, note='Download page %d of playlist data' % page)['results'][0]
|
||||
for item in results:
|
||||
yield self.url_result(compat_urlparse.urljoin('http://www.nba.com/', item['url']))
|
||||
|
||||
def _extract_playlist(self, orig_path, video_id, webpage):
|
||||
team = orig_path.split('/')[0]
|
||||
|
||||
if self._downloader.params.get('noplaylist'):
|
||||
self.to_screen('Downloading just video because of --no-playlist')
|
||||
video_path = self._search_regex(
|
||||
r'nbaVideoCore\.firstVideo\s*=\s*\'([^\']+)\';', webpage, 'video path')
|
||||
video_url = 'http://www.nba.com/%s/video/%s' % (team, video_path)
|
||||
return self.url_result(video_url)
|
||||
|
||||
self.to_screen('Downloading playlist - add --no-playlist to just download video')
|
||||
playlist_title = self._og_search_title(webpage, fatal=False)
|
||||
entries = OnDemandPagedList(
|
||||
functools.partial(self._fetch_page, team, video_id),
|
||||
self._PAGE_SIZE, use_cache=True)
|
||||
|
||||
return self.playlist_result(entries, team, playlist_title)
|
||||
|
||||
def _real_extract(self, url):
|
||||
path, video_id = re.match(self._VALID_URL, url).groups()
|
||||
orig_path = path
|
||||
if path.startswith('nba/'):
|
||||
path = path[3:]
|
||||
|
||||
if 'video/' not in path:
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
path = remove_start(self._search_regex(r'data-videoid="([^"]+)"', webpage, 'video id'), '/')
|
||||
|
||||
if path == '{{id}}':
|
||||
return self._extract_playlist(orig_path, video_id, webpage)
|
||||
|
||||
# See prepareContentId() of pkgCvp.js
|
||||
if path.startswith('video/teams'):
|
||||
path = 'video/channels/proxy/' + path[6:]
|
||||
|
||||
video_info = self._download_xml('http://www.nba.com/%s.xml' % path, video_id)
|
||||
video_id = xpath_text(video_info, 'slug')
|
||||
video_id = os.path.splitext(xpath_text(video_info, 'slug'))[0]
|
||||
title = xpath_text(video_info, 'headline')
|
||||
description = xpath_text(video_info, 'description')
|
||||
duration = parse_duration(xpath_text(video_info, 'length'))
|
||||
|
@ -18,14 +18,14 @@ class NerdCubedFeedIE(InfoExtractor):
    }

    def _real_extract(self, url):
        feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
        feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed')

        entries = [{
            '_type': 'url',
            'title': feed_entry['title'],
            'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
            'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
            'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
            'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
        } for feed_entry in feed]

        return {

69
youtube_dl/extractor/noz.py
Normal file
@ -0,0 +1,69 @@
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
    int_or_none,
    xpath_text,
)


class NozIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?noz\.de/video/(?P<id>[0-9]+)/'
    _TESTS = [{
        'url': 'http://www.noz.de/video/25151/32-Deutschland-gewinnt-Badminton-Lnderspiel-in-Melle',
        'info_dict': {
            'id': '25151',
            'ext': 'mp4',
            'duration': 215,
            'title': '3:2 - Deutschland gewinnt Badminton-Länderspiel in Melle',
            'description': 'Vor rund 370 Zuschauern gewinnt die deutsche Badminton-Nationalmannschaft am Donnerstag ein EM-Vorbereitungsspiel gegen Frankreich in Melle. Video Moritz Frankenberg.',
            'thumbnail': 're:^http://.*\.jpg',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        description = self._og_search_description(webpage)

        edge_url = self._html_search_regex(
            r'<script\s+(?:type="text/javascript"\s+)?src="(.*?/videojs_.*?)"',
            webpage, 'edge URL')
        edge_content = self._download_webpage(edge_url, 'meta configuration')

        config_url_encoded = self._search_regex(
            r'so\.addVariable\("config_url","[^,]*,(.*?)"',
            edge_content, 'config URL'
        )
        config_url = compat_urllib_parse_unquote(config_url_encoded)

        doc = self._download_xml(config_url, 'video configuration')
        title = xpath_text(doc, './/title')
        thumbnail = xpath_text(doc, './/article/thumbnail/url')
        duration = int_or_none(xpath_text(
            doc, './/article/movie/file/duration'))
        formats = []
        for qnode in doc.findall('.//article/movie/file/qualities/qual'):
            video_node = qnode.find('./html_urls/video_url[@format="video/mp4"]')
            if video_node is None:
                continue  # auto
            formats.append({
                'url': video_node.text,
                'format_name': xpath_text(qnode, './name'),
                'format_id': xpath_text(qnode, './id'),
                'height': int_or_none(xpath_text(qnode, './height')),
                'width': int_or_none(xpath_text(qnode, './width')),
                'tbr': int_or_none(xpath_text(qnode, './bitrate'), scale=1000),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': title,
            'duration': duration,
            'description': description,
            'thumbnail': thumbnail,
        }
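For orientation, not part of the diff: each <qual> node in the noz.de config XML is mapped to one youtube-dl format dict by the loop above. With invented example values (the XML field names come from the XPath queries; everything else is hypothetical), one resulting entry would look like:

# Assumed input node:
#   <qual><name>hd</name><id>3</id><height>720</height><width>1280</width>
#         <bitrate>2500000</bitrate>
#         <html_urls><video_url format="video/mp4">http://video.noz.example/hd.mp4</video_url></html_urls></qual>
{
    'url': 'http://video.noz.example/hd.mp4',  # video_node.text
    'format_name': 'hd',                       # ./name
    'format_id': '3',                          # ./id
    'height': 720,                             # ./height as int
    'width': 1280,                             # ./width as int
    'tbr': 2500,                               # ./bitrate divided by 1000
}
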
@ -4,7 +4,10 @@ from __future__ import unicode_literals
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urlparse
|
||||
from ..compat import (
|
||||
compat_urlparse,
|
||||
compat_urllib_parse_unquote,
|
||||
)
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
ExtractorError,
|
||||
@ -87,7 +90,7 @@ class NRKIE(InfoExtractor):
|
||||
|
||||
|
||||
class NRKPlaylistIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video)(?:[^/]+/)+(?P<id>[^/]+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?nrk\.no/(?!video|skole)(?:[^/]+/)+(?P<id>[^/]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://www.nrk.no/troms/gjenopplev-den-historiske-solformorkelsen-1.12270763',
|
||||
@ -126,6 +129,37 @@ class NRKPlaylistIE(InfoExtractor):
|
||||
entries, playlist_id, playlist_title, playlist_description)
|
||||
|
||||
|
||||
class NRKSkoleIE(InfoExtractor):
|
||||
IE_DESC = 'NRK Skole'
|
||||
_VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/klippdetalj?.*\btopic=(?P<id>[^/?#&]+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://nrk.no/skole/klippdetalj?topic=nrk:klipp/616532',
|
||||
'md5': '04cd85877cc1913bce73c5d28a47e00f',
|
||||
'info_dict': {
|
||||
'id': '6021',
|
||||
'ext': 'flv',
|
||||
'title': 'Genetikk og eneggede tvillinger',
|
||||
'description': 'md5:3aca25dcf38ec30f0363428d2b265f8d',
|
||||
'duration': 399,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.nrk.no/skole/klippdetalj?topic=nrk%3Aklipp%2F616532#embed',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.nrk.no/skole/klippdetalj?topic=urn:x-mediadb:21379',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = compat_urllib_parse_unquote(self._match_id(url))
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
nrk_id = self._search_regex(r'data-nrk-id=["\'](\d+)', webpage, 'nrk id')
|
||||
return self.url_result('nrk:%s' % nrk_id)
|
||||
|
||||
|
||||
class NRKTVIE(InfoExtractor):
|
||||
IE_DESC = 'NRK TV and NRK Radio'
|
||||
_VALID_URL = r'(?P<baseurl>https?://(?:tv|radio)\.nrk(?:super)?\.no/)(?:serie/[^/]+|program)/(?P<id>[a-zA-Z]{4}\d{8})(?:/\d{2}-\d{2}-\d{4})?(?:#del=(?P<part_id>\d+))?'
|
||||
|
@ -112,6 +112,7 @@ class ORFTVthekIE(InfoExtractor):
                % geo_str),
            fatal=False)

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        upload_date = unified_strdate(sd['created_date'])

@ -337,6 +337,21 @@ class PBSIE(InfoExtractor):
|
||||
'skip_download': True, # requires ffmpeg
|
||||
},
|
||||
},
|
||||
{
|
||||
# Serves hd only via wigget/partnerplayer page
|
||||
'url': 'http://www.pbs.org/video/2365641075/',
|
||||
'info_dict': {
|
||||
'id': '2365641075',
|
||||
'ext': 'mp4',
|
||||
'title': 'FRONTLINE - Netanyahu at War',
|
||||
'duration': 6852,
|
||||
'thumbnail': 're:^https?://.*\.jpg$',
|
||||
'formats': 'mincount:8',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True, # requires ffmpeg
|
||||
},
|
||||
},
|
||||
{
|
||||
'url': 'http://player.pbs.org/widget/partnerplayer/2365297708/?start=0&end=0&chapterbar=false&endscreen=false&topbar=true',
|
||||
'only_matching': True,
|
||||
@ -437,34 +452,54 @@ class PBSIE(InfoExtractor):
|
||||
for vid_id in video_id]
|
||||
return self.playlist_result(entries, display_id)
|
||||
|
||||
info = None
|
||||
redirects = []
|
||||
redirect_urls = set()
|
||||
|
||||
def extract_redirect_urls(info):
|
||||
for encoding_name in ('recommended_encoding', 'alternate_encoding'):
|
||||
redirect = info.get(encoding_name)
|
||||
if not redirect:
|
||||
continue
|
||||
redirect_url = redirect.get('url')
|
||||
if redirect_url and redirect_url not in redirect_urls:
|
||||
redirects.append(redirect)
|
||||
redirect_urls.add(redirect_url)
|
||||
|
||||
try:
|
||||
info = self._download_json(
|
||||
video_info = self._download_json(
|
||||
'http://player.pbs.org/videoInfo/%s?format=json&type=partner' % video_id,
|
||||
display_id, 'Downloading video info JSON')
|
||||
extract_redirect_urls(video_info)
|
||||
info = video_info
|
||||
except ExtractorError as e:
|
||||
# videoInfo API may not work for some videos
|
||||
if not isinstance(e.cause, compat_HTTPError) or e.cause.code != 404:
|
||||
raise
|
||||
# videoInfo API may not work for some videos, fallback to portalplayer API
|
||||
|
||||
# Player pages may also serve different qualities
|
||||
for page in ('widget/partnerplayer', 'portalplayer'):
|
||||
player = self._download_webpage(
|
||||
'http://player.pbs.org/portalplayer/%s' % video_id, display_id)
|
||||
info = self._parse_json(
|
||||
self._search_regex(
|
||||
r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
|
||||
player, 'video data', default='{}'),
|
||||
display_id, transform_source=js_to_json, fatal=False)
|
||||
'http://player.pbs.org/%s/%s' % (page, video_id),
|
||||
display_id, 'Downloading %s page' % page, fatal=False)
|
||||
if player:
|
||||
video_info = self._parse_json(
|
||||
self._search_regex(
|
||||
r'(?s)PBS\.videoData\s*=\s*({.+?});\n',
|
||||
player, '%s video data' % page, default='{}'),
|
||||
display_id, transform_source=js_to_json, fatal=False)
|
||||
if video_info:
|
||||
extract_redirect_urls(video_info)
|
||||
if not info:
|
||||
info = video_info
|
||||
|
||||
formats = []
|
||||
for encoding_name in ('recommended_encoding', 'alternate_encoding'):
|
||||
redirect = info.get(encoding_name)
|
||||
if not redirect:
|
||||
continue
|
||||
redirect_url = redirect.get('url')
|
||||
if not redirect_url:
|
||||
continue
|
||||
for num, redirect in enumerate(redirects):
|
||||
redirect_id = redirect.get('eeid')
|
||||
|
||||
redirect_info = self._download_json(
|
||||
redirect_url + '?format=json', display_id,
|
||||
'Downloading %s video url info' % encoding_name)
|
||||
'%s?format=json' % redirect['url'], display_id,
|
||||
'Downloading %s video url info' % (redirect_id or num))
|
||||
|
||||
if redirect_info['status'] == 'error':
|
||||
raise ExtractorError(
|
||||
@ -483,8 +518,9 @@ class PBSIE(InfoExtractor):
|
||||
else:
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'format_id': redirect.get('eeid'),
|
||||
'format_id': redirect_id,
|
||||
})
|
||||
self._remove_duplicate_formats(formats)
|
||||
self._sort_formats(formats)
|
||||
|
||||
rating_str = info.get('rating')
|
||||
|
@ -11,6 +11,7 @@ from ..compat import (
|
||||
)
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
sanitized_Request,
|
||||
str_to_int,
|
||||
)
|
||||
@ -23,13 +24,18 @@ class PornHubIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:[a-z]+\.)?pornhub\.com/(?:view_video\.php\?viewkey=|embed/)(?P<id>[0-9a-z]+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.pornhub.com/view_video.php?viewkey=648719015',
|
||||
'md5': '882f488fa1f0026f023f33576004a2ed',
|
||||
'md5': '1e19b41231a02eba417839222ac9d58e',
|
||||
'info_dict': {
|
||||
'id': '648719015',
|
||||
'ext': 'mp4',
|
||||
"uploader": "Babes",
|
||||
"title": "Seductive Indian beauty strips down and fingers her pink pussy",
|
||||
"age_limit": 18
|
||||
'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
|
||||
'uploader': 'Babes',
|
||||
'duration': 361,
|
||||
'view_count': int,
|
||||
'like_count': int,
|
||||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 18,
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
|
||||
@ -67,13 +73,23 @@ class PornHubIE(InfoExtractor):
|
||||
'PornHub said: %s' % error_msg,
|
||||
expected=True, video_id=video_id)
|
||||
|
||||
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
|
||||
flashvars = self._parse_json(
|
||||
self._search_regex(
|
||||
r'var\s+flashvars_\d+\s*=\s*({.+?});', webpage, 'flashvars', default='{}'),
|
||||
video_id)
|
||||
if flashvars:
|
||||
video_title = flashvars.get('video_title')
|
||||
thumbnail = flashvars.get('image_url')
|
||||
duration = int_or_none(flashvars.get('video_duration'))
|
||||
else:
|
||||
video_title, thumbnail, duration = [None] * 3
|
||||
|
||||
if not video_title:
|
||||
video_title = self._html_search_regex(r'<h1 [^>]+>([^<]+)', webpage, 'title')
|
||||
|
||||
video_uploader = self._html_search_regex(
|
||||
r'(?s)From: .+?<(?:a href="/users/|a href="/channels/|span class="username)[^>]+>(.+?)<',
|
||||
webpage, 'uploader', fatal=False)
|
||||
thumbnail = self._html_search_regex(r'"image_url":"([^"]+)', webpage, 'thumbnail', fatal=False)
|
||||
if thumbnail:
|
||||
thumbnail = compat_urllib_parse_unquote(thumbnail)
|
||||
|
||||
view_count = self._extract_count(
|
||||
r'<span class="count">([\d,\.]+)</span> views', webpage, 'view')
|
||||
@ -95,7 +111,7 @@ class PornHubIE(InfoExtractor):
|
||||
path = compat_urllib_parse_urlparse(video_url).path
|
||||
extension = os.path.splitext(path)[1][1:]
|
||||
format = path.split('/')[5].split('_')[:2]
|
||||
format = "-".join(format)
|
||||
format = '-'.join(format)
|
||||
|
||||
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
|
||||
if m is None:
|
||||
@ -120,6 +136,7 @@ class PornHubIE(InfoExtractor):
|
||||
'uploader': video_uploader,
|
||||
'title': video_title,
|
||||
'thumbnail': thumbnail,
|
||||
'duration': duration,
|
||||
'view_count': view_count,
|
||||
'like_count': like_count,
|
||||
'dislike_count': dislike_count,
|
||||
@ -129,7 +146,31 @@ class PornHubIE(InfoExtractor):
|
||||
}
|
||||
|
||||
|
||||
class PornHubPlaylistIE(InfoExtractor):
|
||||
class PornHubPlaylistBaseIE(InfoExtractor):
|
||||
def _extract_entries(self, webpage):
|
||||
return [
|
||||
self.url_result('http://www.pornhub.com/%s' % video_url, PornHubIE.ie_key())
|
||||
for video_url in set(re.findall(
|
||||
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage))
|
||||
]
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
|
||||
entries = self._extract_entries(webpage)
|
||||
|
||||
playlist = self._parse_json(
|
||||
self._search_regex(
|
||||
r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
|
||||
playlist_id)
|
||||
|
||||
return self.playlist_result(
|
||||
entries, playlist_id, playlist.get('title'), playlist.get('description'))
|
||||
|
||||
|
||||
class PornHubPlaylistIE(PornHubPlaylistBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/playlist/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.pornhub.com/playlist/6201671',
|
||||
@ -140,21 +181,20 @@ class PornHubPlaylistIE(InfoExtractor):
|
||||
'playlist_mincount': 35,
|
||||
}]
|
||||
|
||||
|
||||
class PornHubUserVideosIE(PornHubPlaylistBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?pornhub\.com/users/(?P<id>[^/]+)/videos'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.pornhub.com/users/rushandlia/videos',
|
||||
'info_dict': {
|
||||
'id': 'rushandlia',
|
||||
},
|
||||
'playlist_mincount': 13,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
user_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
webpage = self._download_webpage(url, user_id)
|
||||
|
||||
entries = [
|
||||
self.url_result('http://www.pornhub.com/%s' % video_url, 'PornHub')
|
||||
for video_url in set(re.findall(
|
||||
r'href="/?(view_video\.php\?.*\bviewkey=[\da-z]+[^"]*)"', webpage))
|
||||
]
|
||||
|
||||
playlist = self._parse_json(
|
||||
self._search_regex(
|
||||
r'playlistObject\s*=\s*({.+?});', webpage, 'playlist'),
|
||||
playlist_id)
|
||||
|
||||
return self.playlist_result(
|
||||
entries, playlist_id, playlist.get('title'), playlist.get('description'))
|
||||
return self.playlist_result(self._extract_entries(webpage), user_id)
|
||||
|
@ -56,7 +56,7 @@ class PornoVoisinesIE(InfoExtractor):
            r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
        description = self._html_search_regex(
            r'<article id="descriptif">(.+?)</article>',
            webpage, "description", fatal=False, flags=re.DOTALL)
            webpage, 'description', fatal=False, flags=re.DOTALL)

        thumbnail = self._search_regex(
            r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,

@ -12,14 +12,14 @@ class PyvideoIE(InfoExtractor):
    _TESTS = [
        {
            'url': 'http://pyvideo.org/video/1737/become-a-logging-expert-in-30-minutes',
            'md5': 'de317418c8bc76b1fd8633e4f32acbc6',
            'md5': '520915673e53a5c5d487c36e0c4d85b5',
            'info_dict': {
                'id': '24_4WWkSmNo',
                'ext': 'mp4',
                'ext': 'webm',
                'title': 'Become a logging expert in 30 minutes',
                'description': 'md5:9665350d466c67fb5b1598de379021f7',
                'upload_date': '20130320',
                'uploader': 'NextDayVideo',
                'uploader': 'Next Day Video',
                'uploader_id': 'NextDayVideo',
            },
            'add_ie': ['Youtube'],

@ -28,16 +28,16 @@ class RadioBremenIE(InfoExtractor):
    def _real_extract(self, url):
        video_id = self._match_id(url)

        meta_url = "http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s" % video_id
        meta_url = 'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id
        meta_doc = self._download_webpage(
            meta_url, video_id, 'Downloading metadata')
        title = self._html_search_regex(
            r"<h1.*>(?P<title>.+)</h1>", meta_doc, "title")
            r'<h1.*>(?P<title>.+)</h1>', meta_doc, 'title')
        description = self._html_search_regex(
            r"<p>(?P<description>.*)</p>", meta_doc, "description", fatal=False)
            r'<p>(?P<description>.*)</p>', meta_doc, 'description', fatal=False)
        duration = parse_duration(self._html_search_regex(
            r"Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>",
            meta_doc, "duration", fatal=False))
            r'Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>',
            meta_doc, 'duration', fatal=False))

        page_doc = self._download_webpage(
            url, video_id, 'Downloading video information')
@ -51,7 +51,7 @@ class RadioBremenIE(InfoExtractor):
        formats = [{
            'url': video_url,
            'ext': 'mp4',
            'width': int(mobj.group("width")),
            'width': int(mobj.group('width')),
        }]
        return {
            'id': video_id,

@ -16,9 +16,9 @@ class RadioFranceIE(InfoExtractor):
        'info_dict': {
            'id': 'one-one',
            'ext': 'ogg',
            "title": "One to one",
            "description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
            "uploader": "Thomas Hercouët",
            'title': 'One to one',
            'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
            'uploader': 'Thomas Hercouët',
        },
    }


@ -18,11 +18,11 @@ class RBMARadioIE(InfoExtractor):
        'info_dict': {
            'id': 'ford-lopatin-live-at-primavera-sound-2011',
            'ext': 'mp3',
            "uploader_id": "ford-lopatin",
            "location": "Spain",
            "description": "Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
            "uploader": "Ford & Lopatin",
            "title": "Live at Primavera Sound 2011",
            'uploader_id': 'ford-lopatin',
            'location': 'Spain',
            'description': 'Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.',
            'uploader': 'Ford & Lopatin',
            'title': 'Live at Primavera Sound 2011',
        },
    }

Some files were not shown because too many files have changed in this diff